head 1.80; access; symbols netbsd-10-0-RELEASE:1.80 netbsd-10-0-RC6:1.80 netbsd-10-0-RC5:1.80 netbsd-10-0-RC4:1.80 netbsd-10-0-RC3:1.80 netbsd-10-0-RC2:1.80 thorpej-ifq:1.80.0.24 thorpej-ifq-base:1.80 thorpej-altq-separation:1.80.0.22 thorpej-altq-separation-base:1.80 netbsd-10-0-RC1:1.80 netbsd-10:1.80.0.20 netbsd-10-base:1.80 bouyer-sunxi-drm:1.80.0.18 bouyer-sunxi-drm-base:1.80 netbsd-9-3-RELEASE:1.74 thorpej-i2c-spi-conf2:1.80.0.16 thorpej-i2c-spi-conf2-base:1.80 thorpej-futex2:1.80.0.14 thorpej-futex2-base:1.80 thorpej-cfargs2:1.80.0.12 thorpej-cfargs2-base:1.80 cjep_sun2x-base1:1.80 cjep_sun2x:1.80.0.10 cjep_sun2x-base:1.80 cjep_staticlib_x-base1:1.80 netbsd-9-2-RELEASE:1.74 cjep_staticlib_x:1.80.0.8 cjep_staticlib_x-base:1.80 thorpej-i2c-spi-conf:1.80.0.6 thorpej-i2c-spi-conf-base:1.80 thorpej-cfargs:1.80.0.4 thorpej-cfargs-base:1.80 thorpej-futex:1.80.0.2 thorpej-futex-base:1.80 netbsd-9-1-RELEASE:1.74 bouyer-xenpvh-base2:1.79 phil-wifi-20200421:1.79 bouyer-xenpvh-base1:1.79 phil-wifi-20200411:1.79 bouyer-xenpvh:1.79.0.2 bouyer-xenpvh-base:1.79 is-mlppp:1.78.0.2 is-mlppp-base:1.78 phil-wifi-20200406:1.79 netbsd-8-2-RELEASE:1.74 ad-namecache-base3:1.78 netbsd-9-0-RELEASE:1.74 netbsd-9-0-RC2:1.74 ad-namecache-base2:1.77 ad-namecache-base1:1.77 ad-namecache:1.76.0.2 ad-namecache-base:1.76 netbsd-9-0-RC1:1.74 phil-wifi-20191119:1.75 netbsd-9:1.74.0.14 netbsd-9-base:1.74 phil-wifi-20190609:1.74 netbsd-8-1-RELEASE:1.74 netbsd-8-1-RC1:1.74 isaki-audio2:1.74.0.12 isaki-audio2-base:1.74 pgoyette-compat-merge-20190127:1.74 pgoyette-compat-20190127:1.74 pgoyette-compat-20190118:1.74 pgoyette-compat-1226:1.74 pgoyette-compat-1126:1.74 pgoyette-compat-1020:1.74 pgoyette-compat-0930:1.74 pgoyette-compat-0906:1.74 netbsd-7-2-RELEASE:1.72 pgoyette-compat-0728:1.74 netbsd-8-0-RELEASE:1.74 phil-wifi:1.74.0.10 phil-wifi-base:1.74 pgoyette-compat-0625:1.74 netbsd-8-0-RC2:1.74 pgoyette-compat-0521:1.74 pgoyette-compat-0502:1.74 pgoyette-compat-0422:1.74 netbsd-8-0-RC1:1.74 pgoyette-compat-0415:1.74 pgoyette-compat-0407:1.74 pgoyette-compat-0330:1.74 pgoyette-compat-0322:1.74 pgoyette-compat-0315:1.74 netbsd-7-1-2-RELEASE:1.72 pgoyette-compat:1.74.0.8 pgoyette-compat-base:1.74 netbsd-7-1-1-RELEASE:1.72 tls-maxphys-base-20171202:1.74 matt-nb8-mediatek:1.74.0.6 matt-nb8-mediatek-base:1.74 nick-nhusb-base-20170825:1.74 perseant-stdc-iso10646:1.74.0.4 perseant-stdc-iso10646-base:1.74 netbsd-8:1.74.0.2 netbsd-8-base:1.74 prg-localcount2-base3:1.74 prg-localcount2-base2:1.73 prg-localcount2-base1:1.73 prg-localcount2:1.73.0.8 prg-localcount2-base:1.73 pgoyette-localcount-20170426:1.73 bouyer-socketcan-base1:1.73 jdolecek-ncq:1.73.0.6 jdolecek-ncq-base:1.73 pgoyette-localcount-20170320:1.73 netbsd-7-1:1.72.0.20 netbsd-7-1-RELEASE:1.72 netbsd-7-1-RC2:1.72 nick-nhusb-base-20170204:1.73 netbsd-7-nhusb-base-20170116:1.72 bouyer-socketcan:1.73.0.4 bouyer-socketcan-base:1.73 pgoyette-localcount-20170107:1.73 netbsd-7-1-RC1:1.72 nick-nhusb-base-20161204:1.73 pgoyette-localcount-20161104:1.73 netbsd-7-0-2-RELEASE:1.72 nick-nhusb-base-20161004:1.73 localcount-20160914:1.73 netbsd-7-nhusb:1.72.0.18 netbsd-7-nhusb-base:1.72 pgoyette-localcount-20160806:1.73 pgoyette-localcount-20160726:1.73 pgoyette-localcount:1.73.0.2 pgoyette-localcount-base:1.73 nick-nhusb-base-20160907:1.73 nick-nhusb-base-20160529:1.73 netbsd-7-0-1-RELEASE:1.72 nick-nhusb-base-20160422:1.72 nick-nhusb-base-20160319:1.72 nick-nhusb-base-20151226:1.72 netbsd-7-0:1.72.0.16 netbsd-7-0-RELEASE:1.72 nick-nhusb-base-20150921:1.72 netbsd-7-0-RC3:1.72 
netbsd-7-0-RC2:1.72 netbsd-7-0-RC1:1.72 nick-nhusb-base-20150606:1.72 nick-nhusb-base-20150406:1.72 nick-nhusb:1.72.0.14 nick-nhusb-base:1.72 netbsd-5-2-3-RELEASE:1.62 netbsd-5-1-5-RELEASE:1.62 netbsd-6-0-6-RELEASE:1.70 netbsd-6-1-5-RELEASE:1.70 netbsd-7:1.72.0.12 netbsd-7-base:1.72 yamt-pagecache-base9:1.72 yamt-pagecache-tag8:1.67.2.2 netbsd-6-1-4-RELEASE:1.70 netbsd-6-0-5-RELEASE:1.70 tls-earlyentropy:1.72.0.10 tls-earlyentropy-base:1.72 riastradh-xf86-video-intel-2-7-1-pre-2-21-15:1.72 riastradh-drm2-base3:1.72 netbsd-6-1-3-RELEASE:1.70 netbsd-6-0-4-RELEASE:1.70 netbsd-5-2-2-RELEASE:1.62 netbsd-5-1-4-RELEASE:1.62 netbsd-6-1-2-RELEASE:1.70 netbsd-6-0-3-RELEASE:1.70 netbsd-5-2-1-RELEASE:1.62 netbsd-5-1-3-RELEASE:1.62 rmind-smpnet-nbase:1.72 netbsd-6-1-1-RELEASE:1.70 riastradh-drm2-base2:1.72 riastradh-drm2-base1:1.72 riastradh-drm2:1.72.0.8 riastradh-drm2-base:1.72 rmind-smpnet:1.72.0.2 rmind-smpnet-base:1.72 netbsd-6-1:1.70.0.8 netbsd-6-0-2-RELEASE:1.70 netbsd-6-1-RELEASE:1.70 khorben-n900:1.72.0.6 netbsd-6-1-RC4:1.70 netbsd-6-1-RC3:1.70 agc-symver:1.72.0.4 agc-symver-base:1.72 netbsd-6-1-RC2:1.70 netbsd-6-1-RC1:1.70 yamt-pagecache-base8:1.72 netbsd-5-2:1.62.0.18 netbsd-6-0-1-RELEASE:1.70 yamt-pagecache-base7:1.72 netbsd-5-2-RELEASE:1.62 netbsd-5-2-RC1:1.62 matt-nb6-plus-nbase:1.70 yamt-pagecache-base6:1.72 netbsd-6-0:1.70.0.6 netbsd-6-0-RELEASE:1.70 netbsd-6-0-RC2:1.70 tls-maxphys:1.71.0.2 tls-maxphys-base:1.72 matt-nb6-plus:1.70.0.4 matt-nb6-plus-base:1.70 netbsd-6-0-RC1:1.70 jmcneill-usbmp-base10:1.71 yamt-pagecache-base5:1.71 jmcneill-usbmp-base9:1.71 yamt-pagecache-base4:1.71 jmcneill-usbmp-base8:1.71 jmcneill-usbmp-base7:1.71 jmcneill-usbmp-base6:1.71 jmcneill-usbmp-base5:1.71 jmcneill-usbmp-base4:1.71 jmcneill-usbmp-base3:1.71 jmcneill-usbmp-pre-base2:1.67 jmcneill-usbmp-base2:1.70 netbsd-6:1.70.0.2 netbsd-6-base:1.70 netbsd-5-1-2-RELEASE:1.62 netbsd-5-1-1-RELEASE:1.62 jmcneill-usbmp:1.67.0.6 jmcneill-usbmp-base:1.67 jmcneill-audiomp3:1.67.0.4 jmcneill-audiomp3-base:1.67 yamt-pagecache-base3:1.67 yamt-pagecache-base2:1.67 yamt-pagecache:1.67.0.2 yamt-pagecache-base:1.67 rmind-uvmplock-nbase:1.66 cherry-xenmp:1.66.0.2 cherry-xenmp-base:1.66 uebayasi-xip-base7:1.66 bouyer-quota2-nbase:1.66 bouyer-quota2:1.65.0.4 bouyer-quota2-base:1.66 jruoho-x86intr:1.65.0.2 jruoho-x86intr-base:1.65 matt-mips64-premerge-20101231:1.65 matt-nb5-mips64-premerge-20101231:1.62 matt-nb5-pq3:1.62.0.16 matt-nb5-pq3-base:1.62 netbsd-5-1:1.62.0.14 uebayasi-xip-base6:1.65 uebayasi-xip-base5:1.65 netbsd-5-1-RELEASE:1.62 uebayasi-xip-base4:1.65 uebayasi-xip-base3:1.65 yamt-nfs-mp-base11:1.65 netbsd-5-1-RC4:1.62 matt-nb5-mips64-k15:1.62 uebayasi-xip-base2:1.64 yamt-nfs-mp-base10:1.64 netbsd-5-1-RC3:1.62 netbsd-5-1-RC2:1.62 uebayasi-xip-base1:1.64 netbsd-5-1-RC1:1.62 rmind-uvmplock:1.64.0.4 rmind-uvmplock-base:1.66 yamt-nfs-mp-base9:1.64 uebayasi-xip:1.64.0.2 uebayasi-xip-base:1.64 netbsd-5-0-2-RELEASE:1.62 matt-nb5-mips64-premerge-20091211:1.62 matt-premerge-20091211:1.64 yamt-nfs-mp-base8:1.64 matt-nb5-mips64-u2-k2-k4-k7-k8-k9:1.62 matt-nb4-mips64-k7-u2a-k9b:1.62 matt-nb5-mips64-u1-k1-k5:1.62 yamt-nfs-mp-base7:1.64 matt-nb5-mips64:1.62.0.12 netbsd-5-0-1-RELEASE:1.62 jymxensuspend-base:1.63 yamt-nfs-mp-base6:1.63 yamt-nfs-mp-base5:1.63 yamt-nfs-mp-base4:1.62 jym-xensuspend-nbase:1.64 yamt-nfs-mp-base3:1.62 nick-hppapmap-base4:1.62 nick-hppapmap-base3:1.62 netbsd-5-0:1.62.0.10 netbsd-5-0-RELEASE:1.62 netbsd-5-0-RC4:1.62 netbsd-5-0-RC3:1.62 nick-hppapmap-base2:1.62 netbsd-5-0-RC2:1.62 jym-xensuspend:1.62.0.8 
jym-xensuspend-base:1.62 netbsd-5-0-RC1:1.62 haad-dm-base2:1.62 haad-nbase2:1.62 ad-audiomp2:1.62.0.6 ad-audiomp2-base:1.62 netbsd-5:1.62.0.4 netbsd-5-base:1.62 nick-hppapmap:1.62.0.2 nick-hppapmap-base:1.62 matt-mips64-base2:1.62 matt-mips64:1.58.0.22 haad-dm-base1:1.62 wrstuden-revivesa-base-4:1.62 netbsd-4-0-1-RELEASE:1.54 wrstuden-revivesa-base-3:1.62 wrstuden-revivesa-base-2:1.62 wrstuden-fixsa-newbase:1.54 nick-csl-alignment-base5:1.58 haad-dm:1.61.0.8 haad-dm-base:1.62 wrstuden-revivesa-base-1:1.61 simonb-wapbl-nbase:1.62 yamt-pf42-base4:1.61 simonb-wapbl:1.61.0.6 simonb-wapbl-base:1.62 yamt-pf42-base3:1.61 hpcarm-cleanup-nbase:1.61 yamt-pf42-baseX:1.60 yamt-pf42-base2:1.61 yamt-nfs-mp-base2:1.61 wrstuden-revivesa:1.61.0.4 wrstuden-revivesa-base:1.61 yamt-nfs-mp:1.61.0.2 yamt-nfs-mp-base:1.61 yamt-pf42:1.60.0.8 yamt-pf42-base:1.60 ad-socklock-base1:1.60 yamt-lazymbuf-base15:1.60 yamt-lazymbuf-base14:1.60 keiichi-mipv6-nbase:1.60 mjf-devfs2:1.60.0.6 mjf-devfs2-base:1.62 nick-net80211-sync:1.60.0.4 nick-net80211-sync-base:1.60 keiichi-mipv6:1.60.0.2 keiichi-mipv6-base:1.60 bouyer-xeni386-merge1:1.58.18.2 matt-armv6-prevmlocking:1.58 wrstuden-fixsa-base-1:1.54 vmlocking2-base3:1.58 netbsd-4-0:1.54.0.18 netbsd-4-0-RELEASE:1.54 bouyer-xeni386-nbase:1.60 yamt-kmem-base3:1.58 cube-autoconf:1.58.0.20 cube-autoconf-base:1.58 yamt-kmem-base2:1.58 bouyer-xeni386:1.58.0.18 bouyer-xeni386-base:1.60 yamt-kmem:1.58.0.16 yamt-kmem-base:1.58 vmlocking2-base2:1.58 reinoud-bufcleanup-nbase:1.58 vmlocking2:1.58.0.14 vmlocking2-base1:1.58 netbsd-4-0-RC5:1.54 matt-nb4-arm:1.54.0.16 matt-nb4-arm-base:1.54 matt-armv6-nbase:1.60 jmcneill-base:1.58 netbsd-4-0-RC4:1.54 mjf-devfs:1.58.0.12 mjf-devfs-base:1.60 bouyer-xenamd64-base2:1.58 vmlocking-nbase:1.58 yamt-x86pmap-base4:1.58 bouyer-xenamd64:1.58.0.10 bouyer-xenamd64-base:1.58 netbsd-4-0-RC3:1.54 yamt-x86pmap-base3:1.58 yamt-x86pmap-base2:1.58 netbsd-4-0-RC2:1.54 yamt-x86pmap:1.58.0.8 yamt-x86pmap-base:1.58 netbsd-4-0-RC1:1.54 matt-armv6:1.58.0.6 matt-armv6-base:1.60 matt-mips64-base:1.58 jmcneill-pm:1.58.0.4 jmcneill-pm-base:1.58 hpcarm-cleanup:1.58.0.2 hpcarm-cleanup-base:1.60 nick-csl-alignment:1.56.0.12 nick-csl-alignment-base:1.56 netbsd-3-1-1-RELEASE:1.45 netbsd-3-0-3-RELEASE:1.45 yamt-idlelwp-base8:1.56 wrstuden-fixsa:1.54.0.14 wrstuden-fixsa-base:1.54 thorpej-atomic:1.56.0.10 thorpej-atomic-base:1.56 reinoud-bufcleanup:1.56.0.8 reinoud-bufcleanup-base:1.58 mjf-ufs-trans:1.56.0.6 mjf-ufs-trans-base:1.56 vmlocking:1.56.0.4 vmlocking-base:1.58 ad-audiomp:1.56.0.2 ad-audiomp-base:1.56 yamt-idlelwp:1.54.0.12 post-newlock2-merge:1.54 newlock2-nbase:1.54 yamt-splraiseipl-base5:1.54 yamt-splraiseipl-base4:1.54 yamt-splraiseipl-base3:1.54 abandoned-netbsd-4-base:1.54 abandoned-netbsd-4:1.54.0.4 netbsd-3-1:1.45.0.8 netbsd-3-1-RELEASE:1.45 netbsd-3-0-2-RELEASE:1.45 yamt-splraiseipl-base2:1.54 netbsd-3-1-RC4:1.45 yamt-splraiseipl:1.54.0.8 yamt-splraiseipl-base:1.54 netbsd-3-1-RC3:1.45 yamt-pdpolicy-base9:1.54 newlock2:1.54.0.6 newlock2-base:1.54 yamt-pdpolicy-base8:1.54 netbsd-3-1-RC2:1.45 netbsd-3-1-RC1:1.45 yamt-pdpolicy-base7:1.54 netbsd-4:1.54.0.10 netbsd-4-base:1.54 yamt-pdpolicy-base6:1.54 chap-midi-nbase:1.54 netbsd-3-0-1-RELEASE:1.45 gdamore-uart:1.54.0.2 gdamore-uart-base:1.54 simonb-timcounters-final:1.50.4.2 yamt-pdpolicy-base5:1.53 chap-midi:1.53.0.2 chap-midi-base:1.54 yamt-pdpolicy-base4:1.52 yamt-pdpolicy-base3:1.52 peter-altq-base:1.52 peter-altq:1.52.0.6 yamt-pdpolicy-base2:1.52 elad-kernelauth-base:1.53 elad-kernelauth:1.52.0.4 
yamt-pdpolicy:1.52.0.2 yamt-pdpolicy-base:1.52 yamt-uio_vmspace-base5:1.52 simonb-timecounters:1.50.0.4 simonb-timecounters-base:1.54 rpaulo-netinet-merge-pcb:1.50.0.2 rpaulo-netinet-merge-pcb-base:1.54 yamt-uio_vmspace:1.49.0.2 netbsd-3-0:1.45.0.6 netbsd-3-0-RELEASE:1.45 netbsd-3-0-RC6:1.45 yamt-readahead-base3:1.47 netbsd-3-0-RC5:1.45 netbsd-3-0-RC4:1.45 netbsd-3-0-RC3:1.45 yamt-readahead-base2:1.47 netbsd-3-0-RC2:1.45 yamt-readahead-pervnode:1.47 yamt-readahead-perfile:1.47 yamt-readahead:1.47.0.8 yamt-readahead-base:1.47 netbsd-3-0-RC1:1.45 yamt-vop-base3:1.47 netbsd-2-0-3-RELEASE:1.39 netbsd-2-1:1.39.0.8 yamt-vop-base2:1.47 thorpej-vnode-attr:1.47.0.6 thorpej-vnode-attr-base:1.47 netbsd-2-1-RELEASE:1.39 yamt-vop:1.47.0.4 yamt-vop-base:1.47 netbsd-2-1-RC6:1.39 netbsd-2-1-RC5:1.39 netbsd-2-1-RC4:1.39 netbsd-2-1-RC3:1.39 netbsd-2-1-RC2:1.39 netbsd-2-1-RC1:1.39 yamt-lazymbuf:1.47.0.2 yamt-km-base4:1.45 netbsd-2-0-2-RELEASE:1.39 yamt-km-base3:1.45 netbsd-3:1.45.0.4 netbsd-3-base:1.45 yamt-km-base2:1.45 yamt-km:1.44.0.4 yamt-km-base:1.44 kent-audio2:1.44.0.2 kent-audio2-base:1.46 netbsd-2-0-1-RELEASE:1.39 kent-audio1-beforemerge:1.42 netbsd-2:1.39.0.6 netbsd-2-base:1.39 kent-audio1:1.39.0.4 kent-audio1-base:1.39 netbsd-2-0-RELEASE:1.39 netbsd-2-0-RC5:1.39 netbsd-2-0-RC4:1.39 netbsd-2-0-RC3:1.39 netbsd-2-0-RC2:1.39 netbsd-2-0-RC1:1.39 netbsd-2-0:1.39.0.2 netbsd-2-0-base:1.39 netbsd-1-6-PATCH002-RELEASE:1.31 netbsd-1-6-PATCH002:1.31 netbsd-1-6-PATCH002-RC4:1.31 netbsd-1-6-PATCH002-RC3:1.31 netbsd-1-6-PATCH002-RC2:1.31 netbsd-1-6-PATCH002-RC1:1.31 ktrace-lwp:1.34.0.2 ktrace-lwp-base:1.47 netbsd-1-6-PATCH001:1.31 netbsd-1-6-PATCH001-RELEASE:1.31 netbsd-1-6-PATCH001-RC3:1.31 netbsd-1-6-PATCH001-RC2:1.31 netbsd-1-6-PATCH001-RC1:1.31 nathanw_sa_end:1.24.2.9 nathanw_sa_before_merge:1.33 fvdl_fs64_base:1.33 gmcgarry_ctxsw:1.33.0.4 gmcgarry_ctxsw_base:1.33 gmcgarry_ucred:1.33.0.2 gmcgarry_ucred_base:1.33 nathanw_sa_base:1.33 kqueue-aftermerge:1.32 kqueue-beforemerge:1.32 netbsd-1-6-RELEASE:1.31 netbsd-1-6-RC3:1.31 netbsd-1-6-RC2:1.31 netbsd-1-6-RC1:1.31 netbsd-1-6:1.31.0.12 netbsd-1-6-base:1.31 gehenna-devsw:1.31.0.10 gehenna-devsw-base:1.31 netbsd-1-5-PATCH003:1.17 eeh-devprop:1.31.0.8 eeh-devprop-base:1.31 newlock:1.31.0.6 newlock-base:1.31 ifpoll-base:1.31 thorpej-mips-cache:1.31.0.2 thorpej-mips-cache-base:1.31 thorpej-devvp-base3:1.31 thorpej-devvp-base2:1.30 post-chs-ubcperf:1.30 pre-chs-ubcperf:1.30 thorpej-devvp:1.29.0.4 thorpej-devvp-base:1.29 netbsd-1-5-PATCH002:1.17 kqueue:1.29.0.2 kqueue-base:1.32 netbsd-1-5-PATCH001:1.17 thorpej_scsipi_beforemerge:1.25 nathanw_sa:1.24.0.2 thorpej_scsipi_nbase:1.25 netbsd-1-5-RELEASE:1.17 netbsd-1-5-BETA2:1.17 netbsd-1-5-BETA:1.17 netbsd-1-4-PATCH003:1.11.2.1 netbsd-1-5-ALPHA2:1.17 netbsd-1-5:1.17.0.4 netbsd-1-5-base:1.17 minoura-xpg4dl-base:1.17 minoura-xpg4dl:1.17.0.2 netbsd-1-4-PATCH002:1.11.2.1 chs-ubc2-newbase:1.15 wrstuden-devbsize-19991221:1.15 wrstuden-devbsize:1.15.0.8 wrstuden-devbsize-base:1.15 kame_141_19991130:1.11.6.1 comdex-fall-1999:1.15.0.6 comdex-fall-1999-base:1.15 fvdl-softdep:1.15.0.4 fvdl-softdep-base:1.15 thorpej_scsipi:1.15.0.2 thorpej_scsipi_base:1.25 netbsd-1-4-PATCH001:1.11.2.1 kame_14_19990705:1.11 kame_14_19990628:1.11 kame:1.11.0.6 chs-ubc2:1.11.0.4 chs-ubc2-base:1.15 netbsd-1-4-RELEASE:1.11 netbsd-1-4:1.11.0.2 netbsd-1-4-base:1.11 kenh-if-detach:1.10.0.4 kenh-if-detach-base:1.10 chs-ubc:1.10.0.2 chs-ubc-base:1.10 eeh-paddr_t:1.6.0.2 eeh-paddr_t-base:1.6 uvm980205:1.1.1.1 CDC:1.1.1; locks; strict; comment @ * @; 1.80 date 
2020.05.26.00.50.53; author kamil; state Exp; branches; next 1.79; commitid P9DmwK5uKag7AG9C; 1.79 date 2020.03.14.14.15.43; author ad; state Exp; branches; next 1.78; commitid vdJBOT95L3bvnn0C; 1.78 date 2020.02.23.15.46.43; author ad; state Exp; branches; next 1.77; commitid DJJO1ciCDgZlwOXB; 1.77 date 2020.01.12.17.46.55; author ad; state Exp; branches; next 1.76; commitid wiF9a0RzSplaxqSB; 1.76 date 2020.01.05.15.57.15; author para; state Exp; branches 1.76.2.1; next 1.75; commitid aLtd9PaoiVjO9wRB; 1.75 date 2019.08.01.02.28.55; author riastradh; state Exp; branches; next 1.74; commitid UVkhB81WCXmEJgxB; 1.74 date 2017.05.18.02.21.05; author christos; state Exp; branches 1.74.10.1; next 1.73; 1.73 date 2016.05.25.17.43.58; author christos; state Exp; branches 1.73.8.1; next 1.72; 1.72 date 2012.10.29.16.00.05; author para; state Exp; branches 1.72.14.1; next 1.71; 1.71 date 2012.02.19.00.05.56; author rmind; state Exp; branches 1.71.2.1; next 1.70; 1.70 date 2012.01.27.19.48.41; author para; state Exp; branches; next 1.69; 1.69 date 2012.01.21.16.51.38; author chs; state Exp; branches; next 1.68; 1.68 date 2011.12.20.15.41.01; author reinoud; state Exp; branches; next 1.67; 1.67 date 2011.06.12.03.36.03; author rmind; state Exp; branches 1.67.2.1 1.67.6.1; next 1.66; 1.66 date 2011.02.02.15.25.27; author chuck; state Exp; branches 1.66.2.1; next 1.65; 1.65 date 2010.09.25.01.42.40; author matt; state Exp; branches 1.65.2.1 1.65.4.1; next 1.64; 1.64 date 2009.08.01.16.35.51; author yamt; state Exp; branches 1.64.2.1 1.64.4.1; next 1.63; 1.63 date 2009.06.10.01.55.33; author yamt; state Exp; branches; next 1.62; 1.62 date 2008.07.29.00.03.06; author matt; state Exp; branches 1.62.8.1; next 1.61; 1.61 date 2008.04.26.13.44.00; author yamt; state Exp; branches 1.61.2.1 1.61.4.1 1.61.6.1 1.61.8.1; next 1.60; 1.60 date 2008.01.08.13.10.01; author yamt; state Exp; branches 1.60.6.1 1.60.8.1; next 1.59; 1.59 date 2008.01.02.11.49.18; author ad; state Exp; branches; next 1.58; 1.58 date 2007.07.22.21.07.47; author he; state Exp; branches 1.58.6.1 1.58.12.1 1.58.14.1 1.58.16.1 1.58.18.1 1.58.22.1; next 1.57; 1.57 date 2007.07.21.19.21.54; author ad; state Exp; branches; next 1.56; 1.56 date 2007.02.22.06.05.01; author thorpej; state Exp; branches 1.56.4.1 1.56.12.1; next 1.55; 1.55 date 2007.02.21.23.00.13; author thorpej; state Exp; branches; next 1.54; 1.54 date 2006.05.25.14.27.28; author yamt; state Exp; branches 1.54.12.1; next 1.53; 1.53 date 2006.05.03.14.12.01; author yamt; state Exp; branches 1.53.2.1; next 1.52; 1.52 date 2006.02.16.20.17.20; author perry; state Exp; branches 1.52.2.1 1.52.4.1 1.52.6.1; next 1.51; 1.51 date 2006.02.11.12.45.07; author yamt; state Exp; branches; next 1.50; 1.50 date 2006.01.21.13.34.15; author yamt; state Exp; branches 1.50.2.1 1.50.4.1; next 1.49; 1.49 date 2005.12.24.20.45.10; author perry; state Exp; branches 1.49.2.1; next 1.48; 1.48 date 2005.12.11.12.25.29; author christos; state Exp; branches; next 1.47; 1.47 date 2005.05.17.13.55.33; author yamt; state Exp; branches 1.47.2.1; next 1.46; 1.46 date 2005.04.01.11.59.39; author yamt; state Exp; branches; next 1.45; 1.45 date 2005.02.11.02.12.03; author chs; state Exp; branches; next 1.44; 1.44 date 2005.01.13.11.50.32; author yamt; state Exp; branches 1.44.2.1 1.44.4.1; next 1.43; 1.43 date 2005.01.12.09.34.35; author yamt; state Exp; branches; next 1.42; 1.42 date 2005.01.01.21.08.02; author yamt; state Exp; branches; next 1.41; 1.41 date 2005.01.01.21.02.14; author yamt; state Exp; branches; next 
1.40; 1.40 date 2005.01.01.21.00.06; author yamt; state Exp; branches; next 1.39; 1.39 date 2004.02.10.01.30.49; author matt; state Exp; branches; next 1.38; 1.38 date 2004.01.29.12.06.02; author yamt; state Exp; branches; next 1.37; 1.37 date 2003.11.01.11.09.02; author yamt; state Exp; branches; next 1.36; 1.36 date 2003.10.01.22.50.15; author enami; state Exp; branches; next 1.35; 1.35 date 2003.09.10.13.38.20; author enami; state Exp; branches; next 1.34; 1.34 date 2003.02.20.22.16.08; author atatat; state Exp; branches 1.34.2.1; next 1.33; 1.33 date 2002.11.02.07.40.49; author perry; state Exp; branches; next 1.32; 1.32 date 2002.09.22.07.21.31; author chs; state Exp; branches; next 1.31; 1.31 date 2001.10.03.13.32.23; author christos; state Exp; branches; next 1.30; 1.30 date 2001.09.09.19.38.23; author chs; state Exp; branches; next 1.29; 1.29 date 2001.06.26.17.55.15; author thorpej; state Exp; branches 1.29.2.1 1.29.4.1; next 1.28; 1.28 date 2001.06.02.18.09.27; author chs; state Exp; branches; next 1.27; 1.27 date 2001.05.26.16.32.47; author chs; state Exp; branches; next 1.26; 1.26 date 2001.05.25.04.06.15; author chs; state Exp; branches; next 1.25; 1.25 date 2001.03.15.06.10.57; author chs; state Exp; branches; next 1.24; 1.24 date 2001.02.18.21.19.08; author chs; state Exp; branches 1.24.2.1; next 1.23; 1.23 date 2000.12.13.08.06.12; author enami; state Exp; branches; next 1.22; 1.22 date 2000.09.13.15.00.25; author thorpej; state Exp; branches; next 1.21; 1.21 date 2000.08.16.16.32.06; author thorpej; state Exp; branches; next 1.20; 1.20 date 2000.08.12.17.44.02; author sommerfeld; state Exp; branches; next 1.19; 1.19 date 2000.06.26.17.18.40; author mrg; state Exp; branches; next 1.18; 1.18 date 2000.06.26.15.32.28; author mrg; state Exp; branches; next 1.17; 1.17 date 2000.03.29.04.05.47; author simonb; state Exp; branches; next 1.16; 1.16 date 2000.03.26.20.54.47; author kleink; state Exp; branches; next 1.15; 1.15 date 99.06.21.17.25.11; author thorpej; state Exp; branches 1.15.2.1; next 1.14; 1.14 date 99.05.26.19.16.36; author thorpej; state Exp; branches; next 1.13; 1.13 date 99.05.23.06.27.13; author mrg; state Exp; branches; next 1.12; 1.12 date 99.05.20.23.03.23; author thorpej; state Exp; branches; next 1.11; 1.11 date 99.03.25.18.48.52; author mrg; state Exp; branches 1.11.2.1 1.11.4.1 1.11.6.1; next 1.10; 1.10 date 98.10.11.23.14.48; author chuck; state Exp; branches; next 1.9; 1.9 date 98.08.31.01.54.14; author thorpej; state Exp; branches; next 1.8; 1.8 date 98.08.31.01.50.10; author thorpej; state Exp; branches; next 1.7; 1.7 date 98.08.13.02.11.01; author eeh; state Exp; branches; next 1.6; 1.6 date 98.02.10.14.12.20; author mrg; state Exp; branches 1.6.2.1; next 1.5; 1.5 date 98.02.10.02.34.46; author perry; state Exp; branches; next 1.4; 1.4 date 98.02.07.11.09.01; author mrg; state Exp; branches; next 1.3; 1.3 date 98.02.07.02.22.24; author chs; state Exp; branches; next 1.2; 1.2 date 98.02.06.22.32.03; author thorpej; state Exp; branches; next 1.1; 1.1 date 98.02.05.06.25.09; author mrg; state Exp; branches 1.1.1.1; next ; 1.76.2.1 date 2020.01.17.21.47.38; author ad; state Exp; branches; next 1.76.2.2; commitid T9pwLWote7xbI5TB; 1.76.2.2 date 2020.02.29.20.21.11; author ad; state Exp; branches; next ; commitid OjSb8ro7YQETQBYB; 1.74.10.1 date 2020.04.13.08.05.21; author martin; state Exp; branches; next ; commitid X01YhRUPVUDaec4C; 1.73.8.1 date 2017.05.19.00.22.58; author pgoyette; state Exp; branches; next ; 1.72.14.1 date 2016.05.29.08.44.40; author 
skrll; state Exp; branches; next 1.72.14.2; 1.72.14.2 date 2017.08.28.17.53.17; author skrll; state Exp; branches; next ; commitid UQQpnjvcNkUZn05A; 1.71.2.1 date 2012.11.20.03.02.54; author tls; state Exp; branches; next 1.71.2.2; 1.71.2.2 date 2017.12.03.11.39.22; author jdolecek; state Exp; branches; next ; commitid XcIYRZTAh1LmerhA; 1.67.2.1 date 2012.04.17.00.08.59; author yamt; state Exp; branches; next 1.67.2.2; 1.67.2.2 date 2012.10.30.17.23.02; author yamt; state Exp; branches; next ; 1.67.6.1 date 2012.02.18.07.36.00; author mrg; state Exp; branches; next 1.67.6.2; 1.67.6.2 date 2012.02.24.09.11.53; author mrg; state Exp; branches; next ; 1.66.2.1 date 2011.06.23.14.20.36; author cherry; state Exp; branches; next ; 1.65.2.1 date 2011.06.06.09.10.23; author jruoho; state Exp; branches; next ; 1.65.4.1 date 2011.02.08.16.20.07; author bouyer; state Exp; branches; next ; 1.64.2.1 date 2010.10.22.07.22.57; author uebayasi; state Exp; branches; next ; 1.64.4.1 date 2010.03.17.06.03.18; author rmind; state Exp; branches; next 1.64.4.2; 1.64.4.2 date 2011.03.05.20.56.36; author rmind; state Exp; branches; next ; 1.62.8.1 date 2009.07.23.23.33.04; author jym; state Exp; branches; next ; 1.61.2.1 date 2009.05.04.08.14.39; author yamt; state Exp; branches; next 1.61.2.2; 1.61.2.2 date 2009.06.20.07.20.38; author yamt; state Exp; branches; next 1.61.2.3; 1.61.2.3 date 2009.08.19.18.48.36; author yamt; state Exp; branches; next 1.61.2.4; 1.61.2.4 date 2010.10.09.03.32.47; author yamt; state Exp; branches; next ; 1.61.4.1 date 2008.09.18.04.37.06; author wrstuden; state Exp; branches; next ; 1.61.6.1 date 2008.07.31.04.51.05; author simonb; state Exp; branches; next ; 1.61.8.1 date 2008.10.19.22.18.11; author haad; state Exp; branches; next ; 1.60.6.1 date 2008.06.02.13.24.37; author mjf; state Exp; branches; next 1.60.6.2; 1.60.6.2 date 2008.09.28.10.41.07; author mjf; state Exp; branches; next ; 1.60.8.1 date 2008.05.18.12.35.56; author yamt; state Exp; branches; next ; 1.58.6.1 date 2008.01.09.01.58.41; author matt; state Exp; branches; next ; 1.58.12.1 date 2008.02.18.21.07.33; author mjf; state Exp; branches; next ; 1.58.14.1 date 2007.12.21.15.39.24; author ad; state Exp; branches; next 1.58.14.2; 1.58.14.2 date 2007.12.28.14.33.13; author ad; state Exp; branches; next ; 1.58.16.1 date 2007.12.10.08.41.13; author yamt; state Exp; branches; next ; 1.58.18.1 date 2008.01.02.21.58.40; author bouyer; state Exp; branches; next 1.58.18.2; 1.58.18.2 date 2008.01.08.22.12.07; author bouyer; state Exp; branches; next ; 1.58.22.1 date 2007.07.22.21.07.47; author he; state dead; branches; next 1.58.22.2; 1.58.22.2 date 2007.07.22.21.07.48; author he; state Exp; branches; next ; 1.56.4.1 date 2007.03.13.17.51.56; author ad; state Exp; branches; next 1.56.4.2; 1.56.4.2 date 2007.04.05.21.32.53; author ad; state Exp; branches; next ; 1.56.12.1 date 2007.08.15.13.51.22; author skrll; state Exp; branches; next ; 1.54.12.1 date 2007.02.27.16.55.27; author yamt; state Exp; branches; next ; 1.53.2.1 date 2006.06.19.04.11.44; author chap; state Exp; branches; next ; 1.52.2.1 date 2006.05.24.10.59.30; author yamt; state Exp; branches; next 1.52.2.2; 1.52.2.2 date 2006.06.26.12.55.08; author yamt; state Exp; branches; next ; 1.52.4.1 date 2006.05.11.23.32.03; author elad; state Exp; branches; next ; 1.52.6.1 date 2006.05.24.15.50.48; author tron; state Exp; branches; next ; 1.50.2.1 date 2006.09.09.03.00.13; author rpaulo; state Exp; branches; next ; 1.50.4.1 date 2006.04.22.11.40.29; author simonb; state 
Exp; branches; next 1.50.4.2; 1.50.4.2 date 2006.06.01.22.39.45; author kardel; state Exp; branches; next ; 1.49.2.1 date 2006.02.01.14.52.48; author yamt; state Exp; branches; next 1.49.2.2; 1.49.2.2 date 2006.02.18.15.39.31; author yamt; state Exp; branches; next ; 1.47.2.1 date 2006.06.21.15.12.40; author yamt; state Exp; branches; next 1.47.2.2; 1.47.2.2 date 2007.02.26.09.12.30; author yamt; state Exp; branches; next 1.47.2.3; 1.47.2.3 date 2007.09.03.14.47.08; author yamt; state Exp; branches; next 1.47.2.4; 1.47.2.4 date 2008.01.21.09.48.22; author yamt; state Exp; branches; next ; 1.44.2.1 date 2005.04.29.11.29.45; author kent; state Exp; branches; next ; 1.44.4.1 date 2005.01.25.12.58.29; author yamt; state Exp; branches; next 1.44.4.2; 1.44.4.2 date 2005.02.12.18.17.57; author yamt; state Exp; branches; next ; 1.34.2.1 date 2004.08.03.10.57.07; author skrll; state Exp; branches; next 1.34.2.2; 1.34.2.2 date 2004.09.18.14.57.12; author skrll; state Exp; branches; next 1.34.2.3; 1.34.2.3 date 2004.09.21.13.39.28; author skrll; state Exp; branches; next 1.34.2.4; 1.34.2.4 date 2005.01.17.19.33.11; author skrll; state Exp; branches; next 1.34.2.5; 1.34.2.5 date 2005.02.15.21.34.02; author skrll; state Exp; branches; next 1.34.2.6; 1.34.2.6 date 2005.04.01.14.32.12; author skrll; state Exp; branches; next 1.34.2.7; 1.34.2.7 date 2005.11.10.14.12.40; author skrll; state Exp; branches; next ; 1.29.2.1 date 2001.09.13.01.16.33; author thorpej; state Exp; branches; next 1.29.2.2; 1.29.2.2 date 2002.01.10.20.05.40; author thorpej; state Exp; branches; next 1.29.2.3; 1.29.2.3 date 2002.10.10.18.45.07; author jdolecek; state Exp; branches; next ; 1.29.4.1 date 2001.10.01.12.48.43; author fvdl; state Exp; branches; next 1.29.4.2; 1.29.4.2 date 2001.10.11.00.02.36; author fvdl; state Exp; branches; next ; 1.24.2.1 date 2001.03.05.22.50.11; author nathanw; state Exp; branches; next 1.24.2.2; 1.24.2.2 date 2001.04.09.01.59.18; author nathanw; state Exp; branches; next 1.24.2.3; 1.24.2.3 date 2001.06.21.20.10.35; author nathanw; state Exp; branches; next 1.24.2.4; 1.24.2.4 date 2001.08.24.00.13.39; author nathanw; state Exp; branches; next 1.24.2.5; 1.24.2.5 date 2001.09.21.22.37.15; author nathanw; state Exp; branches; next 1.24.2.6; 1.24.2.6 date 2001.10.08.20.11.56; author nathanw; state Exp; branches; next 1.24.2.7; 1.24.2.7 date 2002.07.12.01.40.44; author nathanw; state Exp; branches; next 1.24.2.8; 1.24.2.8 date 2002.10.18.02.45.59; author nathanw; state Exp; branches; next 1.24.2.9; 1.24.2.9 date 2002.11.11.22.17.05; author nathanw; state Exp; branches; next ; 1.15.2.1 date 2000.11.20.18.12.02; author bouyer; state Exp; branches; next 1.15.2.2; 1.15.2.2 date 2000.12.13.15.50.44; author bouyer; state Exp; branches; next 1.15.2.3; 1.15.2.3 date 2001.03.12.13.32.12; author bouyer; state Exp; branches; next 1.15.2.4; 1.15.2.4 date 2001.03.27.15.32.50; author bouyer; state Exp; branches; next ; 1.11.2.1 date 99.06.18.17.03.15; author perry; state Exp; branches; next ; 1.11.4.1 date 99.06.21.01.47.21; author thorpej; state Exp; branches; next 1.11.4.2; 1.11.4.2 date 99.07.01.23.55.16; author thorpej; state Exp; branches; next 1.11.4.3; 1.11.4.3 date 99.08.09.00.05.56; author chs; state Exp; branches; next ; 1.11.6.1 date 99.11.30.13.36.27; author itojun; state Exp; branches; next ; 1.6.2.1 date 98.07.30.14.04.13; author eeh; state Exp; branches; next ; 1.1.1.1 date 98.02.05.06.25.09; author mrg; state Exp; branches; next ; desc @@ 1.80 log @Catch up with the usage of struct vmspace::vm_refcnt 
Use the dedicated reference counting routines. Change the type of struct vmspace::vm_refcnt and struct vm_map::ref_count to volatile. Remove the unnecessary vm->vm_map.misc_lock locking in process_domem(). Reviewed by @ text @/* $NetBSD: uvm_map.h,v 1.79 2020/03/14 14:15:43 ad Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993, The Regents of the University of California. * * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vm_map.h 8.3 (Berkeley) 3/15/94 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #ifndef _UVM_UVM_MAP_H_ #define _UVM_UVM_MAP_H_ /* * uvm_map.h */ #ifdef _KERNEL /* * macros */ /* * UVM_MAP_CLIP_START: ensure that the entry begins at or after * the starting address, if it doesn't we split the entry. 
* * => map must be locked by caller */ #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_start(MAP,ENTRY,VA); \ } \ } /* * UVM_MAP_CLIP_END: ensure that the entry ends at or before * the ending address, if it does't we split the entry. * * => map must be locked by caller */ #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_end(MAP,ENTRY,VA); \ } \ } /* * extract flags */ #define UVM_EXTRACT_REMOVE 0x01 /* remove mapping from old map */ #define UVM_EXTRACT_CONTIG 0x02 /* try to keep it contig */ #define UVM_EXTRACT_QREF 0x04 /* use quick refs */ #define UVM_EXTRACT_FIXPROT 0x08 /* set prot to maxprot as we go */ #define UVM_EXTRACT_RESERVED 0x10 /* caller did uvm_map_reserve() */ #define UVM_EXTRACT_PROT_ALL 0x20 /* set prot to UVM_PROT_ALL */ #endif /* _KERNEL */ #include #include #include #include #include #include /* * Address map entries consist of start and end addresses, * a VM object (or sharing map) and offset into that object, * and user-exported inheritance and protection information. * Also included is control information for virtual copy operations. * * At runtime this is aligned on a cacheline boundary, with fields * used during fault processing to do RB tree lookup clustered at * the beginning. */ struct vm_map_entry { struct rb_node rb_node; /* tree information */ vaddr_t start; /* start address */ vaddr_t end; /* end address */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ struct vm_map_entry *prev; /* previous entry */ struct vm_map_entry *next; /* next entry */ union { struct uvm_object *uvm_obj; /* uvm object */ struct vm_map *sub_map; /* belongs to another map */ } object; /* object I point to */ voff_t offset; /* offset into object */ uint8_t etype; /* entry type */ uint8_t flags; /* flags */ uint8_t advice; /* madvise advice */ uint8_t unused; /* unused */ vm_prot_t protection; /* protection code */ vm_prot_t max_protection; /* maximum protection */ vm_inherit_t inheritance; /* inheritance */ int wired_count; /* can be paged if == 0 */ struct vm_aref aref; /* anonymous overlay */ }; /* flags */ #define UVM_MAP_KERNEL 0x01 /* kernel map entry */ #define UVM_MAP_STATIC 0x04 /* special static entries */ #define UVM_MAP_NOMERGE 0x08 /* this entry is not mergable */ #define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0) /* * Maps are doubly-linked lists of map entries, kept sorted * by address. A single hint is provided to start * searches again from the last successful search, * insertion, or removal. * * LOCKING PROTOCOL NOTES: * ----------------------- * * VM map locking is a little complicated. There are both shared * and exclusive locks on maps. However, it is sometimes required * to downgrade an exclusive lock to a shared lock, and upgrade to * an exclusive lock again (to perform error recovery). However, * another thread *must not* queue itself to receive an exclusive * lock while before we upgrade back to exclusive, otherwise the * error recovery becomes extremely difficult, if not impossible. * * In order to prevent this scenario, we introduce the notion of * a `busy' map. A `busy' map is read-locked, but other threads * attempting to write-lock wait for this flag to clear before * entering the lock manager. 
A map may only be marked busy * when the map is write-locked (and then the map must be downgraded * to read-locked), and may only be marked unbusy by the thread * which marked it busy (holding *either* a read-lock or a * write-lock, the latter being gained by an upgrade). * * Access to the map `flags' member is controlled by the `flags_lock' * simple lock. Note that some flags are static (set once at map * creation time, and never changed), and thus require no locking * to check those flags. All flags which are r/w must be set or * cleared while the `flags_lock' is asserted. Additional locking * requirements are: * * VM_MAP_PAGEABLE r/o static flag; no locking required * * VM_MAP_WIREFUTURE r/w; may only be set or cleared when * map is write-locked. may be tested * without asserting `flags_lock'. * * VM_MAP_DYING r/o; set when a vmspace is being * destroyed to indicate that updates * to the pmap can be skipped. * * VM_MAP_TOPDOWN r/o; set when the vmspace is * created if the unspecified map * allocations are to be arranged in * a "top down" manner. */ struct vm_map { struct pmap * pmap; /* Physical map */ krwlock_t lock; /* Non-intrsafe lock */ struct lwp * busy; /* LWP holding map busy */ kmutex_t misc_lock; /* Lock for cv, busy */ kcondvar_t cv; /* For signalling */ int flags; /* flags */ struct rb_tree rb_tree; /* Tree for entries */ struct vm_map_entry header; /* List of entries */ int nentries; /* Number of entries */ vsize_t size; /* virtual size */ volatile int ref_count; /* Reference count */ struct vm_map_entry * hint; /* hint for quick lookups */ struct vm_map_entry * first_free; /* First free space hint */ unsigned int timestamp; /* Version number */ }; #if defined(_KERNEL) #include #endif /* defined(_KERNEL) */ #define VM_MAP_IS_KERNEL(map) (vm_map_pmap(map) == pmap_kernel()) /* vm_map flags */ #define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */ #define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */ #define VM_MAP_DYING 0x20 /* rw: map is being destroyed */ #define VM_MAP_TOPDOWN 0x40 /* ro: arrange map top-down */ #define VM_MAP_WANTVA 0x100 /* rw: want va */ #define VM_MAP_BITS "\177\020\ b\0PAGEABLE\0\ b\2WIREFUTURE\0\ b\5DYING\0\ b\6TOPDOWN\0\ b\10WANTVA\0" #ifdef _KERNEL struct uvm_map_args { struct vm_map_entry *uma_prev; vaddr_t uma_start; vsize_t uma_size; struct uvm_object *uma_uobj; voff_t uma_uoffset; uvm_flag_t uma_flags; }; #endif /* _KERNEL */ /* * globals: */ #ifdef _KERNEL #include #ifdef PMAP_GROWKERNEL extern vaddr_t uvm_maxkaddr; #endif /* * protos: the following prototypes define the interface to vm_map */ void uvm_map_deallocate(struct vm_map *); int uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t); int uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_clip_start(struct vm_map *, struct vm_map_entry *, vaddr_t); void uvm_map_clip_end(struct vm_map *, struct vm_map_entry *, vaddr_t); int uvm_map_extract(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, int); struct vm_map_entry * uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int); int uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t); int uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_init(void); void uvm_map_init_caches(void); bool uvm_map_lookup_entry(struct vm_map *, vaddr_t, struct vm_map_entry **); void uvm_map_reference(struct vm_map *); int uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t, vaddr_t *, uvm_flag_t); void uvm_map_setup(struct vm_map *, 
vaddr_t, vaddr_t, int); int uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t, struct vm_map *); void uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int); #define uvm_unmap(map, s, e) uvm_unmap1((map), (s), (e), 0) void uvm_unmap_detach(struct vm_map_entry *,int); void uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry **, int); int uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t, struct uvm_map_args *); int uvm_map_enter(struct vm_map *, const struct uvm_map_args *, struct vm_map_entry *); int uvm_mapent_trymerge(struct vm_map *, struct vm_map_entry *, int); #define UVM_MERGE_COPYING 1 /* * VM map locking operations. */ bool vm_map_lock_try(struct vm_map *); void vm_map_lock(struct vm_map *); void vm_map_unlock(struct vm_map *); void vm_map_unbusy(struct vm_map *); void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); void uvm_map_lock_entry(struct vm_map_entry *, krw_t); void uvm_map_unlock_entry(struct vm_map_entry *); #endif /* _KERNEL */ /* * Functions implemented as macros */ #define vm_map_min(map) ((map)->header.end) #define vm_map_max(map) ((map)->header.start) #define vm_map_setmin(map, v) ((map)->header.end = (v)) #define vm_map_setmax(map, v) ((map)->header.start = (v)) #define vm_map_pmap(map) ((map)->pmap) #endif /* _UVM_UVM_MAP_H_ */ @ 1.79 log @- uvmspace_exec(), uvmspace_free(): if pmap_remove_all() returns true the pmap is emptied. Pass UVM_FLAG_VAONLY when clearing out the map and avoid needless extra work to tear down each mapping individually. - uvm_map_lookup_entry(): remove the code to do a linear scan of map entries for small maps, in preference to using the RB tree. It's questionable, and I think the code is almost never triggered because the average number of map entries has probably exceeded the hard-coded threshold for quite some time. - vm_map_entry: get it aligned on a cacheline boundary, and cluster fields used during rbtree lookup at the beginning. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.78 2020/02/23 15:46:43 ad Exp $ */ d223 1 a223 1 int ref_count; /* Reference count */ @ 1.78 log @UVM locking changes, proposed on tech-kern: - Change the lock on uvm_object, vm_amap and vm_anon to be a RW lock. - Break v_interlock and vmobjlock apart. v_interlock remains a mutex. - Do partial PV list locking in the x86 pmap. Others to follow later. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.77 2020/01/12 17:46:55 ad Exp $ */ d128 4 d135 2 a140 2 vaddr_t start; /* start address */ vaddr_t end; /* end address */ d146 4 a149 1 int etype; /* entry type */ d155 1 a155 3 int advice; /* madvise advice */ #define uvm_map_entry_stop_copy flags u_int8_t flags; /* flags */ d157 1 a161 2 }; @ 1.77 log @- uvm_unmap_remove(): need to call pmap_update() with the object still locked, otherwise the page could gain a new identity and still be visible via a stale mapping. - Adjust reference counts with atomics. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.76 2020/01/05 15:57:15 para Exp $ */ d332 1 a332 1 void uvm_map_lock_entry(struct vm_map_entry *); @ 1.76 log @remove unused predicate function likely unused since kmem changes @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.75 2019/08/01 02:28:55 riastradh Exp $ */ d212 1 a212 1 kmutex_t misc_lock; /* Lock for ref_count, cv */ @ 1.76.2.1 log @Sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.77 2020/01/12 17:46:55 ad Exp $ */ d212 1 a212 1 kmutex_t misc_lock; /* Lock for cv, busy */ @ 1.76.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.78 2020/02/23 15:46:43 ad Exp $ */ d332 1 a332 1 void uvm_map_lock_entry(struct vm_map_entry *, krw_t); @ 1.75 log @Remove last trace of never-used map_attrib. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.74 2017/05/18 02:21:05 christos Exp $ */ a318 2 bool vm_map_starved_p(struct vm_map *); @ 1.74 log @more snprintb bits @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.73 2016/05/25 17:43:58 christos Exp $ */ a148 1 uint32_t map_attrib; /* uvm-external map attributes */ @ 1.74.10.1 log @Mostly merge changes from HEAD upto 20200411 @ text @d1 1 a1 1 /* $NetBSD$ */ a127 4 * * At runtime this is aligned on a cacheline boundary, with fields * used during fault processing to do RB tree lookup clustered at * the beginning. a130 2 vaddr_t start; /* start address */ vaddr_t end; /* end address */ d135 2 d142 1 a142 4 uint8_t etype; /* entry type */ uint8_t flags; /* flags */ uint8_t advice; /* madvise advice */ uint8_t unused; /* unused */ d148 4 a151 1 }; a152 1 /* flags */ d157 2 d213 1 a213 1 kmutex_t misc_lock; /* Lock for cv, busy */ d320 2 d335 1 a335 1 void uvm_map_lock_entry(struct vm_map_entry *, krw_t); @ 1.73 log @Introduce security.pax.mprotect.ptrace sysctl which can be used to bypass mprotect settings so that debuggers can write to the text segment of traced processes so that they can insert breakpoints. Turned off by default. Ok: chuq (for now) @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.72 2012/10/29 16:00:05 para Exp $ */ d241 7 @ 1.73.8.1 log @Resolve conflicts from previous merge (all resulting from $NetBSD keywork expansion) @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.74 2017/05/18 02:21:05 christos Exp $ */ a240 7 #define VM_MAP_BITS "\177\020\ b\0PAGEABLE\0\ b\2WIREFUTURE\0\ b\5DYING\0\ b\6TOPDOWN\0\ b\10WANTVA\0" @ 1.72 log @get rid of not used uvm_map flag (UVM_MAP_KMAPENT) @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.71 2012/02/19 00:05:56 rmind Exp $ */ d111 1 @ 1.72.14.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.73 2016/05/25 17:43:58 christos Exp $ */ a110 1 #define UVM_EXTRACT_PROT_ALL 0x20 /* set prot to UVM_PROT_ALL */ @ 1.72.14.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.72.14.1 2016/05/29 08:44:40 skrll Exp $ */ a240 7 #define VM_MAP_BITS "\177\020\ b\0PAGEABLE\0\ b\2WIREFUTURE\0\ b\5DYING\0\ b\6TOPDOWN\0\ b\10WANTVA\0" @ 1.71 log @Remove VM_MAP_INTRSAFE and related code. Not used since the "kmem changes". @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.70 2012/01/27 19:48:41 para Exp $ */ a152 1 #define UVM_MAP_KMAPENT 0x02 /* contains map entries */ @ 1.71.2.1 log @Resync to 2012-11-19 00:00:00 UTC @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.72 2012/10/29 16:00:05 para Exp $ */ d153 1 @ 1.71.2.2 log @update from HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ a110 1 #define UVM_EXTRACT_PROT_ALL 0x20 /* set prot to UVM_PROT_ALL */ a239 7 #define VM_MAP_BITS "\177\020\ b\0PAGEABLE\0\ b\2WIREFUTURE\0\ b\5DYING\0\ b\6TOPDOWN\0\ b\10WANTVA\0" @ 1.70 log @extending vmem(9) to be able to allocated resources for it's own needs. 
simplifying uvm_map handling (no special kernel entries anymore no relocking) make malloc(9) a thin wrapper around kmem(9) (with private interface for interrupt safety reasons) releng@@ acknowledged @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.69 2012/01/21 16:51:38 chs Exp $ */ a195 2 * VM_MAP_INTRSAFE r/o static flag; no locking required * a212 1 kmutex_t mutex; /* INTRSAFE lock */ a235 1 #define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */ @ 1.69 log @fix UVM_MAP_CLIP_* to only clip if the clip address is within the entry (which would only not be true if the clip address is at one of the boundaries of the entry). fixes PR 44788. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.68 2011/12/20 15:41:01 reinoud Exp $ */ d84 1 a84 1 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \ d86 1 a86 1 uvm_map_clip_start(MAP,ENTRY,VA,UMR); \ d97 1 a97 1 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \ d99 1 a99 1 uvm_map_clip_end(MAP,ENTRY,VA,UMR); \ d101 1 a101 1 } d154 2 a155 4 #define UVM_MAP_FIRST 0x04 /* the first special entry */ #define UVM_MAP_QUANTUM 0x08 /* allocated with * UVM_FLAG_QUANTUM */ #define UVM_MAP_NOMERGE 0x10 /* this entry is not mergable */ a232 13 struct vm_map_kernel { struct vm_map vmk_map; LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free; /* Freelist of map entry */ struct vm_map_entry *vmk_merged_entries; /* Merged entries, kept for later splitting */ struct callback_head vmk_reclaim_callback; #if !defined(PMAP_MAP_POOLPAGE) struct pool vmk_vacache; /* kva cache */ struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */ #endif }; a242 1 #define VM_MAP_VACACHE 0x80 /* ro: use kva cache */ a245 9 struct uvm_mapent_reservation { struct vm_map_entry *umr_entries[2]; int umr_nentries; }; #define UMR_EMPTY(umr) ((umr) == NULL || (umr)->umr_nentries == 0) #define UMR_GETENTRY(umr) ((umr)->umr_entries[--(umr)->umr_nentries]) #define UMR_PUTENTRY(umr, ent) \ (umr)->umr_entries[(umr)->umr_nentries++] = (ent) d280 1 a280 1 vaddr_t, struct uvm_mapent_reservation *); d282 1 a282 2 vaddr_t, struct uvm_mapent_reservation *); struct vm_map *uvm_map_create(pmap_t, vaddr_t, vaddr_t, int); d292 1 a298 4 void uvm_map_setup_kernel(struct vm_map_kernel *, vaddr_t, vaddr_t, int); struct vm_map_kernel * vm_map_to_kernel(struct vm_map *); d305 1 a305 2 struct vm_map_entry **, struct uvm_mapent_reservation *, int); a312 7 int uvm_mapent_reserve(struct vm_map *, struct uvm_mapent_reservation *, int, int); void uvm_mapent_unreserve(struct vm_map *, struct uvm_mapent_reservation *); vsize_t uvm_mapent_overhead(vsize_t, int); @ 1.68 log @Ooops forgot the uvm_map.h @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.67 2011/06/12 03:36:03 rmind Exp $ */ d85 4 a88 1 if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); } d98 4 a101 1 if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); } @ 1.67 log @Welcome to 5.99.53! Merge rmind-uvmplock branch: - Reorganize locking in UVM and provide extra serialisation for pmap(9). New lock order: [vmpage-owner-lock] -> pmap-lock. - Simplify locking in some pmap(9) modules by removing P->V locking. - Use lock object on vmobjlock (and thus vnode_t::v_interlock) to share the locks amongst UVM objects where necessary (tmpfs, layerfs, unionfs). - Rewrite and optimise x86 TLB shootdown code, make it simpler and cleaner. Add TLBSTATS option for x86 to collect statistics about TLB shootdowns. - Unify /dev/mem et al in MI code and provide required locking (removes kernel-lock on some ports). Also, avoid cache-aliasing issues. 
Thanks to Andrew Doran and Joerg Sonnenberger, as their initial patches formed the core changes of this branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d142 1 @ 1.67.2.1 log @sync with head @ text @d84 2 a85 5 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_start(MAP,ENTRY,VA); \ } \ } d94 2 a95 5 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_end(MAP,ENTRY,VA); \ } \ } a141 1 uint32_t map_attrib; /* uvm-external map attributes */ d147 4 a150 2 #define UVM_MAP_STATIC 0x04 /* special static entries */ #define UVM_MAP_NOMERGE 0x08 /* this entry is not mergable */ d191 2 d210 1 d228 13 d247 1 d251 1 d255 9 d298 1 a298 1 vaddr_t); d300 2 a301 1 vaddr_t); a310 1 void uvm_map_init_caches(void); d317 4 d327 2 a328 1 struct vm_map_entry **, int); d336 7 @ 1.67.2.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.67.2.1 2012/04/17 00:08:59 yamt Exp $ */ d153 1 @ 1.67.6.1 log @merge to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.70 2012/01/27 19:48:41 para Exp $ */ d84 2 a85 5 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_start(MAP,ENTRY,VA); \ } \ } d94 2 a95 5 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \ uvm_map_clip_end(MAP,ENTRY,VA); \ } \ } a141 1 uint32_t map_attrib; /* uvm-external map attributes */ d147 4 a150 2 #define UVM_MAP_STATIC 0x04 /* special static entries */ #define UVM_MAP_NOMERGE 0x08 /* this entry is not mergable */ d228 13 d251 1 d255 9 d298 1 a298 1 vaddr_t); d300 2 a301 1 vaddr_t); a310 1 void uvm_map_init_caches(void); d317 4 d327 2 a328 1 struct vm_map_entry **, int); d336 7 @ 1.67.6.2 log @sync to -current. @ text @d1 1 a1 1 /* $NetBSD$ */ d196 2 d215 1 d239 1 @ 1.66 log @udpate license clauses on my code to match the new-style BSD licenses. based on second diff that rmind@@ sent me. no functional change with this commit. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.65 2010/09/25 01:42:40 matt Exp $ */ d362 3 @ 1.66.2.1 log @Catchup with rmind-uvmplock merge. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.67 2011/06/12 03:36:03 rmind Exp $ */ a361 3 void uvm_map_lock_entry(struct vm_map_entry *); void uvm_map_unlock_entry(struct vm_map_entry *); @ 1.65 log @Rename rb.h to rbtree.h, as it is more appropriate (c.f. ptree.h). Also helps find code that hasn't been updated to use the new rbtree API. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.64 2009/08/01 16:35:51 yamt Exp $ */ d20 1 a20 6 * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor, * Washington University, the University of California, Berkeley and * its contributors. * 4. Neither the name of the University nor the names of its contributors @ 1.65.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.66 2011/02/02 15:25:27 chuck Exp $ */ d20 6 a25 1 * 3. Neither the name of the University nor the names of its contributors @ 1.65.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.66 2011/02/02 15:25:27 chuck Exp $ */ d20 6 a25 1 * 3. Neither the name of the University nor the names of its contributors @ 1.64 log @- uvm_map_extract: update map->size correctly for !UVM_EXTRACT_CONTIG. - uvm_map_extract: panic on zero-sized entries. - make uvm_map_replace static. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.63 2009/06/10 01:55:33 yamt Exp $ */ d113 1 a113 1 #include @ 1.64.2.1 log @Sync with HEAD (-D20101022). @ text @d1 1 a1 1 /* $NetBSD$ */ d113 1 a113 1 #include @ 1.64.4.1 log @Reorganise UVM locking to protect P->V state and serialise pmap(9) operations on the same page(s) by always locking their owner. Hence lock order: "vmpage"-lock -> pmap-lock. Patch, proposed on tech-kern@@, from Andrew Doran. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.64 2009/08/01 16:35:51 yamt Exp $ */ a366 3 void uvm_map_lock_entry(struct vm_map_entry *); void uvm_map_unlock_entry(struct vm_map_entry *); @ 1.64.4.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d20 6 a25 1 * 3. Neither the name of the University nor the names of its contributors d113 1 a113 1 #include @ 1.63 log @on MADV_WILLNEED, start prefetching backing object's pages. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.62 2008/07/29 00:03:06 matt Exp $ */ a318 2 int uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry *, int, struct vm_map_entry **); @ 1.62 log @Make uvm_map.? use instead of . Change the ambiguous members ownspace/space to gap/maxgap. Add some evcnt for evaluation of lookups using tree/list. Drop threshold of using tree for lookups from > 30 to > 15. Bump kernel version to 4.99.71 @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61 2008/04/26 13:44:00 yamt Exp $ */ d300 1 @ 1.62.8.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.63 2009/06/10 01:55:33 yamt Exp $ */ a299 1 int uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t); @ 1.61 log @fix a locking botch. PR/38415 from Wolfgang Solfrank. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.60 2008/01/08 13:10:01 yamt Exp $ */ d113 1 a113 1 #include d128 3 a130 3 RB_ENTRY(vm_map_entry) rb_entry; /* tree information */ vaddr_t ownspace; /* free space after */ vaddr_t space; /* space in subtree */ d219 1 a219 1 RB_HEAD(uvm_tree, vm_map_entry) rbhead; /* Tree for entries */ @ 1.61.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61 2008/04/26 13:44:00 yamt Exp $ */ d113 1 a113 1 #include d128 3 a130 3 struct rb_node rb_node; /* tree information */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ d219 1 a219 1 struct rb_tree rb_tree; /* Tree for entries */ @ 1.61.2.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61.2.1 2009/05/04 08:14:39 yamt Exp $ */ a299 1 int uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t); @ 1.61.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61.2.2 2009/06/20 07:20:38 yamt Exp $ */ d319 2 @ 1.61.2.4 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61.2.3 2009/08/19 18:48:36 yamt Exp $ */ d113 1 a113 1 #include @ 1.61.8.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.62 2008/07/29 00:03:06 matt Exp $ */ d113 1 a113 1 #include d128 3 a130 3 struct rb_node rb_node; /* tree information */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ d219 1 a219 1 struct rb_tree rb_tree; /* Tree for entries */ @ 1.61.4.1 log @Sync with wrstuden-revivesa-base-2. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.61 2008/04/26 13:44:00 yamt Exp $ */ d113 1 a113 1 #include d128 3 a130 3 struct rb_node rb_node; /* tree information */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ d219 1 a219 1 struct rb_tree rb_tree; /* Tree for entries */ @ 1.61.6.1 log @Sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.62 2008/07/29 00:03:06 matt Exp $ */ d113 1 a113 1 #include d128 3 a130 3 struct rb_node rb_node; /* tree information */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ d219 1 a219 1 struct rb_tree rb_tree; /* Tree for entries */ @ 1.60 log @simplify locking and remove vm_map_upgrade/downgrade. this fixes a deadlock due to read-lock recursion of map->lock. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.59 2008/01/02 11:49:18 ad Exp $ */ d319 1 a319 1 struct vm_map_entry *, int); @ 1.60.6.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ d319 1 a319 1 struct vm_map_entry *, int, struct vm_map_entry **); @ 1.60.6.2 log @Sync with HEAD. @ text @d113 1 a113 1 #include d128 3 a130 3 struct rb_node rb_node; /* tree information */ vsize_t gap; /* free space after */ vsize_t maxgap; /* space in subtree */ d219 1 a219 1 struct rb_tree rb_tree; /* Tree for entries */ @ 1.60.8.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.60 2008/01/08 13:10:01 yamt Exp $ */ d319 1 a319 1 struct vm_map_entry *, int, struct vm_map_entry **); @ 1.59 log @Merge vmlocking2 to head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $ */ a361 1 void vm_map_upgrade(struct vm_map *); a364 1 void vm_map_downgrade(struct vm_map *); @ 1.58 log @When _KERNEL is defined, we have now grown a dependency on , since one of the inline functions now refer to curlwp. Fix this by including when _KERNEL is defined. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.57 2007/07/21 19:21:54 ad Exp $ */ a216 1 kmutex_t hint_lock; /* lock for hint storage */ d364 5 a368 52 /* * vm_map_lock_read: acquire a shared (read) lock on a map. */ static inline void vm_map_lock_read(struct vm_map *map) { KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); rw_enter(&map->lock, RW_READER); } /* * vm_map_unlock_read: release a shared lock on a map. */ static inline void vm_map_unlock_read(struct vm_map *map) { KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); rw_exit(&map->lock); } /* * vm_map_downgrade: downgrade an exclusive lock to a shared lock. */ static inline void vm_map_downgrade(struct vm_map *map) { rw_downgrade(&map->lock); } /* * vm_map_busy: mark a map as busy. * * => the caller must hold the map write locked */ static inline void vm_map_busy(struct vm_map *map) { KASSERT(rw_write_held(&map->lock)); KASSERT(map->busy == NULL); map->busy = curlwp; } @ 1.58.22.1 log @file uvm_map.h was added on branch matt-mips64 on 2007-07-22 21:07:48 +0000 @ text @d1 430 @ 1.58.22.2 log @When _KERNEL is defined, we have now grown a dependency on , since one of the inline functions now refer to curlwp. Fix this by including when _KERNEL is defined. @ text @a0 430 /* $NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993, The Regents of the University of California. * * All rights reserved. * * This code is derived from software contributed to Berkeley by * The Mach Operating System project at Carnegie-Mellon University. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor, * Washington University, the University of California, Berkeley and * its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vm_map.h 8.3 (Berkeley) 3/15/94 * from: Id: uvm_map.h,v 1.1.2.3 1998/02/07 01:16:55 chs Exp * * * Copyright (c) 1987, 1990 Carnegie-Mellon University. * All rights reserved. * * Permission to use, copy, modify and distribute this software and * its documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #ifndef _UVM_UVM_MAP_H_ #define _UVM_UVM_MAP_H_ /* * uvm_map.h */ #ifdef _KERNEL /* * macros */ /* * UVM_MAP_CLIP_START: ensure that the entry begins at or after * the starting address, if it doesn't we split the entry. * * => map must be locked by caller */ #define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \ if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); } /* * UVM_MAP_CLIP_END: ensure that the entry ends at or before * the ending address, if it does't we split the entry. 
* * => map must be locked by caller */ #define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \ if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); } /* * extract flags */ #define UVM_EXTRACT_REMOVE 0x01 /* remove mapping from old map */ #define UVM_EXTRACT_CONTIG 0x02 /* try to keep it contig */ #define UVM_EXTRACT_QREF 0x04 /* use quick refs */ #define UVM_EXTRACT_FIXPROT 0x08 /* set prot to maxprot as we go */ #define UVM_EXTRACT_RESERVED 0x10 /* caller did uvm_map_reserve() */ #endif /* _KERNEL */ #include #include #include #include #include #include /* * Address map entries consist of start and end addresses, * a VM object (or sharing map) and offset into that object, * and user-exported inheritance and protection information. * Also included is control information for virtual copy operations. */ struct vm_map_entry { RB_ENTRY(vm_map_entry) rb_entry; /* tree information */ vaddr_t ownspace; /* free space after */ vaddr_t space; /* space in subtree */ struct vm_map_entry *prev; /* previous entry */ struct vm_map_entry *next; /* next entry */ vaddr_t start; /* start address */ vaddr_t end; /* end address */ union { struct uvm_object *uvm_obj; /* uvm object */ struct vm_map *sub_map; /* belongs to another map */ } object; /* object I point to */ voff_t offset; /* offset into object */ int etype; /* entry type */ vm_prot_t protection; /* protection code */ vm_prot_t max_protection; /* maximum protection */ vm_inherit_t inheritance; /* inheritance */ int wired_count; /* can be paged if == 0 */ struct vm_aref aref; /* anonymous overlay */ int advice; /* madvise advice */ #define uvm_map_entry_stop_copy flags u_int8_t flags; /* flags */ #define UVM_MAP_KERNEL 0x01 /* kernel map entry */ #define UVM_MAP_KMAPENT 0x02 /* contains map entries */ #define UVM_MAP_FIRST 0x04 /* the first special entry */ #define UVM_MAP_QUANTUM 0x08 /* allocated with * UVM_FLAG_QUANTUM */ #define UVM_MAP_NOMERGE 0x10 /* this entry is not mergable */ }; #define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0) /* * Maps are doubly-linked lists of map entries, kept sorted * by address. A single hint is provided to start * searches again from the last successful search, * insertion, or removal. * * LOCKING PROTOCOL NOTES: * ----------------------- * * VM map locking is a little complicated. There are both shared * and exclusive locks on maps. However, it is sometimes required * to downgrade an exclusive lock to a shared lock, and upgrade to * an exclusive lock again (to perform error recovery). However, * another thread *must not* queue itself to receive an exclusive * lock while before we upgrade back to exclusive, otherwise the * error recovery becomes extremely difficult, if not impossible. * * In order to prevent this scenario, we introduce the notion of * a `busy' map. A `busy' map is read-locked, but other threads * attempting to write-lock wait for this flag to clear before * entering the lock manager. A map may only be marked busy * when the map is write-locked (and then the map must be downgraded * to read-locked), and may only be marked unbusy by the thread * which marked it busy (holding *either* a read-lock or a * write-lock, the latter being gained by an upgrade). * * Access to the map `flags' member is controlled by the `flags_lock' * simple lock. Note that some flags are static (set once at map * creation time, and never changed), and thus require no locking * to check those flags. All flags which are r/w must be set or * cleared while the `flags_lock' is asserted. 
Additional locking * requirements are: * * VM_MAP_PAGEABLE r/o static flag; no locking required * * VM_MAP_INTRSAFE r/o static flag; no locking required * * VM_MAP_WIREFUTURE r/w; may only be set or cleared when * map is write-locked. may be tested * without asserting `flags_lock'. * * VM_MAP_DYING r/o; set when a vmspace is being * destroyed to indicate that updates * to the pmap can be skipped. * * VM_MAP_TOPDOWN r/o; set when the vmspace is * created if the unspecified map * allocations are to be arranged in * a "top down" manner. */ struct vm_map { struct pmap * pmap; /* Physical map */ krwlock_t lock; /* Non-intrsafe lock */ struct lwp * busy; /* LWP holding map busy */ kmutex_t mutex; /* INTRSAFE lock */ kmutex_t misc_lock; /* Lock for ref_count, cv */ kmutex_t hint_lock; /* lock for hint storage */ kcondvar_t cv; /* For signalling */ int flags; /* flags */ RB_HEAD(uvm_tree, vm_map_entry) rbhead; /* Tree for entries */ struct vm_map_entry header; /* List of entries */ int nentries; /* Number of entries */ vsize_t size; /* virtual size */ int ref_count; /* Reference count */ struct vm_map_entry * hint; /* hint for quick lookups */ struct vm_map_entry * first_free; /* First free space hint */ unsigned int timestamp; /* Version number */ }; #if defined(_KERNEL) #include struct vm_map_kernel { struct vm_map vmk_map; LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free; /* Freelist of map entry */ struct vm_map_entry *vmk_merged_entries; /* Merged entries, kept for later splitting */ struct callback_head vmk_reclaim_callback; #if !defined(PMAP_MAP_POOLPAGE) struct pool vmk_vacache; /* kva cache */ struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */ #endif }; #endif /* defined(_KERNEL) */ #define VM_MAP_IS_KERNEL(map) (vm_map_pmap(map) == pmap_kernel()) /* vm_map flags */ #define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */ #define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */ #define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */ #define VM_MAP_DYING 0x20 /* rw: map is being destroyed */ #define VM_MAP_TOPDOWN 0x40 /* ro: arrange map top-down */ #define VM_MAP_VACACHE 0x80 /* ro: use kva cache */ #define VM_MAP_WANTVA 0x100 /* rw: want va */ #ifdef _KERNEL struct uvm_mapent_reservation { struct vm_map_entry *umr_entries[2]; int umr_nentries; }; #define UMR_EMPTY(umr) ((umr) == NULL || (umr)->umr_nentries == 0) #define UMR_GETENTRY(umr) ((umr)->umr_entries[--(umr)->umr_nentries]) #define UMR_PUTENTRY(umr, ent) \ (umr)->umr_entries[(umr)->umr_nentries++] = (ent) struct uvm_map_args { struct vm_map_entry *uma_prev; vaddr_t uma_start; vsize_t uma_size; struct uvm_object *uma_uobj; voff_t uma_uoffset; uvm_flag_t uma_flags; }; #endif /* _KERNEL */ /* * globals: */ #ifdef _KERNEL #include #ifdef PMAP_GROWKERNEL extern vaddr_t uvm_maxkaddr; #endif /* * protos: the following prototypes define the interface to vm_map */ void uvm_map_deallocate(struct vm_map *); int uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_clip_start(struct vm_map *, struct vm_map_entry *, vaddr_t, struct uvm_mapent_reservation *); void uvm_map_clip_end(struct vm_map *, struct vm_map_entry *, vaddr_t, struct uvm_mapent_reservation *); struct vm_map *uvm_map_create(pmap_t, vaddr_t, vaddr_t, int); int uvm_map_extract(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, int); struct vm_map_entry * uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int); int uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, 
vm_inherit_t); int uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_init(void); bool uvm_map_lookup_entry(struct vm_map *, vaddr_t, struct vm_map_entry **); void uvm_map_reference(struct vm_map *); int uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry *, int); int uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t, vaddr_t *, uvm_flag_t); void uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_setup_kernel(struct vm_map_kernel *, vaddr_t, vaddr_t, int); struct vm_map_kernel * vm_map_to_kernel(struct vm_map *); int uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t, struct vm_map *); void uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int); #define uvm_unmap(map, s, e) uvm_unmap1((map), (s), (e), 0) void uvm_unmap_detach(struct vm_map_entry *,int); void uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry **, struct uvm_mapent_reservation *, int); int uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t, struct uvm_map_args *); int uvm_map_enter(struct vm_map *, const struct uvm_map_args *, struct vm_map_entry *); int uvm_mapent_reserve(struct vm_map *, struct uvm_mapent_reservation *, int, int); void uvm_mapent_unreserve(struct vm_map *, struct uvm_mapent_reservation *); vsize_t uvm_mapent_overhead(vsize_t, int); int uvm_mapent_trymerge(struct vm_map *, struct vm_map_entry *, int); #define UVM_MERGE_COPYING 1 bool vm_map_starved_p(struct vm_map *); /* * VM map locking operations. */ bool vm_map_lock_try(struct vm_map *); void vm_map_lock(struct vm_map *); void vm_map_unlock(struct vm_map *); void vm_map_upgrade(struct vm_map *); void vm_map_unbusy(struct vm_map *); /* * vm_map_lock_read: acquire a shared (read) lock on a map. */ static inline void vm_map_lock_read(struct vm_map *map) { KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); rw_enter(&map->lock, RW_READER); } /* * vm_map_unlock_read: release a shared lock on a map. */ static inline void vm_map_unlock_read(struct vm_map *map) { KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); rw_exit(&map->lock); } /* * vm_map_downgrade: downgrade an exclusive lock to a shared lock. */ static inline void vm_map_downgrade(struct vm_map *map) { rw_downgrade(&map->lock); } /* * vm_map_busy: mark a map as busy. * * => the caller must hold the map write locked */ static inline void vm_map_busy(struct vm_map *map) { KASSERT(rw_write_held(&map->lock)); KASSERT(map->busy == NULL); map->busy = curlwp; } #endif /* _KERNEL */ /* * Functions implemented as macros */ #define vm_map_min(map) ((map)->header.end) #define vm_map_max(map) ((map)->header.start) #define vm_map_setmin(map, v) ((map)->header.end = (v)) #define vm_map_setmax(map, v) ((map)->header.start = (v)) #define vm_map_pmap(map) ((map)->pmap) #endif /* _UVM_UVM_MAP_H_ */ @ 1.58.12.1 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.60 2008/01/08 13:10:01 yamt Exp $ */ d217 1 d363 1 d365 52 a416 4 void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); @ 1.58.6.1 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $ */ d217 1 d363 1 d365 52 a416 4 void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); @ 1.58.18.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ d217 1 d365 52 a416 5 void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_downgrade(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); @ 1.58.18.2 log @Sync with HEAD @ text @d362 1 d366 1 @ 1.58.14.1 log @Kill vm_map::hint_lock. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $ */ d217 1 @ 1.58.14.2 log @- Move remaining map locking functions into uvm_map.c. They depend on proc.h. - Lock vm_map_kernel::vmk_merged_entries with the map's own lock. There was a race where a thread legitimately expects to find cached entries, but can find none because they have not been freed yet. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.58.14.1 2007/12/21 15:39:24 ad Exp $ */ d364 52 a415 5 void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_downgrade(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); @ 1.58.16.1 log @add a function to call pmap_growkernel if necessary. will be used by vmem. @ text @d1 1 a1 1 /* $NetBSD$ */ a292 9 static inline void uvm_growkernel(vaddr_t endva) { if (__predict_false(uvm_maxkaddr < endva)) { uvm_maxkaddr = pmap_growkernel(endva); } } @ 1.57 log @Merge unobtrusive locking changes from the vmlocking branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.56 2007/02/22 06:05:01 thorpej Exp $ */ d289 2 @ 1.56 log @TRUE -> true, FALSE -> false @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.55 2007/02/21 23:00:13 thorpej Exp $ */ d115 3 a201 11 * VM_MAP_BUSY r/w; may only be set when map is * write-locked, may only be cleared by * thread which set it, map read-locked * or write-locked. must be tested * while `flags_lock' is asserted. * * VM_MAP_WANTLOCK r/w; may only be set when the map * is busy, and thread is attempting * to write-lock. must be tested * while `flags_lock' is asserted. * d213 7 a219 1 struct lock lock; /* Lock for map data */ a224 1 struct simplelock ref_lock; /* Lock for ref_count field */ a225 1 struct simplelock hint_lock; /* lock for hint storage */ a226 2 int flags; /* flags */ struct simplelock flags_lock; /* Lock for flags field */ a254 2 #define VM_MAP_BUSY 0x08 /* rw: map is busy */ #define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */ a282 9 #ifdef _KERNEL #define vm_map_modflags(map, set, clear) \ do { \ simple_lock(&(map)->flags_lock); \ (map)->flags = ((map)->flags | (set)) & ~(clear); \ simple_unlock(&(map)->flags_lock); \ } while (/*CONSTCOND*/ 0) #endif /* _KERNEL */ d352 1 a352 1 #endif /* _KERNEL */ d355 1 a355 25 * VM map locking operations: * * These operations perform locking on the data portion of the * map. * * vm_map_lock_try: try to lock a map, failing if it is already locked. * * vm_map_lock: acquire an exclusive (write) lock on a map. * * vm_map_lock_read: acquire a shared (read) lock on a map. 
* * vm_map_unlock: release an exclusive lock on a map. * * vm_map_unlock_read: release a shared lock on a map. * * vm_map_downgrade: downgrade an exclusive lock to a shared lock. * * vm_map_upgrade: upgrade a shared lock to an exclusive lock. * * vm_map_busy: mark a map as busy. * * vm_map_unbusy: clear busy status on a map. * * Note that "intrsafe" maps use only exclusive, spin locks. We simply * use the sleep lock's interlock for this. d358 9 a366 9 #ifdef _KERNEL /* XXX: clean up later */ #include #include /* for tsleep(), wakeup() */ #include /* for panic() */ static __inline bool vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); extern const char vmmapbsy[]; d368 2 a369 2 static __inline bool vm_map_lock_try(struct vm_map *map) a370 1 bool rv; d372 12 a383 11 if (map->flags & VM_MAP_INTRSAFE) rv = simple_lock_try(&map->lock.lk_interlock); else { simple_lock(&map->flags_lock); if (map->flags & VM_MAP_BUSY) { simple_unlock(&map->flags_lock); return (false); } rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK, &map->flags_lock) == 0); } d385 1 a385 2 if (rv) map->timestamp++; d387 1 a387 1 return (rv); d389 3 d393 2 a394 2 static __inline void vm_map_lock(struct vm_map *map) a395 21 int error; if (map->flags & VM_MAP_INTRSAFE) { simple_lock(&map->lock.lk_interlock); return; } try_again: simple_lock(&map->flags_lock); while (map->flags & VM_MAP_BUSY) { map->flags |= VM_MAP_WANTLOCK; ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock); } error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK, &map->flags_lock); if (error) { KASSERT(error == ENOLCK); goto try_again; } d397 1 a397 1 (map)->timestamp++; d400 5 a404 11 #ifdef DIAGNOSTIC #define vm_map_lock_read(map) \ do { \ if ((map)->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe Map"); \ (void) lockmgr(&(map)->lock, LK_SHARED, NULL); \ } while (/*CONSTCOND*/ 0) #else #define vm_map_lock_read(map) \ (void) lockmgr(&(map)->lock, LK_SHARED, NULL) #endif d406 3 a408 24 #define vm_map_unlock(map) \ do { \ if ((map)->flags & VM_MAP_INTRSAFE) \ simple_unlock(&(map)->lock.lk_interlock); \ else \ (void) lockmgr(&(map)->lock, LK_RELEASE, NULL); \ } while (/*CONSTCOND*/ 0) #define vm_map_unlock_read(map) \ (void) lockmgr(&(map)->lock, LK_RELEASE, NULL) #define vm_map_downgrade(map) \ (void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL) #ifdef DIAGNOSTIC #define vm_map_upgrade(map) \ do { \ if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0) \ panic("vm_map_upgrade: failed to upgrade lock"); \ } while (/*CONSTCOND*/ 0) #else #define vm_map_upgrade(map) \ (void) lockmgr(&(map)->lock, LK_UPGRADE, NULL) #endif d410 2 a411 18 #define vm_map_busy(map) \ do { \ simple_lock(&(map)->flags_lock); \ (map)->flags |= VM_MAP_BUSY; \ simple_unlock(&(map)->flags_lock); \ } while (/*CONSTCOND*/ 0) #define vm_map_unbusy(map) \ do { \ int oflags; \ \ simple_lock(&(map)->flags_lock); \ oflags = (map)->flags; \ (map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK); \ simple_unlock(&(map)->flags_lock); \ if (oflags & VM_MAP_WANTLOCK) \ wakeup(&(map)->flags); \ } while (/*CONSTCOND*/ 0) d413 2 a414 1 bool vm_map_starved_p(struct vm_map *); @ 1.56.12.1 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.58 2007/07/22 21:07:47 he Exp $ */ a114 3 #include #include #include d199 11 d221 1 a221 7 krwlock_t lock; /* Non-intrsafe lock */ struct lwp * busy; /* LWP holding map busy */ kmutex_t mutex; /* INTRSAFE lock */ kmutex_t misc_lock; /* Lock for ref_count, cv */ kmutex_t hint_lock; /* lock for hint storage */ kcondvar_t cv; /* For signalling */ int flags; /* flags */ d227 1 d229 1 d231 2 d261 2 d291 9 a305 2 #include d369 1 a369 1 bool vm_map_starved_p(struct vm_map *); d372 25 a396 1 * VM map locking operations. d399 9 a407 5 bool vm_map_lock_try(struct vm_map *); void vm_map_lock(struct vm_map *); void vm_map_unlock(struct vm_map *); void vm_map_upgrade(struct vm_map *); void vm_map_unbusy(struct vm_map *); d409 4 a412 3 /* * vm_map_lock_read: acquire a shared (read) lock on a map. */ d414 11 a424 3 static inline void vm_map_lock_read(struct vm_map *map) { d426 2 a427 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d429 1 a429 1 rw_enter(&map->lock, RW_READER); d432 2 a433 6 /* * vm_map_unlock_read: release a shared lock on a map. */ static inline void vm_map_unlock_read(struct vm_map *map) d435 1 d437 19 a455 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d457 1 a457 1 rw_exit(&map->lock); a458 3 /* * vm_map_downgrade: downgrade an exclusive lock to a shared lock. */ d460 11 a470 3 static inline void vm_map_downgrade(struct vm_map *map) { d472 24 a495 2 rw_downgrade(&map->lock); } d497 18 a514 5 /* * vm_map_busy: mark a map as busy. * * => the caller must hold the map write locked */ d516 1 a516 9 static inline void vm_map_busy(struct vm_map *map) { KASSERT(rw_write_held(&map->lock)); KASSERT(map->busy == NULL); map->busy = curlwp; } @ 1.56.4.1 log @Pull in the initial set of changes for the vmlocking branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.56 2007/02/22 06:05:01 thorpej Exp $ */ d227 1 a227 1 kmutex_t ref_lock; /* Lock for ref_count field */ d229 1 a229 1 kmutex_t hint_lock; /* lock for hint storage */ d232 1 a232 1 kmutex_t flags_lock; /* Lock for flags field */ d294 1 a294 1 mutex_enter(&(map)->flags_lock); \ d296 1 a296 1 mutex_exit(&(map)->flags_lock); \ d415 1 a415 1 rv = mutex_tryenter(&map->lock.lk_interlock); d417 1 a417 1 mutex_enter(&map->flags_lock); d419 1 a419 1 mutex_exit(&map->flags_lock); d438 1 a438 1 mutex_enter(&map->lock.lk_interlock); d443 1 a443 1 mutex_enter(&map->flags_lock); d446 1 a446 1 mtsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock); d475 1 a475 1 mutex_exit(&(map)->lock.lk_interlock); \ d499 1 a499 1 mutex_enter(&(map)->flags_lock); \ d501 1 a501 1 mutex_exit(&(map)->flags_lock); \ d508 1 a508 1 mutex_enter(&(map)->flags_lock); \ d511 1 a511 1 mutex_exit(&(map)->flags_lock); \ @ 1.56.4.2 log @- Put a per-LWP lock around swapin / swapout. - Replace use of lockmgr(). - Minor locking fixes and assertions. - uvm_map.h no longer pulls in proc.h, etc. - Use kpause where appropriate. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.56.4.1 2007/03/13 17:51:56 ad Exp $ */ a114 3 #include #include #include d199 11 d221 1 a221 7 krwlock_t lock; /* Non-intrsafe lock */ struct lwp * busy; /* LWP holding map busy */ kmutex_t mutex; /* INTRSAFE lock */ kmutex_t misc_lock; /* Lock for ref_count, cv */ kmutex_t hint_lock; /* lock for hint storage */ kcondvar_t cv; /* For signalling */ int flags; /* flags */ d227 1 d229 1 d231 2 d261 2 d291 9 d369 1 a369 1 bool vm_map_starved_p(struct vm_map *); d372 25 a396 1 * VM map locking operations. 
d399 9 a407 5 bool vm_map_lock_try(struct vm_map *); void vm_map_lock(struct vm_map *); void vm_map_unlock(struct vm_map *); void vm_map_upgrade(struct vm_map *); void vm_map_unbusy(struct vm_map *); d409 4 a412 3 /* * vm_map_lock_read: acquire a shared (read) lock on a map. */ d414 11 a424 3 static inline void vm_map_lock_read(struct vm_map *map) { d426 2 a427 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d429 1 a429 1 rw_enter(&map->lock, RW_READER); d432 2 a433 6 /* * vm_map_unlock_read: release a shared lock on a map. */ static inline void vm_map_unlock_read(struct vm_map *map) d435 1 d437 19 a455 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d457 1 a457 1 rw_exit(&map->lock); a458 3 /* * vm_map_downgrade: downgrade an exclusive lock to a shared lock. */ d460 11 a470 3 static inline void vm_map_downgrade(struct vm_map *map) { d472 24 a495 8 rw_downgrade(&map->lock); } /* * vm_map_busy: mark a map as busy. * * => the caller must hold the map write locked */ d497 18 a514 3 static inline void vm_map_busy(struct vm_map *map) { d516 1 a516 5 KASSERT(rw_write_held(&map->lock)); KASSERT(map->busy == NULL); map->busy = curlwp; } @ 1.55 log @Replace the Mach-derived boolean_t type with the C99 bool type. A future commit will replace use of TRUE and FALSE with true and false. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.54 2006/05/25 14:27:28 yamt Exp $ */ d420 1 a420 1 return (FALSE); @ 1.54 log @move wait points for kva from upper layers to vm_map. PR/33185 #1. XXX there is a concern about interaction with kva fragmentation. see: http://mail-index.NetBSD.org/tech-kern/2006/05/11/0000.html @ text @d1 1 a1 1 /* $NetBSD$ */ d331 1 a331 1 boolean_t uvm_map_lookup_entry(struct vm_map *, vaddr_t, d405 1 a405 1 static __inline boolean_t vm_map_lock_try(struct vm_map *); d409 1 a409 1 static __inline boolean_t d412 1 a412 1 boolean_t rv; d516 1 a516 1 boolean_t vm_map_starved_p(struct vm_map *); @ 1.54.12.1 log @- sync with head. - move sched_changepri back to kern_synch.c as it doesn't know PPQ anymore. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.54 2006/05/25 14:27:28 yamt Exp $ */ d331 1 a331 1 bool uvm_map_lookup_entry(struct vm_map *, vaddr_t, d405 1 a405 1 static __inline bool vm_map_lock_try(struct vm_map *); d409 1 a409 1 static __inline bool d412 1 a412 1 bool rv; d420 1 a420 1 return (false); d516 1 a516 1 bool vm_map_starved_p(struct vm_map *); @ 1.53 log @uvm_km_suballoc: consider kva overhead of "kmapent". fixes PR/31275 (me) and PR/32287 (Christian Biere). @ text @d237 3 d247 1 d516 2 @ 1.53.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.54 2006/05/25 14:27:28 yamt Exp $ */ a236 3 #include a243 1 struct callback_head vmk_reclaim_callback; a511 2 boolean_t vm_map_starved_p(struct vm_map *); @ 1.52 log @Change "inline" back to "__inline" in .h files -- C99 is still too new, and some apps compile things in C89 mode. C89 keywords stay. As per core@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.51 2006/02/11 12:45:07 yamt Exp $ */ d359 2 @ 1.52.6.1 log @Merge 2006-05-24 NetBSD-current into the "peter-altq" branch. @ text @d1 1 a1 1 /* $NetBSD$ */ a358 2 vsize_t uvm_mapent_overhead(vsize_t, int); @ 1.52.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.52 2006/02/16 20:17:20 perry Exp $ */ a358 2 vsize_t uvm_mapent_overhead(vsize_t, int); @ 1.52.2.2 log @sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.52.2.1 2006/05/24 10:59:30 yamt Exp $ */ a236 3 #include a243 1 struct callback_head vmk_reclaim_callback; a511 2 boolean_t vm_map_starved_p(struct vm_map *); @ 1.52.4.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.53 2006/05/03 14:12:01 yamt Exp $ */ a358 2 vsize_t uvm_mapent_overhead(vsize_t, int); @ 1.51 log @remove the following options. no objections on tech-kern@@. UVM_PAGER_INLINE UVM_AMAP_INLINE UVM_PAGE_INLINE UVM_MAP_INLINE @ text @d1 1 a1 1 /* $NetBSD$ */ d399 2 a400 2 static inline boolean_t vm_map_lock_try(struct vm_map *); static inline void vm_map_lock(struct vm_map *); d403 1 a403 1 static inline boolean_t d426 1 a426 1 static inline void @ 1.50 log @implement compat_linux mremap. @ text @a296 10 * handle inline options */ #ifdef UVM_MAP_INLINE #define MAP_INLINE static inline #else #define MAP_INLINE /* nothing */ #endif /* UVM_MAP_INLINE */ /* a309 1 MAP_INLINE a316 1 MAP_INLINE a328 1 MAP_INLINE a336 1 MAP_INLINE a340 1 MAP_INLINE @ 1.50.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.54 2006/05/25 14:27:28 yamt Exp $ */ a236 3 #include a243 1 struct callback_head vmk_reclaim_callback; d297 10 d320 1 d328 1 d341 1 d350 1 d355 1 a373 2 vsize_t uvm_mapent_overhead(vsize_t, int); d414 2 a415 2 static __inline boolean_t vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); d418 1 a418 1 static __inline boolean_t d441 1 a441 1 static __inline void a524 2 boolean_t vm_map_starved_p(struct vm_map *); @ 1.50.4.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.52 2006/02/16 20:17:20 perry Exp $ */ d297 10 d320 1 d328 1 d341 1 d350 1 d355 1 d414 2 a415 2 static __inline boolean_t vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); d418 1 a418 1 static __inline boolean_t d441 1 a441 1 static __inline void @ 1.50.4.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.50.4.1 2006/04/22 11:40:29 simonb Exp $ */ a236 3 #include a243 1 struct callback_head vmk_reclaim_callback; a358 2 vsize_t uvm_mapent_overhead(vsize_t, int); a509 2 boolean_t vm_map_starved_p(struct vm_map *); @ 1.49 log @Remove leading __ from __(const|inline|signed|volatile) -- it is obsolete. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.48 2005/12/11 12:25:29 christos Exp $ */ d105 5 a109 4 #define UVM_EXTRACT_REMOVE 0x1 /* remove mapping from old map */ #define UVM_EXTRACT_CONTIG 0x2 /* try to keep it contig */ #define UVM_EXTRACT_QREF 0x4 /* use quick refs */ #define UVM_EXTRACT_FIXPROT 0x8 /* set prot to maxprot as we go */ d346 1 a346 1 vaddr_t *); @ 1.49.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.49 2005/12/24 20:45:10 perry Exp $ */ d105 4 a108 5 #define UVM_EXTRACT_REMOVE 0x01 /* remove mapping from old map */ #define UVM_EXTRACT_CONTIG 0x02 /* try to keep it contig */ #define UVM_EXTRACT_QREF 0x04 /* use quick refs */ #define UVM_EXTRACT_FIXPROT 0x08 /* set prot to maxprot as we go */ #define UVM_EXTRACT_RESERVED 0x10 /* caller did uvm_map_reserve() */ d345 1 a345 1 vaddr_t *, uvm_flag_t); @ 1.49.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.49.2.1 2006/02/01 14:52:48 yamt Exp $ */ d297 10 d320 1 d328 1 d341 1 d350 1 d355 1 d414 2 a415 2 static __inline boolean_t vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); d418 1 a418 1 static __inline boolean_t d441 1 a441 1 static __inline void @ 1.48 log @merge ktrace-lwp. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.7 2005/11/10 14:12:40 skrll Exp $ */ d300 1 a300 1 #define MAP_INLINE static __inline d413 2 a414 2 static __inline boolean_t vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); d417 1 a417 1 static __inline boolean_t d440 1 a440 1 static __inline void @ 1.47 log @(try to) merge map entries in fault handler. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.46 2005/04/01 11:59:39 yamt Exp $ */ @ 1.47.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.47 2005/05/17 13:55:33 yamt Exp $ */ d105 4 a108 5 #define UVM_EXTRACT_REMOVE 0x01 /* remove mapping from old map */ #define UVM_EXTRACT_CONTIG 0x02 /* try to keep it contig */ #define UVM_EXTRACT_QREF 0x04 /* use quick refs */ #define UVM_EXTRACT_FIXPROT 0x08 /* set prot to maxprot as we go */ #define UVM_EXTRACT_RESERVED 0x10 /* caller did uvm_map_reserve() */ a235 3 #include a242 1 struct callback_head vmk_reclaim_callback; d296 10 d319 1 d327 1 d340 1 d345 1 a345 1 vaddr_t *, uvm_flag_t); d349 1 d354 1 a372 2 vsize_t uvm_mapent_overhead(vsize_t, int); a523 2 boolean_t vm_map_starved_p(struct vm_map *); @ 1.47.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.47.2.1 2006/06/21 15:12:40 yamt Exp $ */ d331 1 a331 1 bool uvm_map_lookup_entry(struct vm_map *, vaddr_t, d405 1 a405 1 static __inline bool vm_map_lock_try(struct vm_map *); d409 1 a409 1 static __inline bool d412 1 a412 1 bool rv; d420 1 a420 1 return (false); d516 1 a516 1 bool vm_map_starved_p(struct vm_map *); @ 1.47.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.47.2.2 2007/02/26 09:12:30 yamt Exp $ */ a114 3 #include #include #include d199 11 d221 1 a221 7 krwlock_t lock; /* Non-intrsafe lock */ struct lwp * busy; /* LWP holding map busy */ kmutex_t mutex; /* INTRSAFE lock */ kmutex_t misc_lock; /* Lock for ref_count, cv */ kmutex_t hint_lock; /* lock for hint storage */ kcondvar_t cv; /* For signalling */ int flags; /* flags */ d227 1 d229 1 d231 2 d261 2 d291 9 a305 2 #include d369 1 a369 1 bool vm_map_starved_p(struct vm_map *); d372 25 a396 1 * VM map locking operations. d399 9 a407 5 bool vm_map_lock_try(struct vm_map *); void vm_map_lock(struct vm_map *); void vm_map_unlock(struct vm_map *); void vm_map_upgrade(struct vm_map *); void vm_map_unbusy(struct vm_map *); d409 4 a412 3 /* * vm_map_lock_read: acquire a shared (read) lock on a map. */ d414 11 a424 3 static inline void vm_map_lock_read(struct vm_map *map) { d426 2 a427 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d429 1 a429 1 rw_enter(&map->lock, RW_READER); d432 2 a433 6 /* * vm_map_unlock_read: release a shared lock on a map. */ static inline void vm_map_unlock_read(struct vm_map *map) d435 1 d437 19 a455 1 KASSERT((map->flags & VM_MAP_INTRSAFE) == 0); d457 1 a457 1 rw_exit(&map->lock); a458 3 /* * vm_map_downgrade: downgrade an exclusive lock to a shared lock. */ d460 11 a470 3 static inline void vm_map_downgrade(struct vm_map *map) { d472 24 a495 2 rw_downgrade(&map->lock); } d497 18 a514 5 /* * vm_map_busy: mark a map as busy. 
* * => the caller must hold the map write locked */ d516 1 a516 9 static inline void vm_map_busy(struct vm_map *map) { KASSERT(rw_write_held(&map->lock)); KASSERT(map->busy == NULL); map->busy = curlwp; } @ 1.47.2.4 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.47.2.3 2007/09/03 14:47:08 yamt Exp $ */ d217 1 d363 1 d365 52 a416 4 void vm_map_lock_read(struct vm_map *); void vm_map_unlock_read(struct vm_map *); void vm_map_busy(struct vm_map *); bool vm_map_locked_p(struct vm_map *); @ 1.46 log @merge yamt-km branch. - don't use managed mappings/backing objects for wired memory allocations. save some resources like pv_entry. also fix (most of) PR/27030. - simplify kernel memory management API. - simplify pmap bootstrap of some ports. - some related cleanups. @ text @d1 1 a1 1 /* $NetBSD$ */ d373 3 @ 1.45 log @use vm_map_{min,max}() instead of dereferencing the vm_map pointer directly. define and use vm_map_set{min,max}() for modifying these values. remove the {min,max}_offset aliases for these vm_map fields to be more namespace-friendly. PR 26475. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.44 2005/01/13 11:50:32 yamt Exp $ */ d359 2 a360 1 struct vm_map_entry **, struct uvm_mapent_reservation *); @ 1.44 log @in uvm_unmap_remove, always wakeup va waiters if any. uvm_km_free_wakeup is now a synonym of uvm_km_free. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.43 2005/01/12 09:34:35 yamt Exp $ */ a232 3 #define min_offset header.end #define max_offset header.start d525 5 a529 2 #define vm_map_min(map) ((map)->min_offset) #define vm_map_max(map) ((map)->max_offset) @ 1.44.2.1 log @sync with -current @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.46 2005/04/01 11:59:39 yamt Exp $ */ d233 3 d362 1 a362 2 struct vm_map_entry **, struct uvm_mapent_reservation *, int); d528 2 a529 5 #define vm_map_min(map) ((map)->header.end) #define vm_map_max(map) ((map)->header.start) #define vm_map_setmin(map, v) ((map)->header.end = (v)) #define vm_map_setmax(map, v) ((map)->header.start = (v)) @ 1.44.4.1 log @- don't use uvm_object or managed mappings for wired allocations. (eg. malloc(9)) - simplify uvm_km_* apis. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.44 2005/01/13 11:50:32 yamt Exp $ */ d362 1 a362 2 struct vm_map_entry **, struct uvm_mapent_reservation *, int); @ 1.44.4.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD$ */ d233 3 d529 2 a530 5 #define vm_map_min(map) ((map)->header.end) #define vm_map_max(map) ((map)->header.start) #define vm_map_setmin(map, v) ((map)->header.end = (v)) #define vm_map_setmax(map, v) ((map)->header.start = (v)) @ 1.43 log @don't reserve (uvm_mapent_reserve) entries for malloc/pool backends because it isn't necessary or safe. reported and tested by Denis Lagno. PR/28897. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.42 2005/01/01 21:08:02 yamt Exp $ */ d264 1 @ 1.42 log @in the case of !PMAP_MAP_POOLPAGE, gather pool backend allocations to large chunks for kernel_map and kmem_map to ease kva fragmentation. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.41 2005/01/01 21:02:14 yamt Exp $ */ d357 2 a358 1 void uvm_unmap(struct vm_map *, vaddr_t, vaddr_t); @ 1.41 log @introduce vm_map_kernel, a subclass of vm_map, and move some kernel-only members of vm_map to it. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.40 2005/01/01 21:00:06 yamt Exp $ */ d113 1 d245 5 d263 1 @ 1.40 log @for in-kernel maps, - allocate kva for vm_map_entry from the map itsself and remove the static limit, MAX_KMAPENT. - keep merged entries for later splitting to fix allocate-to-free problem. 
PR/24039. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.39 2004/02/10 01:30:49 matt Exp $ */ a231 4 LIST_HEAD(, uvm_kmapent_hdr) kentry_free; /* Freelist of map entry */ struct vm_map_entry *merged_entries;/* Merged entries, kept for * later splitting */ d237 12 d342 5 @ 1.39 log @Back out the changes in http://mail-index.netbsd.org/source-changes/2004/01/29/0027.html since they don't really fix the problem. Incorpate one fix: Mark uvm_map_entry's that were created with UVM_FLAG_NOMERGE so that they will not be used as future merge candidates. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.37 2003/11/01 11:09:02 yamt Exp $ */ d89 2 a90 2 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \ if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA); } d99 2 a100 2 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \ if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA); } d145 5 a149 2 #define UVM_MAP_STATIC 0x01 /* static map entry */ #define UVM_MAP_KMEM 0x02 /* from kmem entry pool */ d232 5 d250 15 a264 1 /* XXX: number of kernel maps and entries to statically allocate */ d266 6 a271 7 #if !defined(MAX_KMAPENT) #if (50 + (2 * NPROC) > 1000) #define MAX_KMAPENT (50 + (2 * NPROC)) #else #define MAX_KMAPENT 1000 /* XXXCDC: no crash */ #endif #endif /* !defined MAX_KMAPENT */ d311 1 a311 1 vaddr_t); d313 1 a313 1 vaddr_t); d340 13 a352 1 struct vm_map_entry **); d500 1 @ 1.38 log @- split uvm_map() into two functions for the followings. - for in-kernel maps, disable map entry merging so that unmap operations won't block. (workaround for PR/24039) - for in-kernel maps, allocate kva for vm_map_entry from the map itsself and eliminate MAX_KMAPENT and uvm_map_entry_kmem_pool. @ text @d145 4 a148 3 #define UVM_MAP_KERNEL 0x01 /* kernel map entry */ #define UVM_MAP_KMAPENT 0x02 /* contains map entries */ #define UVM_MAP_FIRST 0x04 /* the first special entry */ a228 1 LIST_HEAD(, uvm_kmapent_hdr) kentry_free; /* freelist of map entry */ d242 1 a242 3 #ifdef _KERNEL struct uvm_map_args { struct vm_map_entry *uma_prev; d244 7 a250 9 vaddr_t uma_start; vsize_t uma_size; struct uvm_object *uma_uobj; voff_t uma_uoffset; uvm_flag_t uma_flags; }; #endif /* _KERNEL */ a320 6 int uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t, struct uvm_map_args *); int uvm_map_enter(struct vm_map *, const struct uvm_map_args *, struct vm_map_entry **); @ 1.37 log @track map entries and free spaces using red-black tree to improve scalability of operations on the map. originally done by Niels Provos for OpenBSD. tweaked for NetBSD by me with some advices from enami tsugutomo. discussed on tech-kern@@ and tech-perform@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.36 2003/10/01 22:50:15 enami Exp $ */ d145 3 a147 3 #define UVM_MAP_STATIC 0x01 /* static map entry */ #define UVM_MAP_KMEM 0x02 /* from kmem entry pool */ d228 1 d242 6 a247 1 /* XXX: number of kernel maps and entries to statically allocate */ d249 6 a254 7 #if !defined(MAX_KMAPENT) #if (50 + (2 * NPROC) > 1000) #define MAX_KMAPENT (50 + (2 * NPROC)) #else #define MAX_KMAPENT 1000 /* XXXCDC: no crash */ #endif #endif /* !defined MAX_KMAPENT */ d325 6 @ 1.36 log @ansi'fy. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.35 2003/09/10 13:38:20 enami Exp $ */ d112 2 d123 3 d216 1 @ 1.35 log @Swap where the vm map's max and min offset are stored so that they can be used during map traversal. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34 2003/02/20 22:16:08 atatat Exp $ */ d279 1 a279 1 void uvm_map_deallocate __P((struct vm_map *)); d281 5 a285 5 int uvm_map_clean __P((struct vm_map *, vaddr_t, vaddr_t, int)); void uvm_map_clip_start __P((struct vm_map *, struct vm_map_entry *, vaddr_t)); void uvm_map_clip_end __P((struct vm_map *, struct vm_map_entry *, vaddr_t)); d287 12 a298 11 struct vm_map *uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); int uvm_map_extract __P((struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, int)); struct vm_map_entry *uvm_map_findspace __P((struct vm_map *, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int)); int uvm_map_inherit __P((struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t)); int uvm_map_advice __P((struct vm_map *, vaddr_t, vaddr_t, int)); void uvm_map_init __P((void)); boolean_t uvm_map_lookup_entry __P((struct vm_map *, vaddr_t, struct vm_map_entry **)); d300 8 a307 8 void uvm_map_reference __P((struct vm_map *)); int uvm_map_replace __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry *, int)); int uvm_map_reserve __P((struct vm_map *, vsize_t, vaddr_t, vsize_t, vaddr_t *)); void uvm_map_setup __P((struct vm_map *, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map *)); d309 4 a312 4 void uvm_unmap __P((struct vm_map *, vaddr_t, vaddr_t)); void uvm_unmap_detach __P((struct vm_map_entry *,int)); void uvm_unmap_remove __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry **)); d350 2 a351 2 static __inline boolean_t vm_map_lock_try __P((struct vm_map *)); static __inline void vm_map_lock __P((struct vm_map *)); d355 1 a355 2 vm_map_lock_try(map) struct vm_map *map; d378 1 a378 2 vm_map_lock(map) struct vm_map *map; @ 1.34 log @Introduce "top down" memory management for mmap()ed allocations. This means that the dynamic linker gets mapped in at the top of available user virtual memory (typically just below the stack), shared libraries get mapped downwards from that point, and calls to mmap() that don't specify a preferred address will get mapped in below those. This means that the heap and the mmap()ed allocations will grow towards each other, allowing one or the other to grow larger than before. Previously, the heap was limited to MAXDSIZ by the placement of the dynamic linker (and the process's rlimits) and the space available to mmap was hobbled by this reservation. This is currently only enabled via an *option* for the i386 platform (though other platforms are expected to follow). Add "options USE_TOPDOWN_VM" to your kernel config file, rerun config, and rebuild your kernel to take advantage of this. Note that the pmap_prefer() interface has not yet been modified to play nicely with this, so those platforms require a bit more work (most notably the sparc) before they can use this new memory arrangement. This change also introduces a VM_DEFAULT_ADDRESS() macro that picks the appropriate default address based on the size of the allocation or the size of the process's text segment accordingly. Several drivers and the SYSV SHM address assignment were changed to use this instead of each one picking their own "default". 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.33 2002/11/02 07:40:49 perry Exp $ */ d222 2 a223 2 #define min_offset header.start #define max_offset header.end @ 1.34.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.39 2004/02/10 01:30:49 matt Exp $ */ a111 2 #include a120 3 RB_ENTRY(vm_map_entry) rb_entry; /* tree information */ vaddr_t ownspace; /* free space after */ vaddr_t space; /* space in subtree */ a141 1 #define UVM_MAP_NOMERGE 0x10 /* this entry is not mergable */ a210 1 RB_HEAD(uvm_tree, vm_map_entry) rbhead; /* Tree for entries */ d222 2 a223 2 #define min_offset header.end #define max_offset header.start d279 1 a279 1 void uvm_map_deallocate(struct vm_map *); d281 5 a285 5 int uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_clip_start(struct vm_map *, struct vm_map_entry *, vaddr_t); void uvm_map_clip_end(struct vm_map *, struct vm_map_entry *, vaddr_t); d287 11 a297 12 struct vm_map *uvm_map_create(pmap_t, vaddr_t, vaddr_t, int); int uvm_map_extract(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, int); struct vm_map_entry * uvm_map_findspace(struct vm_map *, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int); int uvm_map_inherit(struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t); int uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int); void uvm_map_init(void); boolean_t uvm_map_lookup_entry(struct vm_map *, vaddr_t, struct vm_map_entry **); d299 8 a306 8 void uvm_map_reference(struct vm_map *); int uvm_map_replace(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry *, int); int uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t, vaddr_t *); void uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int); int uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t, struct vm_map *); d308 4 a311 4 void uvm_unmap(struct vm_map *, vaddr_t, vaddr_t); void uvm_unmap_detach(struct vm_map_entry *,int); void uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry **); d349 2 a350 2 static __inline boolean_t vm_map_lock_try(struct vm_map *); static __inline void vm_map_lock(struct vm_map *); d354 2 a355 1 vm_map_lock_try(struct vm_map *map) d378 2 a379 1 vm_map_lock(struct vm_map *map) @ 1.34.2.2 log @Sync with HEAD. @ text @@ 1.34.2.3 log @Fix the sync with head I botched. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.1 2004/08/03 10:57:07 skrll Exp $ */ @ 1.34.2.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.3 2004/09/21 13:39:28 skrll Exp $ */ d89 2 a90 2 #define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \ if ((VA) > (ENTRY)->start) uvm_map_clip_start(MAP,ENTRY,VA,UMR); } d99 2 a100 2 #define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \ if ((VA) < (ENTRY)->end) uvm_map_clip_end(MAP,ENTRY,VA,UMR); } a112 1 #include d145 2 a146 5 #define UVM_MAP_KERNEL 0x01 /* kernel map entry */ #define UVM_MAP_KMAPENT 0x02 /* contains map entries */ #define UVM_MAP_FIRST 0x04 /* the first special entry */ #define UVM_MAP_QUANTUM 0x08 /* allocated with * UVM_FLAG_QUANTUM */ a228 1 a232 17 #if defined(_KERNEL) struct vm_map_kernel { struct vm_map vmk_map; LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free; /* Freelist of map entry */ struct vm_map_entry *vmk_merged_entries; /* Merged entries, kept for later splitting */ #if !defined(PMAP_MAP_POOLPAGE) struct pool vmk_vacache; /* kva cache */ struct pool_allocator vmk_vacache_allocator; /* ... 
and its allocator */ #endif }; #endif /* defined(_KERNEL) */ #define VM_MAP_IS_KERNEL(map) (vm_map_pmap(map) == pmap_kernel()) a240 2 #define VM_MAP_VACACHE 0x80 /* ro: use kva cache */ #define VM_MAP_WANTVA 0x100 /* rw: want va */ d242 1 a242 9 #ifdef _KERNEL struct uvm_mapent_reservation { struct vm_map_entry *umr_entries[2]; int umr_nentries; }; #define UMR_EMPTY(umr) ((umr) == NULL || (umr)->umr_nentries == 0) #define UMR_GETENTRY(umr) ((umr)->umr_entries[--(umr)->umr_nentries]) #define UMR_PUTENTRY(umr, ent) \ (umr)->umr_entries[(umr)->umr_nentries++] = (ent) d244 7 a250 12 struct uvm_map_args { struct vm_map_entry *uma_prev; vaddr_t uma_start; vsize_t uma_size; struct uvm_object *uma_uobj; voff_t uma_uoffset; uvm_flag_t uma_flags; }; #endif /* _KERNEL */ d290 1 a290 1 vaddr_t, struct uvm_mapent_reservation *); d292 1 a292 1 vaddr_t, struct uvm_mapent_reservation *); a312 5 void uvm_map_setup_kernel(struct vm_map_kernel *, vaddr_t, vaddr_t, int); MAP_INLINE struct vm_map_kernel * vm_map_to_kernel(struct vm_map *); d316 1 a316 2 void uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int); #define uvm_unmap(map, s, e) uvm_unmap1((map), (s), (e), 0) d319 1 a319 13 struct vm_map_entry **, struct uvm_mapent_reservation *); int uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t, struct uvm_map_args *); int uvm_map_enter(struct vm_map *, const struct uvm_map_args *, struct vm_map_entry *); int uvm_mapent_reserve(struct vm_map *, struct uvm_mapent_reservation *, int, int); void uvm_mapent_unreserve(struct vm_map *, struct uvm_mapent_reservation *); a466 1 @ 1.34.2.5 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.4 2005/01/17 19:33:11 skrll Exp $ */ d233 3 d528 2 a529 5 #define vm_map_min(map) ((map)->header.end) #define vm_map_max(map) ((map)->header.start) #define vm_map_setmin(map, v) ((map)->header.end = (v)) #define vm_map_setmax(map, v) ((map)->header.start = (v)) @ 1.34.2.6 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.5 2005/02/15 21:34:02 skrll Exp $ */ d359 1 a359 2 struct vm_map_entry **, struct uvm_mapent_reservation *, int); @ 1.34.2.7 log @Sync with HEAD. Here we go again... @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.34.2.6 2005/04/01 14:32:12 skrll Exp $ */ a372 3 int uvm_mapent_trymerge(struct vm_map *, struct vm_map_entry *, int); #define UVM_MERGE_COPYING 1 @ 1.33 log @/*CONTCOND*/ while (0)'ed macros @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.32 2002/09/22 07:21:31 chs Exp $ */ d198 9 d233 1 @ 1.32 log @add a new flag VM_MAP_DYING, which is set before we start tearing down a vm_map. use this to skip the pmap_update() at the end of all the removes, which allows pmaps to optimize pmap tear-down. also, use the new pmap_remove_all() hook to let the pmap implemenation know what we're up to. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.31 2001/10/03 13:32:23 christos Exp $ */ d241 1 a241 1 } while (0) d402 1 a402 1 } while (0) d414 1 a414 1 } while (0) d427 1 a427 1 } while (0) d438 1 a438 1 } while (0) d450 1 a450 1 } while (0) @ 1.31 log @protect against traditional macro expansion. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.30 2001/09/09 19:38:23 chs Exp $ */ d223 1 @ 1.30 log @create a new pool for map entries, allocated from kmem_map instead of kernel_map. use this instead of the static map entries when allocating map entries for kernel_map. this greatly reduces the number of static map entries used and should eliminate the problems with running out. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.29 2001/06/26 17:55:15 thorpej Exp $ */ d398 2 a399 2 if (map->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe map"); \ @ 1.29 log @Reduce some complexity in the fault path -- Rather than maintaining an spl-protected "interrupt safe map" list, simply require that callers of uvm_fault() never call us in interrupt context (MD code must make the assertion), and check for interrupt-safe maps in uvmfault_lookup() before we lock the map. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.28 2001/06/02 18:09:27 chs Exp $ */ d141 1 @ 1.29.4.1 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.30 2001/09/09 19:38:23 chs Exp $ */ a140 1 #define UVM_MAP_KMEM 0x02 /* from kmem entry pool */ @ 1.29.4.2 log @Catch up with -current. Fix some bogons in the sparc64 kbd/ms attach code. cd18xx conversion provided by mrg. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.29.4.1 2001/10/01 12:48:43 fvdl Exp $ */ d398 2 a399 2 if ((map)->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe Map"); \ @ 1.29.2.1 log @Update the kqueue branch to HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.30 2001/09/09 19:38:23 chs Exp $ */ a140 1 #define UVM_MAP_KMEM 0x02 /* from kmem entry pool */ @ 1.29.2.2 log @Sync kqueue branch with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.29.2.1 2001/09/13 01:16:33 thorpej Exp $ */ d398 2 a399 2 if ((map)->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe Map"); \ @ 1.29.2.3 log @sync kqueue with -current; this includes merge of gehenna-devsw branch, merge of i386 MP branch, and part of autoconf rototil work @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.29.2.2 2002/01/10 20:05:40 thorpej Exp $ */ a222 1 #define VM_MAP_DYING 0x20 /* rw: map is being destroyed */ @ 1.28 log @replace vm_map{,_entry}_t with struct vm_map{,_entry} *. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.27 2001/05/26 16:32:47 chs Exp $ */ a239 37 #endif /* _KERNEL */ /* * Interrupt-safe maps must also be kept on a special list, * to assist uvm_fault() in avoiding locking problems. */ struct vm_map_intrsafe { struct vm_map vmi_map; LIST_ENTRY(vm_map_intrsafe) vmi_list; }; LIST_HEAD(vmi_list, vm_map_intrsafe); #ifdef _KERNEL extern struct simplelock vmi_list_slock; extern struct vmi_list vmi_list; static __inline int vmi_list_lock __P((void)); static __inline void vmi_list_unlock __P((int)); static __inline int vmi_list_lock() { int s; s = splhigh(); simple_lock(&vmi_list_slock); return (s); } static __inline void vmi_list_unlock(s) int s; { simple_unlock(&vmi_list_slock); splx(s); } @ 1.27 log @replace {simple_,}lock{_data,}_t with struct {simple,}lock {,*}. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.26 2001/05/25 04:06:15 chs Exp $ */ a114 20 * types defined: * * vm_map_t the high-level address map data structure. * vm_map_entry_t an entry in an address map. * vm_map_version_t a timestamp of a map, for use with vm_map_lookup */ /* * Objects which live in maps may be either VM objects, or another map * (called a "sharing map") which denotes read-write sharing with other maps. 
* * XXXCDC: private pager data goes here now */ union vm_map_object { struct uvm_object *uvm_obj; /* UVM OBJECT */ struct vm_map *sub_map; /* belongs to another map */ }; /* d125 4 a128 1 union vm_map_object object; /* object I point to */ d206 1 a206 1 vm_map_entry_t hint; /* hint for quick lookups */ d208 1 a208 1 vm_map_entry_t first_free; /* First free space hint */ d304 1 a304 1 void uvm_map_deallocate __P((vm_map_t)); d306 5 a310 3 int uvm_map_clean __P((vm_map_t, vaddr_t, vaddr_t, int)); void uvm_map_clip_start __P((vm_map_t, vm_map_entry_t, vaddr_t)); void uvm_map_clip_end __P((vm_map_t, vm_map_entry_t, vaddr_t)); d312 8 a319 7 vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); int uvm_map_extract __P((vm_map_t, vaddr_t, vsize_t, vm_map_t, vaddr_t *, int)); vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int)); int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, vm_inherit_t)); int uvm_map_advice __P((vm_map_t, vaddr_t, vaddr_t, int)); d321 2 a322 1 boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, vm_map_entry_t *)); d324 8 a331 7 void uvm_map_reference __P((vm_map_t)); int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t, int)); int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vsize_t, vaddr_t *)); void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); d333 4 a336 4 void uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); void uvm_unmap_detach __P((vm_map_entry_t,int)); void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t *)); d374 2 a375 2 static __inline boolean_t vm_map_lock_try __P((vm_map_t)); static __inline void vm_map_lock __P((vm_map_t)); d380 1 a380 1 vm_map_t map; d404 1 a404 1 vm_map_t map; @ 1.26 log @remove trailing whitespace. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.25 2001/03/15 06:10:57 chs Exp $ */ d217 1 a217 1 lock_data_t lock; /* Lock for map data */ d222 1 a222 1 simple_lock_data_t ref_lock; /* Lock for ref_count field */ d224 1 a224 1 simple_lock_data_t hint_lock; /* lock for hint storage */ d227 1 a227 1 simple_lock_data_t flags_lock; /* Lock for flags field */ d270 1 a270 1 extern simple_lock_data_t vmi_list_slock; @ 1.25 log @eliminate the KERN_* error codes in favor of the traditional E* codes. the mapping is: KERN_SUCCESS 0 KERN_INVALID_ADDRESS EFAULT KERN_PROTECTION_FAILURE EACCES KERN_NO_SPACE ENOMEM KERN_INVALID_ARGUMENT EINVAL KERN_FAILURE various, mostly turn into KASSERTs KERN_RESOURCE_SHORTAGE ENOMEM KERN_NOT_RECEIVER KERN_NO_ACCESS KERN_PAGES_LOCKED @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */ d3 1 a3 1 /* d5 1 a5 1 * Copyright (c) 1991, 1993, The Regents of the University of California. d23 1 a23 1 * Washington University, the University of California, Berkeley and d47 1 a47 1 * d53 3 a55 3 * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND d57 1 a57 1 * d85 1 a85 1 * d302 1 a302 1 #else d328 1 a328 1 int uvm_map_extract __P((vm_map_t, vaddr_t, vsize_t, @ 1.24 log @clean up DIAGNOSTIC checks, use KASSERT(). @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.23 2000/12/13 08:06:12 enami Exp $ */ d345 1 a345 1 int uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); d347 1 a347 1 int uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, @ 1.24.2.1 log @Initial commit of scheduler activations and lightweight process support. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */ a382 1 #include @ 1.24.2.2 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.25 2001/03/15 06:10:57 chs Exp $ */ d345 1 a345 1 void uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); d347 1 a347 1 void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, @ 1.24.2.3 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24.2.2 2001/04/09 01:59:18 nathanw Exp $ */ d3 1 a3 1 /* d5 1 a5 1 * Copyright (c) 1991, 1993, The Regents of the University of California. d23 1 a23 1 * Washington University, the University of California, Berkeley and d47 1 a47 1 * d53 3 a55 3 * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND d57 1 a57 1 * d85 1 a85 1 * d115 20 d145 1 a145 4 union { struct uvm_object *uvm_obj; /* uvm object */ struct vm_map *sub_map; /* belongs to another map */ } object; /* object I point to */ d217 1 a217 1 struct lock lock; /* Lock for map data */ d222 4 a225 4 struct simplelock ref_lock; /* Lock for ref_count field */ struct vm_map_entry * hint; /* hint for quick lookups */ struct simplelock hint_lock; /* lock for hint storage */ struct vm_map_entry * first_free; /* First free space hint */ d227 1 a227 1 struct simplelock flags_lock; /* Lock for flags field */ d270 1 a270 1 extern struct simplelock vmi_list_slock; d302 1 a302 1 #else d321 1 a321 1 void uvm_map_deallocate __P((struct vm_map *)); d323 3 a325 5 int uvm_map_clean __P((struct vm_map *, vaddr_t, vaddr_t, int)); void uvm_map_clip_start __P((struct vm_map *, struct vm_map_entry *, vaddr_t)); void uvm_map_clip_end __P((struct vm_map *, struct vm_map_entry *, vaddr_t)); d327 7 a333 8 struct vm_map *uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); int uvm_map_extract __P((struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, int)); struct vm_map_entry *uvm_map_findspace __P((struct vm_map *, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int)); int uvm_map_inherit __P((struct vm_map *, vaddr_t, vaddr_t, vm_inherit_t)); int uvm_map_advice __P((struct vm_map *, vaddr_t, vaddr_t, int)); d335 1 a335 2 boolean_t uvm_map_lookup_entry __P((struct vm_map *, vaddr_t, struct vm_map_entry **)); d337 7 a343 8 void uvm_map_reference __P((struct vm_map *)); int uvm_map_replace __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry *, int)); int uvm_map_reserve __P((struct vm_map *, vsize_t, vaddr_t, vsize_t, vaddr_t *)); void uvm_map_setup __P((struct vm_map *, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map *)); d345 4 a348 4 void uvm_unmap __P((struct vm_map *, vaddr_t, vaddr_t)); void uvm_unmap_detach __P((struct vm_map_entry *,int)); void uvm_unmap_remove __P((struct vm_map *, vaddr_t, vaddr_t, struct vm_map_entry **)); d387 2 a388 2 static __inline boolean_t vm_map_lock_try __P((struct vm_map *)); static __inline void vm_map_lock __P((struct vm_map *)); d393 1 a393 1 struct vm_map *map; d417 1 a417 1 struct vm_map *map; @ 1.24.2.4 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24.2.3 2001/06/21 20:10:35 nathanw Exp $ */ d240 37 @ 1.24.2.5 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24.2.4 2001/08/24 00:13:39 nathanw Exp $ */ a140 1 #define UVM_MAP_KMEM 0x02 /* from kmem entry pool */ @ 1.24.2.6 log @Catch up to -current. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24.2.5 2001/09/21 22:37:15 nathanw Exp $ */ d399 2 a400 2 if ((map)->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe Map"); \ @ 1.24.2.7 log @No longer need to pull in lwp.h; proc.h pulls it in for us. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.24.2.6 2001/10/08 20:11:56 nathanw Exp $ */ d335 1 @ 1.24.2.8 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD$ */ a222 1 #define VM_MAP_DYING 0x20 /* rw: map is being destroyed */ @ 1.24.2.9 log @Catch up to -current @ text @d241 1 a241 1 } while (/*CONSTCOND*/ 0) d402 1 a402 1 } while (/*CONSTCOND*/ 0) d414 1 a414 1 } while (/*CONSTCOND*/ 0) d427 1 a427 1 } while (/*CONSTCOND*/ 0) d438 1 a438 1 } while (/*CONSTCOND*/ 0) d450 1 a450 1 } while (/*CONSTCOND*/ 0) @ 1.23 log @Use single const char array instead of over 200 string constant. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.22 2000/09/13 15:00:25 thorpej Exp $ */ d436 1 a436 4 #ifdef DIAGNOSTIC if (error != ENOLCK) panic("vm_map_lock: failed to get lock"); #endif @ 1.22 log @Add an align argument to uvm_map() and some callers of that routine. Works similarly fto pmap_prefer(), but allows callers to specify a minimum power-of-two alignment of the region. How we ever got along without this for so long is beyond me. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.21 2000/08/16 16:32:06 thorpej Exp $ */ d388 1 d429 1 a429 1 ltsleep(&map->flags, PVM, "vmmapbsy", 0, &map->flags_lock); @ 1.21 log @Garbage-collect a constant that nothing uses. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.20 2000/08/12 17:44:02 sommerfeld Exp $ */ d331 1 a331 1 struct uvm_object *, voff_t, boolean_t)); d340 2 a341 1 int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vaddr_t *)); @ 1.20 log @Use ltsleep in a loop instead of simple_unlock/tsleep/goto try_again @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.19 2000/06/26 17:18:40 mrg Exp $ */ a240 1 #define MAX_KMAP 10 @ 1.19 log @restore a dropped #ifdef _KERNEL @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.18 2000/06/26 15:32:28 mrg Exp $ */ d426 1 a426 1 if (map->flags & VM_MAP_BUSY) { d428 1 a428 3 simple_unlock(&map->flags_lock); (void) tsleep(&map->flags, PVM, "vmmapbsy", 0); goto try_again; @ 1.18 log @ gets merged into @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.17 2000/03/29 04:05:47 simonb Exp $ */ d110 2 d310 2 @ 1.17 log @Remove redundant decl for uvmspace_fork() - it's in . @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.16 2000/03/26 20:54:47 kleink Exp $ */ d110 184 d347 159 @ 1.16 log @Merge parts of chs-ubc2 into the trunk: Add a new type voff_t (defined as a synonym for off_t) to describe offsets into uvm objects, and update the appropriate interfaces to use it, the most visible effect being the ability to mmap() file offsets beyond the range of a vaddr_t. Originally by Chuck Silvers; blame me for problems caused by merging this into non-UBC. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.15 1999/06/21 17:25:11 thorpej Exp $ */ a160 2 struct vmspace *uvmspace_fork __P((struct vmspace *)); @ 1.15 log @Protect prototypes, certain macros, and inlines from userland. 
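The 1.15 log above describes fencing kernel-only prototypes, macros and inlines off from userland. A minimal sketch of that guard pattern, with a hypothetical prototype standing in for the real ones:

struct vm_map;			/* forward declaration for the sketch */

#ifdef _KERNEL
/* visible only when the kernel itself is being built; userland skips this */
void	example_kernel_only(struct vm_map *);
#endif /* _KERNEL */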
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.14 1999/05/26 19:16:36 thorpej Exp $ */ d137 2 a138 4 void uvm_map_clip_start __P((vm_map_t, vm_map_entry_t, vaddr_t)); void uvm_map_clip_end __P((vm_map_t, vm_map_entry_t, vaddr_t)); d140 1 a140 2 vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); d143 3 a145 5 vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, vaddr_t, boolean_t)); int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, vm_inherit_t)); d148 1 a148 2 boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, vm_map_entry_t *)); d151 1 a151 1 int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t, d153 3 a155 6 int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vaddr_t *)); void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); @ 1.15.2.1 log @Update thorpej_scsipi to -current as of a month ago @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.22 2000/09/13 15:00:25 thorpej Exp $ */ a109 185 #endif /* _KERNEL */ #include /* * types defined: * * vm_map_t the high-level address map data structure. * vm_map_entry_t an entry in an address map. * vm_map_version_t a timestamp of a map, for use with vm_map_lookup */ /* * Objects which live in maps may be either VM objects, or another map * (called a "sharing map") which denotes read-write sharing with other maps. * * XXXCDC: private pager data goes here now */ union vm_map_object { struct uvm_object *uvm_obj; /* UVM OBJECT */ struct vm_map *sub_map; /* belongs to another map */ }; /* * Address map entries consist of start and end addresses, * a VM object (or sharing map) and offset into that object, * and user-exported inheritance and protection information. * Also included is control information for virtual copy operations. */ struct vm_map_entry { struct vm_map_entry *prev; /* previous entry */ struct vm_map_entry *next; /* next entry */ vaddr_t start; /* start address */ vaddr_t end; /* end address */ union vm_map_object object; /* object I point to */ voff_t offset; /* offset into object */ int etype; /* entry type */ vm_prot_t protection; /* protection code */ vm_prot_t max_protection; /* maximum protection */ vm_inherit_t inheritance; /* inheritance */ int wired_count; /* can be paged if == 0 */ struct vm_aref aref; /* anonymous overlay */ int advice; /* madvise advice */ #define uvm_map_entry_stop_copy flags u_int8_t flags; /* flags */ #define UVM_MAP_STATIC 0x01 /* static map entry */ }; #define VM_MAPENT_ISWIRED(entry) ((entry)->wired_count != 0) /* * Maps are doubly-linked lists of map entries, kept sorted * by address. A single hint is provided to start * searches again from the last successful search, * insertion, or removal. * * LOCKING PROTOCOL NOTES: * ----------------------- * * VM map locking is a little complicated. There are both shared * and exclusive locks on maps. However, it is sometimes required * to downgrade an exclusive lock to a shared lock, and upgrade to * an exclusive lock again (to perform error recovery). However, * another thread *must not* queue itself to receive an exclusive * lock while before we upgrade back to exclusive, otherwise the * error recovery becomes extremely difficult, if not impossible. * * In order to prevent this scenario, we introduce the notion of * a `busy' map. A `busy' map is read-locked, but other threads * attempting to write-lock wait for this flag to clear before * entering the lock manager. 
A map may only be marked busy * when the map is write-locked (and then the map must be downgraded * to read-locked), and may only be marked unbusy by the thread * which marked it busy (holding *either* a read-lock or a * write-lock, the latter being gained by an upgrade). * * Access to the map `flags' member is controlled by the `flags_lock' * simple lock. Note that some flags are static (set once at map * creation time, and never changed), and thus require no locking * to check those flags. All flags which are r/w must be set or * cleared while the `flags_lock' is asserted. Additional locking * requirements are: * * VM_MAP_PAGEABLE r/o static flag; no locking required * * VM_MAP_INTRSAFE r/o static flag; no locking required * * VM_MAP_WIREFUTURE r/w; may only be set or cleared when * map is write-locked. may be tested * without asserting `flags_lock'. * * VM_MAP_BUSY r/w; may only be set when map is * write-locked, may only be cleared by * thread which set it, map read-locked * or write-locked. must be tested * while `flags_lock' is asserted. * * VM_MAP_WANTLOCK r/w; may only be set when the map * is busy, and thread is attempting * to write-lock. must be tested * while `flags_lock' is asserted. */ struct vm_map { struct pmap * pmap; /* Physical map */ lock_data_t lock; /* Lock for map data */ struct vm_map_entry header; /* List of entries */ int nentries; /* Number of entries */ vsize_t size; /* virtual size */ int ref_count; /* Reference count */ simple_lock_data_t ref_lock; /* Lock for ref_count field */ vm_map_entry_t hint; /* hint for quick lookups */ simple_lock_data_t hint_lock; /* lock for hint storage */ vm_map_entry_t first_free; /* First free space hint */ int flags; /* flags */ simple_lock_data_t flags_lock; /* Lock for flags field */ unsigned int timestamp; /* Version number */ #define min_offset header.start #define max_offset header.end }; /* vm_map flags */ #define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */ #define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */ #define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */ #define VM_MAP_BUSY 0x08 /* rw: map is busy */ #define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */ /* XXX: number of kernel maps and entries to statically allocate */ #if !defined(MAX_KMAPENT) #if (50 + (2 * NPROC) > 1000) #define MAX_KMAPENT (50 + (2 * NPROC)) #else #define MAX_KMAPENT 1000 /* XXXCDC: no crash */ #endif #endif /* !defined MAX_KMAPENT */ #ifdef _KERNEL #define vm_map_modflags(map, set, clear) \ do { \ simple_lock(&(map)->flags_lock); \ (map)->flags = ((map)->flags | (set)) & ~(clear); \ simple_unlock(&(map)->flags_lock); \ } while (0) #endif /* _KERNEL */ /* * Interrupt-safe maps must also be kept on a special list, * to assist uvm_fault() in avoiding locking problems. 
*/ struct vm_map_intrsafe { struct vm_map vmi_map; LIST_ENTRY(vm_map_intrsafe) vmi_list; }; LIST_HEAD(vmi_list, vm_map_intrsafe); #ifdef _KERNEL extern simple_lock_data_t vmi_list_slock; extern struct vmi_list vmi_list; static __inline int vmi_list_lock __P((void)); static __inline void vmi_list_unlock __P((int)); static __inline int vmi_list_lock() { int s; s = splhigh(); simple_lock(&vmi_list_slock); return (s); } static __inline void vmi_list_unlock(s) int s; { simple_unlock(&vmi_list_slock); splx(s); } #endif /* _KERNEL */ a124 2 #ifdef _KERNEL d137 4 a140 2 void uvm_map_clip_start __P((vm_map_t, vm_map_entry_t, vaddr_t)); void uvm_map_clip_end __P((vm_map_t, vm_map_entry_t, vaddr_t)); d142 2 a143 1 vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); d146 5 a150 3 vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, vsize_t, int)); int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, vm_inherit_t)); d153 2 a154 1 boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, vm_map_entry_t *)); d157 1 a157 1 int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t, d159 1 a159 1 int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vsize_t, d161 4 a164 2 void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); d171 1 a171 91 #endif /* _KERNEL */ /* * VM map locking operations: * * These operations perform locking on the data portion of the * map. * * vm_map_lock_try: try to lock a map, failing if it is already locked. * * vm_map_lock: acquire an exclusive (write) lock on a map. * * vm_map_lock_read: acquire a shared (read) lock on a map. * * vm_map_unlock: release an exclusive lock on a map. * * vm_map_unlock_read: release a shared lock on a map. * * vm_map_downgrade: downgrade an exclusive lock to a shared lock. * * vm_map_upgrade: upgrade a shared lock to an exclusive lock. * * vm_map_busy: mark a map as busy. * * vm_map_unbusy: clear busy status on a map. * * Note that "intrsafe" maps use only exclusive, spin locks. We simply * use the sleep lock's interlock for this. 
*/ #ifdef _KERNEL /* XXX: clean up later */ #include #include /* for tsleep(), wakeup() */ #include /* for panic() */ static __inline boolean_t vm_map_lock_try __P((vm_map_t)); static __inline void vm_map_lock __P((vm_map_t)); static __inline boolean_t vm_map_lock_try(map) vm_map_t map; { boolean_t rv; if (map->flags & VM_MAP_INTRSAFE) rv = simple_lock_try(&map->lock.lk_interlock); else { simple_lock(&map->flags_lock); if (map->flags & VM_MAP_BUSY) { simple_unlock(&map->flags_lock); return (FALSE); } rv = (lockmgr(&map->lock, LK_EXCLUSIVE|LK_NOWAIT|LK_INTERLOCK, &map->flags_lock) == 0); } if (rv) map->timestamp++; return (rv); } static __inline void vm_map_lock(map) vm_map_t map; { int error; if (map->flags & VM_MAP_INTRSAFE) { simple_lock(&map->lock.lk_interlock); return; } try_again: simple_lock(&map->flags_lock); while (map->flags & VM_MAP_BUSY) { map->flags |= VM_MAP_WANTLOCK; ltsleep(&map->flags, PVM, "vmmapbsy", 0, &map->flags_lock); } error = lockmgr(&map->lock, LK_EXCLUSIVE|LK_SLEEPFAIL|LK_INTERLOCK, &map->flags_lock); if (error) { #ifdef DIAGNOSTIC if (error != ENOLCK) panic("vm_map_lock: failed to get lock"); #endif goto try_again; } a172 58 (map)->timestamp++; } #ifdef DIAGNOSTIC #define vm_map_lock_read(map) \ do { \ if (map->flags & VM_MAP_INTRSAFE) \ panic("vm_map_lock_read: intrsafe map"); \ (void) lockmgr(&(map)->lock, LK_SHARED, NULL); \ } while (0) #else #define vm_map_lock_read(map) \ (void) lockmgr(&(map)->lock, LK_SHARED, NULL) #endif #define vm_map_unlock(map) \ do { \ if ((map)->flags & VM_MAP_INTRSAFE) \ simple_unlock(&(map)->lock.lk_interlock); \ else \ (void) lockmgr(&(map)->lock, LK_RELEASE, NULL); \ } while (0) #define vm_map_unlock_read(map) \ (void) lockmgr(&(map)->lock, LK_RELEASE, NULL) #define vm_map_downgrade(map) \ (void) lockmgr(&(map)->lock, LK_DOWNGRADE, NULL) #ifdef DIAGNOSTIC #define vm_map_upgrade(map) \ do { \ if (lockmgr(&(map)->lock, LK_UPGRADE, NULL) != 0) \ panic("vm_map_upgrade: failed to upgrade lock"); \ } while (0) #else #define vm_map_upgrade(map) \ (void) lockmgr(&(map)->lock, LK_UPGRADE, NULL) #endif #define vm_map_busy(map) \ do { \ simple_lock(&(map)->flags_lock); \ (map)->flags |= VM_MAP_BUSY; \ simple_unlock(&(map)->flags_lock); \ } while (0) #define vm_map_unbusy(map) \ do { \ int oflags; \ \ simple_lock(&(map)->flags_lock); \ oflags = (map)->flags; \ (map)->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK); \ simple_unlock(&(map)->flags_lock); \ if (oflags & VM_MAP_WANTLOCK) \ wakeup(&(map)->flags); \ } while (0) a173 7 /* * Functions implemented as macros */ #define vm_map_min(map) ((map)->min_offset) #define vm_map_max(map) ((map)->max_offset) #define vm_map_pmap(map) ((map)->pmap) @ 1.15.2.2 log @Sync with HEAD (for UBC fixes). @ text @d1 1 a1 1 /* $NetBSD$ */ a387 1 extern const char vmmapbsy[]; d428 1 a428 1 ltsleep(&map->flags, PVM, vmmapbsy, 0, &map->flags_lock); @ 1.15.2.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.15.2.2 2000/12/13 15:50:44 bouyer Exp $ */ d436 4 a439 1 KASSERT(error == ENOLCK); @ 1.15.2.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ d345 1 a345 1 void uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t)); d347 1 a347 1 void uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, @ 1.14 log @Change the vm_map's "entries_pageable" member to a r/o flags member, which has PAGEABLE and INTRSAFE flags. PAGEABLE now really means "pageable", not "allocate vm_map_entry's from non-static pool", so update all map creations to reflect that. INTRSAFE maps are maps that are used in interrupt context (e.g. 
kmem_map, mb_map), and thus use the static map entry pool (XXX as does kernel_map, for now). This will eventually change now these maps are locked, as well. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.13 1999/05/23 06:27:13 mrg Exp $ */ d76 2 d172 2 @ 1.13 log @implement madvice() for MADV_{NORMAL,RANDOM,SEQUENTIAL}, others are not yet done. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.12 1999/05/20 23:03:23 thorpej Exp $ */ d141 1 a141 1 vaddr_t, boolean_t)); d160 1 a160 1 vaddr_t, boolean_t)); @ 1.12 log @Make a slight modification of pmap_growkernel() -- it now returns the end of the mappable kernel virtual address space. Previously, it would get called more often than necessary, because the caller only new what was requested. Also, export uvm_maxkaddr so that uvm_pageboot_alloc() can grow the kernel pmap if necessary, as well. Note that pmap_growkernel() must now be able to handle being called before pmap_init(). @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.11 1999/03/25 18:48:52 mrg Exp $ */ d149 1 @ 1.11 log @remove now >1 year old pre-release message. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.10 1998/10/11 23:14:48 chuck Exp $ */ d118 8 @ 1.11.6.1 log @bring in latest KAME (as of 19991130, KAME/NetBSD141) into kame branch just for reference purposes. This commit includes 1.4 -> 1.4.1 sync for kame branch. The branch does not compile at all (due to the lack of ALTQ and some other source code). Please do not try to modify the branch, this is just for referenre purposes. synchronization to latest KAME will take place on HEAD branch soon. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.11 1999/03/25 18:48:52 mrg Exp $ */ a117 8 /* * globals: */ #ifdef PMAP_GROWKERNEL extern vaddr_t uvm_maxkaddr; #endif @ 1.11.4.1 log @Sync w/ -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.14 1999/05/26 19:16:36 thorpej Exp $ */ a119 8 * globals: */ #ifdef PMAP_GROWKERNEL extern vaddr_t uvm_maxkaddr; #endif /* d133 1 a133 1 vaddr_t, int)); a140 1 int uvm_map_advice __P((vm_map_t, vaddr_t, vaddr_t, int)); d151 1 a151 1 vaddr_t, int)); @ 1.11.4.2 log @Sync w/ -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.11.4.1 1999/06/21 01:47:21 thorpej Exp $ */ a75 2 #ifdef _KERNEL a169 2 #endif /* _KERNEL */ @ 1.11.4.3 log @create a new type "voff_t" for uvm_object offsets and define it to be "off_t". also, remove pgo_asyncget(). 
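The branch log just above introduces voff_t as a synonym for off_t so that uvm object offsets are no longer limited to the width of a virtual address. A stand-alone sketch of the type and of a hypothetical prototype using it (the function name is illustrative only):

#include <sys/types.h>		/* off_t, size_t */

typedef off_t voff_t;		/* offset into a uvm object; may exceed vaddr_t */

struct uvm_object;		/* detail not needed for the sketch */

/* hypothetical interface taking the wider object offset */
int	example_object_read(struct uvm_object *uobj, voff_t off, size_t len);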
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.11.4.2 1999/07/01 23:55:16 thorpej Exp $ */ d137 4 a140 2 void uvm_map_clip_start __P((vm_map_t, vm_map_entry_t, vaddr_t)); void uvm_map_clip_end __P((vm_map_t, vm_map_entry_t, vaddr_t)); d142 2 a143 1 vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, int)); d146 5 a150 3 vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, voff_t, boolean_t)); int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, vm_inherit_t)); d153 2 a154 1 boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, vm_map_entry_t *)); d157 1 a157 1 int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t, d159 6 a164 3 int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vaddr_t *)); void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, int)); int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); @ 1.11.2.1 log @pullup 1.11->1.12 (thorpej): fix the 1G RAM bug @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.12 1999/05/20 23:03:23 thorpej Exp $ */ a117 8 /* * globals: */ #ifdef PMAP_GROWKERNEL extern vaddr_t uvm_maxkaddr; #endif @ 1.10 log @remove unused share map code from UVM: - replace map checks with submap checks - get rid of unused 'mainonly' arg in uvm_unmap/uvm_unmap_remove, simplify code. update all calls to reflect this. - don't worry about unmapping or changing the protection of shared share map mappings (is_main_map no longer used). - remove unused uvm_map_sharemapcopy() function from fork code. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.9 1998/08/31 01:54:14 thorpej Exp $ */ a2 4 /* * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE! * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<< */ @ 1.9 log @Back out previous; I should have instrumented the benefit of this one first. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.7 1998/08/13 02:11:01 eeh Exp $ */ a155 2 void uvm_map_sharemapcopy __P((vm_map_t, vm_map_entry_t, vm_map_t)); d159 1 a159 1 int uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t, boolean_t)); d162 1 a162 1 boolean_t, vm_map_entry_t *)); @ 1.8 log @Use the pool allocator and the "nointr" pool page allocator for vm_map's. @ text @a111 5 /* * pool for vm_map's; needed by inline functions in uvm_map_i.h */ extern struct pool uvm_map_pool; @ 1.7 log @Merge paddr_t changes into the main branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.6.2.1 1998/07/30 14:04:13 eeh Exp $ */ d112 5 @ 1.6 log @- add defopt's for UVM, UVMHIST and PMAP_NEW. - remove unnecessary UVMHIST_DECL's. 
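The 1.8 log above moves vm_map allocation onto the kernel pool allocator, and its delta adds the extern declaration of uvm_map_pool. A rough sketch of how a map might be taken from and returned to such a pool with pool_get()/pool_put(); the helper names are invented and pool initialisation is omitted:

#include <sys/pool.h>

struct vm_map;				/* defined elsewhere in this header */
extern struct pool uvm_map_pool;	/* the pool named in the 1.8 delta */

/* hypothetical allocation helpers built on pool_get()/pool_put() */
static struct vm_map *
example_map_alloc(void)
{

	return pool_get(&uvm_map_pool, PR_WAITOK);
}

static void
example_map_free(struct vm_map *map)
{

	pool_put(&uvm_map_pool, map);
}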
@ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.5 1998/02/10 02:34:46 perry Exp $ */ d130 1 a130 1 int uvm_map_clean __P((vm_map_t, vm_offset_t, vm_offset_t, int)); d132 1 a132 1 vm_map_entry_t, vm_offset_t)); d134 1 a134 1 vm_offset_t)); d136 6 a141 6 vm_map_t uvm_map_create __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t)); int uvm_map_extract __P((vm_map_t, vm_offset_t, vm_size_t, vm_map_t, vm_offset_t *, int)); vm_map_entry_t uvm_map_findspace __P((vm_map_t, vm_offset_t, vm_size_t, vm_offset_t *, struct uvm_object *, vm_offset_t, d143 1 a143 1 int uvm_map_inherit __P((vm_map_t, vm_offset_t, vm_offset_t, d146 1 a146 1 boolean_t uvm_map_lookup_entry __P((vm_map_t, vm_offset_t, d150 1 a150 1 int uvm_map_replace __P((vm_map_t, vm_offset_t, vm_offset_t, d152 4 a155 4 int uvm_map_reserve __P((vm_map_t, vm_size_t, vm_offset_t, vm_offset_t *)); void uvm_map_setup __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t)); d158 2 a159 2 int uvm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t)); d161 1 a161 1 int uvm_unmap __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t)); d163 1 a163 1 int uvm_unmap_remove __P((vm_map_t, vm_offset_t, vm_offset_t, @ 1.6.2.1 log @Split vm_offset_t and vm_size_t into paddr_t, psize_t, vaddr_t, and vsize_t. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.6 1998/02/10 14:12:20 mrg Exp $ */ d130 1 a130 1 int uvm_map_clean __P((vm_map_t, vaddr_t, vaddr_t, int)); d132 1 a132 1 vm_map_entry_t, vaddr_t)); d134 1 a134 1 vaddr_t)); d136 6 a141 6 vm_map_t uvm_map_create __P((pmap_t, vaddr_t, vaddr_t, boolean_t)); int uvm_map_extract __P((vm_map_t, vaddr_t, vsize_t, vm_map_t, vaddr_t *, int)); vm_map_entry_t uvm_map_findspace __P((vm_map_t, vaddr_t, vsize_t, vaddr_t *, struct uvm_object *, vaddr_t, d143 1 a143 1 int uvm_map_inherit __P((vm_map_t, vaddr_t, vaddr_t, d146 1 a146 1 boolean_t uvm_map_lookup_entry __P((vm_map_t, vaddr_t, d150 1 a150 1 int uvm_map_replace __P((vm_map_t, vaddr_t, vaddr_t, d152 4 a155 4 int uvm_map_reserve __P((vm_map_t, vsize_t, vaddr_t, vaddr_t *)); void uvm_map_setup __P((vm_map_t, vaddr_t, vaddr_t, boolean_t)); d158 2 a159 2 int uvm_map_submap __P((vm_map_t, vaddr_t, vaddr_t, vm_map_t)); d161 1 a161 1 int uvm_unmap __P((vm_map_t, vaddr_t, vaddr_t, boolean_t)); d163 1 a163 1 int uvm_unmap_remove __P((vm_map_t, vaddr_t, vaddr_t, @ 1.5 log @add/cleanup multiple inclusion protection. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.4 1998/02/07 11:09:01 mrg Exp $ */ a78 2 UVMHIST_DECL(maphist); @ 1.4 log @restore rcsids @ text @d1 1 a1 1 /* $NetBSD$ */ d73 3 d169 2 @ 1.3 log @prototype for uvm_map_checkprot() moved to uvm_extern.h. @ text @d1 1 a1 1 /* $NetBSD: uvm_map.h,v 1.2 1998/02/06 22:32:03 thorpej Exp $ */ d46 1 @ 1.2 log @RCS ID police. @ text @d1 1 a1 1 /* $NetBSD$ */ a127 2 boolean_t uvm_map_checkprot __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t)); @ 1.1 log @Initial revision @ text @d1 1 a1 1 /* $Id: uvm_map.h,v 1.1.2.2 1998/01/05 17:59:14 chuck Exp $ */ @ 1.1.1.1 log @initial import of the new virtual memory system, UVM, into -current. UVM was written by chuck cranor , with some minor portions derived from the old Mach code. i provided some help getting swap and paging working, and other bug fixes/ideas. chuck silvers also provided some other fixes. this is the UVM kernel code portion. this will be KNF'd shortly. :-) @ text @@
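The 1.6.2.1 log above records the split of the old vm_offset_t/vm_size_t pair into separate physical and virtual flavours, and the surrounding deltas show the prototypes being reworded to match. A stand-alone illustration of what such typedefs could look like; the underlying integer type is machine dependent, so unsigned long here is only a placeholder:

typedef unsigned long	vaddr_t;	/* virtual address */
typedef unsigned long	vsize_t;	/* size of a virtual range */
typedef unsigned long	paddr_t;	/* physical address */
typedef unsigned long	psize_t;	/* size of a physical range */

struct vm_map;				/* forward declaration for the sketch */

/* a prototype reworded in the style of the deltas above */
int	example_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);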