head 1.233; access; symbols netbsd-10-0-RELEASE:1.232 netbsd-10-0-RC6:1.232 netbsd-10-0-RC5:1.232 netbsd-10-0-RC4:1.232 netbsd-10-0-RC3:1.232 netbsd-10-0-RC2:1.232 thorpej-ifq:1.233.0.4 thorpej-ifq-base:1.233 thorpej-altq-separation:1.233.0.2 thorpej-altq-separation-base:1.233 netbsd-10-0-RC1:1.232 netbsd-10:1.232.0.12 netbsd-10-base:1.232 bouyer-sunxi-drm:1.232.0.10 bouyer-sunxi-drm-base:1.232 netbsd-9-3-RELEASE:1.213 thorpej-i2c-spi-conf2:1.232.0.8 thorpej-i2c-spi-conf2-base:1.232 thorpej-futex2:1.232.0.6 thorpej-futex2-base:1.232 thorpej-cfargs2:1.232.0.4 thorpej-cfargs2-base:1.232 cjep_sun2x-base1:1.232 cjep_sun2x:1.232.0.2 cjep_sun2x-base:1.232 cjep_staticlib_x-base1:1.232 netbsd-9-2-RELEASE:1.213 cjep_staticlib_x:1.231.0.8 cjep_staticlib_x-base:1.231 thorpej-i2c-spi-conf:1.231.0.6 thorpej-i2c-spi-conf-base:1.232 thorpej-cfargs:1.231.0.4 thorpej-cfargs-base:1.231 thorpej-futex:1.231.0.2 thorpej-futex-base:1.231 netbsd-9-1-RELEASE:1.213 bouyer-xenpvh-base2:1.224 phil-wifi-20200421:1.223 bouyer-xenpvh-base1:1.223 phil-wifi-20200411:1.222 bouyer-xenpvh:1.222.0.2 bouyer-xenpvh-base:1.222 is-mlppp:1.221.0.2 is-mlppp-base:1.221 phil-wifi-20200406:1.222 netbsd-8-2-RELEASE:1.206 ad-namecache-base3:1.221 netbsd-9-0-RELEASE:1.213 netbsd-9-0-RC2:1.213 ad-namecache-base2:1.219 ad-namecache-base1:1.219 ad-namecache:1.218.0.2 ad-namecache-base:1.218 netbsd-9-0-RC1:1.213 phil-wifi-20191119:1.213 netbsd-9:1.213.0.6 netbsd-9-base:1.213 phil-wifi-20190609:1.213 netbsd-8-1-RELEASE:1.206 netbsd-8-1-RC1:1.206 isaki-audio2:1.213.0.4 isaki-audio2-base:1.213 pgoyette-compat-merge-20190127:1.208.2.3 pgoyette-compat-20190127:1.213 pgoyette-compat-20190118:1.213 pgoyette-compat-1226:1.213 pgoyette-compat-1126:1.213 pgoyette-compat-1020:1.213 pgoyette-compat-0930:1.213 pgoyette-compat-0906:1.213 netbsd-7-2-RELEASE:1.191.2.2 pgoyette-compat-0728:1.213 netbsd-8-0-RELEASE:1.206 phil-wifi:1.213.0.2 phil-wifi-base:1.213 pgoyette-compat-0625:1.213 netbsd-8-0-RC2:1.206 pgoyette-compat-0521:1.212 pgoyette-compat-0502:1.210 pgoyette-compat-0422:1.210 netbsd-8-0-RC1:1.206 pgoyette-compat-0415:1.208 pgoyette-compat-0407:1.208 pgoyette-compat-0330:1.208 pgoyette-compat-0322:1.208 pgoyette-compat-0315:1.208 netbsd-7-1-2-RELEASE:1.191.2.2 pgoyette-compat:1.208.0.2 pgoyette-compat-base:1.208 netbsd-7-1-1-RELEASE:1.191.2.2 tls-maxphys-base-20171202:1.207 matt-nb8-mediatek:1.206.0.6 matt-nb8-mediatek-base:1.206 nick-nhusb-base-20170825:1.206 perseant-stdc-iso10646:1.206.0.4 perseant-stdc-iso10646-base:1.206 netbsd-8:1.206.0.2 netbsd-8-base:1.206 prg-localcount2-base3:1.205 prg-localcount2-base2:1.204 prg-localcount2-base1:1.203 prg-localcount2:1.203.0.6 prg-localcount2-base:1.203 pgoyette-localcount-20170426:1.203 bouyer-socketcan-base1:1.203 jdolecek-ncq:1.203.0.4 jdolecek-ncq-base:1.203 pgoyette-localcount-20170320:1.203 netbsd-7-1:1.191.2.2.0.6 netbsd-7-1-RELEASE:1.191.2.2 netbsd-7-1-RC2:1.191.2.2 nick-nhusb-base-20170204:1.203 netbsd-7-nhusb-base-20170116:1.191.2.2 bouyer-socketcan:1.203.0.2 bouyer-socketcan-base:1.203 pgoyette-localcount-20170107:1.203 netbsd-7-1-RC1:1.191.2.2 nick-nhusb-base-20161204:1.198 pgoyette-localcount-20161104:1.198 netbsd-7-0-2-RELEASE:1.191.2.2 nick-nhusb-base-20161004:1.198 localcount-20160914:1.198 netbsd-7-nhusb:1.191.2.2.0.4 netbsd-7-nhusb-base:1.191.2.2 pgoyette-localcount-20160806:1.198 pgoyette-localcount-20160726:1.198 pgoyette-localcount:1.197.0.2 pgoyette-localcount-base:1.197 nick-nhusb-base-20160907:1.197 nick-nhusb-base-20160529:1.197 netbsd-7-0-1-RELEASE:1.191.2.2 
nick-nhusb-base-20160422:1.196 nick-nhusb-base-20160319:1.196 nick-nhusb-base-20151226:1.195 netbsd-7-0:1.191.2.2.0.2 netbsd-7-0-RELEASE:1.191.2.2 nick-nhusb-base-20150921:1.194 netbsd-7-0-RC3:1.191.2.2 netbsd-7-0-RC2:1.191.2.2 netbsd-7-0-RC1:1.191.2.2 nick-nhusb-base-20150606:1.194 nick-nhusb-base-20150406:1.194 nick-nhusb:1.191.0.4 nick-nhusb-base:1.191 netbsd-5-2-3-RELEASE:1.148.4.2 netbsd-5-1-5-RELEASE:1.148.4.2 netbsd-6-0-6-RELEASE:1.181.2.1 netbsd-6-1-5-RELEASE:1.181.2.1 netbsd-7:1.191.0.2 netbsd-7-base:1.191 yamt-pagecache-base9:1.189 yamt-pagecache-tag8:1.176.2.10 netbsd-6-1-4-RELEASE:1.181.2.1 netbsd-6-0-5-RELEASE:1.181.2.1 tls-earlyentropy:1.189.0.2 tls-earlyentropy-base:1.191 riastradh-xf86-video-intel-2-7-1-pre-2-21-15:1.189 riastradh-drm2-base3:1.189 netbsd-6-1-3-RELEASE:1.181.2.1 netbsd-6-0-4-RELEASE:1.181.2.1 netbsd-5-2-2-RELEASE:1.148.4.2 netbsd-5-1-4-RELEASE:1.148.4.2 netbsd-6-1-2-RELEASE:1.181.2.1 netbsd-6-0-3-RELEASE:1.181.2.1 netbsd-5-2-1-RELEASE:1.148.4.2 netbsd-5-1-3-RELEASE:1.148.4.2 rmind-smpnet-nbase:1.189 netbsd-6-1-1-RELEASE:1.181.2.1 riastradh-drm2-base2:1.184 riastradh-drm2-base1:1.184 riastradh-drm2:1.184.0.10 riastradh-drm2-base:1.184 rmind-smpnet:1.184.0.4 rmind-smpnet-base:1.189 netbsd-6-1:1.181.2.1.0.6 netbsd-6-0-2-RELEASE:1.181.2.1 netbsd-6-1-RELEASE:1.181.2.1 khorben-n900:1.184.0.8 netbsd-6-1-RC4:1.181.2.1 netbsd-6-1-RC3:1.181.2.1 agc-symver:1.184.0.6 agc-symver-base:1.184 netbsd-6-1-RC2:1.181.2.1 netbsd-6-1-RC1:1.181.2.1 yamt-pagecache-base8:1.184 netbsd-5-2:1.148.4.2.0.10 netbsd-6-0-1-RELEASE:1.181.2.1 yamt-pagecache-base7:1.184 netbsd-5-2-RELEASE:1.148.4.2 netbsd-5-2-RC1:1.148.4.2 matt-nb6-plus-nbase:1.181.2.1 yamt-pagecache-base6:1.184 netbsd-6-0:1.181.2.1.0.4 netbsd-6-0-RELEASE:1.181.2.1 netbsd-6-0-RC2:1.181.2.1 tls-maxphys:1.184.0.2 tls-maxphys-base:1.191 matt-nb6-plus:1.181.2.1.0.2 matt-nb6-plus-base:1.181.2.1 netbsd-6-0-RC1:1.181.2.1 jmcneill-usbmp-base10:1.183 yamt-pagecache-base5:1.183 jmcneill-usbmp-base9:1.183 yamt-pagecache-base4:1.183 jmcneill-usbmp-base8:1.182 jmcneill-usbmp-base7:1.181 jmcneill-usbmp-base6:1.181 jmcneill-usbmp-base5:1.181 jmcneill-usbmp-base4:1.181 jmcneill-usbmp-base3:1.181 jmcneill-usbmp-pre-base2:1.176 jmcneill-usbmp-base2:1.181 netbsd-6:1.181.0.2 netbsd-6-base:1.181 netbsd-5-1-2-RELEASE:1.148.4.2 netbsd-5-1-1-RELEASE:1.148.4.2 jmcneill-usbmp:1.176.0.6 jmcneill-usbmp-base:1.176 jmcneill-audiomp3:1.176.0.4 jmcneill-audiomp3-base:1.176 yamt-pagecache-base3:1.176 yamt-pagecache-base2:1.176 yamt-pagecache:1.176.0.2 yamt-pagecache-base:1.176 rmind-uvmplock-nbase:1.172 cherry-xenmp:1.172.0.2 cherry-xenmp-base:1.172 uebayasi-xip-base7:1.170 bouyer-quota2-nbase:1.171 bouyer-quota2:1.168.0.4 bouyer-quota2-base:1.170 jruoho-x86intr:1.168.0.2 jruoho-x86intr-base:1.168 matt-mips64-premerge-20101231:1.167 matt-nb5-mips64-premerge-20101231:1.148.4.2.4.1 matt-nb5-pq3:1.148.4.2.0.8 matt-nb5-pq3-base:1.148.4.2 netbsd-5-1:1.148.4.2.0.6 uebayasi-xip-base6:1.166 uebayasi-xip-base5:1.163 netbsd-5-1-RELEASE:1.148.4.2 uebayasi-xip-base4:1.163 uebayasi-xip-base3:1.163 yamt-nfs-mp-base11:1.163 netbsd-5-1-RC4:1.148.4.2 matt-nb5-mips64-k15:1.148.4.2.4.1 uebayasi-xip-base2:1.163 yamt-nfs-mp-base10:1.163 netbsd-5-1-RC3:1.148.4.2 netbsd-5-1-RC2:1.148.4.2 uebayasi-xip-base1:1.163 netbsd-5-1-RC1:1.148.4.2 rmind-uvmplock:1.162.0.2 rmind-uvmplock-base:1.172 yamt-nfs-mp-base9:1.162 uebayasi-xip:1.161.0.2 uebayasi-xip-base:1.161 netbsd-5-0-2-RELEASE:1.148.4.2 matt-nb5-mips64-premerge-20091211:1.148.4.2 matt-premerge-20091211:1.161 yamt-nfs-mp-base8:1.159 
matt-nb5-mips64-u2-k2-k4-k7-k8-k9:1.148.4.2 matt-nb4-mips64-k7-u2a-k9b:1.148.4.2 matt-nb5-mips64-u1-k1-k5:1.148.4.2 yamt-nfs-mp-base7:1.159 matt-nb5-mips64:1.148.4.2.0.4 netbsd-5-0-1-RELEASE:1.148.4.2 jymxensuspend-base:1.155 yamt-nfs-mp-base6:1.155 yamt-nfs-mp-base5:1.154 yamt-nfs-mp-base4:1.154 jym-xensuspend-nbase:1.160 yamt-nfs-mp-base3:1.154 nick-hppapmap-base4:1.154 nick-hppapmap-base3:1.154 netbsd-5-0:1.148.4.2.0.2 netbsd-5-0-RELEASE:1.148.4.2 netbsd-5-0-RC4:1.148.4.2 netbsd-5-0-RC3:1.148.4.1 nick-hppapmap-base2:1.151 netbsd-5-0-RC2:1.148.4.1 jym-xensuspend:1.150.0.4 jym-xensuspend-base:1.154 netbsd-5-0-RC1:1.148.4.1 haad-dm-base2:1.150 haad-nbase2:1.150 ad-audiomp2:1.150.0.2 ad-audiomp2-base:1.150 netbsd-5:1.148.0.4 netbsd-5-base:1.148 nick-hppapmap:1.148.0.2 nick-hppapmap-base:1.154 matt-mips64-base2:1.148 matt-mips64:1.134.0.6 haad-dm-base1:1.148 wrstuden-revivesa-base-4:1.148 netbsd-4-0-1-RELEASE:1.122.2.1 wrstuden-revivesa-base-3:1.148 wrstuden-revivesa-base-2:1.148 wrstuden-fixsa-newbase:1.122.2.1 nick-csl-alignment-base5:1.135 haad-dm:1.146.0.4 haad-dm-base:1.150 wrstuden-revivesa-base-1:1.146 simonb-wapbl-nbase:1.147 yamt-pf42-base4:1.146 simonb-wapbl:1.146.0.2 simonb-wapbl-base:1.147 yamt-pf42-base3:1.145 hpcarm-cleanup-nbase:1.145 yamt-pf42-baseX:1.145 yamt-pf42-base2:1.145 yamt-nfs-mp-base2:1.145 wrstuden-revivesa:1.145.0.6 wrstuden-revivesa-base:1.146 yamt-nfs-mp:1.145.0.4 yamt-nfs-mp-base:1.145 yamt-pf42:1.145.0.2 yamt-pf42-base:1.145 ad-socklock-base1:1.145 yamt-lazymbuf-base15:1.145 yamt-lazymbuf-base14:1.145 keiichi-mipv6-nbase:1.145 mjf-devfs2:1.144.0.6 mjf-devfs2-base:1.150 nick-net80211-sync:1.144.0.4 nick-net80211-sync-base:1.144 keiichi-mipv6:1.144.0.2 keiichi-mipv6-base:1.145 bouyer-xeni386-merge1:1.139.4.2 matt-armv6-prevmlocking:1.135.2.1 wrstuden-fixsa-base-1:1.122.2.1 vmlocking2-base3:1.141 netbsd-4-0:1.122.2.1.0.6 netbsd-4-0-RELEASE:1.122.2.1 bouyer-xeni386-nbase:1.143 yamt-kmem-base3:1.140 cube-autoconf:1.139.0.6 cube-autoconf-base:1.139 yamt-kmem-base2:1.139 bouyer-xeni386:1.139.0.4 bouyer-xeni386-base:1.143 yamt-kmem:1.139.0.2 yamt-kmem-base:1.139 vmlocking2-base2:1.139 reinoud-bufcleanup-nbase:1.139 vmlocking2:1.137.0.2 vmlocking2-base1:1.137 netbsd-4-0-RC5:1.122.2.1 matt-nb4-arm:1.122.2.1.0.4 matt-nb4-arm-base:1.122.2.1 matt-armv6-nbase:1.145 jmcneill-base:1.136 netbsd-4-0-RC4:1.122.2.1 mjf-devfs:1.135.0.8 mjf-devfs-base:1.144 bouyer-xenamd64-base2:1.136 vmlocking-nbase:1.137 yamt-x86pmap-base4:1.135 bouyer-xenamd64:1.135.0.6 bouyer-xenamd64-base:1.136 netbsd-4-0-RC3:1.122.2.1 yamt-x86pmap-base3:1.135 yamt-x86pmap-base2:1.135 netbsd-4-0-RC2:1.122.2.1 yamt-x86pmap:1.135.0.4 yamt-x86pmap-base:1.135 netbsd-4-0-RC1:1.122.2.1 matt-armv6:1.135.0.2 matt-armv6-base:1.143 matt-mips64-base:1.134 jmcneill-pm:1.134.0.4 jmcneill-pm-base:1.139 hpcarm-cleanup:1.134.0.2 hpcarm-cleanup-base:1.144 nick-csl-alignment:1.132.0.2 nick-csl-alignment-base:1.132 netbsd-3-1-1-RELEASE:1.98.8.1 netbsd-3-0-3-RELEASE:1.98.8.1 yamt-idlelwp-base8:1.129 wrstuden-fixsa:1.122.2.1.0.2 wrstuden-fixsa-base:1.122.2.1 thorpej-atomic:1.129.0.2 thorpej-atomic-base:1.129 reinoud-bufcleanup:1.128.0.6 reinoud-bufcleanup-base:1.139 mjf-ufs-trans:1.128.0.4 mjf-ufs-trans-base:1.131 vmlocking:1.128.0.2 vmlocking-base:1.135 ad-audiomp:1.127.0.2 ad-audiomp-base:1.127 yamt-idlelwp:1.125.0.2 post-newlock2-merge:1.124 newlock2-nbase:1.124 yamt-splraiseipl-base5:1.123 yamt-splraiseipl-base4:1.123 yamt-splraiseipl-base3:1.123 abandoned-netbsd-4-base:1.116 abandoned-netbsd-4:1.116.0.2 
netbsd-3-1:1.98.8.1.0.4 netbsd-3-1-RELEASE:1.98.8.1 netbsd-3-0-2-RELEASE:1.98.8.1 yamt-splraiseipl-base2:1.121 netbsd-3-1-RC4:1.98.8.1 yamt-splraiseipl:1.118.0.2 yamt-splraiseipl-base:1.118 netbsd-3-1-RC3:1.98.8.1 yamt-pdpolicy-base9:1.117 newlock2:1.117.0.2 newlock2-base:1.124 yamt-pdpolicy-base8:1.117 netbsd-3-1-RC2:1.98.8.1 netbsd-3-1-RC1:1.98.8.1 yamt-pdpolicy-base7:1.116 netbsd-4:1.122.0.2 netbsd-4-base:1.122 yamt-pdpolicy-base6:1.114 chap-midi-nbase:1.114 netbsd-3-0-1-RELEASE:1.98.8.1 gdamore-uart:1.114.0.4 gdamore-uart-base:1.114 simonb-timcounters-final:1.109.4.2 yamt-pdpolicy-base5:1.114 chap-midi:1.114.0.2 chap-midi-base:1.114 yamt-pdpolicy-base4:1.112 yamt-pdpolicy-base3:1.112 peter-altq-base:1.112 peter-altq:1.112.0.2 yamt-pdpolicy-base2:1.111 elad-kernelauth-base:1.112 elad-kernelauth:1.111.0.4 yamt-pdpolicy:1.111.0.2 yamt-pdpolicy-base:1.111 yamt-uio_vmspace-base5:1.110 simonb-timecounters:1.109.0.4 simonb-timecounters-base:1.114 rpaulo-netinet-merge-pcb:1.109.0.2 rpaulo-netinet-merge-pcb-base:1.117 yamt-uio_vmspace:1.108.0.2 netbsd-3-0:1.98.8.1.0.2 netbsd-3-0-RELEASE:1.98.8.1 netbsd-3-0-RC6:1.98.8.1 yamt-readahead-base3:1.106 netbsd-3-0-RC5:1.98.8.1 netbsd-3-0-RC4:1.98.8.1 netbsd-3-0-RC3:1.98.8.1 yamt-readahead-base2:1.106 netbsd-3-0-RC2:1.98.8.1 yamt-readahead-pervnode:1.106.6.1 yamt-readahead-perfile:1.106.6.1 yamt-readahead:1.106.0.6 yamt-readahead-base:1.106 netbsd-3-0-RC1:1.98.8.1 yamt-vop-base3:1.106 netbsd-2-0-3-RELEASE:1.91 netbsd-2-1:1.91.0.6 yamt-vop-base2:1.106 thorpej-vnode-attr:1.106.0.4 thorpej-vnode-attr-base:1.106 netbsd-2-1-RELEASE:1.91 yamt-vop:1.106.0.2 yamt-vop-base:1.106 netbsd-2-1-RC6:1.91 netbsd-2-1-RC5:1.91 netbsd-2-1-RC4:1.91 netbsd-2-1-RC3:1.91 netbsd-2-1-RC2:1.91 netbsd-2-1-RC1:1.91 yamt-lazymbuf:1.103.0.2 yamt-km-base4:1.99 netbsd-2-0-2-RELEASE:1.91 yamt-km-base3:1.98 netbsd-3:1.98.0.8 netbsd-3-base:1.98 yamt-km-base2:1.98 yamt-km:1.98.0.4 yamt-km-base:1.98 kent-audio2:1.98.0.2 kent-audio2-base:1.100 netbsd-2-0-1-RELEASE:1.91 kent-audio1-beforemerge:1.97 netbsd-2:1.91.0.4 netbsd-2-base:1.91 kent-audio1:1.93.0.2 kent-audio1-base:1.93 netbsd-2-0-RELEASE:1.91 netbsd-2-0-RC5:1.91 netbsd-2-0-RC4:1.91 netbsd-2-0-RC3:1.91 netbsd-2-0-RC2:1.91 netbsd-2-0-RC1:1.91 netbsd-2-0:1.91.0.2 netbsd-2-0-base:1.91 netbsd-1-6-PATCH002-RELEASE:1.71.2.1 netbsd-1-6-PATCH002:1.71.2.1 netbsd-1-6-PATCH002-RC4:1.71.2.1 netbsd-1-6-PATCH002-RC3:1.71.2.1 netbsd-1-6-PATCH002-RC2:1.71.2.1 netbsd-1-6-PATCH002-RC1:1.71.2.1 ktrace-lwp:1.82.0.2 ktrace-lwp-base:1.107 netbsd-1-6-PATCH001:1.71 netbsd-1-6-PATCH001-RELEASE:1.71 netbsd-1-6-PATCH001-RC3:1.71 netbsd-1-6-PATCH001-RC2:1.71 netbsd-1-6-PATCH001-RC1:1.71 nathanw_sa_end:1.56.2.11 nathanw_sa_before_merge:1.75 fvdl_fs64_base:1.75 gmcgarry_ctxsw:1.75.0.4 gmcgarry_ctxsw_base:1.75 gmcgarry_ucred:1.75.0.2 gmcgarry_ucred_base:1.75 nathanw_sa_base:1.75 kqueue-aftermerge:1.73 kqueue-beforemerge:1.73 netbsd-1-6-RELEASE:1.71 netbsd-1-6-RC3:1.71 netbsd-1-6-RC2:1.71 netbsd-1-6-RC1:1.71 netbsd-1-6:1.71.0.2 netbsd-1-6-base:1.71 gehenna-devsw:1.70.0.8 gehenna-devsw-base:1.71 netbsd-1-5-PATCH003:1.42 eeh-devprop:1.70.0.6 eeh-devprop-base:1.70 newlock:1.70.0.4 newlock-base:1.70 ifpoll-base:1.70 thorpej-mips-cache:1.67.0.2 thorpej-mips-cache-base:1.67 thorpej-devvp-base3:1.67 thorpej-devvp-base2:1.67 post-chs-ubcperf:1.67 pre-chs-ubcperf:1.66 thorpej-devvp:1.66.0.2 thorpej-devvp-base:1.66 netbsd-1-5-PATCH002:1.42 kqueue:1.65.0.2 kqueue-base:1.73 netbsd-1-5-PATCH001:1.42 thorpej_scsipi_beforemerge:1.58 nathanw_sa:1.56.0.2 thorpej_scsipi_nbase:1.58 
netbsd-1-5-RELEASE:1.42 netbsd-1-5-BETA2:1.42 netbsd-1-5-BETA:1.42 netbsd-1-4-PATCH003:1.23.2.1 netbsd-1-5-ALPHA2:1.42 netbsd-1-5:1.42.0.2 netbsd-1-5-base:1.42 minoura-xpg4dl-base:1.40 minoura-xpg4dl:1.40.0.2 netbsd-1-4-PATCH002:1.23.2.1 chs-ubc2-newbase:1.37 wrstuden-devbsize-19991221:1.34 wrstuden-devbsize:1.34.0.8 wrstuden-devbsize-base:1.34 kame_141_19991130:1.23.2.1 comdex-fall-1999:1.34.0.6 comdex-fall-1999-base:1.34 fvdl-softdep:1.34.0.4 fvdl-softdep-base:1.34 thorpej_scsipi:1.34.0.2 thorpej_scsipi_base:1.58 netbsd-1-4-PATCH001:1.23.2.1 kame_14_19990705:1.23.2.1 kame_14_19990628:1.23.2.1 kame:1.23.2.1.0.4 chs-ubc2:1.23.2.1.0.2 chs-ubc2-base:1.34 netbsd-1-4-RELEASE:1.23.2.1 netbsd-1-4:1.23.0.2 netbsd-1-4-base:1.23 kenh-if-detach:1.21.0.4 kenh-if-detach-base:1.21 chs-ubc:1.21.0.2 chs-ubc-base:1.21 eeh-paddr_t:1.16.0.2 eeh-paddr_t-base:1.16 uvm980205:1.1.1.1 CDC:1.1.1; locks; strict; comment @ * @; 1.233 date 2023.02.26.07.27.14; author skrll; state Exp; branches; next 1.232; commitid bfc1Agn1sCNBq0fE; 1.232 date 2021.05.31.10.57.02; author riastradh; state Exp; branches; next 1.231; commitid 7KTPhr976sPI2iVC; 1.231 date 2020.08.14.09.06.15; author chs; state Exp; branches 1.231.6.1 1.231.8.1; next 1.230; commitid LOTESSEHlNtCK0kC; 1.230 date 2020.06.14.22.25.15; author ad; state Exp; branches; next 1.229; commitid HHN0UyojAbUj8fcC; 1.229 date 2020.06.13.19.55.58; author ad; state Exp; branches; next 1.228; commitid Q0xeZLxXo5M6l6cC; 1.228 date 2020.06.11.19.20.47; author ad; state Exp; branches; next 1.227; commitid jTCHRbpHeFCNcQbC; 1.227 date 2020.05.26.00.50.53; author kamil; state Exp; branches; next 1.226; commitid P9DmwK5uKag7AG9C; 1.226 date 2020.05.09.15.13.19; author thorpej; state Exp; branches; next 1.225; commitid xAKdJE9OUdAvTz7C; 1.225 date 2020.04.27.02.47.26; author rin; state Exp; branches; next 1.224; commitid lRB26gmfT88C9Y5C; 1.224 date 2020.04.23.21.47.09; author ad; state Exp; branches; next 1.223; commitid iebm2AyZ440RAy5C; 1.223 date 2020.04.18.03.27.13; author thorpej; state Exp; branches; next 1.222; commitid zqExCyeYEB6nBO4C; 1.222 date 2020.03.22.18.32.42; author ad; state Exp; branches 1.222.2.1; next 1.221; commitid GrnnOJcv6kiTxq1C; 1.221 date 2020.02.23.15.46.43; author ad; state Exp; branches; next 1.220; commitid DJJO1ciCDgZlwOXB; 1.220 date 2020.02.18.20.23.17; author chs; state Exp; branches; next 1.219; commitid a2v6DEDHVufwdcXB; 1.219 date 2020.01.15.17.55.45; author ad; state Exp; branches; next 1.218; commitid 3y5oCFDwuvhmuOSB; 1.218 date 2019.12.31.22.42.51; author ad; state Exp; branches 1.218.2.1; next 1.217; commitid ZG5NIGuTJgRUyUQB; 1.217 date 2019.12.31.13.07.14; author ad; state Exp; branches; next 1.216; commitid 1enRDiWCklcrnRQB; 1.216 date 2019.12.27.12.51.57; author ad; state Exp; branches; next 1.215; commitid kavNWDzTg832qlQB; 1.215 date 2019.12.21.12.58.26; author ad; state Exp; branches; next 1.214; commitid XjiuiVjnRnPxEzPB; 1.214 date 2019.12.16.22.47.55; author ad; state Exp; branches; next 1.213; commitid v7oZdpKCmLHV3ZOB; 1.213 date 2018.05.28.21.04.35; author chs; state Exp; branches 1.213.2.1; next 1.212; commitid PlA678yEJsdmJ6EA; 1.212 date 2018.05.19.11.39.37; author jdolecek; state Exp; branches; next 1.211; commitid LXB4vu1ygVwCSTCA; 1.211 date 2018.05.08.19.33.57; author christos; state Exp; branches; next 1.210; commitid KbYaGl17DuGHRwBA; 1.210 date 2018.04.20.19.02.18; author jdolecek; state Exp; branches; next 1.209; commitid RqK94cjtQj9ggdzA; 1.209 date 2018.04.20.18.58.10; author jdolecek; state Exp; branches; 
next 1.208; commitid QUVhewNhX1ZhedzA; 1.208 date 2017.12.15.16.03.29; author maya; state Exp; branches 1.208.2.1; next 1.207; commitid EGihYNlkh2rLk0jA; 1.207 date 2017.12.02.08.15.43; author mrg; state Exp; branches; next 1.206; commitid Qd469to6OryJ9ihA; 1.206 date 2017.05.20.07.27.15; author chs; state Exp; branches; next 1.205; 1.205 date 2017.05.17.22.43.12; author christos; state Exp; branches; next 1.204; 1.204 date 2017.05.06.21.34.52; author joerg; state Exp; branches; next 1.203; 1.203 date 2017.01.04.23.59.49; author christos; state Exp; branches 1.203.6.1; next 1.202; 1.202 date 2017.01.02.20.22.20; author cherry; state Exp; branches; next 1.201; 1.201 date 2016.12.24.19.21.29; author cherry; state Exp; branches; next 1.200; 1.200 date 2016.12.22.13.26.24; author cherry; state Exp; branches; next 1.199; 1.199 date 2016.12.22.12.55.21; author cherry; state Exp; branches; next 1.198; 1.198 date 2016.07.20.12.38.43; author maxv; state Exp; branches; next 1.197; 1.197 date 2016.05.25.17.43.58; author christos; state Exp; branches 1.197.2.1; next 1.196; 1.196 date 2016.02.05.04.18.55; author christos; state Exp; branches; next 1.195; 1.195 date 2015.11.26.13.15.34; author martin; state Exp; branches; next 1.194; 1.194 date 2015.03.20.15.41.43; author riastradh; state Exp; branches; next 1.193; 1.193 date 2015.02.06.18.19.22; author maxv; state Exp; branches; next 1.192; 1.192 date 2014.12.14.23.48.58; author chs; state Exp; branches; next 1.191; 1.191 date 2014.07.07.20.14.43; author riastradh; state Exp; branches 1.191.2.1 1.191.4.1; next 1.190; 1.190 date 2014.05.22.14.01.46; author riastradh; state Exp; branches; next 1.189; 1.189 date 2014.02.21.22.08.07; author skrll; state Exp; branches 1.189.2.1; next 1.188; 1.188 date 2014.01.03.21.12.18; author dsl; state Exp; branches; next 1.187; 1.187 date 2014.01.03.15.15.02; author dsl; state Exp; branches; next 1.186; 1.186 date 2014.01.01.18.57.16; author dsl; state Exp; branches; next 1.185; 1.185 date 2013.11.14.12.07.11; author martin; state Exp; branches; next 1.184; 1.184 date 2012.09.01.00.26.37; author matt; state Exp; branches 1.184.2.1 1.184.4.1; next 1.183; 1.183 date 2012.04.08.11.27.45; author martin; state Exp; branches; next 1.182; 1.182 date 2012.03.18.13.31.14; author uebayasi; state Exp; branches; next 1.181; 1.181 date 2012.02.02.18.59.45; author para; state Exp; branches 1.181.2.1; next 1.180; 1.180 date 2012.01.27.19.48.41; author para; state Exp; branches; next 1.179; 1.179 date 2012.01.05.15.19.53; author reinoud; state Exp; branches; next 1.178; 1.178 date 2011.12.22.13.12.50; author reinoud; state Exp; branches; next 1.177; 1.177 date 2011.12.20.15.39.35; author reinoud; state Exp; branches; next 1.176; 1.176 date 2011.09.01.06.40.28; author matt; state Exp; branches 1.176.2.1 1.176.6.1; next 1.175; 1.175 date 2011.08.27.09.11.53; author christos; state Exp; branches; next 1.174; 1.174 date 2011.06.16.09.21.03; author hannken; state Exp; branches; next 1.173; 1.173 date 2011.06.12.03.36.02; author rmind; state Exp; branches; next 1.172; 1.172 date 2011.04.23.18.14.12; author rmind; state Exp; branches 1.172.2.1; next 1.171; 1.171 date 2011.02.17.19.27.13; author matt; state Exp; branches; next 1.170; 1.170 date 2011.02.10.14.46.44; author pooka; state Exp; branches; next 1.169; 1.169 date 2011.02.02.15.13.34; author chuck; state Exp; branches; next 1.168; 1.168 date 2011.01.04.08.26.33; author matt; state Exp; branches 1.168.2.1 1.168.4.1; next 1.167; 1.167 date 2010.12.20.00.25.47; author matt; state Exp; 
branches; next 1.166; 1.166 date 2010.11.13.05.52.55; author uebayasi; state Exp; branches; next 1.165; 1.165 date 2010.11.12.12.02.35; author uebayasi; state Exp; branches; next 1.164; 1.164 date 2010.11.12.02.36.02; author uebayasi; state Exp; branches; next 1.163; 1.163 date 2010.04.16.03.21.49; author rmind; state Exp; branches; next 1.162; 1.162 date 2010.02.08.19.02.33; author joerg; state Exp; branches 1.162.2.1; next 1.161; 1.161 date 2009.11.21.17.45.02; author rmind; state Exp; branches 1.161.2.1; next 1.160; 1.160 date 2009.10.21.21.12.07; author rmind; state Exp; branches; next 1.159; 1.159 date 2009.08.18.02.43.49; author yamt; state Exp; branches; next 1.158; 1.158 date 2009.08.10.23.17.29; author haad; state Exp; branches; next 1.157; 1.157 date 2009.08.05.14.11.32; author pooka; state Exp; branches; next 1.156; 1.156 date 2009.08.05.14.10.33; author pooka; state Exp; branches; next 1.155; 1.155 date 2009.06.28.15.18.50; author rmind; state Exp; branches; next 1.154; 1.154 date 2009.03.30.16.36.36; author yamt; state Exp; branches; next 1.153; 1.153 date 2009.03.29.01.02.51; author mrg; state Exp; branches; next 1.152; 1.152 date 2009.03.12.12.55.16; author abs; state Exp; branches; next 1.151; 1.151 date 2009.02.18.13.16.58; author yamt; state Exp; branches; next 1.150; 1.150 date 2008.11.26.20.17.33; author pooka; state Exp; branches 1.150.4.1; next 1.149; 1.149 date 2008.10.31.20.42.41; author christos; state Exp; branches; next 1.148; 1.148 date 2008.08.08.14.41.50; author skrll; state Exp; branches 1.148.2.1 1.148.4.1; next 1.147; 1.147 date 2008.07.11.07.09.18; author skrll; state Exp; branches; next 1.146; 1.146 date 2008.06.04.12.45.28; author ad; state Exp; branches 1.146.2.1 1.146.4.1; next 1.145; 1.145 date 2008.02.29.20.35.23; author yamt; state Exp; branches 1.145.2.1 1.145.4.1 1.145.6.1; next 1.144; 1.144 date 2008.01.28.12.22.47; author yamt; state Exp; branches 1.144.2.1 1.144.6.1; next 1.143; 1.143 date 2008.01.02.11.49.16; author ad; state Exp; branches; next 1.142; 1.142 date 2007.12.26.22.11.53; author christos; state Exp; branches; next 1.141; 1.141 date 2007.12.24.15.46.46; author perry; state Exp; branches; next 1.140; 1.140 date 2007.12.13.02.45.11; author yamt; state Exp; branches; next 1.139; 1.139 date 2007.12.05.09.37.34; author yamt; state Exp; branches 1.139.2.1 1.139.4.1; next 1.138; 1.138 date 2007.12.05.09.35.46; author yamt; state Exp; branches; next 1.137; 1.137 date 2007.11.30.22.43.17; author ad; state Exp; branches 1.137.2.1; next 1.136; 1.136 date 2007.11.06.00.42.46; author ad; state Exp; branches; next 1.135; 1.135 date 2007.08.18.00.21.11; author ad; state Exp; branches 1.135.2.1 1.135.6.1 1.135.8.1; next 1.134; 1.134 date 2007.07.27.09.50.37; author yamt; state Exp; branches 1.134.4.1 1.134.6.1; next 1.133; 1.133 date 2007.07.22.19.16.06; author pooka; state Exp; branches; next 1.132; 1.132 date 2007.07.17.17.42.08; author joerg; state Exp; branches 1.132.2.1; next 1.131; 1.131 date 2007.07.09.21.11.36; author ad; state Exp; branches; next 1.130; 1.130 date 2007.06.05.12.31.35; author yamt; state Exp; branches; next 1.129; 1.129 date 2007.03.24.21.15.39; author rmind; state Exp; branches; next 1.128; 1.128 date 2007.03.04.06.03.48; author christos; state Exp; branches 1.128.2.1 1.128.4.1 1.128.6.1; next 1.127; 1.127 date 2007.02.22.06.05.00; author thorpej; state Exp; branches; next 1.126; 1.126 date 2007.02.21.23.00.12; author thorpej; state Exp; branches; next 1.125; 1.125 date 2007.02.15.20.21.13; author ad; state Exp; branches 
1.125.2.1; next 1.124; 1.124 date 2006.12.21.15.55.26; author yamt; state Exp; branches; next 1.123; 1.123 date 2006.12.07.14.06.51; author elad; state Exp; branches; next 1.122; 1.122 date 2006.12.01.16.06.09; author elad; state Exp; branches 1.122.2.1; next 1.121; 1.121 date 2006.10.12.10.14.20; author yamt; state Exp; branches; next 1.120; 1.120 date 2006.10.12.10.11.57; author yamt; state Exp; branches; next 1.119; 1.119 date 2006.10.05.14.48.33; author chs; state Exp; branches; next 1.118; 1.118 date 2006.09.15.15.51.13; author yamt; state Exp; branches 1.118.2.1; next 1.117; 1.117 date 2006.09.01.20.39.05; author cherry; state Exp; branches 1.117.2.1; next 1.116; 1.116 date 2006.08.04.22.42.36; author he; state Exp; branches; next 1.115; 1.115 date 2006.07.05.14.26.42; author drochner; state Exp; branches; next 1.114; 1.114 date 2006.05.19.15.08.14; author yamt; state Exp; branches 1.114.2.1 1.114.4.1; next 1.113; 1.113 date 2006.05.14.21.38.17; author elad; state Exp; branches; next 1.112; 1.112 date 2006.03.15.18.09.25; author drochner; state Exp; branches 1.112.2.1; next 1.111; 1.111 date 2006.03.01.12.38.44; author yamt; state Exp; branches 1.111.2.1 1.111.4.1; next 1.110; 1.110 date 2006.02.10.00.53.04; author simonb; state Exp; branches; next 1.109; 1.109 date 2006.01.21.13.34.15; author yamt; state Exp; branches 1.109.2.1 1.109.4.1; next 1.108; 1.108 date 2005.12.21.12.19.04; author yamt; state Exp; branches 1.108.2.1; next 1.107; 1.107 date 2005.11.29.22.52.03; author yamt; state Exp; branches; next 1.106; 1.106 date 2005.09.01.02.21.12; author yamt; state Exp; branches 1.106.6.1; next 1.105; 1.105 date 2005.09.01.02.16.46; author yamt; state Exp; branches; next 1.104; 1.104 date 2005.08.27.16.11.32; author yamt; state Exp; branches; next 1.103; 1.103 date 2005.06.10.05.10.13; author matt; state Exp; branches 1.103.2.1; next 1.102; 1.102 date 2005.06.02.17.01.44; author matt; state Exp; branches; next 1.101; 1.101 date 2005.05.15.08.01.06; author yamt; state Exp; branches; next 1.100; 1.100 date 2005.04.01.11.59.38; author yamt; state Exp; branches; next 1.99; 1.99 date 2005.03.26.05.12.36; author fvdl; state Exp; branches; next 1.98; 1.98 date 2005.01.13.11.50.32; author yamt; state Exp; branches 1.98.2.1 1.98.4.1 1.98.8.1; next 1.97; 1.97 date 2005.01.09.16.42.44; author chs; state Exp; branches; next 1.96; 1.96 date 2005.01.01.21.08.02; author yamt; state Exp; branches; next 1.95; 1.95 date 2005.01.01.21.02.13; author yamt; state Exp; branches; next 1.94; 1.94 date 2005.01.01.21.00.06; author yamt; state Exp; branches; next 1.93; 1.93 date 2004.08.28.22.12.40; author thorpej; state Exp; branches; next 1.92; 1.92 date 2004.05.04.21.33.40; author pk; state Exp; branches; next 1.91; 1.91 date 2004.03.24.07.55.01; author junyoung; state Exp; branches; next 1.90; 1.90 date 2004.03.14.16.47.23; author jdolecek; state Exp; branches; next 1.89; 1.89 date 2004.02.13.13.47.16; author yamt; state Exp; branches; next 1.88; 1.88 date 2004.01.04.11.33.32; author jdolecek; state Exp; branches; next 1.87; 1.87 date 2003.12.18.15.02.04; author pk; state Exp; branches; next 1.86; 1.86 date 2003.12.18.08.15.42; author pk; state Exp; branches; next 1.85; 1.85 date 2003.11.13.03.09.30; author chs; state Exp; branches; next 1.84; 1.84 date 2003.08.11.16.33.30; author pk; state Exp; branches; next 1.83; 1.83 date 2003.08.07.16.34.48; author agc; state Exp; branches; next 1.82; 1.82 date 2003.06.29.22.32.49; author fvdl; state Exp; branches 1.82.2.1; next 1.81; 1.81 date 2003.06.28.14.22.29; 
author darrenr; state Exp; branches; next 1.80; 1.80 date 2003.05.10.21.10.23; author thorpej; state Exp; branches; next 1.79; 1.79 date 2003.05.08.18.13.27; author thorpej; state Exp; branches; next 1.78; 1.78 date 2003.05.03.19.01.06; author wiz; state Exp; branches; next 1.77; 1.77 date 2003.02.01.06.23.54; author thorpej; state Exp; branches; next 1.76; 1.76 date 2003.01.18.09.42.58; author thorpej; state Exp; branches; next 1.75; 1.75 date 2002.12.11.07.10.20; author thorpej; state Exp; branches; next 1.74; 1.74 date 2002.11.17.08.32.45; author chs; state Exp; branches; next 1.73; 1.73 date 2002.09.22.07.20.31; author chs; state Exp; branches; next 1.72; 1.72 date 2002.09.15.16.54.29; author chs; state Exp; branches; next 1.71; 1.71 date 2002.05.17.22.00.50; author enami; state Exp; branches 1.71.2.1; next 1.70; 1.70 date 2001.12.10.01.52.26; author thorpej; state Exp; branches 1.70.8.1; next 1.69; 1.69 date 2001.12.09.03.07.19; author chs; state Exp; branches; next 1.68; 1.68 date 2001.12.08.00.35.33; author thorpej; state Exp; branches; next 1.67; 1.67 date 2001.09.15.20.36.45; author chs; state Exp; branches; next 1.66; 1.66 date 2001.08.16.01.37.50; author chs; state Exp; branches 1.66.2.1; next 1.65; 1.65 date 2001.06.02.18.09.26; author chs; state Exp; branches 1.65.2.1; next 1.64; 1.64 date 2001.05.26.21.27.21; author chs; state Exp; branches; next 1.63; 1.63 date 2001.05.25.04.06.12; author chs; state Exp; branches; next 1.62; 1.62 date 2001.05.02.01.22.19; author thorpej; state Exp; branches; next 1.61; 1.61 date 2001.05.01.19.36.56; author thorpej; state Exp; branches; next 1.60; 1.60 date 2001.04.29.04.23.21; author thorpej; state Exp; branches; next 1.59; 1.59 date 2001.04.25.18.09.52; author thorpej; state Exp; branches; next 1.58; 1.58 date 2001.03.15.06.10.56; author chs; state Exp; branches; next 1.57; 1.57 date 2001.03.09.01.02.12; author chs; state Exp; branches; next 1.56; 1.56 date 2001.02.06.17.01.52; author eeh; state Exp; branches 1.56.2.1; next 1.55; 1.55 date 2000.11.30.11.04.43; author simonb; state Exp; branches; next 1.54; 1.54 date 2000.11.29.09.52.18; author simonb; state Exp; branches; next 1.53; 1.53 date 2000.11.27.08.40.03; author chs; state Exp; branches; next 1.52; 1.52 date 2000.11.27.04.36.40; author nisimura; state Exp; branches; next 1.51; 1.51 date 2000.09.28.19.05.06; author eeh; state Exp; branches; next 1.50; 1.50 date 2000.09.21.17.46.04; author thorpej; state Exp; branches; next 1.49; 1.49 date 2000.09.13.15.00.25; author thorpej; state Exp; branches; next 1.48; 1.48 date 2000.08.12.22.41.55; author thorpej; state Exp; branches; next 1.47; 1.47 date 2000.08.01.00.53.09; author wiz; state Exp; branches; next 1.46; 1.46 date 2000.07.24.20.10.51; author jeffs; state Exp; branches; next 1.45; 1.45 date 2000.06.27.16.16.43; author mrg; state Exp; branches; next 1.44; 1.44 date 2000.06.27.09.00.14; author mrg; state Exp; branches; next 1.43; 1.43 date 2000.06.26.14.21.17; author mrg; state Exp; branches; next 1.42; 1.42 date 2000.06.08.05.52.34; author thorpej; state Exp; branches; next 1.41; 1.41 date 2000.05.28.05.49.06; author thorpej; state Exp; branches; next 1.40; 1.40 date 2000.04.24.17.12.00; author thorpej; state Exp; branches 1.40.2.1; next 1.39; 1.39 date 2000.04.10.00.28.05; author thorpej; state Exp; branches; next 1.38; 1.38 date 2000.03.26.20.54.46; author kleink; state Exp; branches; next 1.37; 1.37 date 2000.02.11.19.22.54; author thorpej; state Exp; branches; next 1.36; 1.36 date 2000.01.11.06.57.49; author chs; state Exp; 
branches; next 1.35; 1.35 date 99.12.30.16.09.47; author eeh; state Exp; branches; next 1.34; 1.34 date 99.07.22.22.58.38; author thorpej; state Exp; branches 1.34.2.1; next 1.33; 1.33 date 99.07.17.21.35.49; author thorpej; state Exp; branches; next 1.32; 1.32 date 99.07.02.23.20.58; author thorpej; state Exp; branches; next 1.31; 1.31 date 99.06.21.17.25.11; author thorpej; state Exp; branches; next 1.30; 1.30 date 99.06.18.05.13.46; author thorpej; state Exp; branches; next 1.29; 1.29 date 99.06.17.15.47.22; author thorpej; state Exp; branches; next 1.28; 1.28 date 99.06.15.23.27.47; author thorpej; state Exp; branches; next 1.27; 1.27 date 99.05.26.19.16.36; author thorpej; state Exp; branches; next 1.26; 1.26 date 99.05.26.01.05.24; author thorpej; state Exp; branches; next 1.25; 1.25 date 99.05.13.21.58.38; author thorpej; state Exp; branches; next 1.24; 1.24 date 99.04.11.04.04.11; author chs; state Exp; branches; next 1.23; 1.23 date 99.03.26.17.34.15; author chs; state Exp; branches 1.23.2.1; next 1.22; 1.22 date 99.03.25.18.48.50; author mrg; state Exp; branches; next 1.21; 1.21 date 98.09.08.23.44.21; author thorpej; state Exp; branches 1.21.2.1; next 1.20; 1.20 date 98.08.28.20.05.49; author thorpej; state Exp; branches; next 1.19; 1.19 date 98.08.13.02.11.00; author eeh; state Exp; branches; next 1.18; 1.18 date 98.08.01.01.39.03; author thorpej; state Exp; branches; next 1.17; 1.17 date 98.07.31.20.46.37; author thorpej; state Exp; branches; next 1.16; 1.16 date 98.07.24.20.28.48; author thorpej; state Exp; branches 1.16.2.1; next 1.15; 1.15 date 98.07.08.04.28.27; author thorpej; state Exp; branches; next 1.14; 1.14 date 98.07.04.22.18.53; author jonathan; state Exp; branches; next 1.13; 1.13 date 98.05.09.15.04.40; author kleink; state Exp; branches; next 1.12; 1.12 date 98.04.30.06.28.59; author thorpej; state Exp; branches; next 1.11; 1.11 date 98.03.30.06.24.42; author mycroft; state Exp; branches; next 1.10; 1.10 date 98.03.27.01.47.06; author thorpej; state Exp; branches; next 1.9; 1.9 date 98.03.09.00.58.56; author mrg; state Exp; branches; next 1.8; 1.8 date 98.02.10.02.34.31; author perry; state Exp; branches; next 1.7; 1.7 date 98.02.09.13.08.22; author mrg; state Exp; branches; next 1.6; 1.6 date 98.02.08.06.15.58; author thorpej; state Exp; branches; next 1.5; 1.5 date 98.02.07.17.00.36; author mrg; state Exp; branches; next 1.4; 1.4 date 98.02.07.11.08.22; author mrg; state Exp; branches; next 1.3; 1.3 date 98.02.07.02.24.02; author chs; state Exp; branches; next 1.2; 1.2 date 98.02.06.22.31.43; author thorpej; state Exp; branches; next 1.1; 1.1 date 98.02.05.06.25.10; author mrg; state Exp; branches 1.1.1.1; next ; 1.231.6.1 date 2021.06.17.04.46.37; author thorpej; state Exp; branches; next ; commitid d7CrUzY34skBrrXC; 1.231.8.1 date 2021.05.31.22.15.23; author cjep; state Exp; branches; next ; commitid eWz9SBW0XqKjJlVC; 1.222.2.1 date 2020.04.20.11.29.14; author bouyer; state Exp; branches; next 1.222.2.2; commitid 4WLfIgNPymVsg75C; 1.222.2.2 date 2020.04.25.11.24.08; author bouyer; state Exp; branches; next ; commitid d0banFLkg9JL4L5C; 1.218.2.1 date 2020.01.17.21.47.38; author ad; state Exp; branches; next 1.218.2.2; commitid T9pwLWote7xbI5TB; 1.218.2.2 date 2020.02.29.20.21.11; author ad; state Exp; branches; next ; commitid OjSb8ro7YQETQBYB; 1.213.2.1 date 2020.04.08.14.09.04; author martin; state Exp; branches; next 1.213.2.2; commitid Qli2aW9E74UFuA3C; 1.213.2.2 date 2020.04.21.18.42.46; author martin; state Exp; branches; next ; commitid 
86tA4aEmdr3VCh5C; 1.208.2.1 date 2018.04.22.07.20.29; author pgoyette; state Exp; branches; next 1.208.2.2; commitid W6xykws0Zbl4kpzA; 1.208.2.2 date 2018.05.21.04.36.17; author pgoyette; state Exp; branches; next 1.208.2.3; commitid X5L8kSrBWQcDt7DA; 1.208.2.3 date 2018.06.25.07.26.08; author pgoyette; state Exp; branches; next ; commitid 8PtAu9af7VvhiDHA; 1.203.6.1 date 2017.05.11.02.58.42; author pgoyette; state Exp; branches; next 1.203.6.2; 1.203.6.2 date 2017.05.19.00.22.58; author pgoyette; state Exp; branches; next ; 1.197.2.1 date 2016.07.26.03.24.24; author pgoyette; state Exp; branches; next 1.197.2.2; 1.197.2.2 date 2017.01.07.08.56.53; author pgoyette; state Exp; branches; next ; 1.191.2.1 date 2014.12.31.06.44.01; author snj; state Exp; branches; next 1.191.2.2; 1.191.2.2 date 2015.03.25.16.54.37; author snj; state Exp; branches; next ; 1.191.4.1 date 2015.04.06.15.18.33; author skrll; state Exp; branches; next 1.191.4.2; 1.191.4.2 date 2015.12.27.12.10.19; author skrll; state Exp; branches; next 1.191.4.3; 1.191.4.3 date 2016.03.19.11.30.39; author skrll; state Exp; branches; next 1.191.4.4; 1.191.4.4 date 2016.05.29.08.44.40; author skrll; state Exp; branches; next 1.191.4.5; 1.191.4.5 date 2016.10.05.20.56.12; author skrll; state Exp; branches; next 1.191.4.6; 1.191.4.6 date 2017.02.05.13.41.01; author skrll; state Exp; branches; next 1.191.4.7; 1.191.4.7 date 2017.08.28.17.53.17; author skrll; state Exp; branches; next ; commitid UQQpnjvcNkUZn05A; 1.189.2.1 date 2014.08.10.06.57.00; author tls; state Exp; branches; next ; 1.184.2.1 date 2014.08.20.00.04.45; author tls; state Exp; branches; next 1.184.2.2; 1.184.2.2 date 2017.12.03.11.39.22; author jdolecek; state Exp; branches; next ; commitid XcIYRZTAh1LmerhA; 1.184.4.1 date 2014.05.18.17.46.22; author rmind; state Exp; branches; next ; 1.181.2.1 date 2012.04.12.17.05.37; author riz; state Exp; branches 1.181.2.1.2.1; next ; 1.181.2.1.2.1 date 2012.11.28.22.59.09; author matt; state Exp; branches; next ; 1.176.2.1 date 2011.11.11.10.34.24; author yamt; state Exp; branches; next 1.176.2.2; 1.176.2.2 date 2011.11.12.02.54.04; author yamt; state Exp; branches; next 1.176.2.3; 1.176.2.3 date 2011.11.14.14.24.54; author yamt; state Exp; branches; next 1.176.2.4; 1.176.2.4 date 2011.11.20.10.52.33; author yamt; state Exp; branches; next 1.176.2.5; 1.176.2.5 date 2011.12.20.13.46.17; author yamt; state Exp; branches; next 1.176.2.6; 1.176.2.6 date 2011.12.26.16.03.10; author yamt; state Exp; branches; next 1.176.2.7; 1.176.2.7 date 2012.01.11.00.08.40; author yamt; state Exp; branches; next 1.176.2.8; 1.176.2.8 date 2012.02.05.04.58.29; author yamt; state Exp; branches; next 1.176.2.9; 1.176.2.9 date 2012.04.17.00.08.58; author yamt; state Exp; branches; next 1.176.2.10; 1.176.2.10 date 2012.10.30.17.23.01; author yamt; state Exp; branches; next 1.176.2.11; 1.176.2.11 date 2014.05.22.11.41.19; author yamt; state Exp; branches; next ; 1.176.6.1 date 2012.02.18.07.35.58; author mrg; state Exp; branches; next 1.176.6.2; 1.176.6.2 date 2012.04.05.21.33.52; author mrg; state Exp; branches; next 1.176.6.3; 1.176.6.3 date 2012.04.29.23.05.09; author mrg; state Exp; branches; next ; 1.172.2.1 date 2011.06.23.14.20.34; author cherry; state Exp; branches; next ; 1.168.2.1 date 2011.06.06.09.10.21; author jruoho; state Exp; branches; next ; 1.168.4.1 date 2011.02.08.16.20.06; author bouyer; state Exp; branches; next 1.168.4.2; 1.168.4.2 date 2011.02.17.12.00.52; author bouyer; state Exp; branches; next 1.168.4.3; 1.168.4.3 date 
2011.03.05.15.10.52; author bouyer; state Exp; branches; next ; 1.162.2.1 date 2010.03.16.15.38.17; author rmind; state Exp; branches; next 1.162.2.2; 1.162.2.2 date 2010.03.18.04.36.54; author rmind; state Exp; branches; next 1.162.2.3; 1.162.2.3 date 2010.04.23.21.18.00; author rmind; state Exp; branches; next 1.162.2.4; 1.162.2.4 date 2010.04.26.02.20.59; author rmind; state Exp; branches; next 1.162.2.5; 1.162.2.5 date 2010.05.30.05.18.09; author rmind; state Exp; branches; next 1.162.2.6; 1.162.2.6 date 2011.03.05.20.56.35; author rmind; state Exp; branches; next 1.162.2.7; 1.162.2.7 date 2011.05.19.03.43.05; author rmind; state Exp; branches; next 1.162.2.8; 1.162.2.8 date 2011.05.31.03.05.14; author rmind; state Exp; branches; next ; 1.161.2.1 date 2010.02.23.07.44.25; author uebayasi; state Exp; branches; next 1.161.2.2; 1.161.2.2 date 2010.04.27.08.40.22; author uebayasi; state Exp; branches; next 1.161.2.3; 1.161.2.3 date 2010.04.28.08.22.04; author uebayasi; state Exp; branches; next 1.161.2.4; 1.161.2.4 date 2010.04.28.13.28.42; author uebayasi; state Exp; branches; next 1.161.2.5; 1.161.2.5 date 2010.04.29.03.15.10; author uebayasi; state Exp; branches; next 1.161.2.6; 1.161.2.6 date 2010.04.30.14.44.37; author uebayasi; state Exp; branches; next 1.161.2.7; 1.161.2.7 date 2010.05.31.13.26.38; author uebayasi; state Exp; branches; next 1.161.2.8; 1.161.2.8 date 2010.07.26.10.11.39; author uebayasi; state Exp; branches; next 1.161.2.9; 1.161.2.9 date 2010.10.30.05.56.00; author uebayasi; state Exp; branches; next 1.161.2.10; 1.161.2.10 date 2010.11.02.14.05.28; author uebayasi; state Exp; branches; next 1.161.2.11; 1.161.2.11 date 2010.11.15.08.41.44; author uebayasi; state Exp; branches; next 1.161.2.12; 1.161.2.12 date 2010.11.16.07.44.24; author uebayasi; state Exp; branches; next 1.161.2.13; 1.161.2.13 date 2010.11.18.16.16.36; author uebayasi; state Exp; branches; next ; 1.150.4.1 date 2009.05.13.17.23.10; author jym; state Exp; branches; next 1.150.4.2; 1.150.4.2 date 2009.07.23.23.33.04; author jym; state Exp; branches; next ; 1.148.2.1 date 2009.01.19.13.20.36; author skrll; state Exp; branches; next 1.148.2.2; 1.148.2.2 date 2009.03.03.18.34.40; author skrll; state Exp; branches; next 1.148.2.3; 1.148.2.3 date 2009.04.28.07.37.58; author skrll; state Exp; branches; next ; 1.148.4.1 date 2008.11.02.23.08.56; author snj; state Exp; branches; next 1.148.4.2; 1.148.4.2 date 2009.04.01.00.25.23; author snj; state Exp; branches 1.148.4.2.4.1; next ; 1.148.4.2.4.1 date 2010.01.26.21.26.28; author matt; state Exp; branches; next 1.148.4.2.4.2; 1.148.4.2.4.2 date 2011.05.25.23.58.49; author matt; state Exp; branches; next 1.148.4.2.4.3; 1.148.4.2.4.3 date 2011.06.03.02.43.41; author matt; state Exp; branches; next 1.148.4.2.4.4; 1.148.4.2.4.4 date 2011.06.03.07.56.08; author matt; state Exp; branches; next 1.148.4.2.4.5; 1.148.4.2.4.5 date 2012.02.09.03.04.59; author matt; state Exp; branches; next 1.148.4.2.4.6; 1.148.4.2.4.6 date 2012.04.12.01.40.26; author matt; state Exp; branches; next ; 1.146.2.1 date 2008.07.18.16.37.57; author simonb; state Exp; branches; next ; 1.146.4.1 date 2008.10.19.22.18.10; author haad; state Exp; branches; next 1.146.4.2; 1.146.4.2 date 2008.12.13.01.15.42; author haad; state Exp; branches; next ; 1.145.2.1 date 2008.06.17.09.15.17; author yamt; state Exp; branches; next ; 1.145.4.1 date 2009.05.04.08.14.39; author yamt; state Exp; branches; next 1.145.4.2; 1.145.4.2 date 2009.07.18.14.53.28; author yamt; state Exp; branches; next 1.145.4.3; 
1.145.4.3 date 2009.08.19.18.48.35; author yamt; state Exp; branches; next 1.145.4.4; 1.145.4.4 date 2010.03.11.15.04.46; author yamt; state Exp; branches; next 1.145.4.5; 1.145.4.5 date 2010.08.11.22.55.16; author yamt; state Exp; branches; next ; 1.145.6.1 date 2008.06.23.04.32.06; author wrstuden; state Exp; branches; next 1.145.6.2; 1.145.6.2 date 2008.09.18.04.37.06; author wrstuden; state Exp; branches; next ; 1.144.2.1 date 2008.03.24.07.16.33; author keiichi; state Exp; branches; next ; 1.144.6.1 date 2008.04.03.12.43.14; author mjf; state Exp; branches; next 1.144.6.2; 1.144.6.2 date 2008.06.05.19.14.38; author mjf; state Exp; branches; next 1.144.6.3; 1.144.6.3 date 2008.09.28.10.41.07; author mjf; state Exp; branches; next 1.144.6.4; 1.144.6.4 date 2009.01.17.13.29.43; author mjf; state Exp; branches; next ; 1.139.2.1 date 2007.12.10.12.56.12; author yamt; state Exp; branches; next 1.139.2.2; 1.139.2.2 date 2007.12.13.05.06.04; author yamt; state Exp; branches; next ; 1.139.4.1 date 2007.12.13.21.57.04; author bouyer; state Exp; branches; next 1.139.4.2; 1.139.4.2 date 2008.01.02.21.58.35; author bouyer; state Exp; branches; next ; 1.137.2.1 date 2007.12.04.13.03.58; author ad; state Exp; branches; next 1.137.2.2; 1.137.2.2 date 2007.12.08.17.58.09; author ad; state Exp; branches; next 1.137.2.3; 1.137.2.3 date 2007.12.26.21.40.05; author ad; state Exp; branches; next ; 1.135.2.1 date 2007.11.06.23.35.27; author matt; state Exp; branches; next 1.135.2.2; 1.135.2.2 date 2008.01.09.01.58.39; author matt; state Exp; branches; next 1.135.2.3; 1.135.2.3 date 2008.03.23.02.05.13; author matt; state Exp; branches; next ; 1.135.6.1 date 2007.11.13.16.03.33; author bouyer; state Exp; branches; next ; 1.135.8.1 date 2007.11.19.00.49.38; author mjf; state Exp; branches; next 1.135.8.2; 1.135.8.2 date 2007.12.08.18.21.45; author mjf; state Exp; branches; next 1.135.8.3; 1.135.8.3 date 2007.12.27.00.46.53; author mjf; state Exp; branches; next 1.135.8.4; 1.135.8.4 date 2008.02.18.21.07.32; author mjf; state Exp; branches; next ; 1.134.4.1 date 2007.09.03.16.49.16; author jmcneill; state Exp; branches; next 1.134.4.2; 1.134.4.2 date 2007.11.06.19.25.41; author joerg; state Exp; branches; next 1.134.4.3; 1.134.4.3 date 2007.12.03.16.15.25; author joerg; state Exp; branches; next 1.134.4.4; 1.134.4.4 date 2007.12.09.19.38.57; author jmcneill; state Exp; branches; next ; 1.134.6.1 date 2007.07.27.09.50.37; author yamt; state dead; branches; next 1.134.6.2; 1.134.6.2 date 2007.07.27.09.50.38; author yamt; state Exp; branches; next ; 1.132.2.1 date 2007.08.15.13.51.20; author skrll; state Exp; branches; next 1.132.2.2; 1.132.2.2 date 2007.09.03.10.24.24; author skrll; state Exp; branches; next ; 1.128.2.1 date 2007.04.05.21.32.52; author ad; state Exp; branches; next 1.128.2.2; 1.128.2.2 date 2007.04.10.13.26.56; author ad; state Exp; branches; next 1.128.2.3; 1.128.2.3 date 2007.06.09.23.58.21; author ad; state Exp; branches; next 1.128.2.4; 1.128.2.4 date 2007.07.15.15.53.07; author ad; state Exp; branches; next 1.128.2.5; 1.128.2.5 date 2007.08.20.21.28.31; author ad; state Exp; branches; next 1.128.2.6; 1.128.2.6 date 2007.08.21.22.32.25; author yamt; state Exp; branches; next 1.128.2.7; 1.128.2.7 date 2007.10.18.22.45.54; author ad; state Exp; branches; next ; 1.128.4.1 date 2007.07.11.20.12.54; author mjf; state Exp; branches; next ; 1.128.6.1 date 2007.03.29.19.28.04; author reinoud; state Exp; branches; next ; 1.125.2.1 date 2007.02.27.16.55.25; author yamt; state Exp; branches; next 
1.125.2.2; 1.125.2.2 date 2007.03.12.06.01.11; author rmind; state Exp; branches; next 1.125.2.3; 1.125.2.3 date 2007.04.15.16.04.09; author yamt; state Exp; branches; next ; 1.122.2.1 date 2006.12.09.11.53.42; author bouyer; state Exp; branches; next ; 1.118.2.1 date 2006.10.22.06.07.52; author yamt; state Exp; branches; next 1.118.2.2; 1.118.2.2 date 2006.10.22.08.07.53; author yamt; state Exp; branches; next ; 1.117.2.1 date 2006.11.18.21.39.49; author ad; state Exp; branches; next 1.117.2.2; 1.117.2.2 date 2007.01.12.01.04.25; author ad; state Exp; branches; next ; 1.114.2.1 date 2006.05.19.15.08.14; author yamt; state dead; branches; next 1.114.2.2; 1.114.2.2 date 2006.05.19.15.08.15; author yamt; state Exp; branches; next ; 1.114.4.1 date 2006.07.13.17.50.13; author gdamore; state Exp; branches; next ; 1.112.2.1 date 2006.05.24.15.50.48; author tron; state Exp; branches; next ; 1.111.2.1 date 2006.03.05.12.51.09; author yamt; state Exp; branches; next 1.111.2.2; 1.111.2.2 date 2006.04.01.12.07.57; author yamt; state Exp; branches; next 1.111.2.3; 1.111.2.3 date 2006.05.24.10.59.30; author yamt; state Exp; branches; next 1.111.2.4; 1.111.2.4 date 2006.08.11.15.47.46; author yamt; state Exp; branches; next 1.111.2.5; 1.111.2.5 date 2006.09.03.15.26.08; author yamt; state Exp; branches; next ; 1.111.4.1 date 2006.04.19.03.58.21; author elad; state Exp; branches; next 1.111.4.2; 1.111.4.2 date 2006.05.06.23.32.59; author christos; state Exp; branches; next ; 1.109.2.1 date 2006.09.09.03.00.13; author rpaulo; state Exp; branches; next ; 1.109.4.1 date 2006.04.22.11.40.28; author simonb; state Exp; branches; next 1.109.4.2; 1.109.4.2 date 2006.06.01.22.39.44; author kardel; state Exp; branches; next ; 1.108.2.1 date 2005.12.31.11.21.26; author yamt; state Exp; branches; next 1.108.2.2; 1.108.2.2 date 2006.01.15.10.44.52; author yamt; state Exp; branches; next 1.108.2.3; 1.108.2.3 date 2006.02.01.14.52.48; author yamt; state Exp; branches; next 1.108.2.4; 1.108.2.4 date 2006.02.18.15.39.31; author yamt; state Exp; branches; next ; 1.106.6.1 date 2005.11.17.03.56.00; author yamt; state Exp; branches; next 1.106.6.2; 1.106.6.2 date 2005.11.19.17.37.00; author yamt; state Exp; branches; next ; 1.103.2.1 date 2006.06.21.15.12.39; author yamt; state Exp; branches; next 1.103.2.2; 1.103.2.2 date 2006.12.30.20.51.05; author yamt; state Exp; branches; next 1.103.2.3; 1.103.2.3 date 2007.02.26.09.12.28; author yamt; state Exp; branches; next 1.103.2.4; 1.103.2.4 date 2007.09.03.14.47.05; author yamt; state Exp; branches; next 1.103.2.5; 1.103.2.5 date 2007.11.15.11.45.38; author yamt; state Exp; branches; next 1.103.2.6; 1.103.2.6 date 2007.12.07.17.35.26; author yamt; state Exp; branches; next 1.103.2.7; 1.103.2.7 date 2008.01.21.09.48.20; author yamt; state Exp; branches; next 1.103.2.8; 1.103.2.8 date 2008.02.04.09.25.09; author yamt; state Exp; branches; next 1.103.2.9; 1.103.2.9 date 2008.03.17.09.15.52; author yamt; state Exp; branches; next ; 1.98.2.1 date 2005.04.29.11.29.40; author kent; state Exp; branches; next ; 1.98.4.1 date 2005.01.25.12.55.32; author yamt; state Exp; branches; next 1.98.4.2; 1.98.4.2 date 2005.01.25.12.58.28; author yamt; state Exp; branches; next 1.98.4.3; 1.98.4.3 date 2005.03.26.18.19.20; author yamt; state Exp; branches; next ; 1.98.8.1 date 2005.09.18.20.09.50; author tron; state Exp; branches; next ; 1.82.2.1 date 2003.07.02.15.27.29; author darrenr; state Exp; branches; next 1.82.2.2; 1.82.2.2 date 2004.08.03.10.57.04; author skrll; state Exp; branches; next 
1.82.2.3; 1.82.2.3 date 2004.09.03.12.45.55; author skrll; state Exp; branches; next 1.82.2.4; 1.82.2.4 date 2004.09.18.14.57.11; author skrll; state Exp; branches; next 1.82.2.5; 1.82.2.5 date 2004.09.21.13.39.24; author skrll; state Exp; branches; next 1.82.2.6; 1.82.2.6 date 2004.10.31.07.12.40; author skrll; state Exp; branches; next 1.82.2.7; 1.82.2.7 date 2005.01.17.19.33.11; author skrll; state Exp; branches; next 1.82.2.8; 1.82.2.8 date 2005.04.01.14.32.12; author skrll; state Exp; branches; next 1.82.2.9; 1.82.2.9 date 2005.11.10.14.12.39; author skrll; state Exp; branches; next 1.82.2.10; 1.82.2.10 date 2005.12.11.10.29.42; author christos; state Exp; branches; next ; 1.71.2.1 date 2003.06.02.14.30.01; author tron; state Exp; branches; next ; 1.70.8.1 date 2002.05.30.13.52.44; author gehenna; state Exp; branches; next ; 1.66.2.1 date 2001.10.01.12.48.38; author fvdl; state Exp; branches; next ; 1.65.2.1 date 2001.08.25.06.17.20; author thorpej; state Exp; branches; next 1.65.2.2; 1.65.2.2 date 2002.01.10.20.05.32; author thorpej; state Exp; branches; next 1.65.2.3; 1.65.2.3 date 2002.06.23.17.52.16; author jdolecek; state Exp; branches; next 1.65.2.4; 1.65.2.4 date 2002.10.10.18.45.04; author jdolecek; state Exp; branches; next ; 1.56.2.1 date 2001.03.05.22.50.09; author nathanw; state Exp; branches; next 1.56.2.2; 1.56.2.2 date 2001.04.09.01.59.12; author nathanw; state Exp; branches; next 1.56.2.3; 1.56.2.3 date 2001.06.21.20.10.26; author nathanw; state Exp; branches; next 1.56.2.4; 1.56.2.4 date 2001.08.24.00.13.35; author nathanw; state Exp; branches; next 1.56.2.5; 1.56.2.5 date 2001.09.21.22.37.12; author nathanw; state Exp; branches; next 1.56.2.6; 1.56.2.6 date 2002.01.08.00.35.00; author nathanw; state Exp; branches; next 1.56.2.7; 1.56.2.7 date 2002.06.20.03.50.39; author nathanw; state Exp; branches; next 1.56.2.8; 1.56.2.8 date 2002.09.17.21.24.05; author nathanw; state Exp; branches; next 1.56.2.9; 1.56.2.9 date 2002.10.18.02.45.58; author nathanw; state Exp; branches; next 1.56.2.10; 1.56.2.10 date 2002.12.11.06.51.53; author thorpej; state Exp; branches; next 1.56.2.11; 1.56.2.11 date 2002.12.11.15.44.49; author thorpej; state Exp; branches; next ; 1.40.2.1 date 2000.06.22.17.10.43; author minoura; state Exp; branches; next ; 1.34.2.1 date 2000.11.20.18.11.59; author bouyer; state Exp; branches; next 1.34.2.2; 1.34.2.2 date 2000.12.08.09.20.53; author bouyer; state Exp; branches; next 1.34.2.3; 1.34.2.3 date 2001.02.11.19.17.48; author bouyer; state Exp; branches; next 1.34.2.4; 1.34.2.4 date 2001.03.12.13.32.11; author bouyer; state Exp; branches; next 1.34.2.5; 1.34.2.5 date 2001.03.27.15.32.48; author bouyer; state Exp; branches; next ; 1.23.2.1 date 99.04.16.16.27.36; author chs; state Exp; branches 1.23.2.1.2.1; next ; 1.23.2.1.2.1 date 99.06.07.04.25.35; author chs; state Exp; branches; next 1.23.2.1.2.2; 1.23.2.1.2.2 date 99.06.21.01.47.19; author thorpej; state Exp; branches; next 1.23.2.1.2.3; 1.23.2.1.2.3 date 99.07.01.23.55.15; author thorpej; state Exp; branches; next 1.23.2.1.2.4; 1.23.2.1.2.4 date 99.07.04.01.57.35; author chs; state Exp; branches; next 1.23.2.1.2.5; 1.23.2.1.2.5 date 99.07.11.05.44.00; author chs; state Exp; branches; next 1.23.2.1.2.6; 1.23.2.1.2.6 date 99.08.02.23.16.14; author thorpej; state Exp; branches; next 1.23.2.1.2.7; 1.23.2.1.2.7 date 99.08.09.00.05.55; author chs; state Exp; branches; next ; 1.21.2.1 date 98.11.09.06.06.37; author chs; state Exp; branches; next 1.21.2.2; 1.21.2.2 date 99.02.25.04.11.15; author chs; state 
Exp; branches; next 1.21.2.3; 1.21.2.3 date 99.04.09.04.37.27; author chs; state Exp; branches; next 1.21.2.4; 1.21.2.4 date 99.04.30.04.32.08; author chs; state Exp; branches; next 1.21.2.5; 1.21.2.5 date 99.05.30.15.17.57; author chs; state Exp; branches; next 1.21.2.6; 1.21.2.6 date 99.06.02.05.01.45; author chs; state Exp; branches; next ; 1.16.2.1 date 98.07.30.14.04.10; author eeh; state Exp; branches; next 1.16.2.2; 1.16.2.2 date 98.08.08.03.07.02; author eeh; state Exp; branches; next ; 1.1.1.1 date 98.02.05.06.25.10; author mrg; state Exp; branches; next ;

desc
@@


1.233
log
@nkmempages should be size_t
@
text
@/*	$NetBSD: uvm_extern.h,v 1.232 2021/05/31 10:57:02 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp
 */

/*-
 * Copyright (c) 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@@(#)vm_extern.h	8.5 (Berkeley) 5/3/95
 */

#ifndef _UVM_UVM_EXTERN_H_
#define _UVM_UVM_EXTERN_H_

/*
 * uvm_extern.h: this file defines the external interface to the VM system.
 *
 * this should be the only file included by non-VM parts of the kernel
 * which need access to VM services.  if you want to know the interface
 * to the MI VM layer without knowing the details, this is the file to
 * learn.
 *
 * NOTE: vm system calls are prototyped in syscallargs.h
 */

#include <sys/types.h>

/*
 * defines
 */

/*
 * the following defines are for uvm_map and functions which call it.
 */

/* protections bits */
#define UVM_PROT_MASK	0x07	/* protection mask */
#define UVM_PROT_NONE	0x00	/* protection none */
#define UVM_PROT_ALL	0x07	/* everything */
#define UVM_PROT_READ	0x01	/* read */
#define UVM_PROT_WRITE	0x02	/* write */
#define UVM_PROT_EXEC	0x04	/* exec */

/* protection short codes */
#define UVM_PROT_R	0x01	/* read */
#define UVM_PROT_W	0x02	/* write */
#define UVM_PROT_RW	0x03	/* read-write */
#define UVM_PROT_X	0x04	/* exec */
#define UVM_PROT_RX	0x05	/* read-exec */
#define UVM_PROT_WX	0x06	/* write-exec */
#define UVM_PROT_RWX	0x07	/* read-write-exec */

/* 0x08: not used */

/* inherit codes */
#define UVM_INH_MASK	0x30	/* inherit mask */
#define UVM_INH_SHARE	0x00	/* "share" */
#define UVM_INH_COPY	0x10	/* "copy" */
#define UVM_INH_NONE	0x20	/* "none" */
#define UVM_INH_DONATE	0x30	/* "donate" << not used */

/* 0x40, 0x80: not used */
/* bits 0x700: max protection, 0x800: not used */
/* bits 0x7000: advice, 0x8000: not used */

/* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */
#define UVM_ADV_NORMAL		0x0	/* 'normal' */
#define UVM_ADV_RANDOM		0x1	/* 'random' */
#define UVM_ADV_SEQUENTIAL	0x2	/* 'sequential' */
#define UVM_ADV_WILLNEED	0x3	/* pages will be needed */
#define UVM_ADV_DONTNEED	0x4	/* pages won't be needed */
#define UVM_ADV_NOREUSE		0x5	/* pages will be used only once */
#define UVM_ADV_MASK		0x7	/* mask */

/* bits 0xffff0000: mapping flags */
#define UVM_FLAG_FIXED		0x00010000 /* find space */
#define UVM_FLAG_OVERLAY	0x00020000 /* establish overlay */
#define UVM_FLAG_NOMERGE	0x00040000 /* don't merge map entries */
#define UVM_FLAG_COPYONW	0x00080000 /* set copy_on_write flag */
#define UVM_FLAG_AMAPPAD	0x00100000 /* for bss: pad amap */
#define UVM_FLAG_TRYLOCK	0x00200000 /* fail if we can not lock map */
#define UVM_FLAG_NOWAIT		0x00400000 /* not allowed to sleep */
#define UVM_FLAG_WAITVA		0x00800000 /* wait for va */
#define UVM_FLAG_VAONLY		0x02000000 /* unmap: no pages are mapped */
#define UVM_FLAG_COLORMATCH	0x04000000 /* match color given in off */
#define UVM_FLAG_UNMAP		0x08000000 /* unmap existing entries */

#define UVM_FLAG_BITS "\177\020\
F\0\3\
:\0PROT=NONE\0\
:\1PROT=R\0\
:\2PROT=W\0\
:\3PROT=RW\0\
:\4PROT=X\0\
:\5PROT=RX\0\
:\6PROT=WX\0\
:\7PROT=RWX\0\
F\4\2\
:\0INH=SHARE\0\
:\1INH=COPY\0\
:\2INH=NONE\0\
:\3INH=DONATE\0\
F\10\3\
:\0MAXPROT=NONE\0\
:\1MAXPROT=R\0\
:\2MAXPROT=W\0\
:\3MAXPROT=RW\0\
:\4MAXPROT=X\0\
:\5MAXPROT=RX\0\
:\6MAXPROT=WX\0\
:\7MAXPROT=RWX\0\
F\14\3\
:\0ADV=NORMAL\0\
:\1ADV=RANDOM\0\
:\2ADV=SEQUENTIAL\0\
:\3ADV=WILLNEED\0\
:\4ADV=DONTNEED\0\
:\5ADV=NOREUSE\0\
b\20FIXED\0\
b\21OVERLAY\0\
b\22NOMERGE\0\
b\23COPYONW\0\
b\24AMAPPAD\0\
b\25TRYLOCK\0\
b\26NOWAIT\0\
b\27WAITVA\0\
b\30VAONLY\0\
b\31COLORMATCH\0\
b\32UNMAP\0\
"
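/*
 * Editorial note, not part of the original header: UVM_FLAG_BITS above
 * appears to be a snprintb(3) "new style" format string (the \177\020
 * prefix), so a packed prot/inherit/advice/flags word can be decoded
 * for debug output roughly as:
 *
 *	char buf[128];
 *	snprintb(buf, sizeof(buf), UVM_FLAG_BITS, flags);
 *
 * rendering the PROT=, INH=, MAXPROT= and ADV= fields plus the
 * individual mapping-flag bits.
 */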
/* macros to extract info */
#define UVM_PROTECTION(X)	((X) & UVM_PROT_MASK)
#define UVM_INHERIT(X)		(((X) & UVM_INH_MASK) >> 4)
#define UVM_MAXPROTECTION(X)	(((X) >> 8) & UVM_PROT_MASK)
#define UVM_ADVICE(X)		(((X) >> 12) & UVM_ADV_MASK)

#define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \
	(((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS))
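/*
 * Editorial example, not part of the original header: UVM_MAPFLAG()
 * just ORs the four fields into one word, e.g.
 *
 *	UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX, UVM_INH_COPY,
 *	    UVM_ADV_RANDOM, UVM_FLAG_COPYONW)
 *	== (0x07 << 8) | 0x03 | 0x10 | (0x1 << 12) | 0x00080000
 *	== 0x00081713
 *
 * and UVM_PROTECTION(), UVM_MAXPROTECTION(), UVM_INHERIT() and
 * UVM_ADVICE() pull the individual fields back out of such a word.
 */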
:\7MAXPROT=RWX\0\ F\14\3\ :\0ADV=NORMAL\0\ :\1ADV=RANDOM\0\ :\2ADV=SEQUENTIAL\0\ :\3ADV=WILLNEED\0\ :\4ADV=DONTNEED\0\ :\5ADV=NOREUSE\0\ b\20FIXED\0\ b\21OVERLAY\0\ b\22NOMERGE\0\ b\23COPYONW\0\ b\24AMAPPAD\0\ b\25TRYLOCK\0\ b\26NOWAIT\0\ b\27WAITVA\0\ b\30VAONLY\0\ b\31COLORMATCH\0\ b\32UNMAP\0\ " /* macros to extract info */ #define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK) #define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4) #define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK) #define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK) #define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \ (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) /* magic offset value: offset not known(obj) or don't care(!obj) */ #define UVM_UNKNOWN_OFFSET ((voff_t) -1) /* * the following defines are for uvm_km_alloc/free's flags */ #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ #define UVM_KMF_EXEC 0x20 /* need executable mapping */ #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */ #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */ #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */ #define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH /* start at color in align */ /* * the following defines the strategies for uvm_pagealloc_strat() */ #define UVM_PGA_STRAT_NORMAL 0 /* priority (low id to high) walk */ #define UVM_PGA_STRAT_ONLY 1 /* only specified free list */ #define UVM_PGA_STRAT_FALLBACK 2 /* ONLY falls back on NORMAL */ #define UVM_PGA_STRAT_NUMA 3 /* strongly prefer ideal bucket */ /* * flags for uvm_pagealloc_strat() */ #define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */ #define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */ /* * flags for ubc_uiomove() */ #define UBC_READ 0x001 /* reading from object */ #define UBC_WRITE 0x002 /* writing to object */ #define UBC_FAULTBUSY 0x004 /* nobody else is using these pages, so busy * them at alloc and unbusy at release (e.g., * for writes extending a file) */ #define UBC_ISMAPPED 0x008 /* object may be mapped by a process */ /* * flags for ubc_release() */ #define UBC_UNMAP 0x010 /* unmap pages now -- don't leave the * mappings cached indefinitely */ /* * flags for ubc_uiomove() */ #define UBC_PARTIALOK 0x100 /* return early on error; otherwise, zero all * remaining bytes after error */ /* * flags for uvn_findpages(). */ #define UFP_ALL 0x00 #define UFP_NOWAIT 0x01 #define UFP_NOALLOC 0x02 #define UFP_NOCACHE 0x04 #define UFP_NORDONLY 0x08 #define UFP_DIRTYONLY 0x10 #define UFP_BACKWARD 0x20 #define UFP_NOBUSY 0x40 /* * lockflags that control the locking behavior of various functions. */ #define UVM_LK_ENTER 0x00000001 /* map locked on entry */ #define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */ /* * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* * Value representing inactive emap. */ #define UVM_EMAP_INACTIVE (0) /* * structures */ struct buf; struct core; struct loadavg; struct mount; struct pglist; struct proc; struct uio; struct uvm_object; struct vm_anon; struct vmspace; struct pmap; struct vnode; struct vm_map_entry; struct vm_map; struct vm_page; struct vmtotal; /* * uvm_pctparam: parameter to be shown as percentage to user. 
*/ #define UVM_PCTPARAM_SHIFT 8 #define UVM_PCTPARAM_SCALE (1 << UVM_PCTPARAM_SHIFT) #define UVM_PCTPARAM_APPLY(pct, x) \ (((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT) struct uvm_pctparam { int pct_pct; /* percent [0, 100] */ /* should be the first member */ int pct_scaled; int (*pct_check)(struct uvm_pctparam *, int); }; /* * uvmexp: global data structures that are exported to parts of the kernel * other than the vm system. */ struct uvmexp { /* vm_page constants */ int pagesize; /* size of a page (PAGE_SIZE): must be power of 2 */ int pagemask; /* page mask */ int pageshift; /* page shift */ /* vm_page counters */ int npages; /* number of pages we manage */ int free; /* number of free pages */ int paging; /* number of pages in the process of being paged out */ int wired; /* number of wired pages */ /* * Adding anything before this line will break binary compatibility * with top(1) on NetBSD 1.5. */ int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ int zeropages; /* number of zero'd pages */ int reserve_pagedaemon; /* number of pages reserved for pagedaemon */ int reserve_kernel; /* number of pages reserved for kernel */ unsigned anonpages; /* number of pages used by anon mappings */ unsigned filepages; /* number of pages used by cached file data */ unsigned execpages; /* number of pages used by cached exec data */ /* pageout params */ int freemin; /* min number of free pages */ int freetarg; /* target number of free pages */ int wiredmax; /* max number of wired pages */ /* swap */ int nswapdev; /* number of configured swap devices in system */ int swpages; /* number of PAGE_SIZE'ed swap pages */ int swpgavail; /* number of swap pages currently available */ int swpginuse; /* number of swap pages in use */ int swpgonly; /* number of swap pages in use, not also in RAM */ int nswget; /* number of times fault calls uvm_swap_get() */ /* stat counters. XXX: should be 64-bit counters */ int faults; /* page fault count */ int traps; /* trap count */ int intrs; /* interrupt count */ int swtch; /* context switch count */ int softs; /* software interrupt count */ int syscalls; /* system calls */ int pageins; /* pagein operation count */ /* pageouts are in pdpageouts below */ int _unused1; int _unused2; int pgswapin; /* pages swapped in */ int pgswapout; /* pages swapped out */ int forks; /* forks */ int forks_ppwait; /* forks where parent waits */ int forks_sharevm; /* forks where vmspace is shared */ int pga_zerohit; /* pagealloc where zero wanted and zero was available */ int pga_zeromiss; /* pagealloc where zero wanted and zero not available */ int zeroaborts; /* number of times page zeroing was aborted */ int colorhit; /* pagealloc where we got optimal color */ int colormiss; /* pagealloc where we didn't */ int cpuhit; /* pagealloc where we allocated locally */ int cpumiss; /* pagealloc where we didn't */ /* fault subcounters. 
XXX: should be 64-bit counters */ int fltnoram; /* number of times fault was out of ram */ int fltnoanon; /* number of times fault was out of anons */ int fltpgwait; /* number of times fault had to wait on a page */ int fltpgrele; /* number of times fault found a released page */ int fltrelck; /* number of times fault relock called */ int fltrelckok; /* number of times fault relock is a success */ int fltanget; /* number of times fault gets anon page */ int fltanretry; /* number of times fault retries an anon get */ int fltamcopy; /* number of times fault clears "needs copy" */ int fltnamap; /* number of times fault maps a neighbor anon page */ int fltnomap; /* number of times fault maps a neighbor obj page */ int fltlget; /* number of times fault does a locked pgo_get */ int fltget; /* number of times fault does an unlocked get */ int flt_anon; /* number of times fault anon (case 1a) */ int flt_acow; /* number of times fault anon cow (case 1b) */ int flt_obj; /* number of times fault is on object page (2a) */ int flt_prcopy; /* number of times fault promotes with copy (2b) */ int flt_przero; /* number of times fault promotes with zerofill (2b) */ /* daemon counters. XXX: should be 64-bit counters */ int pdwoke; /* number of times daemon woke up */ int pdrevs; /* number of times daemon rev'd clock hand */ int _unused3; int pdfreed; /* number of pages daemon freed since boot */ int pdscans; /* number of pages daemon scanned since boot */ int pdanscan; /* number of anonymous pages scanned by daemon */ int pdobscan; /* number of object pages scanned by daemon */ int pdreact; /* number of pages daemon reactivated since boot */ int pdbusy; /* number of times daemon found a busy page */ int pdpageouts; /* number of times daemon started a pageout */ int pdpending; /* number of times daemon got a pending pageout */ int pddeact; /* number of pages daemon deactivates */ int pdreanon; /* anon pages reactivated due to thresholds */ int pdrefile; /* file pages reactivated due to thresholds */ int pdreexec; /* executable pages reactivated due to thresholds */ int bootpages; /* number of pages stolen at boot */ }; /* * The following structure is 64-bit alignment safe. New elements * should only be added to the end of this structure so binary * compatibility can be preserved.
*/ struct uvmexp_sysctl { int64_t pagesize; int64_t pagemask; int64_t pageshift; int64_t npages; int64_t free; int64_t active; int64_t inactive; int64_t paging; int64_t wired; int64_t zeropages; int64_t reserve_pagedaemon; int64_t reserve_kernel; int64_t freemin; int64_t freetarg; int64_t inactarg; /* unused */ int64_t wiredmax; int64_t nswapdev; int64_t swpages; int64_t swpginuse; int64_t swpgonly; int64_t nswget; int64_t unused1; /* unused; was nanon */ int64_t cpuhit; int64_t cpumiss; int64_t faults; int64_t traps; int64_t intrs; int64_t swtch; int64_t softs; int64_t syscalls; int64_t pageins; int64_t swapins; /* unused */ int64_t swapouts; /* unused */ int64_t pgswapin; /* unused */ int64_t pgswapout; int64_t forks; int64_t forks_ppwait; int64_t forks_sharevm; int64_t pga_zerohit; int64_t pga_zeromiss; int64_t zeroaborts; int64_t fltnoram; int64_t fltnoanon; int64_t fltpgwait; int64_t fltpgrele; int64_t fltrelck; int64_t fltrelckok; int64_t fltanget; int64_t fltanretry; int64_t fltamcopy; int64_t fltnamap; int64_t fltnomap; int64_t fltlget; int64_t fltget; int64_t flt_anon; int64_t flt_acow; int64_t flt_obj; int64_t flt_prcopy; int64_t flt_przero; int64_t pdwoke; int64_t pdrevs; int64_t unused4; int64_t pdfreed; int64_t pdscans; int64_t pdanscan; int64_t pdobscan; int64_t pdreact; int64_t pdbusy; int64_t pdpageouts; int64_t pdpending; int64_t pddeact; int64_t anonpages; int64_t filepages; int64_t execpages; int64_t colorhit; int64_t colormiss; int64_t ncolors; int64_t bootpages; int64_t poolpages; int64_t countsyncone; int64_t countsyncall; int64_t anonunknown; int64_t anonclean; int64_t anondirty; int64_t fileunknown; int64_t fileclean; int64_t filedirty; int64_t fltup; int64_t fltnoup; }; #ifdef _KERNEL /* we need this before including uvm_page.h on some platforms */ extern struct uvmexp uvmexp; #endif /* * Finally, bring in standard UVM headers. */ #include <sys/vmmeter.h> #include <sys/queue.h> #include <sys/lock.h> #ifdef _KERNEL #include <sys/vmem.h> #endif #include <uvm/uvm_param.h> #include <uvm/uvm_prot.h> #include <uvm/uvm_pmap.h> #if defined(_KERNEL) || defined(_KMEMUSER) #include <uvm/uvm_page.h> #include <uvm/uvm_pglist.h> #endif #ifdef _KERNEL /* * Include the uvm_hotplug(9) API unconditionally until * uvm_page_physload() et al. are obsoleted * * After this, MD code will have to explicitly include it if needed. */ #include <uvm/uvm_physseg.h> #endif /* * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_VNODE_FLAGS(vp) \ ((((vp)->v_iflag & VI_TEXT) != 0 ? UBC_UNMAP : 0) | \ (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0)) #else #define UBC_VNODE_FLAGS(vp) \ (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0) #endif #if defined(_KERNEL) || defined(_KMEMUSER) /* * Shareable process virtual address space. * May eventually be merged with vm_map. * Several fields are temporary (text, data stuff).
*/ struct vmspace { struct vm_map vm_map; /* VM address map */ volatile int vm_refcnt; /* number of references */ void * vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ #define vm_startcopy vm_rssize segsz_t vm_rssize; /* current resident set size in pages */ segsz_t vm_rssmax; /* max resident size in pages */ segsz_t vm_tsize; /* text size (pages) XXX */ segsz_t vm_dsize; /* data size (pages) XXX */ segsz_t vm_ssize; /* stack size (pages) */ segsz_t vm_issize; /* initial unmapped stack size (pages) */ void * vm_taddr; /* user virtual address of text XXX */ void * vm_daddr; /* user virtual address of data XXX */ void *vm_maxsaddr; /* user VA at max stack growth */ void *vm_minsaddr; /* user VA at top of stack */ size_t vm_aslr_delta_mmap; /* mmap() random delta for ASLR */ }; #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) #endif #ifdef _KERNEL /* * used to keep state while iterating over the map for a core dump. */ struct uvm_coredump_state { void *cookie; /* opaque for the caller */ vaddr_t start; /* start of region */ vaddr_t realend; /* real end of region */ vaddr_t end; /* virtual end of region */ vm_prot_t prot; /* protection of region */ int flags; /* flags; see below */ }; #define UVM_COREDUMP_STACK 0x01 /* region is user stack */ /* * the various kernel maps, owned by MD code */ extern struct vm_map *kernel_map; extern struct vm_map *phys_map; /* * uvm_voaddr: * * This structure encapsulates UVM's unique virtual object address * for an individual byte inside a pageable page. Pageable pages can * be owned by either a uvm_object or a vm_anon. * * In each case, the byte offset into the owning object * (uvm_object or vm_anon) is included in the ID, so that * two different offsets into the same page have distinct * IDs. * * Note that the page does not necessarily have to be resident * in order to know the virtual object address. However, it * is required that any pending copy-on-write is resolved. * * When someone wants a virtual object address, an extra reference * is taken on the owner while the caller uses the ID. This * ensures that the identity is stable for the duration of its * use. 
*/ struct uvm_voaddr { uintptr_t object; voff_t offset; }; /* * macros */ #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap)) /* vm_machdep.c */ int vmapbuf(struct buf *, vsize_t); void vunmapbuf(struct buf *, vsize_t); void ktext_write(void *, const void *, size_t); /* uvm_aobj.c */ struct uvm_object *uao_create(voff_t, int); void uao_set_pgfl(struct uvm_object *, int); void uao_detach(struct uvm_object *); void uao_reference(struct uvm_object *); /* uvm_bio.c */ void ubc_init(void); void ubchist_init(void); int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t, int, int); void ubc_zerorange(struct uvm_object *, off_t, size_t, int); void ubc_purge(struct uvm_object *); /* uvm_fault.c */ #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ /* uvm_glue.c */ #if defined(KGDB) void uvm_chgkprot(void *, size_t, int); #endif void uvm_proc_fork(struct proc *, struct proc *, bool); void uvm_lwp_fork(struct lwp *, struct lwp *, void *, size_t, void (*)(void *), void *); int uvm_coredump_walkmap(struct proc *, int (*)(struct uvm_coredump_state *), void *); int uvm_coredump_count_segs(struct proc *); void uvm_proc_exit(struct proc *); void uvm_lwp_exit(struct lwp *); void uvm_idle(void); void uvm_init_limits(struct proc *); bool uvm_kernacc(void *, size_t, vm_prot_t); __dead void uvm_scheduler(void); vaddr_t uvm_uarea_alloc(void); void uvm_uarea_free(vaddr_t); vaddr_t uvm_uarea_system_alloc(struct cpu_info *); void uvm_uarea_system_free(vaddr_t); vaddr_t uvm_lwp_getuarea(lwp_t *); void uvm_lwp_setuarea(lwp_t *, vaddr_t); int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); void uvm_vsunlock(struct vmspace *, void *, size_t); void uvm_cpu_attach(struct cpu_info *); /* uvm_init.c */ void uvm_md_init(void); void uvm_init(void); /* uvm_io.c */ int uvm_io(struct vm_map *, struct uio *, int); /* uvm_km.c */ vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); int uvm_km_protect(struct vm_map *, vaddr_t, vsize_t, vm_prot_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); struct vm_map *uvm_km_suballoc(struct vm_map *, vaddr_t *, vaddr_t *, vsize_t, int, bool, struct vm_map *); int uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *); void uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t); bool uvm_km_va_starved_p(void); /* uvm_map.c */ int uvm_map(struct vm_map *, vaddr_t *, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t); int uvm_map_pageable(struct vm_map *, vaddr_t, vaddr_t, bool, int); int uvm_map_pageable_all(struct vm_map *, int, vsize_t); bool uvm_map_checkprot(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t); int uvm_map_protect(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, bool); int uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t, vm_prot_t); struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool); void uvmspace_init(struct vmspace *, struct pmap *, vaddr_t, vaddr_t, bool); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool); void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool); struct vmspace *uvmspace_fork(struct vmspace *); void uvmspace_addref(struct vmspace *); void uvmspace_free(struct vmspace *); void uvmspace_share(struct proc *, struct proc *); void uvmspace_unshare(struct lwp *); bool uvm_voaddr_acquire(struct vm_map *, vaddr_t, struct uvm_voaddr *); void uvm_voaddr_release(struct uvm_voaddr *); int uvm_voaddr_compare(const struct uvm_voaddr *, const struct uvm_voaddr *); void 
uvm_whatis(uintptr_t, void (*)(const char *, ...)); /* uvm_meter.c */ int uvm_sysctl(int *, u_int, void *, size_t *, void *, size_t, struct proc *); int uvm_pctparam_check(struct uvm_pctparam *, int); void uvm_pctparam_set(struct uvm_pctparam *, int); int uvm_pctparam_get(struct uvm_pctparam *); void uvm_pctparam_init(struct uvm_pctparam *, int, int (*)(struct uvm_pctparam *, int)); int uvm_pctparam_createsysctlnode(struct uvm_pctparam *, const char *, const char *); void uvm_update_uvmexp(void); /* uvm_mmap.c */ int uvm_mmap_dev(struct proc *, void **, size_t, dev_t, off_t); int uvm_mmap_anon(struct proc *, void **, size_t); vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t, int); /* uvm_mremap.c */ int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, vsize_t, struct proc *, int); /* uvm_object.c */ void uvm_obj_init(struct uvm_object *, const struct uvm_pagerops *, bool, u_int); void uvm_obj_setlock(struct uvm_object *, krwlock_t *); void uvm_obj_destroy(struct uvm_object *, bool); int uvm_obj_wirepages(struct uvm_object *, off_t, off_t, struct pglist *); void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); bool uvm_obj_clean_p(struct uvm_object *); bool uvm_obj_nowriteback_p(struct uvm_object *); bool uvm_obj_page_dirty_p(struct vm_page *); void uvm_obj_page_set_dirty(struct vm_page *); void uvm_obj_page_clear_dirty(struct vm_page *); bool uvm_obj_page_writeback_p(struct vm_page *); void uvm_obj_page_set_writeback(struct vm_page *); void uvm_obj_page_clear_writeback(struct vm_page *); /* uvm_page.c */ int uvm_availmem(bool); void uvm_page_numa_load(paddr_t, paddr_t, u_int); struct vm_page *uvm_pagealloc_strat(struct uvm_object *, voff_t, struct vm_anon *, int, int, int); #define uvm_pagealloc(obj, off, anon, flags) \ uvm_pagealloc_strat((obj), (off), (anon), (flags), \ UVM_PGA_STRAT_NORMAL, 0) void uvm_pagereplace(struct vm_page *, struct vm_page *); int uvm_pagerealloc(struct vm_page *, struct uvm_object *, voff_t); void uvm_setpagesize(void); /* uvm_pager.c */ void uvm_aio_aiodone(struct buf *); void uvm_aio_aiodone_pages(struct vm_page **, int, bool, int); /* uvm_pdaemon.c */ void uvm_pageout(void *); struct work; void uvm_aiodone_worker(struct work *, void *); void uvm_pageout_start(int); void uvm_pageout_done(int); void uvm_estimatepageable(int *, int *); /* uvm_pglist.c */ int uvm_pglistalloc(psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int); void uvm_pglistfree(struct pglist *); /* uvm_swap.c */ void uvm_swap_init(void); /* uvm_unix.c */ int uvm_grow(struct proc *, vaddr_t); /* uvm_user.c */ void uvm_deallocate(struct vm_map *, vaddr_t, vsize_t); /* uvm_vnode.c */ struct uvm_page_array; void uvm_vnp_setsize(struct vnode *, voff_t); void uvm_vnp_setwritesize(struct vnode *, voff_t); int uvn_findpages(struct uvm_object *, voff_t, unsigned int *, struct vm_page **, struct uvm_page_array *, unsigned int); bool uvn_text_p(struct uvm_object *); bool uvn_needs_writefault_p(struct uvm_object *); /* kern_malloc.c */ void kmeminit_nkmempages(void); extern size_t nkmempages; #endif /* _KERNEL */ #endif /* _UVM_UVM_EXTERN_H_ */ @ 1.232 log @uvm: Make uvm_extern.h (more) self-contained, needs sys/types.h. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.231 2020/08/14 09:06:15 chs Exp $ */ d843 1 a843 1 extern int nkmempages; @ 1.231 log @centralize calls from UVM to radixtree into a few functions. in those functions, assert that the object lock is held in the correct mode. 
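For illustration only (the helper below is hypothetical, not the committed code): the rule being asserted is that a page's owning object lock, now a krwlock_t, must be held whenever the radixtree-backed page state is queried or changed, e.g.

	/* Sketch: query a page's dirtiness with its object lock held. */
	static bool
	page_dirty_locked(struct vm_page *pg)
	{
		struct uvm_object *uobj = pg->uobject;	/* assumes an object-owned page */
		bool dirty;

		rw_enter(uobj->vmobjlock, RW_READER);	/* reader mode is enough to query */
		dirty = uvm_obj_page_dirty_p(pg);	/* asserts the lock internally */
		rw_exit(uobj->vmobjlock);
		return dirty;
	}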
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.230 2020/06/14 22:25:15 ad Exp $ */ d75 2 @ 1.231.6.1 log @Sync w/ HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.232 2021/05/31 10:57:02 riastradh Exp $ */ a74 2 #include <sys/types.h> @ 1.231.8.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.232 2021/05/31 10:57:02 riastradh Exp $ */ a74 2 #include <sys/types.h> @ 1.230 log @g/c vm_page_zero_enable @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.229 2020/06/13 19:55:58 ad Exp $ */ d779 8 a836 1 bool uvn_clean_p(struct uvm_object *); @ 1.229 log @uvm_pagerealloc(): resurrect the insertion case. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.228 2020/06/11 19:20:47 ad Exp $ */ a517 2 /* MD code needs this without including */ extern bool vm_page_zero_enable; @ 1.228 log @uvm_availmem(): give it a boolean argument to specify whether a recent cached value will do, or if the very latest total must be fetched. It can be called thousands of times a second and fetching the totals impacts not only the calling LWP but other CPUs doing unrelated activity in the VM system. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.227 2020/05/26 00:50:53 kamil Exp $ */ d792 1 a792 1 void uvm_pagerealloc(struct vm_page *, @ 1.227 log @Catch up with the usage of struct vmspace::vm_refcnt Use the dedicated reference counting routines. Change the type of struct vmspace::vm_refcnt and struct vm_map::ref_count to volatile. Remove the unnecessary vm->vm_map.misc_lock locking in process_domem(). Reviewed by @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.226 2020/05/09 15:13:19 thorpej Exp $ */ d783 1 a783 1 int uvm_availmem(void); @ 1.226 log @Make the uvm_voaddr structure more compact, only occupying 2 pointers worth of space, by encoding the type in the lower bits of the object pointer. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.225 2020/04/27 02:47:26 rin Exp $ */ d569 1 a569 2 int vm_refcnt; /* number of references * * note: protected by vm_map.misc_lock */ @ 1.225 log @Add missing \ to fix build for PMAP_CACHE_VIVT, i.e., ARMv4 and prior. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.224 2020/04/23 21:47:09 ad Exp $ */ d616 1 a616 2 * be owned by either a uvm_object (UVM_VOADDR_TYPE_OBJECT) or a * vm_anon (UVM_VOADDR_TYPE_ANON). @ 1.224 log @PR kern/54759 (vm.ubc_direct deadlock when read()/write() into mapping of itself) - Add new flag UBC_ISMAPPED which tells ubc_uiomove() the object is mmap()ed somewhere. Use it to decide whether to do direct-mapped copy, rather than poking around directly in the vnode in ubc_uiomove(), which is ugly and doesn't work for tmpfs. It would be nicer to contain all this in UVM but the filesystem provides the needed locking here (VV_MAPPED) and to reinvent that would suck more. - Rename UBC_UNMAP_FLAG() to UBC_VNODE_FLAGS(). Pass in UBC_ISMAPPED where appropriate. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.223 2020/04/18 03:27:13 thorpej Exp $ */ d554 1 a554 1 ((((vp)->v_iflag & VI_TEXT) != 0 ? UBC_UNMAP : 0) | @ 1.223 log @Add an API to get a reference on the identity of an individual byte of virtual memory, a "virtual object address". This is not a reference to a physical byte of memory, per se, but a reference to a byte residing in a page, owned by a unique UVM object (either a uobj or an anon).
Two separate address+address space tuples that reference the same byte in an object (such as a location in a shared memory segment) will resolve to equivalent virtual object addresses. Even if the residency status of the page changes, the virtual object address remains unchanged. struct uvm_voaddr -- a structure that encapsulates this address reference. uvm_voaddr_acquire() -- a function to acquire this address reference, given a vm_map and a vaddr_t. uvm_voaddr_release() -- a function to release this address reference. uvm_voaddr_compare() -- a function to compare two such address references. uvm_voaddr_acquire() resolves the COW status of the object address before acquiring. In collaboration with riastradh@@ and chs@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.222 2020/03/22 18:32:42 ad Exp $ */ d229 1 d553 3 a555 1 #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) d557 2 a558 1 #define UBC_WANT_UNMAP(vp) false a559 1 #define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0) @ 1.222 log @Process concurrent page faults on individual uvm_objects / vm_amaps in parallel, where the relevant pages are already in-core. Proposed on tech-kern. Temporarily disabled on MP architectures with __HAVE_UNLOCKED_PMAP until adjustments are made to their pmaps. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.221 2020/02/23 15:46:43 ad Exp $ */ d609 34 d747 6 @ 1.222.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.223 2020/04/18 03:27:13 thorpej Exp $ */ a608 34 * uvm_voaddr: * * This structure encapsulates UVM's unique virtual object address * for an individual byte inside a pageable page. Pageable pages can * be owned by either a uvm_object (UVM_VOADDR_TYPE_OBJECT) or a * vm_anon (UVM_VOADDR_TYPE_ANON). * * In each case, the byte offset into the owning object * (uvm_object or vm_anon) is included in the ID, so that * two different offsets into the same page have distinct * IDs. * * Note that the page does not necessarily have to be resident * in order to know the virtual object address. However, it * is required that any pending copy-on-write is resolved. * * When someone wants a virtual object address, an extra reference * is taken on the owner while the caller uses the ID. This * ensures that the identity is stable for the duration of its * use. */ struct uvm_voaddr { enum { UVM_VOADDR_TYPE_OBJECT = 1, UVM_VOADDR_TYPE_ANON = 2, } type; union { struct uvm_object *uobj; struct vm_anon *anon; }; voff_t offset; }; /* a712 6 bool uvm_voaddr_acquire(struct vm_map *, vaddr_t, struct uvm_voaddr *); void uvm_voaddr_release(struct uvm_voaddr *); int uvm_voaddr_compare(const struct uvm_voaddr *, const struct uvm_voaddr *); @ 1.222.2.2 log @Sync with bouyer-xenpvh-base2 (HEAD) @ text @d1 1 a1 1 /* $NetBSD$ */ a228 1 #define UBC_ISMAPPED 0x008 /* object may be mapped by a process */ d552 1 a552 3 #define UBC_VNODE_FLAGS(vp) \ ((((vp)->v_iflag & VI_TEXT) != 0 ? UBC_UNMAP : 0) | (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0)) d554 1 a554 2 #define UBC_VNODE_FLAGS(vp) \ (((vp)->v_vflag & VV_MAPPED) != 0 ? UBC_ISMAPPED : 0) d556 1 @ 1.221 log @UVM locking changes, proposed on tech-kern: - Change the lock on uvm_object, vm_amap and vm_anon to be a RW lock. - Break v_interlock and vmobjlock apart. v_interlock remains a mutex. - Do partial PV list locking in the x86 pmap. Others to follow later. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.220 2020/02/18 20:23:17 chs Exp $ */ d252 1 d510 2 @ 1.220 log @remove the aiodoned thread.
I originally added this to provide a thread context for doing page cache iodone work, but since then biodone() has changed to hand off all iodone work to a softint thread, so we no longer need the special-purpose aiodoned thread. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.219 2020/01/15 17:55:45 ad Exp $ */ d739 1 a739 1 void uvm_obj_setlock(struct uvm_object *, kmutex_t *); @ 1.219 log @Merge from yamt-pagecache (after much testing): - Reduce unnecessary page scan in putpages esp. when an object has a ton of pages cached but only a few of them are dirty. - Reduce the number of pmap operations by tracking page dirtiness more precisely in uvm layer. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.218 2019/12/31 22:42:51 ad Exp $ */ a759 1 void uvm_aio_biodone(struct buf *); @ 1.218 log @- Add and use wrapper functions that take and acquire page interlocks, and pairs of page interlocks. Require that the page interlock be held over calls to uvm_pageactivate(), uvm_pagewire() and similar. - Solve the concurrency problem with page replacement state. Rather than updating the global state synchronously, set an intended state on individual pages (active, inactive, enqueued, dequeued) while holding the page interlock. After the interlock is released put the pages on a 128 entry per-CPU queue for their state changes to be made real in batch. This results in a ~400-fold decrease in contention on my test system. Proposed on tech-kern but modified to use the page interlock rather than atomics to synchronise as it's much easier to maintain that way, and cheaper. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.217 2019/12/31 13:07:14 ad Exp $ */ d503 6 d788 1 d792 2 a793 1 int *, struct vm_page **, int); @ 1.218.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.219 2020/01/15 17:55:45 ad Exp $ */ a502 6 int64_t anonunknown; int64_t anonclean; int64_t anondirty; int64_t fileunknown; int64_t fileclean; int64_t filedirty; a781 1 struct uvm_page_array; d785 1 a785 2 unsigned int *, struct vm_page **, struct uvm_page_array *, unsigned int); @ 1.218.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.221 2020/02/23 15:46:43 ad Exp $ */ d739 1 a739 1 void uvm_obj_setlock(struct uvm_object *, krwlock_t *); d760 1 @ 1.217 log @Rename uvm_free() -> uvm_availmem(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.216 2019/12/27 12:51:57 ad Exp $ */ d642 1 @ 1.216 log @Redo the page allocator to perform better, especially on multi-core and multi-socket systems. Proposed on tech-kern. While here: - add rudimentary NUMA support - needs more work. - remove now unused "listq" from vm_page. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.215 2019/12/21 12:58:26 ad Exp $ */ d739 1 a739 1 int uvm_free(void); @ 1.215 log @Add uvm_free(): returns number of free pages in system. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.214 2019/12/16 22:47:55 ad Exp $ */ d213 1 d740 1 @ 1.214 log @- Extend the per-CPU counters matt@@ did to include all of the hot counters in UVM, excluding uvmexp.free, which needs special treatment and will be done with a separate commit. Cuts system time for a build by 20-25% on a 48 CPU machine w/DIAGNOSTIC. - Avoid 64-bit integer divide on every fault (for rnd_add_uint32). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.213 2018/05/28 21:04:35 chs Exp $ */ d738 1 @ 1.213 log @allow tmpfs files to be larger than 4GB.
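For illustration only (hypothetical caller): with uao_create() now taking a 64-bit voff_t size instead of vsize_t, an anonymous UVM object backing a tmpfs file can exceed 4GB even on 32-bit platforms:

	/* Sketch: create and release an 8 GiB anonymous UVM object. */
	static void
	big_aobj_demo(void)
	{
		struct uvm_object *uao;

		uao = uao_create((voff_t)8 << 30, 0);	/* would not fit in a 32-bit vsize_t */
		/* ... hang the object off a tmpfs node, map it, etc. ... */
		uao_detach(uao);			/* drop the reference when done */
	}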
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.212 2018/05/19 11:39:37 jdolecek Exp $ */ d349 6 a354 6 int _unused_faults; /* page fault count */ int _unused_traps; /* trap count */ int _unused_intrs; /* interrupt count */ int _unused_swtch; /* context switch count */ int _unused_softs; /* software interrupt count */ int _unused_syscalls; /* system calls */ d454 1 a454 1 int64_t pgswapin; d500 2 d714 1 @ 1.213.2.1 log @Merge changes from current as of 20200406 @ text @d1 1 a1 1 /* $NetBSD$ */ a212 1 #define UVM_PGA_STRAT_NUMA 3 /* strongly prefer ideal bucket */ a250 1 #define UFP_NOBUSY 0x40 d349 6 a354 6 int faults; /* page fault count */ int traps; /* trap count */ int intrs; /* interrupt count */ int swtch; /* context switch count */ int softs; /* software interrupt count */ int syscalls; /* system calls */ d454 1 a454 1 int64_t pgswapin; /* unused */ a499 10 int64_t countsyncone; int64_t countsyncall; int64_t anonunknown; int64_t anonclean; int64_t anondirty; int64_t fileunknown; int64_t fileclean; int64_t filedirty; int64_t fltup; int64_t fltnoup; a638 1 void uvm_idle(void); a711 1 void uvm_update_uvmexp(void); d728 1 a728 1 void uvm_obj_setlock(struct uvm_object *, krwlock_t *); a734 2 int uvm_availmem(void); void uvm_page_numa_load(paddr_t, paddr_t, u_int); d747 1 a774 1 struct uvm_page_array; d778 1 a778 2 unsigned int *, struct vm_page **, struct uvm_page_array *, unsigned int); @ 1.213.2.2 log @Sync with HEAD @ text @a608 34 * uvm_voaddr: * * This structure encapsulates UVM's unique virtual object address * for an individual byte inside a pageable page. Pageable pages can * be owned by either a uvm_object (UVM_VOADDR_TYPE_OBJECT) or a * vm_anon (UVM_VOADDR_TYPE_ANON). * * In each case, the byte offset into the owning object * (uvm_object or vm_anon) is included in the ID, so that * two different offsets into the same page have distinct * IDs. * * Note that the page does not necessarily have to be resident * in order to know the virtual object address. However, it * is required that any pending copy-on-write is resolved. * * When someone wants a virtual object address, an extra reference * is taken on the owner while the caller uses the ID. This * ensures that the identity is stable for the duration of its * use. */ struct uvm_voaddr { enum { UVM_VOADDR_TYPE_OBJECT = 1, UVM_VOADDR_TYPE_ANON = 2, } type; union { struct uvm_object *uobj; struct vm_anon *anon; }; voff_t offset; }; /* a712 6 bool uvm_voaddr_acquire(struct vm_map *, vaddr_t, struct uvm_voaddr *); void uvm_voaddr_release(struct uvm_voaddr *); int uvm_voaddr_compare(const struct uvm_voaddr *, const struct uvm_voaddr *); @ 1.212 log @Remove emap support. Unfortunately it never got to a state where it would be used and usable, due to reliability and limited & complicated MD support. Going forward, we need to concentrate on interfaces which do not map anything into the kernel in the first place (such as direct map or KVA-less I/O), rather than making those mappings cheaper to do. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.211 2018/05/08 19:33:57 christos Exp $ */ d606 1 d609 1 a609 1 struct uvm_object *uao_create(vsize_t, int); @ 1.211 log @don't store the rssmax in the lwp rusage; it is a per-proc property. Instead utilize an unused field in the vmspace struct to store it. Also conditionalize on platforms that have pmap statistics available.
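Illustrative sketch (the accessor below is hypothetical; the field is the vm_rssmax member this change adds to struct vmspace):

	/* Sketch: read the peak resident set size, in pages, for a process. */
	static segsz_t
	proc_rssmax(struct proc *p)
	{
		return p->p_vmspace->vm_rssmax;
	}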
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.210 2018/04/20 19:02:18 jdolecek Exp $ */ a620 26 /* uvm_emap.c */ void uvm_emap_sysinit(void); #ifdef __HAVE_PMAP_EMAP void uvm_emap_switch(lwp_t *); #else #define uvm_emap_switch(l) #endif u_int uvm_emap_gen_return(void); void uvm_emap_update(u_int); vaddr_t uvm_emap_alloc(vsize_t, bool); void uvm_emap_free(vaddr_t, size_t); void uvm_emap_enter(vaddr_t, struct vm_page **, u_int, vm_prot_t); void uvm_emap_remove(vaddr_t, vsize_t); #ifdef __HAVE_PMAP_EMAP void uvm_emap_consume(u_int); u_int uvm_emap_produce(void); #else #define uvm_emap_consume(x) #define uvm_emap_produce() UVM_EMAP_INACTIVE #endif @ 1.210 log @add prot parameter for uvm_emap_enter(), so that it's possible to enter also read/write mappings @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.209 2018/04/20 18:58:10 jdolecek Exp $ */ d560 1 a560 1 segsz_t vm_swrss; /* resident set size before last swap */ @ 1.209 log @make ubc_alloc() and ubc_release() static, they should not be used outside of ubc_uiomove()/ubc_zeropage(); for now mark as noinline to keep them available as breakpoints @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.208 2017/12/15 16:03:29 maya Exp $ */ d635 2 a636 1 void uvm_emap_enter(vaddr_t, struct vm_page **, u_int); @ 1.208 log @Match locking notes with reality. misc_lock is used to protect vm_refcnt. ok chuq @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.207 2017/12/02 08:15:43 mrg Exp $ */ d221 1 a221 1 * flags for ubc_alloc() a615 3 void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); void ubc_release(void *, int); @ 1.208.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.210 2018/04/20 19:02:18 jdolecek Exp $ */ d221 1 a221 1 * flags for ubc_uiomove() d616 3 d638 1 a638 2 void uvm_emap_enter(vaddr_t, struct vm_page **, u_int, vm_prot_t); @ 1.208.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.212 2018/05/19 11:39:37 jdolecek Exp $ */ d560 1 a560 1 segsz_t vm_rssmax; /* max resident size in pages */ d621 26 @ 1.208.2.3 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.213 2018/05/28 21:04:35 chs Exp $ */ a605 1 void ktext_write(void *, const void *, size_t); d608 1 a608 1 struct uvm_object *uao_create(voff_t, int); @ 1.207 log @add two new members to uvmexp_sysctl{}: bootpages and poolpages. bootpages is set to the pages allocated via uvm_pageboot_alloc(). poolpages is calculated from the list of pools nr_pages members. this brings us closer to having a valid total of pages known by the system, vs actual pages originally managed. XXX: poolpages needs some handling for PR_RECURSIVE pools still. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.206 2017/05/20 07:27:15 chs Exp $ */ d555 1 a555 1 * note: protected by vm_map.ref_lock */ @ 1.206 log @MAP_FIXED means something different for mremap() than it does for mmap(), so we cannot use UVM_FLAG_FIXED to specify both behaviors. keep UVM_FLAG_FIXED with its earlier meaning (prior to my previous change) of whether to use uvm_map_findspace() to locate space for the new mapping or to use the hint address that the caller passed in, and add a new flag UVM_FLAG_UNMAP to indicate that any existing entries in the range should be unmapped as part of creating the new mapping. the new UVM_FLAG_UNMAP flag may only be used if UVM_FLAG_FIXED is also specified. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.205 2017/05/17 22:43:12 christos Exp $ */ d411 2 d498 2 @ 1.205 log @snprintb(3) for UVM_FLAGS. 
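A hedged usage sketch (function, buffer size, and the printed form are illustrative): UVM_FLAG_BITS is a new-style bitmask format string, so a uvm_map() flag word can now be rendered symbolically with snprintb(3) for debug output:

	/* Sketch: compose a flag word with UVM_MAPFLAG() and decode it. */
	static void
	print_map_flags(void)
	{
		char buf[128];
		uvm_flag_t flags = UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RWX,
		    UVM_INH_COPY, UVM_ADV_NORMAL, UVM_FLAG_COPYONW);

		snprintb(buf, sizeof(buf), UVM_FLAG_BITS, flags);
		printf("%s\n", buf);	/* roughly "0x80713<PROT=RW,INH=COPY,...,COPYONW>" */
	}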
*/ @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.204 2017/05/06 21:34:52 joerg Exp $ */ d124 11 a134 10 #define UVM_FLAG_FIXED 0x010000 /* find space */ #define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */ #define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */ #define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */ #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce allocations */ #define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */ #define UVM_FLAG_NOWAIT 0x400000 /* not allowed to sleep */ #define UVM_FLAG_WAITVA 0x800000 /* wait for va */ #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ #define UVM_FLAG_COLORMATCH 0x4000000 /* match color given in off */ d176 3 a178 1 b\31COLORMATCH\0" @ 1.204 log @Extend the mmap(2) interface to allow requesting protections for later use with mprotect(2), but without enabling them immediately. Extend the mremap(2) interface to allow duplicating mappings, i.e. create a second range of virtual addresses referencing the same physical pages. Duplicated mappings can have different effective protections. Adjust PAX mprotect logic to disallow effective protections of W&X, but allow one mapping W and another X protections. This obsoletes using temporary files for purposes like JIT. Adjust PAX logic for mmap(2) and mprotect(2) to fail if W&X is requested and not silently drop the X protection. Improve test cases to ensure correct operation of the changed interfaces. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.203 2017/01/04 23:59:49 christos Exp $ */ d135 42 @ 1.203 log @don't include uvm_physseg.h for kmem grovellers. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.202 2017/01/02 20:22:20 cherry Exp $ */ d665 2 @ 1.203.6.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.204 2017/05/06 21:34:52 joerg Exp $ */ a664 2 int uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t, vm_prot_t); @ 1.203.6.2 log @Resolve conflicts from previous merge (all resulting from $NetBSD keyword expansion) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.205 2017/05/17 22:43:12 christos Exp $ */ a134 42 #define UVM_FLAG_BITS "\177\020\ F\0\3\ :\0PROT=NONE\0\ :\1PROT=R\0\ :\2PROT=W\0\ :\3PROT=RW\0\ :\4PROT=X\0\ :\5PROT=RX\0\ :\6PROT=WX\0\ :\7PROT=RWX\0\ F\4\2\ :\0INH=SHARE\0\ :\1INH=COPY\0\ :\2INH=NONE\0\ :\3INH=DONATE\0\ F\10\3\ :\0MAXPROT=NONE\0\ :\1MAXPROT=R\0\ :\2MAXPROT=W\0\ :\3MAXPROT=RW\0\ :\4MAXPROT=X\0\ :\5MAXPROT=RX\0\ :\6MAXPROT=WX\0\ :\7MAXPROT=RWX\0\ F\14\3\ :\0ADV=NORMAL\0\ :\1ADV=RANDOM\0\ :\2ADV=SEQUENTIAL\0\ :\3ADV=WILLNEED\0\ :\4ADV=DONTNEED\0\ :\5ADV=NOREUSE\0\ b\20FIXED\0\ b\21OVERLAY\0\ b\22NOMERGE\0\ b\23COPYONW\0\ b\24AMAPPAD\0\ b\25TRYLOCK\0\ b\26NOWAIT\0\ b\27WAITVA\0\ b\30VAONLY\0\ b\31COLORMATCH\0" @ 1.202 log @Remove a redundant #ifdef _KERNEL/#endif pair. ok mrg@@ @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.201 2016/12/24 19:21:29 cherry Exp $ */ d475 1 d477 1 @ 1.201 log @uvm_extern.h has both a _KERNEL only, and a non _KERNEL only API. Since we unconditionally expose the uvm_physseg.h API via uvm_extern.h right now, and since uvm_physseg.h uses a kernel only datatype, viz psize_t, we restrict exposure of the uvm_physseg.h API to kernel only. This is in conformance with its documentation via uvm_hotplug(9) as a kernel internal API.
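Shape of the resulting guard in uvm_extern.h (paraphrased, not the literal diff):

	#ifdef _KERNEL
	#include <uvm/uvm_physseg.h>	/* uses psize_t, a kernel-only type */
	#endif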
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.200 2016/12/22 13:26:24 cherry Exp $ */ a646 1 #ifdef _KERNEL a650 1 #endif @ 1.200 log @Use uvm_physseg.h:uvm_page_physload() instead of uvm_extern.h. For this, include uvm_physseg.h in the build and include tree, make a cosmetic modification to the prototype for uvm_page_physload(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.199 2016/12/22 12:55:21 cherry Exp $ */ a474 1 #endif d483 1 @ 1.199 log @Add a new function called uvm_md_init() that can be called at the appropriate time in the boot path by MD code. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.198 2016/07/20 12:38:43 maxv Exp $ */ d478 8 a719 3 /* Actually, uvm_page_physload takes PF#s which need their own type */ void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int); @ 1.198 log @Introduce uvm_km_protect. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.197 2016/05/25 17:43:58 christos Exp $ */ d622 1 @ 1.197 log @Introduce security.pax.mprotect.ptrace sysctl which can be used to bypass mprotect settings so that debuggers can write to the text segment of traced processes so that they can insert breakpoints. Turned off by default. Ok: chuq (for now) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.196 2016/02/05 04:18:55 christos Exp $ */ d630 2 @ 1.197.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.198 2016/07/20 12:38:43 maxv Exp $ */ a629 2 int uvm_km_protect(struct vm_map *, vaddr_t, vsize_t, vm_prot_t); @ 1.197.2.2 log @Sync with HEAD. (Note that most of these changes are simply $NetBSD$ tag issues.) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.203 2017/01/04 23:59:49 christos Exp $ */ a476 10 #ifdef _KERNEL /* * Include the uvm_hotplug(9) API unconditionally until * uvm_page_physload() et. al. are obsoleted * * After this, MD code will have to explicitly include it if needed. */ #include <uvm/uvm_physseg.h> #endif a621 1 void uvm_md_init(void); d638 1 d643 1 d711 3 @ 1.196 log @PR/50744: NONAKA Kimihiro: Protect more stuff with _KERNEL && _KMEMUSER to make uvm_extern.h compile standalone again for net-snmp. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.195 2015/11/26 13:15:34 martin Exp $ */ d625 1 a625 1 int uvm_io(struct vm_map *, struct uio *); @ 1.195 log @We never exec(2) with a kernel vmspace, so do not test for that, but instead KASSERT() that we don't. When calculating the load address for the interpreter (e.g. ld.elf_so), we need to take into account whether the exec'd process will run with topdown memory or bottom up. We can not use the current vmspace's flags to test for that, as this happens too early. Luckily the execpack already knows what the new state will be later, so instead of testing the current vmspace, pass the info as additional argument to struct emul e_vm_default_addr. Fix all such functions and adapt all callers. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.194 2015/03/20 15:41:43 riastradh Exp $ */ d678 2 a679 1 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); @ 1.194 log @Comments explaining UBC_* flags. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.193 2015/02/06 18:19:22 maxv Exp $ */ d178 5 a182 3 #define UBC_READ 0x001 #define UBC_WRITE 0x002 #define UBC_FAULTBUSY 0x004 d187 2 a188 1 #define UBC_UNMAP 0x010 d191 1 a191 1 * flags for ubc_uiomve() d193 2 a194 1 #define UBC_PARTIALOK 0x100 @ 1.193 log @Kill kmeminit().
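(Aside: a hedged usage sketch for the uvm_km_protect() interface introduced in 1.198 above, whose log is terse. The helper and its arguments are hypothetical; the range is assumed to come from a prior uvm_km_alloc().)

	/* Sketch: make an existing wired kernel mapping read/execute only. */
	static int
	make_kmem_rx(vaddr_t va, vsize_t len)
	{
		return uvm_km_protect(kernel_map, va, len,
		    VM_PROT_READ | VM_PROT_EXECUTE);
	}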
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.192 2014/12/14 23:48:58 chs Exp $ */ d178 5 a182 3 #define UBC_READ 0x001 #define UBC_WRITE 0x002 #define UBC_FAULTBUSY 0x004 d187 2 a188 1 #define UBC_UNMAP 0x010 d191 1 a191 1 * flags for ubc_uiomve() d193 2 a194 1 #define UBC_PARTIALOK 0x100 @ 1.192 log @add a new "fo_mmap" fileops method to allow use of arbitrary uvm_objects for mappings of file objects. move vnode-specific details of mmap()ing a vnode from uvm_mmap() to the new vnode-specific vn_mmap(). add new uvm_mmap_dev() and uvm_mmap_anon() convenience functions for mapping character devices and anonymous memory, and replace all other calls to uvm_mmap() with those. use the new fileop in drm2 so that libdrm can use mmap() to map things like on other platforms (instead of the ioctl that we have used so far). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191 2014/07/07 20:14:43 riastradh Exp $ */ a743 1 void kmeminit(void); @ 1.191 log @Initialize ubchist earlier. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.190 2014/05/22 14:01:46 riastradh Exp $ */ d671 3 a673 3 int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, void *, voff_t, vsize_t); @ 1.191.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.194 2015/03/20 15:41:43 riastradh Exp $ */ d178 3 a180 5 #define UBC_READ 0x001 /* reading from object */ #define UBC_WRITE 0x002 /* writing to object */ #define UBC_FAULTBUSY 0x004 /* nobody else is using these pages, so busy * them at alloc and unbusy at release (e.g., * for writes extending a file) */ d185 1 a185 2 #define UBC_UNMAP 0x010 /* unmap pages now -- don't leave the * mappings cached indefinitely */ d188 1 a188 1 * flags for ubc_uiomove() d190 1 a190 2 #define UBC_PARTIALOK 0x100 /* return early on error; otherwise, zero all * remaining bytes after error */ d671 3 a673 3 int uvm_mmap_dev(struct proc *, void **, size_t, dev_t, off_t); int uvm_mmap_anon(struct proc *, void **, size_t); d744 1 @ 1.191.4.2 log @Sync with HEAD (as of 26th Dec) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.1 2015/04/06 15:18:33 skrll Exp $ */ d678 1 a678 2 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t, int); @ 1.191.4.3 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.2 2015/12/27 12:10:19 skrll Exp $ */ a471 1 #if defined(_KERNEL) || defined(_KMEMUSER) a473 1 #endif a484 1 #if defined(_KERNEL) || defined(_KMEMUSER) a509 1 #endif @ 1.191.4.4 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.3 2016/03/19 11:30:39 skrll Exp $ */ d625 1 a625 1 int uvm_io(struct vm_map *, struct uio *, int); @ 1.191.4.5 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.4 2016/05/29 08:44:40 skrll Exp $ */ a629 2 int uvm_km_protect(struct vm_map *, vaddr_t, vsize_t, vm_prot_t); @ 1.191.4.6 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.5 2016/10/05 20:56:12 skrll Exp $ */ a476 10 #ifdef _KERNEL /* * Include the uvm_hotplug(9) API unconditionally until * uvm_page_physload() et. al. are obsoleted * * After this, MD code will have to explicitly include it if needed. 
*/ #include <uvm/uvm_physseg.h> #endif a621 1 void uvm_md_init(void); d638 1 d643 1 d711 3 @ 1.191.4.7 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.4.6 2017/02/05 13:41:01 skrll Exp $ */ d124 10 a133 55 #define UVM_FLAG_FIXED 0x00010000 /* find space */ #define UVM_FLAG_OVERLAY 0x00020000 /* establish overlay */ #define UVM_FLAG_NOMERGE 0x00040000 /* don't merge map entries */ #define UVM_FLAG_COPYONW 0x00080000 /* set copy_on_write flag */ #define UVM_FLAG_AMAPPAD 0x00100000 /* for bss: pad amap */ #define UVM_FLAG_TRYLOCK 0x00200000 /* fail if we can not lock map */ #define UVM_FLAG_NOWAIT 0x00400000 /* not allowed to sleep */ #define UVM_FLAG_WAITVA 0x00800000 /* wait for va */ #define UVM_FLAG_VAONLY 0x02000000 /* unmap: no pages are mapped */ #define UVM_FLAG_COLORMATCH 0x04000000 /* match color given in off */ #define UVM_FLAG_UNMAP 0x08000000 /* unmap existing entries */ #define UVM_FLAG_BITS "\177\020\ F\0\3\ :\0PROT=NONE\0\ :\1PROT=R\0\ :\2PROT=W\0\ :\3PROT=RW\0\ :\4PROT=X\0\ :\5PROT=RX\0\ :\6PROT=WX\0\ :\7PROT=RWX\0\ F\4\2\ :\0INH=SHARE\0\ :\1INH=COPY\0\ :\2INH=NONE\0\ :\3INH=DONATE\0\ F\10\3\ :\0MAXPROT=NONE\0\ :\1MAXPROT=R\0\ :\2MAXPROT=W\0\ :\3MAXPROT=RW\0\ :\4MAXPROT=X\0\ :\5MAXPROT=RX\0\ :\6MAXPROT=WX\0\ :\7MAXPROT=RWX\0\ F\14\3\ :\0ADV=NORMAL\0\ :\1ADV=RANDOM\0\ :\2ADV=SEQUENTIAL\0\ :\3ADV=WILLNEED\0\ :\4ADV=DONTNEED\0\ :\5ADV=NOREUSE\0\ b\20FIXED\0\ b\21OVERLAY\0\ b\22NOMERGE\0\ b\23COPYONW\0\ b\24AMAPPAD\0\ b\25TRYLOCK\0\ b\26NOWAIT\0\ b\27WAITVA\0\ b\30VAONLY\0\ b\31COLORMATCH\0\ b\32UNMAP\0\ " a664 2 int uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t, vm_prot_t); @ 1.191.2.1 log @Pull up following revision(s) (requested by chs in ticket #363): common/lib/libprop/prop_kern.c: revision 1.18 sys/arch/mac68k/dev/grf_compat.c: revision 1.27 sys/arch/x68k/dev/grf.c: revision 1.45 sys/external/bsd/drm/dist/bsd-core/drm_bufs.c: revision 1.12 sys/external/bsd/drm2/drm/drm_drv.c: revision 1.12 sys/external/bsd/drm2/drm/drm_vm.c: revision 1.6 sys/external/bsd/drm2/include/linux/mm.h: revision 1.4 sys/kern/vfs_vnops.c: revision 1.192 via patch sys/rump/librump/rumpkern/vm.c: revision 1.160 sys/sys/file.h: revision 1.78 via patch sys/uvm/uvm_device.c: revision 1.64 sys/uvm/uvm_device.h: revision 1.13 sys/uvm/uvm_extern.h: revision 1.192 sys/uvm/uvm_mmap.c: revision 1.150 via patch add a new "fo_mmap" fileops method to allow use of arbitrary uvm_objects for mappings of file objects. move vnode-specific details of mmap()ing a vnode from uvm_mmap() to the new vnode-specific vn_mmap(). add new uvm_mmap_dev() and uvm_mmap_anon() convenience functions for mapping character devices and anonymous memory, and replace all other calls to uvm_mmap() with those. use the new fileop in drm2 so that libdrm can use mmap() to map things like on other platforms (instead of the ioctl that we have used so far). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191 2014/07/07 20:14:43 riastradh Exp $ */ d671 3 a673 3 int uvm_mmap_dev(struct proc *, void **, size_t, dev_t, off_t); int uvm_mmap_anon(struct proc *, void **, size_t); @ 1.191.2.2 log @Pull up following revision(s) (requested by maxv in ticket #617): sys/kern/kern_malloc.c: revision 1.144, 1.145 sys/kern/kern_pmf.c: revision 1.37 sys/rump/librump/rumpkern/rump.c: revision 1.316 sys/uvm/uvm_extern.h: revision 1.193 sys/uvm/uvm_km.c: revision 1.139 Don't include -- Kill kmeminit(). -- Remove this MALLOC_DEFINE (M_PMF unused).
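(Aside, illustrating the uvm_mmap_anon() convenience function from the previous ticket; the wrapper is hypothetical and the in/out hint behaviour is a sketch of the interface, not a literal quote of it:)

	/* Sketch: map four zero-filled pages into curproc's address space. */
	static int
	anon_map_demo(void **addrp)
	{
		*addrp = NULL;		/* in: address hint, out: chosen address */
		return uvm_mmap_anon(curproc, addrp, 4 * PAGE_SIZE);
	}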
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191.2.1 2014/12/31 06:44:01 snj Exp $ */ d744 1 @ 1.190 log @Add uao_set_pgfl to limit a uvm_aobj's pages to a specified freelist. Brought up on tech-kern: https://mail-index.netbsd.org/tech-kern/2014/05/20/msg017095.html @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.189 2014/02/21 22:08:07 skrll Exp $ */ d548 1 @ 1.189 log @Remove unnecessary struct simplelock forward declaration. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.188 2014/01/03 21:12:18 dsl Exp $ */ d542 1 @ 1.189.2.1 log @Rebase. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.191 2014/07/07 20:14:43 riastradh Exp $ */ a541 1 void uao_set_pgfl(struct uvm_object *, int); a546 1 void ubchist_init(void); @ 1.188 log @There is no need for uvm_coredump_walkmap() to explicitly pass the proc_t pointer to the caller's function. If the code needs the process, its address can be placed in the caller's cookie. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.187 2014/01/03 15:15:02 dsl Exp $ */ a234 1 struct simplelock; @ 1.187 log @Minor changes to the process coredump code. - Add some extra comments. - Add some XXX comments because the process state might not be stable. - Add uvm_coredump_count_segs() to simplify the calling code. - uvm code now only returns non-empty sections/segments. - Put the 'iocookie' into the 'cookie' block passed to uvm_coredump_walkmap() instead of passing it through as an additional parameter. amd64 can still generate core dumps that gdb can read. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.186 2014/01/01 18:57:16 dsl Exp $ */ d593 2 a594 3 struct coredump_iostate; int uvm_coredump_walkmap(struct proc *, int (*)(struct proc *, struct coredump_iostate *, struct uvm_coredump_state *), void *); @ 1.186 log @Change the type of the 'cookie' that holds the state of the core dump file from 'void *' to the actual type 'struct coredump_iostate *'. In most of the code the contents of the structure are still unknown. This just stops the wrong type of pointer being passed to the 'void *' parameter. I hope I've found everything, amd64 GENERIC and i386 GENERIC & ALL compile. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.185 2013/11/14 12:07:11 martin Exp $ */ d594 1 d595 2 a596 2 void *, int (*)(struct proc *, void *, @ 1.185 log @As discussed on tech-kern: make TOPDOWN-VM runtime selectable per process (offer MD code or emulations to override it). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.184 2012/09/01 00:26:37 matt Exp $ */ d646 1 a646 1 struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t); d648 3 a650 3 vaddr_t, vaddr_t); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t); void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t); @ 1.184.2.1 log @Rebase to HEAD as of a few days ago.
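(Aside: a hedged sketch of the uvm_coredump_walkmap() calling convention described in 1.187/1.188 above; the callback and counter are illustrative only:)

	/* Callback: caller state rides in 'cookie' rather than a proc_t. */
	static int
	count_stack_segs(struct uvm_coredump_state *us)
	{
		int *countp = us->cookie;

		if (us->flags & UVM_COREDUMP_STACK)
			(*countp)++;
		return 0;		/* nonzero would stop the walk */
	}

	/* Caller: int n = 0; error = uvm_coredump_walkmap(p, count_stack_segs, &n); */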
@ text @d1 1 a1 1 /* $NetBSD$ */ d235 1 a542 1 void uao_set_pgfl(struct uvm_object *, int); a547 1 void ubchist_init(void); d594 3 a596 2 int (*)(struct uvm_coredump_state *), void *); int uvm_coredump_count_segs(struct proc *); d646 1 a646 1 struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool); d648 3 a650 3 vaddr_t, vaddr_t, bool); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool); void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool); @ 1.184.2.2 log @update from HEAD @ text @d124 10 a133 55 #define UVM_FLAG_FIXED 0x00010000 /* find space */ #define UVM_FLAG_OVERLAY 0x00020000 /* establish overlay */ #define UVM_FLAG_NOMERGE 0x00040000 /* don't merge map entries */ #define UVM_FLAG_COPYONW 0x00080000 /* set copy_on_write flag */ #define UVM_FLAG_AMAPPAD 0x00100000 /* for bss: pad amap */ #define UVM_FLAG_TRYLOCK 0x00200000 /* fail if we can not lock map */ #define UVM_FLAG_NOWAIT 0x00400000 /* not allowed to sleep */ #define UVM_FLAG_WAITVA 0x00800000 /* wait for va */ #define UVM_FLAG_VAONLY 0x02000000 /* unmap: no pages are mapped */ #define UVM_FLAG_COLORMATCH 0x04000000 /* match color given in off */ #define UVM_FLAG_UNMAP 0x08000000 /* unmap existing entries */ #define UVM_FLAG_BITS "\177\020\ F\0\3\ :\0PROT=NONE\0\ :\1PROT=R\0\ :\2PROT=W\0\ :\3PROT=RW\0\ :\4PROT=X\0\ :\5PROT=RX\0\ :\6PROT=WX\0\ :\7PROT=RWX\0\ F\4\2\ :\0INH=SHARE\0\ :\1INH=COPY\0\ :\2INH=NONE\0\ :\3INH=DONATE\0\ F\10\3\ :\0MAXPROT=NONE\0\ :\1MAXPROT=R\0\ :\2MAXPROT=W\0\ :\3MAXPROT=RW\0\ :\4MAXPROT=X\0\ :\5MAXPROT=RX\0\ :\6MAXPROT=WX\0\ :\7MAXPROT=RWX\0\ F\14\3\ :\0ADV=NORMAL\0\ :\1ADV=RANDOM\0\ :\2ADV=SEQUENTIAL\0\ :\3ADV=WILLNEED\0\ :\4ADV=DONTNEED\0\ :\5ADV=NOREUSE\0\ b\20FIXED\0\ b\21OVERLAY\0\ b\22NOMERGE\0\ b\23COPYONW\0\ b\24AMAPPAD\0\ b\25TRYLOCK\0\ b\26NOWAIT\0\ b\27WAITVA\0\ b\30VAONLY\0\ b\31COLORMATCH\0\ b\32UNMAP\0\ " d178 3 a180 5 #define UBC_READ 0x001 /* reading from object */ #define UBC_WRITE 0x002 /* writing to object */ #define UBC_FAULTBUSY 0x004 /* nobody else is using these pages, so busy * them at alloc and unbusy at release (e.g., * for writes extending a file) */ d185 1 a185 2 #define UBC_UNMAP 0x010 /* unmap pages now -- don't leave the * mappings cached indefinitely */ d188 1 a188 1 * flags for ubc_uiomove() d190 1 a190 2 #define UBC_PARTIALOK 0x100 /* return early on error; otherwise, zero all * remaining bytes after error */ a361 2 int bootpages; /* number of pages stolen at boot */ a446 2 int64_t bootpages; int64_t poolpages; a467 1 #if defined(_KERNEL) || defined(_KMEMUSER) a469 11 #endif #ifdef _KERNEL /* * Include the uvm_hotplug(9) API unconditionally until * uvm_page_physload() et. al. are obsoleted * * After this, MD code will have to explicitly include it if needed. 
*/ #include <uvm/uvm_physseg.h> #endif a480 1 #if defined(_KERNEL) || defined(_KMEMUSER) a505 1 #endif a613 1 void uvm_md_init(void); d617 1 a617 1 int uvm_io(struct vm_map *, struct uio *, int); a621 2 int uvm_km_protect(struct vm_map *, vaddr_t, vsize_t, vm_prot_t); d628 1 d633 1 a645 2 int uvm_map_protect_user(struct lwp *, vaddr_t, vaddr_t, vm_prot_t); d671 4 a674 5 int uvm_mmap_dev(struct proc *, void **, size_t, dev_t, off_t); int uvm_mmap_anon(struct proc *, void **, size_t); vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t, int); d700 3 d744 1 @ 1.184.4.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.184 2012/09/01 00:26:37 matt Exp $ */ d235 1 d594 3 a596 2 int (*)(struct uvm_coredump_state *), void *); int uvm_coredump_count_segs(struct proc *); d646 1 a646 1 struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool); d648 3 a650 3 vaddr_t, vaddr_t, bool); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool); void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool); @ 1.183 log @Rework posix_spawn locking and memory management: - always provide a vmspace for the new proc, initially borrowing from proc0 (this part fixes PR 46286) - increase parallelism between parent and child if arguments allow this, avoiding a potential deadlock on exec_lock - add a new flag for userland to request old (lockstepped) behaviour for better error reporting - adapt test cases to the previous two and add a new variant to test the diagnostics flag - fix a few memory (and lock) leaks - provide netbsd32 compat @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.182 2012/03/18 13:31:14 uebayasi Exp $ */ d604 1 a604 1 vaddr_t uvm_uarea_system_alloc(void); @ 1.182 log @Move base type definitions from uvm_extern.h to uvm_param.h so that other sources can easily include part of UVM headers without the whole uvm_extern.h (e.g. sys/vnode.h wants only uvm_object.h). @ text @d1 1 a1 1 /* $NetBSD$ */ d650 1 @ 1.181 log @- bring kmeminit_nkmempages back and revert pmaps that called this early - use nkmempages to scale the kmem_arena - reduce diff to pre kmem/vmem change (NKMEMPAGES_MAX_DEFAULT will need adjusting on some archs) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.180 2012/01/27 19:48:41 para Exp $ */ a75 10 * typedefs, necessary for standard UVM headers.
*/ typedef unsigned int uvm_flag_t; typedef int vm_inherit_t; /* XXX: inheritance codes */ typedef off_t voff_t; /* XXX: offset within a uvm_object */ typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */ /* @ 1.181.2.1 log @Pull up following revision(s) (requested by martin in ticket #175): sys/kern/kern_exit.c: revision 1.238 tests/lib/libc/gen/posix_spawn/t_fileactions.c: revision 1.4 tests/lib/libc/gen/posix_spawn/t_fileactions.c: revision 1.5 sys/uvm/uvm_extern.h: revision 1.183 lib/libc/gen/posix_spawn_fileactions.c: revision 1.2 sys/kern/kern_exec.c: revision 1.348 sys/kern/kern_exec.c: revision 1.349 sys/compat/netbsd32/syscalls.master: revision 1.95 sys/uvm/uvm_glue.c: revision 1.159 sys/uvm/uvm_map.c: revision 1.317 sys/compat/netbsd32/netbsd32.h: revision 1.95 sys/kern/exec_elf.c: revision 1.38 sys/sys/spawn.h: revision 1.2 sys/sys/exec.h: revision 1.135 sys/compat/netbsd32/netbsd32_execve.c: revision 1.34 Rework posix_spawn locking and memory management: - always provide a vmspace for the new proc, initially borrowing from proc0 (this part fixes PR 46286) - increase parallelism between parent and child if arguments allow this, avoiding a potential deadlock on exec_lock - add a new flag for userland to request old (lockstepped) behaviour for better error reporting - adapt test cases to the previous two and add a new variant to test the diagnostics flag - fix a few memory (and lock) leaks - provide netbsd32 compat Fix asynchronous posix_spawn child exit status (and test for it). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.181 2012/02/02 18:59:45 para Exp $ */ a659 1 void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t); @ 1.181.2.1.2.1 log @Pull from HEAD: Add a __HAVE_CPU_UAREA_IDLELWP hook so that the MD code can allocate special UAREAs for idle lwp's. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.181.2.1 2012/04/12 17:05:37 riz Exp $ */ d614 1 a614 1 vaddr_t uvm_uarea_system_alloc(struct cpu_info *); @ 1.180 log @extending vmem(9) to be able to allocate resources for its own needs. simplifying uvm_map handling (no special kernel entries anymore, no relocking) make malloc(9) a thin wrapper around kmem(9) (with private interface for interrupt safety reasons) releng@@ acknowledged @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.179 2012/01/05 15:19:53 reinoud Exp $ */ d141 1 a141 2 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ #define UVM_FLAG_WAITVA 0x1000000 /* wait for va */ d473 3 a534 13 * Structure containing uvm reclaim hooks, uvm_reclaim_list is guarded by * uvm_reclaim_lock.
*/ struct uvm_reclaim_hook { void (*uvm_reclaim_hook)(void); SLIST_ENTRY(uvm_reclaim_hook) uvm_reclaim_next; }; void uvm_reclaim_init(void); void uvm_reclaim_hook_add(struct uvm_reclaim_hook *); void uvm_reclaim_hook_del(struct uvm_reclaim_hook *); /* a537 1 extern struct vm_map *kmem_map; a545 3 #include <sys/malloc.h> MALLOC_DECLARE(M_VMMAP); MALLOC_DECLARE(M_VMPMAP); d637 7 a643 7 struct vm_map_kernel *); vaddr_t uvm_km_alloc_poolpage(struct vm_map *, bool); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, bool); void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t); void uvm_km_vacache_init(struct vm_map *, const char *, size_t); d752 1 a753 2 void kmeminit(void); extern int nkmempages; @ 1.178 log @Redo uvm_map_setattr() to never fail and remove the possible panic. The possibility of failure was a C&P error. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.177 2011/12/20 15:39:35 reinoud Exp $ */ a682 5 void uvm_map_setattr(struct vm_map *, vaddr_t, vaddr_t, uint32_t); bool uvm_map_checkattr(struct vm_map *, vaddr_t, vaddr_t, uint32_t); @ 1.177 log @Add a MAP_NOSYSCALLS flag to mmap. This flag prohibits the execution of system calls from the mapped region. This can be used for emulation purposes or for extra security in the case of generated code. It's implemented by adding mapping-attributes to each uvm_map_entry. These can then be queried when needed. Currently MAP_NOSYSCALLS is only implemented for x86, but other architectures are easy to adapt; see the sys/arch/x86/x86/syscall.c patch. Port maintainers are encouraged to add them for their processor ports too. When this feature is not yet implemented for an architecture, MAP_NOSYSCALLS is simply ignored, with virtually no cpu cost. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176 2011/09/01 06:40:28 matt Exp $ */ d683 1 a683 1 bool uvm_map_setattr(struct vm_map *, vaddr_t, @ 1.176 log @Forward some UVM from matt-nb5-mips64. Add UVM_KMF_COLORMATCH flag. When uvm_map gets passed UVM_FLAG_COLORMATCH, the align argument contains the color of the starting address to be allocated (0..colormask). When uvm_km_alloc is passed UVM_KMF_COLORMATCH (which can only be used with UVM_KMF_VAONLY), the align argument contains the color of the starting address to be allocated. Change uvm_pagermapin to use this. When mapping user pages in the kernel, if colormatch is used with the color of the starting user page then the kernel mapping will be congruent with the existing user mappings. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.175 2011/08/27 09:11:53 christos Exp $ */ d683 5 @ 1.176.6.1 log @merge to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.181 2012/02/02 18:59:45 para Exp $ */ d141 2 a142 1 #define UVM_FLAG_WAITVA 0x800000 /* wait for va */ a473 3 #ifdef _KERNEL #include <sys/vmem.h> #endif d533 13 d549 1 d558 3 d652 7 a658 7 struct vm_map *); #ifdef _KERNEL int uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *); void uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t); bool uvm_km_va_starved_p(void); #endif @ 1.176.2.1 log @- track the number of clean/dirty/unknown pages in the system.
- g/c PG_MARKER @ text @d1 1 a1 1 /* $NetBSD$ */ a458 3 int64_t mightdirtypages; int64_t cleanpages; int64_t dirtypages; @ 1.176.2.2 log @redo the page clean/dirty/unknown accounting separately for file and anonymous pages @ text @a461 3 int64_t mightdirtyanonpages; int64_t cleananonpages; int64_t dirtyanonpages; @ 1.176.2.3 log @might dirty -> possibly dirty suggested by wiz@@ @ text @d459 1 a459 1 int64_t possiblydirtypages; d462 1 a462 1 int64_t possiblydirtyanonpages; @ 1.176.2.4 log @- fix page loaning XXX make O->A loaning further - add some statistics @ text @a458 1 a464 13 int64_t loan_obj; /* O->K loan */ int64_t unloan_obj; /* O->K unloan */ int64_t loanbreak_obj; /* O->K loan resolved on write */ int64_t loanfree_obj; /* O->K loan resolved on free */ int64_t loan_anon; /* A->K loan */ int64_t unloan_anon; /* A->K unloan */ int64_t loanbreak_anon; /* A->K loan resolved on write */ int64_t loanfree_anon; /* A->K loan resolved on free */ int64_t loan_zero; /* O->K loan (zero) */ int64_t unloan_zero; /* O->K unloan (zero) */ @ 1.176.2.5 log @don't inline uvn_findpages in genfs_io. @ text @a212 2 #define UFP_ONLYPAGER1 0x40 #define UFP_NOPAGER1 0x80 a777 1 struct uvm_page_array; d781 1 a781 2 unsigned int *, struct vm_page **, struct uvm_page_array *, unsigned int); @ 1.176.2.6 log @- use O->A loan to serve read(2). based on a patch from Chuck Silvers - associated O->A loan fixes. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176.2.5 2011/12/20 13:46:17 yamt Exp $ */ d471 2 a472 2 int64_t loanbreak_obj; /* O->K loan resolved on write to O */ int64_t loanfree_obj; /* O->K loan resolved on free of O */ d476 2 a477 5 int64_t loanbreak_anon; /* A->K loan resolved on write to A */ int64_t loanfree_anon; /* A->K loan resolved on free of A */ int64_t loan_oa; /* O->A->K loan */ int64_t unloan_oa; /* O->A->K unloan */ a480 16 int64_t loanbreak_orphaned; /* O->A->K loan turned into A->K loan due to write to O */ int64_t loanfree_orphaned; /* O->A->K loan turned into A->K loan due to free of O */ int64_t loanbreak_orphaned_anon; /* O->A->K loan turned into O->K loan due to write to A */ int64_t loanfree_orphaned_anon; /* O->A->K loan turned into O->K loan due to free of A */ int64_t loanbreak_oa_obj; /* O->A loan resolved on write to O */ int64_t loanfree_oa_obj; /* O->A loan resolved on free of O */ int64_t loanbreak_oa_anon; /* O->A loan resolved on write to A */ int64_t loanfree_oa_anon; /* O->A loan resolved on free of A */ int64_t loan_resolve_orphan; /* O->A loaned page taken over by anon */ int64_t loan_obj_read; /* O->A loan for read(2) */ @ 1.176.2.7 log @create a sysctl knob to turn on/off loaned read. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176.2.6 2011/12/26 16:03:10 yamt Exp $ */ a506 2 extern bool vm_loan_read; @ 1.176.2.8 log @turn vm.loanread sysctl to a threshold. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176.2.7 2012/01/11 00:08:40 yamt Exp $ */ d508 1 a508 1 extern int vm_loan_read_thresh; @ 1.176.2.9 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d76 10 d141 2 a142 1 #define UVM_FLAG_WAITVA 0x800000 /* wait for va */ a516 3 #ifdef _KERNEL #include <sys/vmem.h> #endif d576 13 d592 1 d601 3 d695 7 a701 7 struct vm_map *); #ifdef _KERNEL int uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *); void uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t); bool uvm_km_va_starved_p(void); #endif a717 1 void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t); @ 1.176.2.10 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176.2.9 2012/04/17 00:08:58 yamt Exp $ */ d647 1 a647 1 vaddr_t uvm_uarea_system_alloc(struct cpu_info *); @ 1.176.2.11 log @sync with head. for a reference, the tree before this commit was tagged as yamt-pagecache-tag8. this commit was split into small chunks to avoid a limitation of cvs. ("Protocol error: too many arguments") @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.176.2.10 2012/10/30 17:23:01 yamt Exp $ */ d237 1 d637 3 a639 2 int (*)(struct uvm_coredump_state *), void *); int uvm_coredump_count_segs(struct proc *); d689 1 a689 1 struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t, bool); d691 3 a693 3 vaddr_t, vaddr_t, bool); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t, bool); void uvmspace_spawn(struct lwp *, vaddr_t, vaddr_t, bool); @ 1.175 log @Add an optional pglist argument to uvm_obj_wirepages, to be filled with the list of pages that were wired. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.174 2011/06/16 09:21:03 hannken Exp $ */ d171 1 @ 1.174 log @Rename uvm_vnp_zerorange(struct vnode *, off_t, size_t) to ubc_zerorange(struct uvm_object *, off_t, size_t, int) changing the first argument to an uvm_object and adding a flags argument. Modify tmpfs_reg_resize() to zero the backing store (aobj) instead of the vnode. Ubc_purge() no longer panics when unmounting tmpfs. Keep uvm_vnp_zerorange() until the next kernel version bump. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.173 2011/06/12 03:36:02 rmind Exp $ */ d709 2 a710 1 int uvm_obj_wirepages(struct uvm_object *, off_t, off_t); @ 1.173 log @Welcome to 5.99.53! Merge rmind-uvmplock branch: - Reorganize locking in UVM and provide extra serialisation for pmap(9). New lock order: [vmpage-owner-lock] -> pmap-lock. - Simplify locking in some pmap(9) modules by removing P->V locking. - Use lock object on vmobjlock (and thus vnode_t::v_interlock) to share the locks amongst UVM objects where necessary (tmpfs, layerfs, unionfs). - Rewrite and optimise x86 TLB shootdown code, make it simpler and cleaner. Add TLBSTATS option for x86 to collect statistics about TLB shootdowns. - Unify /dev/mem et al in MI code and provide required locking (removes kernel-lock on some ports). Also, avoid cache-aliasing issues. Thanks to Andrew Doran and Joerg Sonnenberger, as their initial patches formed the core changes of this branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d577 1 a759 1 void uvm_vnp_zerorange(struct vnode *, off_t, size_t); @ 1.172 log @Replace "malloc" in comments, remove unnecessary header inclusions. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.171 2011/02/17 19:27:13 matt Exp $ */ d577 1 d623 1 a623 1 bool uvm_kernacc(void *, size_t, int); d704 6 a709 4 int uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end); void uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end); @ 1.172.2.1 log @Catchup with rmind-uvmplock merge.
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.174 2011/06/16 09:21:03 hannken Exp $ */ a576 2 void ubc_zerorange(struct uvm_object *, off_t, size_t, int); void ubc_purge(struct uvm_object *); d622 1 a622 1 bool uvm_kernacc(void *, size_t, vm_prot_t); d703 4 a706 6 void uvm_obj_init(struct uvm_object *, const struct uvm_pagerops *, bool, u_int); void uvm_obj_setlock(struct uvm_object *, kmutex_t *); void uvm_obj_destroy(struct uvm_object *, bool); int uvm_obj_wirepages(struct uvm_object *, off_t, off_t); void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); d756 1 @ 1.171 log @Add support for cpu-specific uarea allocation routines. Allows different allocation for user and system lwps. MIPS will use this to map uareas of system lwps using direct-mapped addresses (to reduce the overhead of switching to kernel threads). ibm4xx could use this to map uareas via direct-mapped addresses and avoid the problem of having the kernel stack not in the TLB. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.170 2011/02/10 14:46:44 pooka Exp $ */ d138 1 a138 1 #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */ @ 1.170 log @Make vmapbuf() return success/error and make physio deal with a failure. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.169 2011/02/02 15:13:34 chuck Exp $ */ d626 2 @ 1.169 log @update license clauses on my code to match the new-style BSD licenses. based on diff that rmind@@ sent me. no functional change with this commit. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.168 2011/01/04 08:26:33 matt Exp $ */ d562 1 a562 1 void vmapbuf(struct buf *, vsize_t); @ 1.168 log @Add better color matching selecting free pages. KM pages will now be allocated so that VA and PA have the same color. On a page fault, choose a physical page that has the same color as the virtual address. When allocating kernel memory pages, allow the MD code to specify a preferred VM_FREELIST from which to choose pages. For machines with large amounts of memory (> 4GB), all kernel memory will come from <4GB to reduce the amount of bounce buffering needed with 32bit DMA devices. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.167 2010/12/20 00:25:47 matt Exp $ */ a3 1 * a14 6 * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor and * Washington University. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. @ 1.168.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.172 2011/04/23 18:14:12 rmind Exp $ */ d4 1 d16 6 d145 1 a145 1 #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce allocations */ d569 1 a569 1 int vmapbuf(struct buf *, vsize_t); a632 2 vaddr_t uvm_uarea_system_alloc(void); void uvm_uarea_system_free(vaddr_t); @ 1.168.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.169 2011/02/02 15:13:34 chuck Exp $ */ d4 1 d16 6 @ 1.168.4.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.170 2011/02/10 14:46:44 pooka Exp $ */ d562 1 a562 1 int vmapbuf(struct buf *, vsize_t); @ 1.168.4.3 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ a625 2 vaddr_t uvm_uarea_system_alloc(void); void uvm_uarea_system_free(vaddr_t); @ 1.167 log @Move counting of faults, traps, intrs, soft[intr]s, syscalls, and nswtch from uvmexp to per-cpu cpu_data and move them to 64bits. Remove unneeded includes of and/or .
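The color-matching machinery described in the 1.168 and 1.176 logs above can be illustrated with a short sketch. This is illustration only, not code from the archive: it assumes a kernel context, a caller-supplied page, and the documented convention that with UVM_KMF_VAONLY | UVM_KMF_COLORMATCH the align argument of uvm_km_alloc() carries the desired starting color (0..colormask).

#include <uvm/uvm.h>

/*
 * Sketch: reserve kernel VA whose starting color matches a given
 * page's color, so a later mapping is congruent with existing user
 * mappings of that page (per the 1.176 log). The caller would enter
 * the actual mapping, e.g. with pmap_kenter_pa().
 */
vaddr_t
va_alloc_color_matched(struct vm_page *pg)
{
	/* The page's color, passed through the "align" argument. */
	vsize_t color = atop(VM_PAGE_TO_PHYS(pg)) & uvmexp.colormask;

	return uvm_km_alloc(kernel_map, PAGE_SIZE, color,
	    UVM_KMF_VAONLY | UVM_KMF_COLORMATCH | UVM_KMF_WAITVA);
}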
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.166 2010/11/13 05:52:55 uebayasi Exp $ */ d151 1 d470 2 @ 1.166 log @Hide uvm/uvm_page.h again to ensure its internal structures are MD. GENERIC or at least one kernel compile tested for: acorn26, acorn32, algor, all, alpha, amd64, amiga, amigappc, arc, bebox, bighill, cats, cobalt, dreamcast, ews4800mips, hp300, hp700, hpcarm, hpcmips, hpcsh, i386, ibmnws, integrator, ixm1200, iyonix, landisk, luna68k, mac68k, macppc, mipsco, mmeye, mvme68k, mvmeppc, netwinder, news68k, newsmips, next68k, obs266a, ofppc, pmax, pmppc, prep, rs6000, sandpoint, sbmips, shark, sidebeach, sparc, sparc64, sun2, sun3, usermode, vax, x68k, zaurus @ text @d1 1 a1 1 /* $NetBSD$ */ d317 6 a322 6 int faults; /* page fault count */ int traps; /* trap count */ int intrs; /* interrupt count */ int swtch; /* context switch count */ int softs; /* software interrupt count */ int syscalls; /* system calls */ @ 1.165 log @Put back uvm_page.h for now. Sorry for the mess. @ text @a479 1 #include <uvm/uvm_page.h> @ 1.164 log @Abstraction fix; don't pull in physical segment/page definitions in the UVM external API, uvm_extern.h, because most users care only about virtual memory. Device drivers use bus_dma(9) to manage physical memory. Device drivers pull in bus_dma(9) API, bus_dma.h. bus_dma(9) implementations pull in UVM internal API, uvm.h. Tested By: Compiling i386 ALL kernel @ text @d480 1 @ 1.163 log @- Merge sched_pstats() and uvm_meter()/uvm_loadav(). Avoids double loop through all LWPs and duplicate locking overhead. - Move sched_pstats() from soft-interrupt context to process 0 main loop. Avoids blocking effect on real-time threads. Mostly fixes PR/38792. Note: it might be worth moving the loop above PRI_PGDAEMON. Also, sched_pstats() might be cleaned-up slightly. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162 2010/02/08 19:02:33 joerg Exp $ */ a478 1 #include <uvm/uvm_page.h> @ 1.162 log @Remove separate mb_map. The nmbclusters is computed at boot time based on the amount of physical memory and limited by NMBCLUSTERS if present. Architectures without direct mapping also limit it based on the kmem_map size, which is used as backing store. On i386 and ARM, the maximum KVA used for mbuf clusters is limited to 64MB by default. The old default limits and limits based on GATEWAY have been removed. key_registered_sb_max is hard-wired to a value derived from 2048 clusters. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.161 2009/11/21 17:45:02 rmind Exp $ */ a683 1 void uvm_meter(void); @ 1.162.2.1 log @Change struct uvm_object::vmobjlock to be dynamically allocated with mutex_obj_alloc(). It allows us to share the locks among UVM objects. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162 2010/02/08 19:02:33 joerg Exp $ */ a706 3 void uvm_obj_init(struct uvm_object *, const struct uvm_pagerops *, kmutex_t *, u_int); void uvm_obj_destroy(struct uvm_object *, kmutex_t *); @ 1.162.2.2 log @Unify /dev/{mem,kmem,zero,null} implementations in MI code. Based on patch from Joerg Sonnenberger, proposed on tech-kern@@, in February 2008. Work and depression still in progress. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162.2.1 2010/03/16 15:38:17 rmind Exp $ */ d627 1 a627 1 bool uvm_kernacc(void *, vm_prot_t); @ 1.162.2.3 log @Use consistent naming - uvm_obj_*().
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162.2.2 2010/03/18 04:36:54 rmind Exp $ */ d627 1 a627 1 bool uvm_kernacc(void *, size_t, vm_prot_t); d710 4 a713 2 int uvm_obj_wirepages(struct uvm_object *, off_t, off_t); void uvm_obj_unwirepages(struct uvm_object *, off_t, off_t); @ 1.162.2.4 log @Add ubc_purge() and purge/deassociate any related UBC entries during object (usually, vnode) destruction. Since locking (and thus object) is required to enter/remove mappings - the object is no longer allowed to disappear with any UBC entries left. From original patch by ad@@ with some modifications. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162.2.3 2010/04/23 21:18:00 rmind Exp $ */ a581 1 void ubc_purge(struct uvm_object *); @ 1.162.2.5 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162.2.4 2010/04/26 02:20:59 rmind Exp $ */ d685 1 @ 1.162.2.6 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d4 1 d16 6 a150 1 #define UVM_FLAG_COLORMATCH 0x4000000 /* match color given in off */ d317 6 a322 6 int _unused_faults; /* page fault count */ int _unused_traps; /* trap count */ int _unused_intrs; /* interrupt count */ int _unused_swtch; /* context switch count */ int _unused_softs; /* software interrupt count */ int _unused_syscalls; /* system calls */ a468 2 /* MD code needs this without including <uvm/uvm_page.h> */ extern bool vm_page_zero_enable; a479 1 d567 1 a567 1 int vmapbuf(struct buf *, vsize_t); a631 2 vaddr_t uvm_uarea_system_alloc(void); void uvm_uarea_system_free(vaddr_t); @ 1.162.2.7 log @Implement sharing of vnode_t::v_interlock amongst vnodes: - Lock is shared amongst UVM objects using uvm_obj_setlock() or getnewvnode(). - Adjust vnode cache to handle unsharing, add VI_LOCKSHARE flag for that. - Use sharing in tmpfs and layerfs for underlying object. - Simplify locking in ubc_fault(). - Sprinkle some asserts. Discussed with ad@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.162.2.6 2011/03/05 20:56:35 rmind Exp $ */ d705 2 a706 3 const struct uvm_pagerops *, bool, u_int); void uvm_obj_setlock(struct uvm_object *, kmutex_t *); void uvm_obj_destroy(struct uvm_object *, bool); @ 1.162.2.8 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d138 1 a138 1 #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce allocations */ @ 1.161 log @Add uvm_lwp_getuarea() and uvm_lwp_setuarea(). OK matt@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.160 2009/10/21 21:12:07 rmind Exp $ */ a553 1 extern struct vm_map *mb_map; @ 1.161.2.1 log @Introduce uvm_page_physload_device(). This registers a physical address range of a device, similar to uvm_page_physload() for memory. For now, this is supposed to be called by MD code. We have to consider the design when we'll manage mmap'able character devices. Expose paddr_t -> struct vm_page * conversion function for device pages, uvm_phys_to_vm_page_device(). This will be called by the XIP vnode pager, because it knows whether a given vnode is a device page (and its physical address base) or not. Don't look up device segments, but directly make a cookie. @ text @d1 1 a1 1 /* $NetBSD$ */ d724 2 a725 4 void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int); void uvm_page_physload_device(paddr_t, paddr_t, paddr_t, paddr_t, int); @ 1.161.2.2 log @Forgot to check this in; now uvm_page_physload() and uvm_page_physload_device() return struct vm_physseg * (which is not used yet).
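The uvm_obj_init()/uvm_obj_setlock() prototypes visible in the 1.162.2.7 delta above suggest the following sketch of lock sharing. It is not code from the archive; the pager-ops table pgops is assumed to be supplied by the caller, in the way tmpfs and layerfs arrange sharing for their underlying objects.

#include <sys/mutex.h>
#include <uvm/uvm_object.h>

/*
 * Sketch: make object "b" share object "a"'s vmobjlock. The bool
 * argument of uvm_obj_init() says whether the object should allocate
 * a lock of its own.
 */
static void
share_vmobjlock(struct uvm_object *a, struct uvm_object *b,
    const struct uvm_pagerops *pgops)
{
	uvm_obj_init(a, pgops, true, 1);	/* a: private lock, one ref */
	uvm_obj_init(b, pgops, false, 1);	/* b: no lock of its own */
	uvm_obj_setlock(b, a->vmobjlock);	/* b now uses a's lock */
}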
@ text @d724 1 a724 1 void *uvm_page_physload(paddr_t, paddr_t, d726 1 a726 1 void *uvm_page_physload_device(paddr_t, paddr_t, @ 1.161.2.3 log @Don't expose uvm_page.h internals to usual uvm(9) users. @ text @d479 1 @ 1.161.2.4 log @Initial support of uvm_page_physunload(9) and uvm_page_physunload_device(9). Note that callers of these functions are responsible for ensuring that the segment is not used. @ text @a724 1 void uvm_page_physunload(void *); a726 1 void uvm_page_physunload_device(void *); @ 1.161.2.5 log @"int free_list" (VM_FREELIST_*) is specific to struct vm_page (memory page). Handle it only in memory physseg parts. Record device page's properties in struct vm_physseg for future uses. For example, framebuffers that are capable of some accelerated bus access (e.g. write-combining) should register their capability through "int flags". @ text @d727 1 a727 1 paddr_t, paddr_t, int, int); @ 1.161.2.6 log @Sync with HEAD. @ text @d553 1 d684 1 @ 1.161.2.7 log @Re-define the definition of "device page"; device pages are pages of device memory. Pages which don't have vm_page (== can't be used for generic use), but whose PV are tracked, are called "direct pages" from now on. @ text @d724 1 a724 1 void *uvm_page_physload_direct(paddr_t, paddr_t, d726 1 a726 1 void uvm_page_physunload_direct(void *); @ 1.161.2.8 log @After much consideration, rename bus_space_physload_direct(9) back to bus_space_physload_device(9). The latter registers a segment as "device pages". "Device pages" are managed, but not used for general purpose memory. Most typically XIP pages. @ text @d724 1 a724 1 void *uvm_page_physload_device(paddr_t, paddr_t, d726 1 a726 1 void uvm_page_physunload_device(void *); @ 1.161.2.9 log @Put back #include <uvm/uvm_page.h> for now, to avoid build errors. This should be removed again later, because exposing page-level definitions out of UVM is totally unnecessary. @ text @a478 1 #include <uvm/uvm_page.h> @ 1.161.2.10 log @Drop the 'paddr_t avail_start' and 'paddr_t avail_end' arguments from uvm_page_physload_device(9). Those two arguments are used by uvm_page_physload(9) to specify a range of physical memory available for general purpose pages (pages which are linked to freelists). Totally irrelevant to device segments. @ text @d726 1 a726 1 int, int); @ 1.161.2.11 log @Hide uvm/uvm_page.h here again. @ text @d479 1 @ 1.161.2.12 log @Factor out the part which looks up physical page "identity" from the UVM object, into sys/uvm/uvm_vnode.c:uvn_findpage_xip(). Eventually this will become a call to the cdev UVM object pager. @ text @a765 1 struct vm_page *uvn_findpage_xip(struct uvm_object *, off_t); @ 1.161.2.13 log @Make XIP pager use cdev_mmap() instead of struct vm_physseg. @ text @d766 1 a766 2 struct vm_page *uvn_findpage_xip(struct vnode *, struct uvm_object *, off_t); @ 1.160 log @Remove uarea swap-out functionality: - Addresses the issue described in PR/38828. - Some simplification in threading and sleepq subsystems. - Eliminates pmap_collect() and, as a side note, allows pmap optimisations. - Eliminates XS_CTL_DATA_ONSTACK in scsipi code. - Avoids a few scans on LWP list and thus potentially long holds of proc_lock. - Cuts ~1.5k lines of code. Reduces amd64 kernel size by ~4k. - Removes __SWAP_BROKEN cases. Tested on x86, mips, acorn32 (thanks ) and partly tested on acorn26 (thanks to ). Discussed on , reviewed by . @ 1.159 log @whitespace fixes. no functional changes.
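For readers following the physload/physunload churn in the branch logs above, here is a sketch (not from the archive) of the classic five-argument uvm_page_physload() call as MD boot code issues it; the segment in this example is wholly available, and per the 1.152 log slower RAM would go on a higher free_list id.

#include <uvm/uvm_extern.h>

/*
 * Sketch: hand one fully usable RAM segment to UVM. The arguments
 * are page frame numbers, hence the atop() conversions.
 */
void
md_attach_ram(paddr_t start, paddr_t end)
{
	uvm_page_physload(atop(start), atop(end),
	    atop(start), atop(end), VM_FREELIST_DEFAULT);
}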
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.158 2009/08/10 23:17:29 haad Exp $ */ d325 2 a326 2 int swapins; /* swapins */ int swapouts; /* swapouts */ d366 1 a366 1 int pdswout; /* number of times daemon called for swapout */ d401 1 a401 1 int64_t inactarg; /* unused */ d408 1 a408 1 int64_t unused1; /* used to be nanon */ d418 2 a419 2 int64_t swapins; int64_t swapouts; d448 1 a448 1 int64_t pdswout; a569 6 #ifndef cpu_swapin void cpu_swapin(struct lwp *); #endif #ifndef cpu_swapout void cpu_swapout(struct lwp *); #endif d630 2 a631 4 void uvm_kick_scheduler(void); void uvm_swapin(struct lwp *); bool uvm_uarea_alloc(vaddr_t *); void uvm_uarea_free(vaddr_t, struct cpu_info *); a633 2 void uvm_lwp_hold(struct lwp *); void uvm_lwp_rele(struct lwp *); @ 1.158 log @Add uvm_reclaim_hooks support for reclaiming kernel KVA space and memory. This is used only by zfs where uvm_reclaim hook is added from arc cache. Oked ad@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.157 2009/08/05 14:11:32 pooka Exp $ */ d545 3 a547 3 void uvm_reclaim_init(void); void uvm_reclaim_hook_add(struct uvm_reclaim_hook *); void uvm_reclaim_hook_del(struct uvm_reclaim_hook *); @ 1.157 log @kill uvm_aio_biodone1(). only user was lfs and that uses nestiobuf now. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.156 2009/08/05 14:10:33 pooka Exp $ */ d537 13 @ 1.156 log @add some advice symbols we'll eventually need @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.155 2009/06/28 15:18:50 rmind Exp $ */ a723 1 void uvm_aio_biodone1(struct buf *); @ 1.155 log @Ephemeral mapping (emap) implementation. Concept is based on the idea that activity of other threads will perform the TLB flush for the processes using emap as a side effect. To track that, global and per-CPU generation numbers are used. This idea was suggested by Andrew Doran; various improvements to it by me. Notes: - For now, zero-copy on pipe is not yet enabled. - TCP socket code would likely need more work. - Additional UVM loaning improvements are needed. Proposed on , silence there. Quickly reviewed by . @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.154 2009/03/30 16:36:36 yamt Exp $ */ d135 3 a137 1 /* 0x3: will need, 0x4: dontneed */ @ 1.154 log @g/c uvm_aiobuf_pool. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.153 2009/03/29 01:02:51 mrg Exp $ */ d229 5 d575 25 @ 1.153 log @- add new RLIMIT_AS (aka RLIMIT_VMEM) resource that limits the total address space available to processes. this limit exists in most other modern unix variants, and like most of them, our defaults are unlimited. remove the old mmap / rlimit.datasize hack. - adds the VMCMD_STACK flag to all the stack-creation vmcmd callers. it is currently unused, but was added a few years ago. - add a pair of new process size values to kinfo_proc2{}. one is the total size of the process memory map, and the other is the total size adjusted for unused stack space (since most processes have a lot of this...) - patch sh and csh to notice RLIMIT_AS. (in some cases, the alias RLIMIT_VMEM was already present and used if available.) - patch ps, top and systat to notice the new k_vm_vsize member of kinfo_proc2{}. - update irix, svr4, svr4_32, linux and osf1 emulations to support this information. (freebsd could be done, but it's best left as part of the full-update of compat/freebsd.) this addresses PR 7897. it also gives correct memory usage values, which have never been entirely correct (since mmap), and have been very incorrect since jemalloc() was enabled.
tested on i386 and sparc64, build tested on several other platforms. thanks to many folks for feedback and testing but most especially chuq and yamt for critical suggestions that led to this patch not having a special ugliness i wasn't happy with anyway :-) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.152 2009/03/12 12:55:16 abs Exp $ */ a243 1 struct pool; a514 2 extern struct pool *uvm_aiobuf_pool; @ 1.152 log @Clarify free_list usage in uvm_page_physload() regarding faster/slower RAM. Slower RAM should be assigned a higher free_list id. No functional change to code, just comments and manpage. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.151 2009/02/18 13:16:58 yamt Exp $ */ d505 1 @ 1.151 log @make some functions static. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.150 2008/11/26 20:17:33 pooka Exp $ */ d179 1 a179 1 #define UVM_PGA_STRAT_NORMAL 0 /* high -> low free list walk */ @ 1.150 log @Rototill all remaining file systems to use ubc_uiomove() instead of the ubc_alloc() - uiomove() - ubc_release() dance. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.149 2008/10/31 20:42:41 christos Exp $ */ a561 1 void uao_detach_locked(struct uvm_object *); a562 1 void uao_reference_locked(struct uvm_object *); @ 1.150.4.1 log @Sync with HEAD. Commit is split, to avoid a "too many arguments" protocol error. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.154 2009/03/30 16:36:36 yamt Exp $ */ d179 1 a179 1 #define UVM_PGA_STRAT_NORMAL 0 /* priority (low id to high) walk */ d244 1 a504 1 segsz_t vm_issize; /* initial unmapped stack size (pages) */ d515 2 d562 1 d564 1 @ 1.150.4.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.155 2009/06/28 15:18:50 rmind Exp $ */ a228 5 * Value representing inactive emap. */ #define UVM_EMAP_INACTIVE (0) /* a569 25 /* uvm_emap.c */ void uvm_emap_sysinit(void); #ifdef __HAVE_PMAP_EMAP void uvm_emap_switch(lwp_t *); #else #define uvm_emap_switch(l) #endif u_int uvm_emap_gen_return(void); void uvm_emap_update(u_int); vaddr_t uvm_emap_alloc(vsize_t, bool); void uvm_emap_free(vaddr_t, size_t); void uvm_emap_enter(vaddr_t, struct vm_page **, u_int); void uvm_emap_remove(vaddr_t, vsize_t); #ifdef __HAVE_PMAP_EMAP void uvm_emap_consume(u_int); u_int uvm_emap_produce(void); #else #define uvm_emap_consume(x) #define uvm_emap_produce() UVM_EMAP_INACTIVE #endif @ 1.149 log @- allocate 8 pointers on the stack to avoid stack overflow in nfs. - make that 8 a constant - remove bogus panic @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148 2008/08/08 14:41:50 skrll Exp $ */ d486 1 a570 1 void ubc_flush(struct uvm_object *, voff_t, voff_t); @ 1.148 log @g/c exec_map @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.147 2008/07/11 07:09:18 skrll Exp $ */ d224 5 @ 1.148.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.150 2008/11/26 20:17:33 pooka Exp $ */ a223 5 * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* a480 1 #define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0) d565 1 @ 1.148.2.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148.2.1 2009/01/19 13:20:36 skrll Exp $ */ d562 1 d564 1 @ 1.148.2.3 log @Sync with HEAD.
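The 1.150 "rototill" above replaced the ubc_alloc()/uiomove()/ubc_release() dance in file systems with single calls of the shape sketched below. This is illustration only, not archive code; the advice argument is the one added by rev 1.134.

#include <sys/vnode.h>
#include <uvm/uvm_extern.h>

/*
 * Sketch: move "bytes" of file data through UBC in one call.
 * UBC_READ means data flows from the object into the uio, and
 * UBC_UNMAP_FLAG() (defined in the deltas above) picks the unmap
 * behaviour the platform's cache model wants.
 */
static int
read_via_ubc(struct vnode *vp, struct uio *uio, vsize_t bytes)
{
	return ubc_uiomove(&vp->v_uobj, uio, bytes,
	    UVM_ADV_SEQUENTIAL, UBC_READ | UBC_UNMAP_FLAG(vp));
}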
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148.2.2 2009/03/03 18:34:40 skrll Exp $ */ d179 1 a179 1 #define UVM_PGA_STRAT_NORMAL 0 /* priority (low id to high) walk */ d244 1 a504 1 segsz_t vm_issize; /* initial unmapped stack size (pages) */ d515 2 @ 1.148.4.1 log @Pull up following revision(s) (requested by tron in ticket #9): sys/nfs/nfs_bio.c: revision 1.180 sys/miscfs/genfs/genfs_io.c: revision 1.14 sys/uvm/uvm_extern.h: revision 1.149 - allocate 8 pointers on the stack to avoid stack overflow in nfs. - make that 8 a constant - remove bogus panic @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148 2008/08/08 14:41:50 skrll Exp $ */ a223 5 * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* @ 1.148.4.2 log @Pull up following revision(s) (requested by mrg in ticket #622): bin/csh/csh.1: revision 1.46 bin/csh/func.c: revision 1.37 bin/ps/print.c: revision 1.111 bin/ps/ps.c: revision 1.74 bin/sh/miscbltin.c: revision 1.38 bin/sh/sh.1: revision 1.92 via patch external/bsd/top/dist/machine/m_netbsd.c: revision 1.7 lib/libkvm/kvm_proc.c: revision 1.82 sys/arch/mips/mips/cpu_exec.c: revision 1.55 sys/compat/darwin/darwin_exec.c: revision 1.57 sys/compat/ibcs2/ibcs2_exec.c: revision 1.73 sys/compat/irix/irix_resource.c: revision 1.15 sys/compat/linux/arch/amd64/linux_exec_machdep.c: revision 1.16 sys/compat/linux/arch/i386/linux_exec_machdep.c: revision 1.12 sys/compat/linux/common/linux_limit.h: revision 1.5 sys/compat/osf1/osf1_resource.c: revision 1.14 sys/compat/svr4/svr4_resource.c: revision 1.18 sys/compat/svr4_32/svr4_32_resource.c: revision 1.17 sys/kern/exec_subr.c: revision 1.62 sys/kern/init_sysctl.c: revision 1.160 sys/kern/kern_exec.c: revision 1.288 sys/kern/kern_resource.c: revision 1.151 sys/sys/param.h: patch sys/sys/resource.h: revision 1.31 sys/sys/sysctl.h: revision 1.184 sys/uvm/uvm_extern.h: revision 1.153 sys/uvm/uvm_glue.c: revision 1.136 sys/uvm/uvm_mmap.c: revision 1.128 usr.bin/systat/ps.c: revision 1.32 - - add new RLIMIT_AS (aka RLIMIT_VMEM) resource that limits the total address space available to processes. this limit exists in most other modern unix variants, and like most of them, our defaults are unlimited. remove the old mmap / rlimit.datasize hack. - - adds the VMCMD_STACK flag to all the stack-creation vmcmd callers. it is currently unused, but was added a few years ago. - - add a pair of new process size values to kinfo_proc2{}. one is the total size of the process memory map, and the other is the total size adjusted for unused stack space (since most processes have a lot of this...) - - patch sh and csh to notice RLIMIT_AS. (in some cases, the alias RLIMIT_VMEM was already present and used if available.) - - patch ps, top and systat to notice the new k_vm_vsize member of kinfo_proc2{}. - - update irix, svr4, svr4_32, linux and osf1 emulations to support this information. (freebsd could be done, but it's best left as part of the full-update of compat/freebsd.) this addresses PR 7897. it also gives correct memory usage values, which have never been entirely correct (since mmap), and have been very incorrect since jemalloc() was enabled. tested on i386 and sparc64, build tested on several other platforms.
thanks to many folks for feedback and testing but most especially chuq and yamt for critical suggestions that led to this patch not having a special ugliness i wasn't happy with anyway :-) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148.4.1 2008/11/02 23:08:56 snj Exp $ */ a503 1 segsz_t vm_issize; /* initial unmapped stack size (pages) */ @ 1.148.4.2.4.1 log @Pass hints to uvm_pagealloc* to get it to use the right page color rather than guessing the right page color. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148.4.2 2009/04/01 00:25:23 snj Exp $ */ a148 1 #define UVM_FLAG_COLORMATCH 0x4000000 /* match color given in off */ @ 1.148.4.2.4.2 log @Make uvm_map recognize UVM_FLAG_COLORMATCH which tells uvm_map that the 'align' argument specifies the starting color of the KVA range to be returned. When calling uvm_km_alloc with UVM_KMF_VAONLY, also specify the starting color of the kva range returned (UVM_KMF_COLORMATCH) and pass those to uvm_map. In uvm_pglistalloc, make sure the pages being returned have sequentially advancing colors (so they can be mapped in a contiguous address range). Add a few missing UVM_FLAG_COLORMATCH flags to uvm_pagealloc calls. Make the socket and pipe loan color-safe. Make the mips pmap enforce strict page color (color(VA) == color(PA)). @ text @d1 1 a1 1 /* uvm_extern.h,v 1.148.4.2.4.1 2010/01/26 21:26:28 matt Exp */ a175 1 #define UVM_KMF_COLORMATCH UVM_FLAG_COLORMATCH /* start at color in align */ @ 1.148.4.2.4.3 log @Rework page free lists to be sorted by color first rather than free_list. Keep per-color PGFL_* counters in each page free list. Minor cleanups. @ text @a333 1 int colorfail; /* pagealloc where we got no page */ a335 1 int colorany; /* pagealloc where we wanted any color */ a459 2 int64_t colorany; int64_t colorfail; @ 1.148.4.2.4.4 log @Restore $NetBSD$ @ text @d1 1 a1 1 /* $NetBSD$ */ @ 1.148.4.2.4.5 log @Major changes to uvm. Support multiple collections (groups) of free pages and run the page reclamation algorithm on each group independently. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148.4.2.4.4 2011/06/03 07:56:08 matt Exp $ */ a333 1 #if 0 a339 4 #else int _padcolor[4]; int _padcpu[2]; #endif a362 1 #if 0 a370 3 #else int _pdpad0[8]; #endif a372 1 #if 0 a376 4 #else int _pdpad1[4]; #endif u_int npggroups; a633 2 void uvm_km_pageclaim(struct vm_page *); void uvm_km_pagefree(struct vm_page *); d713 3 a715 4 struct uvm_pggroup; void uvm_pageout_start(struct uvm_pggroup *, u_int); void uvm_pageout_done(struct vm_page *, bool); void uvm_estimatepageable(u_int *, u_int *);
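Given the free-list and page-color work in the logs above, here is a sketch (not archive code) of how a caller steers the allocator toward one particular free list; the dma_list id is a hypothetical, MD-assigned list number such as one covering memory below 4GB.

#include <uvm/uvm_extern.h>

/*
 * Sketch: prefer pages from a specific free list, falling back to
 * the normal walk when that list is empty.
 */
struct vm_page *
alloc_dma_page(struct uvm_object *uobj, voff_t off, int dma_list)
{
	return uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
	    UVM_PGA_STRAT_FALLBACK, dma_list);
}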
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145 2008/02/29 20:35:23 yamt Exp $ */ d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry never be splitted later */ @ 1.146.4.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.148 2008/08/08 14:41:50 skrll Exp $ */ d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ d528 1 @ 1.146.4.2 log @Update haad-dm branch to haad-dm-base2. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.150 2008/11/26 20:17:33 pooka Exp $ */ a223 5 * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* a480 1 #define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0) d565 1 @ 1.146.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.147 2008/07/11 07:09:18 skrll Exp $ */ d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ @ 1.145 log @uvm_swap_io: if pagedaemon, don't wait for iobuf. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.144 2008/01/28 12:22:47 yamt Exp $ */ d329 2 d398 2 a399 2 int64_t unused2; /* used to be nanonneeded */ int64_t unused3; /* used to be nfreeanon */ @ 1.145.4.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145 2008/02/29 20:35:23 yamt Exp $ */ d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ d179 1 a179 1 #define UVM_PGA_STRAT_NORMAL 0 /* priority (low id to high) walk */ a223 5 * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* d239 1 a328 2 int cpuhit; /* pagealloc where we allocated locally */ int cpumiss; /* pagealloc where we didn't */ d396 2 a397 2 int64_t cpuhit; int64_t cpumiss; a478 1 #define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0) a496 1 segsz_t vm_issize; /* initial unmapped stack size (pages) */ d507 2 d526 1 d555 1 d557 1 d564 1 @ 1.145.4.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145.4.1 2009/05/04 08:14:39 yamt Exp $ */ a228 5 * Value representing inactive emap. */ #define UVM_EMAP_INACTIVE (0) /* a569 25 /* uvm_emap.c */ void uvm_emap_sysinit(void); #ifdef __HAVE_PMAP_EMAP void uvm_emap_switch(lwp_t *); #else #define uvm_emap_switch(l) #endif u_int uvm_emap_gen_return(void); void uvm_emap_update(u_int); vaddr_t uvm_emap_alloc(vsize_t, bool); void uvm_emap_free(vaddr_t, size_t); void uvm_emap_enter(vaddr_t, struct vm_page **, u_int); void uvm_emap_remove(vaddr_t, vsize_t); #ifdef __HAVE_PMAP_EMAP void uvm_emap_consume(u_int); u_int uvm_emap_produce(void); #else #define uvm_emap_consume(x) #define uvm_emap_produce() UVM_EMAP_INACTIVE #endif @ 1.145.4.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145.4.2 2009/07/18 14:53:28 yamt Exp $ */ d135 1 a135 3 #define UVM_ADV_WILLNEED 0x3 /* pages will be needed */ #define UVM_ADV_DONTNEED 0x4 /* pages won't be needed */ #define UVM_ADV_NOREUSE 0x5 /* pages will be used only once */ a534 13 * Structure containig uvm reclaim hooks, uvm_reclaim_list is guarded by * uvm_reclaim_lock. 
*/ struct uvm_reclaim_hook { void (*uvm_reclaim_hook)(void); SLIST_ENTRY(uvm_reclaim_hook) uvm_reclaim_next; }; void uvm_reclaim_init(void); void uvm_reclaim_hook_add(struct uvm_reclaim_hook *); void uvm_reclaim_hook_del(struct uvm_reclaim_hook *); /* d722 1 @ 1.145.4.4 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145.4.3 2009/08/19 18:48:35 yamt Exp $ */ d325 2 a326 2 int _unused1; int _unused2; d366 1 a366 1 int _unused3; d401 1 a401 1 int64_t inactarg; /* unused */ d408 1 a408 1 int64_t unused1; /* unused; was nanon */ d418 2 a419 2 int64_t swapins; /* unused */ int64_t swapouts; /* unused */ d448 1 a448 1 int64_t unused4; d554 1 d570 6 d636 4 a639 4 vaddr_t uvm_uarea_alloc(void); void uvm_uarea_free(vaddr_t); vaddr_t uvm_lwp_getuarea(lwp_t *); void uvm_lwp_setuarea(lwp_t *, vaddr_t); d642 2 @ 1.145.4.5 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145.4.4 2010/03/11 15:04:46 yamt Exp $ */ d684 1 @ 1.145.6.1 log @Sync w/ -current. 34 merge conflicts to follow. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145 2008/02/29 20:35:23 yamt Exp $ */ a328 2 int cpuhit; /* pagealloc where we allocated locally */ int cpumiss; /* pagealloc where we didn't */ d396 2 a397 2 int64_t cpuhit; int64_t cpumiss; @ 1.145.6.2 log @Sync with wrstuden-revivesa-base-2. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145.6.1 2008/06/23 04:32:06 wrstuden Exp $ */ d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ d528 1 @ 1.145.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145 2008/02/29 20:35:23 yamt Exp $ */ a328 2 int cpuhit; /* pagealloc where we allocated locally */ int cpumiss; /* pagealloc where we didn't */ d396 2 a397 2 int64_t cpuhit; int64_t cpumiss; @ 1.144 log @remove a special allocator for uareas, which is no longer necessary. use pool_cache instead. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.143 2008/01/02 11:49:16 ad Exp $ */ d693 2 @ 1.144.6.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ a692 2 void uvm_aio_aiodone_pages(struct vm_page **, int, bool, int); @ 1.144.6.2 log @Sync with HEAD. Also fix build. @ text @a328 2 int cpuhit; /* pagealloc where we allocated locally */ int cpumiss; /* pagealloc where we didn't */ d396 2 a397 2 int64_t cpuhit; int64_t cpumiss; @ 1.144.6.3 log @Sync with HEAD. @ text @d146 1 a146 1 #define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */ d528 1 @ 1.144.6.4 log @Sync with HEAD. @ text @a223 5 * Default number of pages to allocate on the stack */ #define UBC_MAX_PAGES 8 /* a480 1 #define UBC_UNMAP_FLAG(vp) (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0) d565 1 @ 1.144.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.145 2008/02/29 20:35:23 yamt Exp $ */ a692 2 void uvm_aio_aiodone_pages(struct vm_page **, int, bool, int); @ 1.143 log @Merge vmlocking2 to head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.142 2007/12/26 22:11:53 christos Exp $ */ a592 1 void uvm_uarea_drain(bool); @ 1.142 log @Add PaX ASLR (Address Space Layout Randomization) [from elad and myself] For regular (non PIE) executables randomization is enabled for: 1. The data segment 2. The stack For PIE executables(*) randomization is enabled for: 1. The program itself 2. All shared libraries 3. The data segment 4. The stack (*) To generate a PIE executable: - compile everything with -fPIC - link with -shared-libgcc -Wl,-pie This feature is experimental, and might change. To use selectively add options PAX_ASLR=0 in your kernel. 
Currently we are using 12 bits for the stack, program, and data segment and 16 or 24 bits for mmap, depending on __LP64__. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.141 2007/12/24 15:46:46 perry Exp $ */ d699 2 @ 1.141 log @Remove __attribute__((__noreturn__)) from things already marked __dead. Found by the department of redundancy department. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.140 2007/12/13 02:45:11 yamt Exp $ */ d501 1 @ 1.140 log @add ddb "whatis" command. inspired by solaris ::whatis dcmd. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.139 2007/12/05 09:37:34 yamt Exp $ */ d587 1 a587 1 __dead void uvm_scheduler(void) __attribute__((noreturn)); @ 1.139 log @g/c uvm_vnp_sync @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.138 2007/12/05 09:35:46 yamt Exp $ */ d643 1 @ 1.139.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ a642 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); @ 1.139.4.2 log @Sync with HEAD @ text @a500 1 size_t vm_aslr_delta_mmap; /* mmap() random delta for ASLR */ d587 1 a587 1 __dead void uvm_scheduler(void); a697 2 void uvm_pageout_start(int); void uvm_pageout_done(int); @ 1.139.2.1 log @- separate kernel va allocation (kernel_va_arena) from in-kernel fault handling (kernel_map). - add vmem bootstrap code. vmem doesn't rely on malloc anymore. - make kmem_alloc interrupt-safe. - kill kmem_map. make malloc a wrapper of kmem_alloc. @ text @d1 1 a1 1 /* $NetBSD$ */ a461 3 #if defined(_KERNEL) #include <sys/vmem.h> #endif /* defined(_KERNEL) */ a470 4 #if defined(_KERNEL) extern vmem_t *kernel_va_arena; #endif /* defined(_KERNEL) */ @ 1.139.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.139.2.1 2007/12/10 12:56:12 yamt Exp $ */ a649 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); @ 1.138 log @fix UBC_WANT_UNMAP. - check PMAP_CACHE_VIVT after pulling pmap.h. - VTEXT -> VI_TEXT. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.137 2007/11/30 22:43:17 ad Exp $ */ a715 1 void uvm_vnp_sync(struct mount *); @ 1.137 log @Make {anon,file,exec}pages unsigned. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.136 2007/11/06 00:42:46 ad Exp $ */ a206 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_flag & VTEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* d472 9 @ 1.137.2.1 log @Pull the vmlocking changes into a new branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.137 2007/11/30 22:43:17 ad Exp $ */ a696 2 void uvm_pageout_start(int); void uvm_pageout_done(int); @ 1.137.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.139 2007/12/05 09:37:34 yamt Exp $ */ d207 9 a480 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* d718 1 @ 1.137.2.3 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.137.2.2 2007/12/08 17:58:09 ad Exp $ */ d587 1 a587 1 __dead void uvm_scheduler(void); a642 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); @ 1.136 log @Merge scheduler changes from the vmlocking branch. All discussed on tech-kern: - Invert priority space so that zero is the lowest priority. Rearrange number and type of priority levels into bands. Add new bands like 'kernel real time'. - Ignore the priority level passed to tsleep. Compute priority for sleep dynamically. - For SCHED_4BSD, make priority adjustment per-LWP, not per-process.
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.135 2007/08/18 00:21:11 ad Exp $ */ d297 3 a299 3 int anonpages; /* number of pages used by anon mappings */ int filepages; /* number of pages used by cached file data */ int execpages; /* number of pages used by cached exec data */ @ 1.135 log @Make the uarea cache per-CPU and drain in batches of 4. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.134 2007/07/27 09:50:37 yamt Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t uaddr); @ 1.135.8.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.136 2007/11/06 00:42:46 ad Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.135.8.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.139 2007/12/05 09:37:34 yamt Exp $ */ d207 9 d297 3 a299 3 unsigned anonpages; /* number of pages used by anon mappings */ unsigned filepages; /* number of pages used by cached file data */ unsigned execpages; /* number of pages used by cached exec data */ a480 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* d716 1 @ 1.135.8.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.141 2007/12/24 15:46:46 perry Exp $ */ d587 1 a587 1 __dead void uvm_scheduler(void); a642 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); @ 1.135.8.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.144 2008/01/28 12:22:47 yamt Exp $ */ a500 1 size_t vm_aslr_delta_mmap; /* mmap() random delta for ASLR */ d592 1 a697 2 void uvm_pageout_start(int); void uvm_pageout_done(int); @ 1.135.6.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.135.2.1 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.136 2007/11/06 00:42:46 ad Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.135.2.2 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.135.2.1 2007/11/06 23:35:27 matt Exp $ */ d207 9 d297 3 a299 3 unsigned anonpages; /* number of pages used by anon mappings */ unsigned filepages; /* number of pages used by cached file data */ unsigned execpages; /* number of pages used by cached exec data */ a480 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* a500 1 size_t vm_aslr_delta_mmap; /* mmap() random delta for ASLR */ d587 1 a587 1 __dead void uvm_scheduler(void); a642 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); a696 2 void uvm_pageout_start(int); void uvm_pageout_done(int); d716 1 @ 1.135.2.3 log @sync with HEAD @ text @d1 1 a1 1 /* uvm_extern.h,v 1.135.2.2 2008/01/09 01:58:39 matt Exp */ d593 1 a693 2 void uvm_aio_aiodone_pages(struct vm_page **, int, bool, int); @ 1.134 log @ubc_uiomove: add an "advice" argument rather than using UVM_ADV_RANDOM blindly. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.133 2007/07/22 19:16:06 pooka Exp $ */ d597 1 @ 1.134.6.1 log @file uvm_extern.h was added on branch matt-mips64 on 2007-07-27 09:50:38 +0000 @ text @d1 730 @ 1.134.6.2 log @ubc_uiomove: add an "advice" argument rather than using UVM_ADV_RANDOM blindly. @ text @a0 730 /* $NetBSD: uvm_extern.h,v 1.134 2007/07/27 09:50:37 yamt Exp $ */ /* * * Copyright (c) 1997 Charles D. Cranor and Washington University. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor and * Washington University. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp */ /*- * Copyright (c) 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vm_extern.h 8.5 (Berkeley) 5/3/95 */ #ifndef _UVM_UVM_EXTERN_H_ #define _UVM_UVM_EXTERN_H_ /* * uvm_extern.h: this file defines the external interface to the VM system. * * this should be the only file included by non-VM parts of the kernel * which need access to VM services. 
if you want to know the interface * to the MI VM layer without knowing the details, this is the file to * learn. * * NOTE: vm system calls are prototyped in syscallargs.h */ /* * typedefs, necessary for standard UVM headers. */ typedef unsigned int uvm_flag_t; typedef int vm_inherit_t; /* XXX: inheritance codes */ typedef off_t voff_t; /* XXX: offset within a uvm_object */ typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */ /* * defines */ /* * the following defines are for uvm_map and functions which call it. */ /* protections bits */ #define UVM_PROT_MASK 0x07 /* protection mask */ #define UVM_PROT_NONE 0x00 /* protection none */ #define UVM_PROT_ALL 0x07 /* everything */ #define UVM_PROT_READ 0x01 /* read */ #define UVM_PROT_WRITE 0x02 /* write */ #define UVM_PROT_EXEC 0x04 /* exec */ /* protection short codes */ #define UVM_PROT_R 0x01 /* read */ #define UVM_PROT_W 0x02 /* write */ #define UVM_PROT_RW 0x03 /* read-write */ #define UVM_PROT_X 0x04 /* exec */ #define UVM_PROT_RX 0x05 /* read-exec */ #define UVM_PROT_WX 0x06 /* write-exec */ #define UVM_PROT_RWX 0x07 /* read-write-exec */ /* 0x08: not used */ /* inherit codes */ #define UVM_INH_MASK 0x30 /* inherit mask */ #define UVM_INH_SHARE 0x00 /* "share" */ #define UVM_INH_COPY 0x10 /* "copy" */ #define UVM_INH_NONE 0x20 /* "none" */ #define UVM_INH_DONATE 0x30 /* "donate" << not used */ /* 0x40, 0x80: not used */ /* bits 0x700: max protection, 0x800: not used */ /* bits 0x7000: advice, 0x8000: not used */ /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */ #define UVM_ADV_NORMAL 0x0 /* 'normal' */ #define UVM_ADV_RANDOM 0x1 /* 'random' */ #define UVM_ADV_SEQUENTIAL 0x2 /* 'sequential' */ /* 0x3: will need, 0x4: dontneed */ #define UVM_ADV_MASK 0x7 /* mask */ /* bits 0xffff0000: mapping flags */ #define UVM_FLAG_FIXED 0x010000 /* find space */ #define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */ #define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */ #define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */ #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */ #define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */ #define UVM_FLAG_NOWAIT 0x400000 /* not allowed to sleep */ #define UVM_FLAG_QUANTUM 0x800000 /* entry never be splitted later */ #define UVM_FLAG_WAITVA 0x1000000 /* wait for va */ #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ /* macros to extract info */ #define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK) #define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4) #define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK) #define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK) #define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \ (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) /* magic offset value: offset not known(obj) or don't care(!obj) */ #define UVM_UNKNOWN_OFFSET ((voff_t) -1) /* * the following defines are for uvm_km_alloc/free's flags */ #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ #define UVM_KMF_EXEC 0x20 /* need executable mapping */ #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */ #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */ #define UVM_KMF_WAITVA 
/*
 * the following defines the strategies for uvm_pagealloc_strat()
 */
#define	UVM_PGA_STRAT_NORMAL	0	/* high -> low free list walk */
#define	UVM_PGA_STRAT_ONLY	1	/* only specified free list */
#define	UVM_PGA_STRAT_FALLBACK	2	/* ONLY falls back on NORMAL */

/*
 * flags for uvm_pagealloc_strat()
 */
#define UVM_PGA_USERESERVE	0x0001	/* ok to use reserve pages */
#define	UVM_PGA_ZERO		0x0002	/* returned page must be zero'd */

/*
 * flags for ubc_alloc()
 */
#define UBC_READ	0x001
#define UBC_WRITE	0x002
#define UBC_FAULTBUSY	0x004

/*
 * flags for ubc_release()
 */
#define UBC_UNMAP	0x010

/*
 * flags for ubc_uiomove()
 */
#define	UBC_PARTIALOK	0x100

/*
 * helpers for calling ubc_release()
 */
#ifdef PMAP_CACHE_VIVT
#define UBC_WANT_UNMAP(vp) (((vp)->v_flag & VTEXT) != 0)
#else
#define UBC_WANT_UNMAP(vp) false
#endif

/*
 * flags for uvn_findpages().
 */
#define UFP_ALL		0x00
#define UFP_NOWAIT	0x01
#define UFP_NOALLOC	0x02
#define UFP_NOCACHE	0x04
#define UFP_NORDONLY	0x08
#define UFP_DIRTYONLY	0x10
#define UFP_BACKWARD	0x20

/*
 * lockflags that control the locking behavior of various functions.
 */
#define	UVM_LK_ENTER	0x00000001	/* map locked on entry */
#define	UVM_LK_EXIT	0x00000002	/* leave map locked on exit */

/*
 * structures
 */

struct buf;
struct core;
struct loadavg;
struct mount;
struct pglist;
struct proc;
struct uio;
struct uvm_object;
struct vm_anon;
struct vmspace;
struct pmap;
struct vnode;
struct pool;
struct simplelock;
struct vm_map_entry;
struct vm_map;
struct vm_page;
struct vmtotal;

/*
 * uvm_pctparam: parameter to be shown as percentage to user.
 */
#define	UVM_PCTPARAM_SHIFT	8
#define	UVM_PCTPARAM_SCALE	(1 << UVM_PCTPARAM_SHIFT)
#define	UVM_PCTPARAM_APPLY(pct, x) \
	(((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT)
struct uvm_pctparam {
	int pct_pct;	/* percent [0, 100] */	/* should be the first member */
	int pct_scaled;
	int (*pct_check)(struct uvm_pctparam *, int);
};
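/*
 * Illustrative sketch (assuming, as uvm_pctparam_set() is expected to
 * maintain, that pct_scaled == pct_pct * UVM_PCTPARAM_SCALE / 100):
 * UVM_PCTPARAM_APPLY() turns "pct percent of x" into one multiply and
 * one shift, at the cost of a small rounding error.
 *
 *	struct uvm_pctparam p;
 *	int n;
 *
 *	uvm_pctparam_init(&p, 30, NULL);  // pct_scaled = 30 * 256 / 100 = 76
 *	// 30% of 1000 pages: (1000 * 76) >> 8 = 296 (exact answer is 300)
 *	n = UVM_PCTPARAM_APPLY(&p, 1000);
 */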
/*
 * uvmexp: global data structures that are exported to parts of the kernel
 * other than the vm system.
 */

struct uvmexp {
	/* vm_page constants */
	int pagesize;   /* size of a page (PAGE_SIZE): must be power of 2 */
	int pagemask;   /* page mask */
	int pageshift;  /* page shift */

	/* vm_page counters */
	int npages;     /* number of pages we manage */
	int free;       /* number of free pages */
	int paging;	/* number of pages in the process of being paged out */
	int wired;      /* number of wired pages */

	/*
	 * Adding anything before this line will break binary compatibility
	 * with top(1) on NetBSD 1.5.
	 */
	int ncolors;	/* number of page color buckets: must be p-o-2 */
	int colormask;	/* color bucket mask */

	int zeropages;		/* number of zero'd pages */
	int reserve_pagedaemon; /* number of pages reserved for pagedaemon */
	int reserve_kernel;	/* number of pages reserved for kernel */
	int anonpages;		/* number of pages used by anon mappings */
	int filepages;		/* number of pages used by cached file data */
	int execpages;		/* number of pages used by cached exec data */

	/* pageout params */
	int freemin;    /* min number of free pages */
	int freetarg;   /* target number of free pages */
	int wiredmax;   /* max number of wired pages */

	/* swap */
	int nswapdev;	/* number of configured swap devices in system */
	int swpages;	/* number of PAGE_SIZE'ed swap pages */
	int swpgavail;	/* number of swap pages currently available */
	int swpginuse;	/* number of swap pages in use */
	int swpgonly;	/* number of swap pages in use, not also in RAM */
	int nswget;	/* number of times fault calls uvm_swap_get() */

	/* stat counters.  XXX: should be 64-bit counters */
	int faults;		/* page fault count */
	int traps;		/* trap count */
	int intrs;		/* interrupt count */
	int swtch;		/* context switch count */
	int softs;		/* software interrupt count */
	int syscalls;		/* system calls */
	int pageins;		/* pagein operation count */
				/* pageouts are in pdpageouts below */
	int swapins;		/* swapins */
	int swapouts;		/* swapouts */
	int pgswapin;		/* pages swapped in */
	int pgswapout;		/* pages swapped out */
	int forks;  		/* forks */
	int forks_ppwait;	/* forks where parent waits */
	int forks_sharevm;	/* forks where vmspace is shared */
	int pga_zerohit;	/* pagealloc where zero wanted and zero
				   was available */
	int pga_zeromiss;	/* pagealloc where zero wanted and zero
				   not available */
	int zeroaborts;		/* number of times page zeroing was aborted */
	int colorhit;		/* pagealloc where we got optimal color */
	int colormiss;		/* pagealloc where we didn't */

	/* fault subcounters.  XXX: should be 64-bit counters */
	int fltnoram;	/* number of times fault was out of ram */
	int fltnoanon;	/* number of times fault was out of anons */
	int fltpgwait;	/* number of times fault had to wait on a page */
	int fltpgrele;	/* number of times fault found a released page */
	int fltrelck;	/* number of times fault relock called */
	int fltrelckok;	/* number of times fault relock is a success */
	int fltanget;	/* number of times fault gets anon page */
	int fltanretry;	/* number of times fault retries an anon get */
	int fltamcopy;	/* number of times fault clears "needs copy" */
	int fltnamap;	/* number of times fault maps a neighbor anon page */
	int fltnomap;	/* number of times fault maps a neighbor obj page */
	int fltlget;	/* number of times fault does a locked pgo_get */
	int fltget;	/* number of times fault does an unlocked get */
	int flt_anon;	/* number of times fault anon (case 1a) */
	int flt_acow;	/* number of times fault anon cow (case 1b) */
	int flt_obj;	/* number of times fault is on object page (2a) */
	int flt_prcopy;	/* number of times fault promotes with copy (2b) */
	int flt_przero;	/* number of times fault promotes with zerofill (2b) */

	/* daemon counters.  XXX: should be 64-bit counters */
	int pdwoke;	/* number of times daemon woke up */
	int pdrevs;	/* number of times daemon rev'd clock hand */
	int pdswout;	/* number of times daemon called for swapout */
	int pdfreed;	/* number of pages daemon freed since boot */
	int pdscans;	/* number of pages daemon scanned since boot */
	int pdanscan;	/* number of anonymous pages scanned by daemon */
	int pdobscan;	/* number of object pages scanned by daemon */
	int pdreact;	/* number of pages daemon reactivated since boot */
	int pdbusy;	/* number of times daemon found a busy page */
	int pdpageouts;	/* number of times daemon started a pageout */
	int pdpending;	/* number of times daemon got a pending pageout */
	int pddeact;	/* number of pages daemon deactivates */
	int pdreanon;	/* anon pages reactivated due to thresholds */
	int pdrefile;	/* file pages reactivated due to thresholds */
	int pdreexec;	/* executable pages reactivated due to thresholds */
};

/*
 * The following structure is 64-bit alignment safe.  New elements
 * should only be added to the end of this structure so binary
 * compatibility can be preserved.
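 *
 * Illustrative userland sketch (the vm.uvmexp2 sysctl node name is an
 * assumption here; it is expected to return this structure, see
 * sysctl(7) on NetBSD):
 *
 *	#include <sys/sysctl.h>
 *	#include <uvm/uvm_extern.h>
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *
 *	struct uvmexp_sysctl u;
 *	size_t len = sizeof(u);
 *
 *	// sysctlbyname(3) fills in at most len bytes; since new fields
 *	// only ever go at the end, the layout of old fields is stable.
 *	if (sysctlbyname("vm.uvmexp2", &u, &len, NULL, 0) == 0)
 *		printf("%" PRId64 " of %" PRId64 " pages free\n",
 *		    u.free, u.npages);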
*/ struct uvmexp_sysctl { int64_t pagesize; int64_t pagemask; int64_t pageshift; int64_t npages; int64_t free; int64_t active; int64_t inactive; int64_t paging; int64_t wired; int64_t zeropages; int64_t reserve_pagedaemon; int64_t reserve_kernel; int64_t freemin; int64_t freetarg; int64_t inactarg; /* unused */ int64_t wiredmax; int64_t nswapdev; int64_t swpages; int64_t swpginuse; int64_t swpgonly; int64_t nswget; int64_t unused1; /* used to be nanon */ int64_t unused2; /* used to be nanonneeded */ int64_t unused3; /* used to be nfreeanon */ int64_t faults; int64_t traps; int64_t intrs; int64_t swtch; int64_t softs; int64_t syscalls; int64_t pageins; int64_t swapins; int64_t swapouts; int64_t pgswapin; int64_t pgswapout; int64_t forks; int64_t forks_ppwait; int64_t forks_sharevm; int64_t pga_zerohit; int64_t pga_zeromiss; int64_t zeroaborts; int64_t fltnoram; int64_t fltnoanon; int64_t fltpgwait; int64_t fltpgrele; int64_t fltrelck; int64_t fltrelckok; int64_t fltanget; int64_t fltanretry; int64_t fltamcopy; int64_t fltnamap; int64_t fltnomap; int64_t fltlget; int64_t fltget; int64_t flt_anon; int64_t flt_acow; int64_t flt_obj; int64_t flt_prcopy; int64_t flt_przero; int64_t pdwoke; int64_t pdrevs; int64_t pdswout; int64_t pdfreed; int64_t pdscans; int64_t pdanscan; int64_t pdobscan; int64_t pdreact; int64_t pdbusy; int64_t pdpageouts; int64_t pdpending; int64_t pddeact; int64_t anonpages; int64_t filepages; int64_t execpages; int64_t colorhit; int64_t colormiss; int64_t ncolors; }; #ifdef _KERNEL /* we need this before including uvm_page.h on some platforms */ extern struct uvmexp uvmexp; #endif /* * Finally, bring in standard UVM headers. */ #include #include #include #include #include #include #include #include #include /* * Shareable process virtual address space. * May eventually be merged with vm_map. * Several fields are temporary (text, data stuff). */ struct vmspace { struct vm_map vm_map; /* VM address map */ int vm_refcnt; /* number of references * * note: protected by vm_map.ref_lock */ void * vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ #define vm_startcopy vm_rssize segsz_t vm_rssize; /* current resident set size in pages */ segsz_t vm_swrss; /* resident set size before last swap */ segsz_t vm_tsize; /* text size (pages) XXX */ segsz_t vm_dsize; /* data size (pages) XXX */ segsz_t vm_ssize; /* stack size (pages) */ void * vm_taddr; /* user virtual address of text XXX */ void * vm_daddr; /* user virtual address of data XXX */ void *vm_maxsaddr; /* user VA at max stack growth */ void *vm_minsaddr; /* user VA at top of stack */ }; #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) #ifdef _KERNEL extern struct pool *uvm_aiobuf_pool; /* * used to keep state while iterating over the map for a core dump. 
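 *
 * Illustrative sketch (hypothetical callback, shaped for the
 * uvm_coredump_walkmap() interface declared below): the walker calls a
 * routine like this once per region, passing 'iocookie' through
 * untouched:
 *
 *	static int
 *	dump_seg(struct proc *p, void *iocookie,
 *	    struct uvm_coredump_state *cs)
 *	{
 *		vaddr_t end = cs->end;
 *
 *		if (cs->flags & UVM_COREDUMP_STACK) {
 *			// a stack region; cs->realend marks how much of
 *			// it actually needs to be written out
 *			end = cs->realend;
 *		}
 *		// write [cs->start, end) via iocookie here; a nonzero
 *		// return value is expected to abort the walk
 *		return 0;
 *	}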
*/ struct uvm_coredump_state { void *cookie; /* opaque for the caller */ vaddr_t start; /* start of region */ vaddr_t realend; /* real end of region */ vaddr_t end; /* virtual end of region */ vm_prot_t prot; /* protection of region */ int flags; /* flags; see below */ }; #define UVM_COREDUMP_STACK 0x01 /* region is user stack */ /* * the various kernel maps, owned by MD code */ extern struct vm_map *exec_map; extern struct vm_map *kernel_map; extern struct vm_map *kmem_map; extern struct vm_map *mb_map; extern struct vm_map *phys_map; /* * macros */ #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap)) #include MALLOC_DECLARE(M_VMMAP); MALLOC_DECLARE(M_VMPMAP); /* vm_machdep.c */ void vmapbuf(struct buf *, vsize_t); void vunmapbuf(struct buf *, vsize_t); #ifndef cpu_swapin void cpu_swapin(struct lwp *); #endif #ifndef cpu_swapout void cpu_swapout(struct lwp *); #endif /* uvm_aobj.c */ struct uvm_object *uao_create(vsize_t, int); void uao_detach(struct uvm_object *); void uao_detach_locked(struct uvm_object *); void uao_reference(struct uvm_object *); void uao_reference_locked(struct uvm_object *); /* uvm_bio.c */ void ubc_init(void); void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); void ubc_release(void *, int); void ubc_flush(struct uvm_object *, voff_t, voff_t); int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t, int, int); /* uvm_fault.c */ #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ /* uvm_glue.c */ #if defined(KGDB) void uvm_chgkprot(void *, size_t, int); #endif void uvm_proc_fork(struct proc *, struct proc *, bool); void uvm_lwp_fork(struct lwp *, struct lwp *, void *, size_t, void (*)(void *), void *); int uvm_coredump_walkmap(struct proc *, void *, int (*)(struct proc *, void *, struct uvm_coredump_state *), void *); void uvm_proc_exit(struct proc *); void uvm_lwp_exit(struct lwp *); void uvm_init_limits(struct proc *); bool uvm_kernacc(void *, size_t, int); __dead void uvm_scheduler(void) __attribute__((noreturn)); void uvm_kick_scheduler(void); void uvm_swapin(struct lwp *); bool uvm_uarea_alloc(vaddr_t *); void uvm_uarea_free(vaddr_t uaddr); void uvm_uarea_drain(bool); int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); void uvm_vsunlock(struct vmspace *, void *, size_t); void uvm_lwp_hold(struct lwp *); void uvm_lwp_rele(struct lwp *); /* uvm_init.c */ void uvm_init(void); /* uvm_io.c */ int uvm_io(struct vm_map *, struct uio *); /* uvm_km.c */ vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); struct vm_map *uvm_km_suballoc(struct vm_map *, vaddr_t *, vaddr_t *, vsize_t, int, bool, struct vm_map_kernel *); vaddr_t uvm_km_alloc_poolpage(struct vm_map *, bool); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, bool); void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t); void uvm_km_vacache_init(struct vm_map *, const char *, size_t); /* uvm_map.c */ int uvm_map(struct vm_map *, vaddr_t *, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t); int uvm_map_pageable(struct vm_map *, vaddr_t, vaddr_t, bool, int); int uvm_map_pageable_all(struct vm_map *, int, vsize_t); bool uvm_map_checkprot(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t); int uvm_map_protect(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, bool); struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t); void 
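/*
 * Illustrative sketch (not compiled here): allocating and releasing
 * wired, zero-filled kernel memory with the uvm_km interface declared
 * above.  Exactly one allocation type from UVM_KMF_TYPEMASK must be
 * given; with UVM_KMF_CANFAIL the call may return 0 and the caller
 * handles the failure.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;
 *	// ... use the page ...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */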
uvmspace_init(struct vmspace *, struct pmap *, vaddr_t, vaddr_t); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t); struct vmspace *uvmspace_fork(struct vmspace *); void uvmspace_addref(struct vmspace *); void uvmspace_free(struct vmspace *); void uvmspace_share(struct proc *, struct proc *); void uvmspace_unshare(struct lwp *); /* uvm_meter.c */ void uvm_meter(void); int uvm_sysctl(int *, u_int, void *, size_t *, void *, size_t, struct proc *); int uvm_pctparam_check(struct uvm_pctparam *, int); void uvm_pctparam_set(struct uvm_pctparam *, int); int uvm_pctparam_get(struct uvm_pctparam *); void uvm_pctparam_init(struct uvm_pctparam *, int, int (*)(struct uvm_pctparam *, int)); int uvm_pctparam_createsysctlnode(struct uvm_pctparam *, const char *, const char *); /* uvm_mmap.c */ int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, void *, voff_t, vsize_t); vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); /* uvm_mremap.c */ int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, vsize_t, struct proc *, int); /* uvm_object.c */ int uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end); void uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end); /* uvm_page.c */ struct vm_page *uvm_pagealloc_strat(struct uvm_object *, voff_t, struct vm_anon *, int, int, int); #define uvm_pagealloc(obj, off, anon, flags) \ uvm_pagealloc_strat((obj), (off), (anon), (flags), \ UVM_PGA_STRAT_NORMAL, 0) void uvm_pagereplace(struct vm_page *, struct vm_page *); void uvm_pagerealloc(struct vm_page *, struct uvm_object *, voff_t); /* Actually, uvm_page_physload takes PF#s which need their own type */ void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int); void uvm_setpagesize(void); /* uvm_pager.c */ void uvm_aio_biodone1(struct buf *); void uvm_aio_biodone(struct buf *); void uvm_aio_aiodone(struct buf *); /* uvm_pdaemon.c */ void uvm_pageout(void *); struct work; void uvm_aiodone_worker(struct work *, void *); void uvm_estimatepageable(int *, int *); /* uvm_pglist.c */ int uvm_pglistalloc(psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int); void uvm_pglistfree(struct pglist *); /* uvm_swap.c */ void uvm_swap_init(void); /* uvm_unix.c */ int uvm_grow(struct proc *, vaddr_t); /* uvm_user.c */ void uvm_deallocate(struct vm_map *, vaddr_t, vsize_t); /* uvm_vnode.c */ void uvm_vnp_setsize(struct vnode *, voff_t); void uvm_vnp_setwritesize(struct vnode *, voff_t); void uvm_vnp_sync(struct mount *); int uvn_findpages(struct uvm_object *, voff_t, int *, struct vm_page **, int); void uvm_vnp_zerorange(struct vnode *, off_t, size_t); bool uvn_text_p(struct uvm_object *); bool uvn_clean_p(struct uvm_object *); bool uvn_needs_writefault_p(struct uvm_object *); /* kern_malloc.c */ void kmeminit_nkmempages(void); void kmeminit(void); extern int nkmempages; #endif /* _KERNEL */ #endif /* _UVM_UVM_EXTERN_H_ */ @ 1.134.4.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.135 2007/08/18 00:21:11 ad Exp $ */ a596 1 void uvm_cpu_attach(struct cpu_info *); @ 1.134.4.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.134.4.1 2007/09/03 16:49:16 jmcneill Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.134.4.3 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.134.4.2 2007/11/06 19:25:41 joerg Exp $ */ d297 3 a299 3 unsigned anonpages; /* number of pages used by anon mappings */ unsigned filepages; /* number of pages used by cached file data */ unsigned execpages; /* number of pages used by cached exec data */ @ 1.134.4.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.139 2007/12/05 09:37:34 yamt Exp $ */ d207 9 a480 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* d716 1 @ 1.133 log @Retire uvn_attach() - it abuses VXLOCK and its functionality, setting vnode sizes, is handled elsewhere: file system vnode creation or spec_open() for regular files or block special files, respectively. Add a call to VOP_MMAP() to the pagedvn exec path, since the vnode is being memory mapped. reviewed by tech-kern & wrstuden @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.132 2007/07/17 17:42:08 joerg Exp $ */ d565 1 a565 1 int); @ 1.132 log @Add native mremap system call based on the UVM implementation for Linux compat. Add code to enforce alignment of the new location. Special thanks to wizd for helping with the man page. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.131 2007/07/09 21:11:36 ad Exp $ */ a715 1 struct uvm_object *uvn_attach(void *, vm_prot_t); @ 1.132.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.134 2007/07/27 09:50:37 yamt Exp $ */ d565 1 a565 1 int, int); d716 1 @ 1.132.2.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.132.2.1 2007/08/15 13:51:20 skrll Exp $ */ a596 1 void uvm_cpu_attach(struct cpu_info *); @ 1.131 log @Merge some of the less invasive changes from the vmlocking branch: - kthread, callout, devsw API changes - select()/poll() improvements - miscellaneous MT safety improvements @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.130 2007/06/05 12:31:35 yamt Exp $ */ a664 1 #define UVM_MREMAP_FIXED 1 @ 1.130 log @improve post-ubc file overwrite performance in common cases. ie. when it's safe, actually overwrite blocks rather than doing read-modify-write. also fixes PR/33152 and PR/36303. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.129 2007/03/24 21:15:39 rmind Exp $ */ d595 2 @ 1.129 log @Export uvm_uarea_free() to the rest. Make things compile again. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128 2007/03/04 06:03:48 christos Exp $ */ d192 3 a194 3 #define UBC_READ 0x01 #define UBC_WRITE 0x02 #define UBC_FAULTBUSY 0x04 d199 6 a204 1 #define UBC_UNMAP 0x01 d564 2 d713 1 @ 1.128 log @Kill caddr_t; there will be some MI fallout, but it will be fixed shortly. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.127 2007/02/22 06:05:00 thorpej Exp $ */ d584 1 @ 1.128.4.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.131 2007/07/09 21:11:36 ad Exp $ */ d192 3 a194 3 #define UBC_READ 0x001 #define UBC_WRITE 0x002 #define UBC_FAULTBUSY 0x004 d199 1 a199 6 #define UBC_UNMAP 0x010 /* * flags for ubc_uiomve() */ #define UBC_PARTIALOK 0x100 a558 2 int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t, int); a583 1 void uvm_uarea_free(vaddr_t uaddr); a586 2 void uvm_lwp_hold(struct lwp *); void uvm_lwp_rele(struct lwp *); a704 1 void uvm_vnp_setwritesize(struct vnode *, voff_t); @ 1.128.2.1 log @- Put a per-LWP lock around swapin / swapout. - Replace use of lockmgr(). - Minor locking fixes and assertions. - uvm_map.h no longer pulls in proc.h, etc. - Use kpause where appropriate. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128 2007/03/04 06:03:48 christos Exp $ */ d587 1 a587 2 void uvm_lwp_hold(struct lwp *); void uvm_lwp_rele(struct lwp *); @ 1.128.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.1 2007/04/05 21:32:52 ad Exp $ */ a583 1 void uvm_uarea_free(vaddr_t uaddr); @ 1.128.2.3 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.2 2007/04/10 13:26:56 ad Exp $ */ d192 3 a194 3 #define UBC_READ 0x001 #define UBC_WRITE 0x002 #define UBC_FAULTBUSY 0x004 d199 1 a199 6 #define UBC_UNMAP 0x010 /* * flags for ubc_uiomve() */ #define UBC_PARTIALOK 0x100 a558 2 int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t, int); a706 1 void uvm_vnp_setwritesize(struct vnode *, voff_t); @ 1.128.2.4 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.3 2007/06/09 23:58:21 ad Exp $ */ a597 1 @ 1.128.2.5 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.4 2007/07/15 15:53:07 ad Exp $ */ d565 1 a565 1 int, int); a596 1 void uvm_cpu_attach(struct cpu_info *); d665 1 d717 1 @ 1.128.2.6 log @fix some races around pagedaemon and uvm_wait. ok'ed by Andrew Doran. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.5 2007/08/20 21:28:31 ad Exp $ */ a696 2 void uvm_pageout_start(int); void uvm_pageout_done(int); @ 1.128.2.7 log @Free uareas back to the uarea cache on the CPU where they were last used. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128.2.6 2007/08/21 22:32:25 yamt Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.128.6.1 log @Pullup to -current @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.128 2007/03/04 06:03:48 christos Exp $ */ a583 1 void uvm_uarea_free(vaddr_t uaddr); @ 1.127 log @TRUE -> true, FALSE -> false @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.126 2007/02/21 23:00:12 thorpej Exp $ */ d484 1 a484 1 caddr_t vm_shm; /* SYS5 shared memory private data XXX */ d492 4 a495 4 caddr_t vm_taddr; /* user virtual address of text XXX */ caddr_t vm_daddr; /* user virtual address of data XXX */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ caddr_t vm_minsaddr; /* user VA at top of stack */ d567 1 a567 1 void uvm_chgkprot(caddr_t, size_t, int); d579 1 a579 1 bool uvm_kernacc(caddr_t, size_t, int); @ 1.126 log @Replace the Mach-derived boolean_t type with the C99 bool type. A future commit will replace use of TRUE and FALSE with true and false. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.125 2007/02/15 20:21:13 ad Exp $ */ d207 1 a207 1 #define UBC_WANT_UNMAP(vp) FALSE @ 1.125 log @Add uvm_kick_scheduler() (MP safe) to replace wakeup(&proc0). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.124 2006/12/21 15:55:26 yamt Exp $ */ d569 1 a569 1 void uvm_proc_fork(struct proc *, struct proc *, boolean_t); d579 1 a579 1 boolean_t uvm_kernacc(caddr_t, size_t, int); d583 2 a584 2 boolean_t uvm_uarea_alloc(vaddr_t *); void uvm_uarea_drain(boolean_t); d602 1 a602 1 vaddr_t *, vsize_t, int, boolean_t, d604 1 a604 1 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, boolean_t); d606 1 a606 1 vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, boolean_t); d616 1 a616 1 vaddr_t, boolean_t, int); d618 1 a618 1 boolean_t uvm_map_checkprot(struct vm_map *, vaddr_t, d621 1 a621 1 vaddr_t, vm_prot_t, boolean_t); d710 3 a712 3 boolean_t uvn_text_p(struct uvm_object *); boolean_t uvn_clean_p(struct uvm_object *); boolean_t uvn_needs_writefault_p(struct uvm_object *); @ 1.125.2.1 log @- sync with head. 
- move sched_changepri back to kern_synch.c as it doesn't know PPQ anymore. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.125 2007/02/15 20:21:13 ad Exp $ */ d207 1 a207 1 #define UBC_WANT_UNMAP(vp) false d569 1 a569 1 void uvm_proc_fork(struct proc *, struct proc *, bool); d579 1 a579 1 bool uvm_kernacc(caddr_t, size_t, int); d583 2 a584 2 bool uvm_uarea_alloc(vaddr_t *); void uvm_uarea_drain(bool); d602 1 a602 1 vaddr_t *, vsize_t, int, bool, d604 1 a604 1 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, bool); d606 1 a606 1 vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, bool); d616 1 a616 1 vaddr_t, bool, int); d618 1 a618 1 bool uvm_map_checkprot(struct vm_map *, vaddr_t, d621 1 a621 1 vaddr_t, vm_prot_t, bool); d710 3 a712 3 bool uvn_text_p(struct uvm_object *); bool uvn_clean_p(struct uvm_object *); bool uvn_needs_writefault_p(struct uvm_object *); @ 1.125.2.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.125.2.1 2007/02/27 16:55:25 yamt Exp $ */ d484 1 a484 1 void * vm_shm; /* SYS5 shared memory private data XXX */ d492 4 a495 4 void * vm_taddr; /* user virtual address of text XXX */ void * vm_daddr; /* user virtual address of data XXX */ void *vm_maxsaddr; /* user VA at max stack growth */ void *vm_minsaddr; /* user VA at top of stack */ d567 1 a567 1 void uvm_chgkprot(void *, size_t, int); d579 1 a579 1 bool uvm_kernacc(void *, size_t, int); @ 1.125.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.125.2.2 2007/03/12 06:01:11 rmind Exp $ */ a583 1 void uvm_uarea_free(vaddr_t uaddr); @ 1.124 log @merge yamt-splraiseipl branch. - finish implementing splraiseipl (and makeiplcookie). http://mail-index.NetBSD.org/tech-kern/2006/07/01/0000.html - complete workqueue(9) and fix its ipl problem, which is reported to cause audio skipping. - fix netbt (at least compilation problems) for some ports. - fix PR/33218. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.123 2006/12/07 14:06:51 elad Exp $ */ d581 1 @ 1.123 log @Back out uvm_is_swap_device(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.122 2006/12/01 16:06:09 elad Exp $ */ d684 2 a685 1 void uvm_aiodone_daemon(void *); @ 1.122 log @Introduce uvm_is_swap_device(), to check if the passed struct vnode * is used as a swap device or not. Okay mrg@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.121 2006/10/12 10:14:20 yamt Exp $ */ a693 1 boolean_t uvm_is_swap_device(struct vnode *); @ 1.122.2.1 log @Pull up following revision(s) (requested by elad in ticket #261): sys/uvm/uvm_extern.h: revision 1.123 sys/uvm/uvm_swap.c: revision 1.115 share/man/man9/uvm.9: revision 1.79 Back out uvm_is_swap_device(). @ text @d1 1 a1 1 /* $NetBSD$ */ d694 1 @ 1.121 log @move some knowledge about vnode into uvm_vnode.c. @ text @d1 1 a1 1 /* $NetBSD$ */ d694 1 @ 1.120 log @uobj_wirepages and uobj_unwirepages from Mindaugas. PR/34771. (commented out in files.uvm for now because there is no user in tree.) http://mail-index.netbsd.org/tech-kern/2006/09/24/0000.html http://mail-index.netbsd.org/tech-kern/2006/10/10/0000.html @ text @d708 3 @ 1.119 log @add support for O_DIRECT (I/O directly to application memory, bypassing any kernel caching for file data). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.118 2006/09/15 15:51:13 yamt Exp $ */ d656 6 @ 1.118 log @merge yamt-pdpolicy branch. 
- separate page replacement policy from the rest of kernel - implement an alternative replacement policy @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.117 2006/09/01 20:39:05 cherry Exp $ */ d584 2 a585 2 int uvm_vslock(struct proc *, caddr_t, size_t, vm_prot_t); void uvm_vsunlock(struct proc *, caddr_t, size_t); @ 1.118.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.118 2006/09/15 15:51:13 yamt Exp $ */ d584 2 a585 2 int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); void uvm_vsunlock(struct vmspace *, void *, size_t); a655 6 /* uvm_object.c */ int uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end); void uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end); a701 3 boolean_t uvn_text_p(struct uvm_object *); boolean_t uvn_clean_p(struct uvm_object *); boolean_t uvn_needs_writefault_p(struct uvm_object *); @ 1.118.2.2 log @use workqueue for aiodoned. @ text @d1 1 a1 1 /* $NetBSD$ */ d684 1 a684 2 struct work; void uvm_aiodone_worker(struct work *, void *); @ 1.117 log @bumps kernel aobj to 64 bit. \ See: http://mail-index.netbsd.org/tech-kern/2006/03/07/0007.html @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.116 2006/08/04 22:42:36 he Exp $ */ d259 1 a259 1 int pct_pct; /* percent [0, 100] */ d261 1 a277 2 int active; /* number of active pages */ int inactive; /* number of pages that we free'd but may want back */ a298 1 int inactarg; /* target number of inactive pages */ a299 14 int anonmin; /* min threshold for anon pages */ int execmin; /* min threshold for executable pages */ int filemin; /* min threshold for file pages */ int anonminpct; /* min percent anon pages */ int execminpct; /* min percent executable pages */ int fileminpct; /* min percent file pages */ int anonmax; /* max threshold for anon pages */ int execmax; /* max threshold for executable pages */ int filemax; /* max threshold for file pages */ int anonmaxpct; /* max percent anon pages */ int execmaxpct; /* max percent executable pages */ int filemaxpct; /* max percent file pages */ struct uvm_pctparam inactivepct; /* length of inactive queue (pct of the whole queue) */ d392 1 a392 1 int64_t inactarg; d636 1 d638 5 d679 1 @ 1.117.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.121 2006/10/12 10:14:20 yamt Exp $ */ d259 1 a259 1 int pct_pct; /* percent [0, 100] */ /* should be the first member */ a260 1 int (*pct_check)(struct uvm_pctparam *, int); d277 2 d300 1 d302 14 d408 1 a408 1 int64_t inactarg; /* unused */ d600 2 a601 2 int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); void uvm_vsunlock(struct vmspace *, void *, size_t); a651 1 int uvm_pctparam_check(struct uvm_pctparam *, int); a652 5 int uvm_pctparam_get(struct uvm_pctparam *); void uvm_pctparam_init(struct uvm_pctparam *, int, int (*)(struct uvm_pctparam *, int)); int uvm_pctparam_createsysctlnode(struct uvm_pctparam *, const char *, const char *); a665 6 /* uvm_object.c */ int uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end); void uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end); a688 1 void uvm_estimatepageable(int *, int *); a710 3 boolean_t uvn_text_p(struct uvm_object *); boolean_t uvn_clean_p(struct uvm_object *); boolean_t uvn_needs_writefault_p(struct uvm_object *); @ 1.117.2.2 log @Sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.124 2006/12/21 15:55:26 yamt Exp $ */ d684 1 a684 2 struct work; void uvm_aiodone_worker(struct work *, void *); @ 1.116 log @Rearrange included headers and/or add include of and , so that the mipsco port can build again, ref. http://mail-index.netbsd.org/port-mips/2006/08/04/0000.html Reviewed by thorpej @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.115 2006/07/05 14:26:42 drochner Exp $ */ d90 1 @ 1.115 log @Introduce a UVM_KMF_EXEC flag for uvm_km_alloc() which enforces an executable mapping. Up to now, only R+W was requested from pmap_kenter_pa. On most CPUs, we get an executable mapping anyway, due to lack of hardware support or due to lazyness in the pmap implementation. Only alpha does obey VM_PROT_EXECUTE, afaics. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.114 2006/05/19 15:08:14 yamt Exp $ */ d482 1 a483 1 #include @ 1.114 log @UVM_MAPFLAG: add missing parens. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.113 2006/05/14 21:38:17 elad Exp $ */ d170 1 @ 1.114.2.1 log @file uvm_extern.h was added on branch chap-midi on 2006-05-19 15:08:15 +0000 @ text @d1 717 @ 1.114.2.2 log @UVM_MAPFLAG: add missing parens. @ text @a0 717 /* $NetBSD: uvm_extern.h,v 1.114 2006/05/19 15:08:14 yamt Exp $ */ /* * * Copyright (c) 1997 Charles D. Cranor and Washington University. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor and * Washington University. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: Id: uvm_extern.h,v 1.1.2.21 1998/02/07 01:16:53 chs Exp */ /*- * Copyright (c) 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vm_extern.h 8.5 (Berkeley) 5/3/95 */ #ifndef _UVM_UVM_EXTERN_H_ #define _UVM_UVM_EXTERN_H_ /* * uvm_extern.h: this file defines the external interface to the VM system. * * this should be the only file included by non-VM parts of the kernel * which need access to VM services. if you want to know the interface * to the MI VM layer without knowing the details, this is the file to * learn. * * NOTE: vm system calls are prototyped in syscallargs.h */ /* * typedefs, necessary for standard UVM headers. */ typedef unsigned int uvm_flag_t; typedef int vm_inherit_t; /* XXX: inheritance codes */ typedef off_t voff_t; /* XXX: offset within a uvm_object */ /* * defines */ /* * the following defines are for uvm_map and functions which call it. 
*/ /* protections bits */ #define UVM_PROT_MASK 0x07 /* protection mask */ #define UVM_PROT_NONE 0x00 /* protection none */ #define UVM_PROT_ALL 0x07 /* everything */ #define UVM_PROT_READ 0x01 /* read */ #define UVM_PROT_WRITE 0x02 /* write */ #define UVM_PROT_EXEC 0x04 /* exec */ /* protection short codes */ #define UVM_PROT_R 0x01 /* read */ #define UVM_PROT_W 0x02 /* write */ #define UVM_PROT_RW 0x03 /* read-write */ #define UVM_PROT_X 0x04 /* exec */ #define UVM_PROT_RX 0x05 /* read-exec */ #define UVM_PROT_WX 0x06 /* write-exec */ #define UVM_PROT_RWX 0x07 /* read-write-exec */ /* 0x08: not used */ /* inherit codes */ #define UVM_INH_MASK 0x30 /* inherit mask */ #define UVM_INH_SHARE 0x00 /* "share" */ #define UVM_INH_COPY 0x10 /* "copy" */ #define UVM_INH_NONE 0x20 /* "none" */ #define UVM_INH_DONATE 0x30 /* "donate" << not used */ /* 0x40, 0x80: not used */ /* bits 0x700: max protection, 0x800: not used */ /* bits 0x7000: advice, 0x8000: not used */ /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */ #define UVM_ADV_NORMAL 0x0 /* 'normal' */ #define UVM_ADV_RANDOM 0x1 /* 'random' */ #define UVM_ADV_SEQUENTIAL 0x2 /* 'sequential' */ /* 0x3: will need, 0x4: dontneed */ #define UVM_ADV_MASK 0x7 /* mask */ /* bits 0xffff0000: mapping flags */ #define UVM_FLAG_FIXED 0x010000 /* find space */ #define UVM_FLAG_OVERLAY 0x020000 /* establish overlay */ #define UVM_FLAG_NOMERGE 0x040000 /* don't merge map entries */ #define UVM_FLAG_COPYONW 0x080000 /* set copy_on_write flag */ #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce malloc() */ #define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */ #define UVM_FLAG_NOWAIT 0x400000 /* not allowed to sleep */ #define UVM_FLAG_QUANTUM 0x800000 /* entry never be splitted later */ #define UVM_FLAG_WAITVA 0x1000000 /* wait for va */ #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ /* macros to extract info */ #define UVM_PROTECTION(X) ((X) & UVM_PROT_MASK) #define UVM_INHERIT(X) (((X) & UVM_INH_MASK) >> 4) #define UVM_MAXPROTECTION(X) (((X) >> 8) & UVM_PROT_MASK) #define UVM_ADVICE(X) (((X) >> 12) & UVM_ADV_MASK) #define UVM_MAPFLAG(PROT,MAXPROT,INH,ADVICE,FLAGS) \ (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) /* magic offset value: offset not known(obj) or don't care(!obj) */ #define UVM_UNKNOWN_OFFSET ((voff_t) -1) /* * the following defines are for uvm_km_alloc/free's flags */ #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ #define UVM_KMF_TRYLOCK UVM_FLAG_TRYLOCK /* try locking only */ #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */ #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */ /* * the following defines the strategies for uvm_pagealloc_strat() */ #define UVM_PGA_STRAT_NORMAL 0 /* high -> low free list walk */ #define UVM_PGA_STRAT_ONLY 1 /* only specified free list */ #define UVM_PGA_STRAT_FALLBACK 2 /* ONLY falls back on NORMAL */ /* * flags for uvm_pagealloc_strat() */ #define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */ #define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */ /* * flags for ubc_alloc() */ #define UBC_READ 0x01 #define UBC_WRITE 0x02 #define UBC_FAULTBUSY 0x04 /* * flags for ubc_release() */ 
#define UBC_UNMAP 0x01 /* * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_flag & VTEXT) != 0) #else #define UBC_WANT_UNMAP(vp) FALSE #endif /* * flags for uvn_findpages(). */ #define UFP_ALL 0x00 #define UFP_NOWAIT 0x01 #define UFP_NOALLOC 0x02 #define UFP_NOCACHE 0x04 #define UFP_NORDONLY 0x08 #define UFP_DIRTYONLY 0x10 #define UFP_BACKWARD 0x20 /* * lockflags that control the locking behavior of various functions. */ #define UVM_LK_ENTER 0x00000001 /* map locked on entry */ #define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */ /* * structures */ struct buf; struct core; struct loadavg; struct mount; struct pglist; struct proc; struct uio; struct uvm_object; struct vm_anon; struct vmspace; struct pmap; struct vnode; struct pool; struct simplelock; struct vm_map_entry; struct vm_map; struct vm_page; struct vmtotal; /* * uvm_pctparam: parameter to be shown as percentage to user. */ #define UVM_PCTPARAM_SHIFT 8 #define UVM_PCTPARAM_SCALE (1 << UVM_PCTPARAM_SHIFT) #define UVM_PCTPARAM_APPLY(pct, x) \ (((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT) struct uvm_pctparam { int pct_pct; /* percent [0, 100] */ int pct_scaled; }; /* * uvmexp: global data structures that are exported to parts of the kernel * other than the vm system. */ struct uvmexp { /* vm_page constants */ int pagesize; /* size of a page (PAGE_SIZE): must be power of 2 */ int pagemask; /* page mask */ int pageshift; /* page shift */ /* vm_page counters */ int npages; /* number of pages we manage */ int free; /* number of free pages */ int active; /* number of active pages */ int inactive; /* number of pages that we free'd but may want back */ int paging; /* number of pages in the process of being paged out */ int wired; /* number of wired pages */ /* * Adding anything before this line will break binary compatibility * with top(1) on NetBSD 1.5. 
*/ int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ int zeropages; /* number of zero'd pages */ int reserve_pagedaemon; /* number of pages reserved for pagedaemon */ int reserve_kernel; /* number of pages reserved for kernel */ int anonpages; /* number of pages used by anon mappings */ int filepages; /* number of pages used by cached file data */ int execpages; /* number of pages used by cached exec data */ /* pageout params */ int freemin; /* min number of free pages */ int freetarg; /* target number of free pages */ int inactarg; /* target number of inactive pages */ int wiredmax; /* max number of wired pages */ int anonmin; /* min threshold for anon pages */ int execmin; /* min threshold for executable pages */ int filemin; /* min threshold for file pages */ int anonminpct; /* min percent anon pages */ int execminpct; /* min percent executable pages */ int fileminpct; /* min percent file pages */ int anonmax; /* max threshold for anon pages */ int execmax; /* max threshold for executable pages */ int filemax; /* max threshold for file pages */ int anonmaxpct; /* max percent anon pages */ int execmaxpct; /* max percent executable pages */ int filemaxpct; /* max percent file pages */ struct uvm_pctparam inactivepct; /* length of inactive queue (pct of the whole queue) */ /* swap */ int nswapdev; /* number of configured swap devices in system */ int swpages; /* number of PAGE_SIZE'ed swap pages */ int swpgavail; /* number of swap pages currently available */ int swpginuse; /* number of swap pages in use */ int swpgonly; /* number of swap pages in use, not also in RAM */ int nswget; /* number of times fault calls uvm_swap_get() */ /* stat counters. XXX: should be 64-bit counters */ int faults; /* page fault count */ int traps; /* trap count */ int intrs; /* interrupt count */ int swtch; /* context switch count */ int softs; /* software interrupt count */ int syscalls; /* system calls */ int pageins; /* pagein operation count */ /* pageouts are in pdpageouts below */ int swapins; /* swapins */ int swapouts; /* swapouts */ int pgswapin; /* pages swapped in */ int pgswapout; /* pages swapped out */ int forks; /* forks */ int forks_ppwait; /* forks where parent waits */ int forks_sharevm; /* forks where vmspace is shared */ int pga_zerohit; /* pagealloc where zero wanted and zero was available */ int pga_zeromiss; /* pagealloc where zero wanted and zero not available */ int zeroaborts; /* number of times page zeroing was aborted */ int colorhit; /* pagealloc where we got optimal color */ int colormiss; /* pagealloc where we didn't */ /* fault subcounters. 
XXX: should be 64-bit counters */ int fltnoram; /* number of times fault was out of ram */ int fltnoanon; /* number of times fault was out of anons */ int fltpgwait; /* number of times fault had to wait on a page */ int fltpgrele; /* number of times fault found a released page */ int fltrelck; /* number of times fault relock called */ int fltrelckok; /* number of times fault relock is a success */ int fltanget; /* number of times fault gets anon page */ int fltanretry; /* number of times fault retrys an anon get */ int fltamcopy; /* number of times fault clears "needs copy" */ int fltnamap; /* number of times fault maps a neighbor anon page */ int fltnomap; /* number of times fault maps a neighbor obj page */ int fltlget; /* number of times fault does a locked pgo_get */ int fltget; /* number of times fault does an unlocked get */ int flt_anon; /* number of times fault anon (case 1a) */ int flt_acow; /* number of times fault anon cow (case 1b) */ int flt_obj; /* number of times fault is on object page (2a) */ int flt_prcopy; /* number of times fault promotes with copy (2b) */ int flt_przero; /* number of times fault promotes with zerofill (2b) */ /* daemon counters. XXX: should be 64-bit counters */ int pdwoke; /* number of times daemon woke up */ int pdrevs; /* number of times daemon rev'd clock hand */ int pdswout; /* number of times daemon called for swapout */ int pdfreed; /* number of pages daemon freed since boot */ int pdscans; /* number of pages daemon scanned since boot */ int pdanscan; /* number of anonymous pages scanned by daemon */ int pdobscan; /* number of object pages scanned by daemon */ int pdreact; /* number of pages daemon reactivated since boot */ int pdbusy; /* number of times daemon found a busy page */ int pdpageouts; /* number of times daemon started a pageout */ int pdpending; /* number of times daemon got a pending pagout */ int pddeact; /* number of pages daemon deactivates */ int pdreanon; /* anon pages reactivated due to thresholds */ int pdrefile; /* file pages reactivated due to thresholds */ int pdreexec; /* executable pages reactivated due to thresholds */ }; /* * The following structure is 64-bit alignment safe. New elements * should only be added to the end of this structure so binary * compatibility can be preserved. 
*/ struct uvmexp_sysctl { int64_t pagesize; int64_t pagemask; int64_t pageshift; int64_t npages; int64_t free; int64_t active; int64_t inactive; int64_t paging; int64_t wired; int64_t zeropages; int64_t reserve_pagedaemon; int64_t reserve_kernel; int64_t freemin; int64_t freetarg; int64_t inactarg; int64_t wiredmax; int64_t nswapdev; int64_t swpages; int64_t swpginuse; int64_t swpgonly; int64_t nswget; int64_t unused1; /* used to be nanon */ int64_t unused2; /* used to be nanonneeded */ int64_t unused3; /* used to be nfreeanon */ int64_t faults; int64_t traps; int64_t intrs; int64_t swtch; int64_t softs; int64_t syscalls; int64_t pageins; int64_t swapins; int64_t swapouts; int64_t pgswapin; int64_t pgswapout; int64_t forks; int64_t forks_ppwait; int64_t forks_sharevm; int64_t pga_zerohit; int64_t pga_zeromiss; int64_t zeroaborts; int64_t fltnoram; int64_t fltnoanon; int64_t fltpgwait; int64_t fltpgrele; int64_t fltrelck; int64_t fltrelckok; int64_t fltanget; int64_t fltanretry; int64_t fltamcopy; int64_t fltnamap; int64_t fltnomap; int64_t fltlget; int64_t fltget; int64_t flt_anon; int64_t flt_acow; int64_t flt_obj; int64_t flt_prcopy; int64_t flt_przero; int64_t pdwoke; int64_t pdrevs; int64_t pdswout; int64_t pdfreed; int64_t pdscans; int64_t pdanscan; int64_t pdobscan; int64_t pdreact; int64_t pdbusy; int64_t pdpageouts; int64_t pdpending; int64_t pddeact; int64_t anonpages; int64_t filepages; int64_t execpages; int64_t colorhit; int64_t colormiss; int64_t ncolors; }; #ifdef _KERNEL /* we need this before including uvm_page.h on some platforms */ extern struct uvmexp uvmexp; #endif /* * Finally, bring in standard UVM headers. */ #include #include #include #include #include #include #include #include #include /* * Shareable process virtual address space. * May eventually be merged with vm_map. * Several fields are temporary (text, data stuff). */ struct vmspace { struct vm_map vm_map; /* VM address map */ int vm_refcnt; /* number of references * * note: protected by vm_map.ref_lock */ caddr_t vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ #define vm_startcopy vm_rssize segsz_t vm_rssize; /* current resident set size in pages */ segsz_t vm_swrss; /* resident set size before last swap */ segsz_t vm_tsize; /* text size (pages) XXX */ segsz_t vm_dsize; /* data size (pages) XXX */ segsz_t vm_ssize; /* stack size (pages) */ caddr_t vm_taddr; /* user virtual address of text XXX */ caddr_t vm_daddr; /* user virtual address of data XXX */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ caddr_t vm_minsaddr; /* user VA at top of stack */ }; #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) #ifdef _KERNEL extern struct pool *uvm_aiobuf_pool; /* * used to keep state while iterating over the map for a core dump. 
*/ struct uvm_coredump_state { void *cookie; /* opaque for the caller */ vaddr_t start; /* start of region */ vaddr_t realend; /* real end of region */ vaddr_t end; /* virtual end of region */ vm_prot_t prot; /* protection of region */ int flags; /* flags; see below */ }; #define UVM_COREDUMP_STACK 0x01 /* region is user stack */ /* * the various kernel maps, owned by MD code */ extern struct vm_map *exec_map; extern struct vm_map *kernel_map; extern struct vm_map *kmem_map; extern struct vm_map *mb_map; extern struct vm_map *phys_map; /* * macros */ #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap)) #include MALLOC_DECLARE(M_VMMAP); MALLOC_DECLARE(M_VMPMAP); /* vm_machdep.c */ void vmapbuf(struct buf *, vsize_t); void vunmapbuf(struct buf *, vsize_t); #ifndef cpu_swapin void cpu_swapin(struct lwp *); #endif #ifndef cpu_swapout void cpu_swapout(struct lwp *); #endif /* uvm_aobj.c */ struct uvm_object *uao_create(vsize_t, int); void uao_detach(struct uvm_object *); void uao_detach_locked(struct uvm_object *); void uao_reference(struct uvm_object *); void uao_reference_locked(struct uvm_object *); /* uvm_bio.c */ void ubc_init(void); void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); void ubc_release(void *, int); void ubc_flush(struct uvm_object *, voff_t, voff_t); /* uvm_fault.c */ #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ /* uvm_glue.c */ #if defined(KGDB) void uvm_chgkprot(caddr_t, size_t, int); #endif void uvm_proc_fork(struct proc *, struct proc *, boolean_t); void uvm_lwp_fork(struct lwp *, struct lwp *, void *, size_t, void (*)(void *), void *); int uvm_coredump_walkmap(struct proc *, void *, int (*)(struct proc *, void *, struct uvm_coredump_state *), void *); void uvm_proc_exit(struct proc *); void uvm_lwp_exit(struct lwp *); void uvm_init_limits(struct proc *); boolean_t uvm_kernacc(caddr_t, size_t, int); __dead void uvm_scheduler(void) __attribute__((noreturn)); void uvm_swapin(struct lwp *); boolean_t uvm_uarea_alloc(vaddr_t *); void uvm_uarea_drain(boolean_t); int uvm_vslock(struct proc *, caddr_t, size_t, vm_prot_t); void uvm_vsunlock(struct proc *, caddr_t, size_t); /* uvm_init.c */ void uvm_init(void); /* uvm_io.c */ int uvm_io(struct vm_map *, struct uio *); /* uvm_km.c */ vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); struct vm_map *uvm_km_suballoc(struct vm_map *, vaddr_t *, vaddr_t *, vsize_t, int, boolean_t, struct vm_map_kernel *); vaddr_t uvm_km_alloc_poolpage(struct vm_map *, boolean_t); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, boolean_t); void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t); void uvm_km_vacache_init(struct vm_map *, const char *, size_t); /* uvm_map.c */ int uvm_map(struct vm_map *, vaddr_t *, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t); int uvm_map_pageable(struct vm_map *, vaddr_t, vaddr_t, boolean_t, int); int uvm_map_pageable_all(struct vm_map *, int, vsize_t); boolean_t uvm_map_checkprot(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t); int uvm_map_protect(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, boolean_t); struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t); void uvmspace_init(struct vmspace *, struct pmap *, vaddr_t, vaddr_t); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t); struct vmspace *uvmspace_fork(struct vmspace *); void 
uvmspace_addref(struct vmspace *); void uvmspace_free(struct vmspace *); void uvmspace_share(struct proc *, struct proc *); void uvmspace_unshare(struct lwp *); /* uvm_meter.c */ void uvm_meter(void); int uvm_sysctl(int *, u_int, void *, size_t *, void *, size_t, struct proc *); void uvm_pctparam_set(struct uvm_pctparam *, int); /* uvm_mmap.c */ int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, void *, voff_t, vsize_t); vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); /* uvm_mremap.c */ int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, vsize_t, struct proc *, int); #define UVM_MREMAP_FIXED 1 /* uvm_page.c */ struct vm_page *uvm_pagealloc_strat(struct uvm_object *, voff_t, struct vm_anon *, int, int, int); #define uvm_pagealloc(obj, off, anon, flags) \ uvm_pagealloc_strat((obj), (off), (anon), (flags), \ UVM_PGA_STRAT_NORMAL, 0) void uvm_pagereplace(struct vm_page *, struct vm_page *); void uvm_pagerealloc(struct vm_page *, struct uvm_object *, voff_t); /* Actually, uvm_page_physload takes PF#s which need their own type */ void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int); void uvm_setpagesize(void); /* uvm_pager.c */ void uvm_aio_biodone1(struct buf *); void uvm_aio_biodone(struct buf *); void uvm_aio_aiodone(struct buf *); /* uvm_pdaemon.c */ void uvm_pageout(void *); void uvm_aiodone_daemon(void *); /* uvm_pglist.c */ int uvm_pglistalloc(psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int); void uvm_pglistfree(struct pglist *); /* uvm_swap.c */ void uvm_swap_init(void); /* uvm_unix.c */ int uvm_grow(struct proc *, vaddr_t); /* uvm_user.c */ void uvm_deallocate(struct vm_map *, vaddr_t, vsize_t); /* uvm_vnode.c */ void uvm_vnp_setsize(struct vnode *, voff_t); void uvm_vnp_sync(struct mount *); struct uvm_object *uvn_attach(void *, vm_prot_t); int uvn_findpages(struct uvm_object *, voff_t, int *, struct vm_page **, int); void uvm_vnp_zerorange(struct vnode *, off_t, size_t); /* kern_malloc.c */ void kmeminit_nkmempages(void); void kmeminit(void); extern int nkmempages; #endif /* _KERNEL */ #endif /* _UVM_UVM_EXTERN_H_ */ @ 1.114.4.1 log @Merge from HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.115 2006/07/05 14:26:42 drochner Exp $ */ a169 1 #define UVM_KMF_EXEC 0x20 /* need executable mapping */ @ 1.113 log @integrate kauth. @ text @d1 1 a1 1 /* $NetBSD$ */ d156 1 a156 1 ((MAXPROT << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) @ 1.112 log @-clean up the interface to uvm_fault: the "fault type" didn't serve any purpose (done by a macro, so we don't save any cycles for now) -kill vm_fault_t; it is not needed for real faults, and for simulated faults (wiring) it can be replaced by UVM internal flags -remove from uvm_extern.h again @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111 2006/03/01 12:38:44 yamt Exp $ */ a234 1 struct ucred; @ 1.112.2.1 log @Merge 2006-05-24 NetBSD-current into the "peter-altq" branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d156 1 a156 1 (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) d235 1 @ 1.111 log @merge yamt-uio_vmspace branch. - use vmspace rather than proc or lwp where appropriate. the latter is more natural to specify an address space. (and less likely to be abused for random purposes.) - fix a swdmover race. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.110 2006/02/10 00:53:04 simonb Exp $ */ a86 1 typedef int vm_fault_t; a487 1 #include d576 3 a578 3 int uvm_fault(struct vm_map *, vaddr_t, vm_fault_t, vm_prot_t); /* handle a page fault */ @ 1.111.4.1 log @oops - *really* sync to head this time. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.112 2006/03/15 18:09:25 drochner Exp $ */ d87 1 d489 1 d578 3 a580 3 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ @ 1.111.4.2 log @- Move kauth_cred_t declaration to - Cleanup struct ucred; forward declarations that are unused. - Don't include in any header, but include it in the c files that need it. Approved by core. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111.4.1 2006/04/19 03:58:21 elad Exp $ */ d235 1 @ 1.111.2.1 log @separate page replacement policy from the rest of kernel. @ text @d1 1 a1 1 /* $NetBSD$ */ d259 1 a259 1 int pct_pct; /* percent [0, 100] */ /* should be the first member */ a260 1 int (*pct_check)(struct uvm_pctparam *, int); d277 2 d300 1 d302 14 d408 1 a408 1 int64_t inactarg; /* unused */ a652 1 int uvm_pctparam_check(struct uvm_pctparam *, int); a653 5 int uvm_pctparam_get(struct uvm_pctparam *); void uvm_pctparam_init(struct uvm_pctparam *, int, int (*)(struct uvm_pctparam *, int)); int uvm_pctparam_createsysctlnode(struct uvm_pctparam *, const char *, const char *); a689 1 void uvm_estimatepageable(int *, int *); @ 1.111.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111.2.1 2006/03/05 12:51:09 yamt Exp $ */ d87 1 d473 1 d562 3 a564 3 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ @ 1.111.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111.2.2 2006/04/01 12:07:57 yamt Exp $ */ d156 1 a156 1 (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) d235 1 @ 1.111.2.4 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111.2.3 2006/05/24 10:59:30 yamt Exp $ */ a169 1 #define UVM_KMF_EXEC 0x20 /* need executable mapping */ d465 1 a466 1 #include @ 1.111.2.5 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.111.2.4 2006/08/11 15:47:46 yamt Exp $ */ a89 1 typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */ @ 1.110 log @Make a note that some counters should be 64-bit as they wrap far to quickly. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.109 2006/01/21 13:34:15 yamt Exp $ */ d514 1 d643 1 @ 1.109 log @implement compat_linux mremap. @ text @d1 1 a1 1 /* $NetBSD$ */ d325 1 a325 1 /* stat counters */ d350 1 a350 1 /* fault subcounters */ d370 1 a370 1 /* daemon counters */ @ 1.109.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.117 2006/09/01 20:39:05 cherry Exp $ */ d87 1 a90 1 typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */ d157 1 a157 1 (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) a170 1 #define UVM_KMF_EXEC 0x20 /* need executable mapping */ d236 1 d325 1 a325 1 /* stat counters. XXX: should be 64-bit counters */ d350 1 a350 1 /* fault subcounters. XXX: should be 64-bit counters */ d370 1 a370 1 /* daemon counters. 
XXX: should be 64-bit counters */ d483 1 a484 1 #include d489 1 a513 1 #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) d577 3 a579 3 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ a641 1 void uvmspace_addref(struct vmspace *); @ 1.109.4.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.112 2006/03/15 18:09:25 drochner Exp $ */ d87 1 d325 1 a325 1 /* stat counters. XXX: should be 64-bit counters */ d350 1 a350 1 /* fault subcounters. XXX: should be 64-bit counters */ d370 1 a370 1 /* daemon counters. XXX: should be 64-bit counters */ d489 1 a513 1 #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) d577 3 a579 3 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ a641 1 void uvmspace_addref(struct vmspace *); @ 1.109.4.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.109.4.1 2006/04/22 11:40:28 simonb Exp $ */ d156 1 a156 1 (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) d235 1 @ 1.108 log @make length of inactive queue tunable by sysctl. (vm.inactivepct) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.107 2005/11/29 22:52:03 yamt Exp $ */ d659 6 @ 1.108.2.1 log @- add a function to add a reference to a vmspace. - add a macro to check if a vmspace belongs to kernel. @ text @d1 1 a1 1 /* $NetBSD$ */ a513 1 #define VMSPACE_IS_KERNEL(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) a641 1 void uvmspace_addref(struct vmspace *); @ 1.108.2.2 log @rename VMSPACE_IS_KERNEL to VMSPACE_IS_KERNEL_P. ("predicate") suggested by Matt Thomas. @ text @d514 1 a514 1 #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) @ 1.108.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.108.2.2 2006/01/15 10:44:52 yamt Exp $ */ a660 6 /* uvm_mremap.c */ int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, vsize_t, struct proc *, int); #define UVM_MREMAP_FIXED 1 @ 1.108.2.4 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.108.2.3 2006/02/01 14:52:48 yamt Exp $ */ d325 1 a325 1 /* stat counters. XXX: should be 64-bit counters */ d350 1 a350 1 /* fault subcounters. XXX: should be 64-bit counters */ d370 1 a370 1 /* daemon counters. XXX: should be 64-bit counters */ @ 1.107 log @merge yamt-readahead branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d251 13 d314 2 a323 3 int unused1; /* used to be nanon */ int unused2; /* used to be nanonneeded */ int unused3; /* used to be nfreeanon */ d651 1 @ 1.106 log @remove one of the duplicated forward decls. of vmspace. pointed out by Dheeraj S. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.105 2005/09/01 02:16:46 yamt Exp $ */ d131 1 a131 1 /* advice: matches MADV_* from sys/mman.h */ d559 2 a560 1 void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int); @ 1.106.6.1 log @comment. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.106 2005/09/01 02:21:12 yamt Exp $ */ d131 1 a131 1 /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */ @ 1.106.6.2 log @- as read-ahead context is per-vnode now, there are fewer reasons to make VOP_READ call uvm_ra_request explicitly. move it to pager (uvn_get) so that it can handle accesses via mmap as well. - pass advice to pager via ubc. - tweak DPRINTF. XXX can be disturbed by PGO_LOCKED. XXX it's controversial where it should be done. (uvm_fault, uvn_get or genfs_getpages.)
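The delta that follows records the interface change the 1.106.6.2 log describes: ubc_alloc() gains an advice argument. A hedged sketch of a read path passing sequential advice; uobj, off, len and uio are illustrative, and UVM_ADV_SEQUENTIAL is assumed to be one of the MADV_*-matching advice values the 1.106.6.1 log mentions:

    /* Hypothetical caller; error handling trimmed for brevity. */
    vsize_t bytes = len;
    void *win = ubc_alloc(uobj, off, &bytes, UVM_ADV_SEQUENTIAL, UBC_READ);
    int error = uiomove(win, bytes, uio);
    ubc_release(win, 0);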
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.106.6.1 2005/11/17 03:56:00 yamt Exp $ */ d559 1 a559 2 void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); @ 1.105 log @put back uvm_fault.h for now as it's needed for some ports. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.104 2005/08/27 16:11:32 yamt Exp $ */ a247 1 struct vmspace; @ 1.104 log @don't include uvm_fault.h unnecessarily. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103 2005/06/10 05:10:13 matt Exp $ */ d478 1 @ 1.103 log @Rework the coredump code to have no explicit knowledge of how coredump i/o is done. Instead, pass an opaque cookie which is then passed to a new routine, coredump_write, which does the actual i/o. This allows the method of doing i/o to change without affecting any future MD code. Also, make netbsd32_core.c [re]use core_netbsd.c (in a similar manner to how core_elf64.c uses core_elf32.c) and eliminate that code duplication. cpu_coredump{,32} is now called twice, first with a NULL iocookie to fill the core structure and a second time to actually write the md parts of the coredump. All i/o is no longer random access and is suitable for shipping over a stream. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.102 2005/06/02 17:01:44 matt Exp $ */ a477 1 #include @ 1.103.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103 2005/06/10 05:10:13 matt Exp $ */ d87 1 d131 1 a131 1 /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */ d157 1 a157 1 (((MAXPROT) << 8)|(PROT)|(INH)|((ADVICE) << 12)|(FLAGS)) d236 1 d248 1 a251 13 * uvm_pctparam: parameter to be shown as percentage to user. */ #define UVM_PCTPARAM_SHIFT 8 #define UVM_PCTPARAM_SCALE (1 << UVM_PCTPARAM_SHIFT) #define UVM_PCTPARAM_APPLY(pct, x) \ (((x) * (pct)->pct_scaled) >> UVM_PCTPARAM_SHIFT) struct uvm_pctparam { int pct_pct; /* percent [0, 100] */ int pct_scaled; }; /* a301 2 struct uvm_pctparam inactivepct; /* length of inactive queue (pct of the whole queue) */ d310 3 d314 1 a314 1 /* stat counters. XXX: should be 64-bit counters */ d339 1 a339 1 /* fault subcounters. XXX: should be 64-bit counters */ d359 1 a359 1 /* daemon counters. XXX: should be 64-bit counters */ d478 1 a502 1 #define VMSPACE_IS_KERNEL_P(vm) VM_MAP_IS_KERNEL(&(vm)->vm_map) d560 1 a560 2 void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); d565 3 a567 3 #define uvm_fault(m, a, p) uvm_fault_internal(m, a, p, 0) int uvm_fault_internal(struct vm_map *, vaddr_t, vm_prot_t, int); /* handle a page fault */ a629 1 void uvmspace_addref(struct vmspace *); a638 1 void uvm_pctparam_set(struct uvm_pctparam *, int); a645 6 /* uvm_mremap.c */ int uvm_mremap(struct vm_map *, vaddr_t, vsize_t, struct vm_map *, vaddr_t *, vsize_t, struct proc *, int); #define UVM_MREMAP_FIXED 1 @ 1.103.2.2 log @sync with head.
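For the coredump rework described in the 1.103 log above, a hedged sketch of the second MD pass; the coredump_write() signature used here is an assumption reconstructed from the log's description (opaque cookie, stream-style i/o), not quoted from the source:

    /*
     * Sketch: cpu_coredump() is called a second time with a non-NULL
     * iocookie and writes its MD state through the opaque cookie.
     * "md_core" is a hypothetical MD register/state structure.
     */
    if (iocookie != NULL)
            return coredump_write(iocookie, UIO_SYSSPACE,
                &md_core, sizeof(md_core));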
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.1 2006/06/21 15:12:39 yamt Exp $ */ a89 1 typedef voff_t pgoff_t; /* XXX: number of pages within a uvm object */ a169 1 #define UVM_KMF_EXEC 0x20 /* need executable mapping */ d257 1 a257 1 int pct_pct; /* percent [0, 100] */ /* should be the first member */ a258 1 int (*pct_check)(struct uvm_pctparam *, int); d275 2 d298 1 d300 14 d406 1 a406 1 int64_t inactarg; /* unused */ d481 1 a482 1 #include d598 2 a599 2 int uvm_vslock(struct vmspace *, void *, size_t, vm_prot_t); void uvm_vsunlock(struct vmspace *, void *, size_t); a649 1 int uvm_pctparam_check(struct uvm_pctparam *, int); a650 5 int uvm_pctparam_get(struct uvm_pctparam *); void uvm_pctparam_init(struct uvm_pctparam *, int, int (*)(struct uvm_pctparam *, int)); int uvm_pctparam_createsysctlnode(struct uvm_pctparam *, const char *, const char *); a663 6 /* uvm_object.c */ int uobj_wirepages(struct uvm_object *uobj, off_t start, off_t end); void uobj_unwirepages(struct uvm_object *uobj, off_t start, off_t end); d686 1 a686 3 struct work; void uvm_aiodone_worker(struct work *, void *); void uvm_estimatepageable(int *, int *); a708 3 boolean_t uvn_text_p(struct uvm_object *); boolean_t uvn_clean_p(struct uvm_object *); boolean_t uvn_needs_writefault_p(struct uvm_object *); @ 1.103.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.2 2006/12/30 20:51:05 yamt Exp $ */ d207 1 a207 1 #define UBC_WANT_UNMAP(vp) false d569 1 a569 1 void uvm_proc_fork(struct proc *, struct proc *, bool); d579 1 a579 1 bool uvm_kernacc(caddr_t, size_t, int); a580 1 void uvm_kick_scheduler(void); d582 2 a583 2 bool uvm_uarea_alloc(vaddr_t *); void uvm_uarea_drain(bool); d601 1 a601 1 vaddr_t *, vsize_t, int, bool, d603 1 a603 1 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, bool); d605 1 a605 1 vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, bool); d615 1 a615 1 vaddr_t, bool, int); d617 1 a617 1 bool uvm_map_checkprot(struct vm_map *, vaddr_t, d620 1 a620 1 vaddr_t, vm_prot_t, bool); d709 3 a711 3 bool uvn_text_p(struct uvm_object *); bool uvn_clean_p(struct uvm_object *); bool uvn_needs_writefault_p(struct uvm_object *); @ 1.103.2.4 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.3 2007/02/26 09:12:28 yamt Exp $ */ d192 3 a194 3 #define UBC_READ 0x001 #define UBC_WRITE 0x002 #define UBC_FAULTBUSY 0x004 d199 1 a199 6 #define UBC_UNMAP 0x010 /* * flags for ubc_uiomve() */ #define UBC_PARTIALOK 0x100 d484 1 a484 1 void * vm_shm; /* SYS5 shared memory private data XXX */ d492 4 a495 4 void * vm_taddr; /* user virtual address of text XXX */ void * vm_daddr; /* user virtual address of data XXX */ void *vm_maxsaddr; /* user VA at max stack growth */ void *vm_minsaddr; /* user VA at top of stack */ a558 2 int ubc_uiomove(struct uvm_object *, struct uio *, vsize_t, int, int); d567 1 a567 1 void uvm_chgkprot(void *, size_t, int); d579 1 a579 1 bool uvm_kernacc(void *, size_t, int); a583 1 void uvm_uarea_free(vaddr_t uaddr); a586 3 void uvm_lwp_hold(struct lwp *); void uvm_lwp_rele(struct lwp *); void uvm_cpu_attach(struct cpu_info *); d655 1 a704 1 void uvm_vnp_setwritesize(struct vnode *, voff_t); d706 1 @ 1.103.2.5 log @sync with head. 
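The uvm_pctparam machinery visible in these branch deltas stores a percentage pre-scaled into fixed point, so applying it costs one multiply and one shift; the macros are quoted verbatim in the 1.103.2.1 delta above (UVM_PCTPARAM_SHIFT of 8). A worked illustration with hypothetical numbers:

    /*
     * pct_scaled = pct * UVM_PCTPARAM_SCALE / 100, so for 30%:
     *   pct_scaled = 30 * 256 / 100 = 76
     * and applying it to 1000 pages:
     *   UVM_PCTPARAM_APPLY -> (1000 * 76) >> 8 = 296, about 29.6%.
     */
    int target = UVM_PCTPARAM_APPLY(&uvmexp.inactivepct, npages);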
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.4 2007/09/03 14:47:05 yamt Exp $ */ d591 1 a591 1 void uvm_uarea_free(vaddr_t, struct cpu_info *); @ 1.103.2.6 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.5 2007/11/15 11:45:38 yamt Exp $ */ d207 9 d297 3 a299 3 unsigned anonpages; /* number of pages used by anon mappings */ unsigned filepages; /* number of pages used by cached file data */ unsigned execpages; /* number of pages used by cached exec data */ a480 9 * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_iflag & VI_TEXT) != 0) #else #define UBC_WANT_UNMAP(vp) false #endif /* d716 1 @ 1.103.2.7 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.6 2007/12/07 17:35:26 yamt Exp $ */ a500 1 size_t vm_aslr_delta_mmap; /* mmap() random delta for ASLR */ d587 1 a587 1 __dead void uvm_scheduler(void); a642 1 void uvm_whatis(uintptr_t, void (*)(const char *, ...)); a696 2 void uvm_pageout_start(int); void uvm_pageout_done(int); @ 1.103.2.8 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.7 2008/01/21 09:48:20 yamt Exp $ */ d593 1 @ 1.103.2.9 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.103.2.8 2008/02/04 09:25:09 yamt Exp $ */ a692 2 void uvm_aio_aiodone_pages(struct vm_page **, int, bool, int); @ 1.102 log @When writing coredumps, don't write zero uninstantiated demand-zero pages. Also, with ELF core dumps, trim trailing zeroes from sections. These two changes can shrink coredumps by over 50% in size. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.101 2005/05/15 08:01:06 yamt Exp $ */ d577 2 a578 3 struct vnode *, struct ucred *, int (*)(struct proc *, struct vnode *, struct ucred *, @ 1.101 log @remove anon related statistics which are no longer used. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.100 2005/04/01 11:59:38 yamt Exp $ */ d514 2 a515 1 vaddr_t end; /* end of region */ a520 1 #define UVM_COREDUMP_NODUMP 0x02 /* don't actually dump this region */ @ 1.100 log @merge yamt-km branch. - don't use managed mappings/backing objects for wired memory allocations. save some resources like pv_entry. also fix (most of) PR/27030. - simplify kernel memory management API. - simplify pmap bootstrap of some ports. - some related cleanups. @ text @d1 1 a1 1 /* $NetBSD$ */ d310 3 a312 3 int nanon; /* number total of anon's in system */ int nanonneeded;/* number of anons currently needed */ int nfreeanon; /* number of free anon's */ d404 3 a406 3 int64_t nanon; int64_t nanonneeded; int64_t nfreeanon; @ 1.99 log @Fix some things regarding COMPAT_NETBSD32 and limits/VM addresses. * For sparc64 and amd64, define *SIZ32 VM constants. * Add a new function pointer to struct emul, pointing at a function that will return the default VM map address. The default function is uvm_map_defaultaddr, which just uses the VM_DEFAULT_ADDRESS macro. This gives emulations control over the default map address, and allows things to be mapped at the right address (in 32bit range) for COMPAT_NETBSD32. * Add code to adjust the data and stack limits when a COMPAT_NETBSD32 or COMPAT_SVR4_32 binary is executed. * Don't use USRSTACK in kern_resource.c, use p_vmspace->vm_minsaddr instead (emulations might have set it differently) * Since this changes struct emul, bump kernel version to 3.99.2 Tested on amd64, compile-tested on sparc64. 
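A hedged sketch of the emulation hook the 1.99 log above describes. The log names the default function uvm_map_defaultaddr while the deltas add a prototype for uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); the body below is an assumption built from the log's statement that the default "just uses the VM_DEFAULT_ADDRESS macro":

    /*
     * Assumed default implementation; an emulation such as
     * COMPAT_NETBSD32 would supply its own hook to keep mappings
     * within the 32-bit range.
     */
    vaddr_t
    uvm_default_mapaddr(struct proc *p, vaddr_t base, vsize_t sz)
    {
            return VM_DEFAULT_ADDRESS(base, sz);
    }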
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.98 2005/01/13 11:50:32 yamt Exp $ */ d148 1 d163 1 a163 1 * the following defines are for uvm_km_kmemalloc's flags d165 6 a170 2 #define UVM_KMF_VALLOC 0x1 /* allocate VA only */ #define UVM_KMF_CANFAIL 0x2 /* caller handles failure */ d173 1 a534 4 /* zalloc zeros memory, alloc does not */ #define uvm_km_zalloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,TRUE) #define uvm_km_alloc(MAP,SIZE) uvm_km_alloc1(MAP,SIZE,FALSE) d600 5 a604 7 vaddr_t uvm_km_alloc1(struct vm_map *, vsize_t, boolean_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t); #define uvm_km_free_wakeup(map, start, size) uvm_km_free((map), (start), (size)) vaddr_t uvm_km_kmemalloc1(struct vm_map *, struct uvm_object *, vsize_t, vsize_t, voff_t, int); vaddr_t uvm_km_kmemalloc(struct vm_map *, struct uvm_object *, vsize_t, int); d608 3 a610 13 vaddr_t uvm_km_valloc1(struct vm_map *, vsize_t, vsize_t, voff_t, uvm_flag_t); vaddr_t uvm_km_valloc(struct vm_map *, vsize_t); vaddr_t uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t); vaddr_t uvm_km_valloc_wait(struct vm_map *, vsize_t); vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *, vsize_t, voff_t); vaddr_t uvm_km_alloc_poolpage1(struct vm_map *, struct uvm_object *, boolean_t); void uvm_km_free_poolpage1(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, struct uvm_object *, boolean_t); a614 35 extern __inline__ vaddr_t uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t sz, int flags) { return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags); } extern __inline__ vaddr_t uvm_km_valloc(struct vm_map *map, vsize_t sz) { return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT); } extern __inline__ vaddr_t uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align) { return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT); } extern __inline__ vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer) { return uvm_km_valloc1(map, sz, 0, prefer, 0); } extern __inline__ vaddr_t uvm_km_valloc_wait(struct vm_map *map, vsize_t sz) { return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0); } #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok)) #define uvm_km_free_poolpage(addr) \ uvm_km_free_poolpage1(kmem_map, (addr)) @ 1.98 log @in uvm_unmap_remove, always wakeup va waiters if any. uvm_km_free_wakeup is now a synonym of uvm_km_free. 
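The synonym mentioned in the 1.98 log shows up verbatim in later branch deltas (e.g. the 1.82.2.7 text further down) as a plain macro; for reference:

    #define uvm_km_free_wakeup(map, start, size) \
            uvm_km_free((map), (start), (size))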
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.97 2005/01/09 16:42:44 chs Exp $ */ d690 1 @ 1.98.8.1 log @Pull up following revision(s) (requested by fvdl in ticket #798): sys/compat/sunos/sunos_exec.c: revision 1.47 sys/compat/pecoff/pecoff_emul.c: revision 1.11 sys/arch/sparc64/sparc64/netbsd32_machdep.c: revision 1.45 sys/arch/amd64/amd64/netbsd32_machdep.c: revision 1.12 sys/sys/proc.h: revision 1.198 sys/compat/mach/mach_exec.c: revision 1.56 sys/compat/freebsd/freebsd_exec.c: revision 1.27 sys/arch/sparc64/include/vmparam.h: revision 1.27 sys/kern/kern_resource.c: revision 1.91 sys/compat/netbsd32/netbsd32_netbsd.c: revision 1.88 sys/compat/osf1/osf1_exec.c: revision 1.39 sys/compat/svr4_32/svr4_32_resource.c: revision 1.5 sys/compat/ultrix/ultrix_misc.c: revision 1.99 sys/compat/svr4_32/svr4_32_exec.h: revision 1.9 sys/kern/exec_elf32.c: revision 1.103 sys/compat/aoutm68k/aoutm68k_exec.c: revision 1.19 sys/compat/sunos32/sunos32_exec.c: revision 1.20 sys/compat/hpux/hpux_exec.c: revision 1.46 sys/compat/darwin/darwin_exec.c: revision 1.40 sys/kern/sysv_shm.c: revision 1.83 sys/uvm/uvm_extern.h: revision 1.99 sys/uvm/uvm_mmap.c: revision 1.89 sys/kern/kern_exec.c: revision 1.195 sys/compat/netbsd32/netbsd32.h: revision 1.31 sys/arch/sparc64/sparc64/svr4_32_machdep.c: revision 1.20 sys/compat/svr4/svr4_exec.c: revision 1.56 sys/compat/irix/irix_exec.c: revision 1.41 sys/compat/ibcs2/ibcs2_exec.c: revision 1.63 sys/compat/svr4_32/svr4_32_exec.c: revision 1.16 sys/arch/amd64/include/vmparam.h: revision 1.8 sys/compat/linux/common/linux_exec.c: revision 1.73 Fix some things regarding COMPAT_NETBSD32 and limits/VM addresses. * For sparc64 and amd64, define *SIZ32 VM constants. * Add a new function pointer to struct emul, pointing at a function that will return the default VM map address. The default function is uvm_map_defaultaddr, which just uses the VM_DEFAULT_ADDRESS macro. This gives emulations control over the default map address, and allows things to be mapped at the right address (in 32bit range) for COMPAT_NETBSD32. * Add code to adjust the data and stack limits when a COMPAT_NETBSD32 or COMPAT_SVR4_32 binary is executed. * Don't use USRSTACK in kern_resource.c, use p_vmspace->vm_minsaddr instead (emulations might have set it differently) * Since this changes struct emul, bump kernel version to 3.99.2 Tested on amd64, compile-tested on sparc64. 
@ text @d1 1 a1 1 /* $NetBSD$ */ a689 1 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); @ 1.98.2.1 log @sync with -current @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.100 2005/04/01 11:59:38 yamt Exp $ */ a147 1 #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ d162 1 a162 1 * the following defines are for uvm_km_alloc/free's flags d164 2 a165 6 #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ a167 1 #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */ d529 4 d598 7 a604 5 vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); d608 13 a620 3 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, boolean_t); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, boolean_t); d625 35 a689 1 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); @ 1.98.4.1 log @remove some compatibility functions. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.98 2005/01/13 11:50:32 yamt Exp $ */ d603 2 d610 6 d625 2 a626 3 static __inline vaddr_t uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t sz, int flags) a627 1 d631 1 a631 1 static __inline vaddr_t a633 1 d637 1 a637 1 static __inline vaddr_t d640 1 a640 3 return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT); d643 1 a643 1 static __inline vaddr_t a645 1 d649 1 a649 1 static __inline vaddr_t a651 1 @ 1.98.4.2 log @- don't use uvm_object or managed mappings for wired allocations. (eg. malloc(9)) - simplify uvm_km_* apis. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.98.4.1 2005/01/25 12:55:32 yamt Exp $ */ a147 1 #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ d162 1 a162 1 * the following defines are for uvm_km_alloc/free's flags d164 2 a165 6 #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ a167 1 #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */ d529 4 d598 5 a602 5 vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); d606 7 a612 3 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, boolean_t); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, boolean_t); d617 42 @ 1.98.4.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD$ */ a644 1 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); @ 1.97 log @adjust the UBC mapping code to support non-vnode uvm_objects. this means we can no longer look at the vnode size to determine how many pages to request in a fault, which is good since for NFS the size can change out from under us on the server anyway. there's also a new flag UBC_UNMAP for ubc_release(), so that the file system code can make the decision about whether to cache mappings for files being used as executables. 
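A hedged sketch of how a file system write path might use the UBC_UNMAP flag from the 1.97 log above. The four-argument ubc_alloc() and the UBC_WANT_UNMAP() helper appear verbatim in deltas elsewhere in this file, and the 1.67 log notes that the vnode embeds its uvm_object (v_uobj); the locals are hypothetical:

    /*
     * Release the window, unmapping it only where a VIVT cache makes
     * cached mappings of executables unsafe (see UBC_WANT_UNMAP()).
     */
    vsize_t bytes = len;
    void *win = ubc_alloc(&vp->v_uobj, off, &bytes, UBC_WRITE);
    int error = uiomove(win, bytes, uio);
    ubc_release(win, UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0);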
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.96 2005/01/01 21:08:02 yamt Exp $ */ d138 1 a138 1 /* mapping flags */ d147 1 d600 1 a600 1 void uvm_km_free_wakeup(struct vm_map *, vaddr_t, vsize_t); @ 1.96 log @in the case of !PMAP_MAP_POOLPAGE, gather pool backend allocations to large chunks for kernel_map and kmem_map to ease kva fragmentation. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.95 2005/01/01 21:02:13 yamt Exp $ */ d182 1 a182 1 * the following defines are for ubc_alloc's flags d189 14 d223 1 d225 1 d241 2 a242 2 extern struct pool *uvm_aiobuf_pool; d456 1 d499 2 a531 2 #endif /* _KERNEL */ a533 11 struct buf; struct loadavg; struct proc; struct pmap; struct vmspace; struct vmtotal; struct mount; struct vnode; struct core; #ifdef _KERNEL @ 1.95 log @introduce vm_map_kernel, a subclass of vm_map, and move some kernel-only members of vm_map to it. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.94 2005/01/01 21:00:06 yamt Exp $ */ d612 5 @ 1.94 log @for in-kernel maps, - allocate kva for vm_map_entry from the map itself and remove the static limit, MAX_KMAPENT. - keep merged entries for later splitting to fix allocate-to-free problem. PR/24039. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.93 2004/08/28 22:12:40 thorpej Exp $ */ d600 1 a600 1 struct vm_map *); @ 1.93 log @Garbage-collect pagemove(); nothing uses it anymore (YAY!!!) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.92 2004/05/04 21:33:40 pk Exp $ */ d146 1 @ 1.92 log @Since a `vmspace' always includes a `vm_map' we can re-use vm_map's reference count lock to also protect the vmspace's reference count. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.91 2004/03/24 07:55:01 junyoung Exp $ */ a533 1 void pagemove(caddr_t, caddr_t, size_t); @ 1.91 log @Nuke __P(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.90 2004/03/14 16:47:23 jdolecek Exp $ */ d463 2 a464 1 int vm_refcnt; /* number of references */ @ 1.90 log @fix typo in comment @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.89 2004/02/13 13:47:16 yamt Exp $ */ d531 3 a533 3 void vmapbuf __P((struct buf *, vsize_t)); void vunmapbuf __P((struct buf *, vsize_t)); void pagemove __P((caddr_t, caddr_t, size_t)); d535 1 a535 1 void cpu_swapin __P((struct lwp *)); d538 1 a538 1 void cpu_swapout __P((struct lwp *)); d542 5 a546 5 struct uvm_object *uao_create __P((vsize_t, int)); void uao_detach __P((struct uvm_object *)); void uao_detach_locked __P((struct uvm_object *)); void uao_reference __P((struct uvm_object *)); void uao_reference_locked __P((struct uvm_object *)); d549 4 a552 5 void ubc_init __P((void)); void * ubc_alloc __P((struct uvm_object *, voff_t, vsize_t *, int)); void ubc_release __P((void *, int)); void ubc_flush __P((struct uvm_object *, voff_t, voff_t)); d555 2 a556 2 int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t, vm_prot_t)); d561 1 a561 1 void uvm_chgkprot __P((caddr_t, size_t, int)); d563 4 a566 4 void uvm_proc_fork __P((struct proc *, struct proc *, boolean_t)); void uvm_lwp_fork __P((struct lwp *, struct lwp *, void *, size_t, void (*)(void *), void *)); int uvm_coredump_walkmap __P((struct proc *, d570 7 a576 7 struct uvm_coredump_state *), void *)); void uvm_proc_exit __P((struct proc *)); void uvm_lwp_exit __P((struct lwp *)); void uvm_init_limits __P((struct proc *)); boolean_t uvm_kernacc __P((caddr_t, size_t, int)); __dead void uvm_scheduler __P((void)) __attribute__((noreturn)); void uvm_swapin __P((struct lwp *)); d579 2 a580 3 int uvm_vslock __P((struct proc *, caddr_t, size_t, vm_prot_t)); void uvm_vsunlock __P((struct proc *,
caddr_t, size_t)); d584 1 a584 1 void uvm_init __P((void)); d587 1 a587 1 int uvm_io __P((struct vm_map *, struct uio *)); d590 8 a597 10 vaddr_t uvm_km_alloc1 __P((struct vm_map *, vsize_t, boolean_t)); void uvm_km_free __P((struct vm_map *, vaddr_t, vsize_t)); void uvm_km_free_wakeup __P((struct vm_map *, vaddr_t, vsize_t)); vaddr_t uvm_km_kmemalloc1 __P((struct vm_map *, struct uvm_object *, vsize_t, vsize_t, voff_t, int)); vaddr_t uvm_km_kmemalloc __P((struct vm_map *, struct uvm_object *, vsize_t, int)); struct vm_map *uvm_km_suballoc __P((struct vm_map *, vaddr_t *, d599 12 a610 12 struct vm_map *)); vaddr_t uvm_km_valloc1 __P((struct vm_map *, vsize_t, vsize_t, voff_t, uvm_flag_t)); vaddr_t uvm_km_valloc __P((struct vm_map *, vsize_t)); vaddr_t uvm_km_valloc_align __P((struct vm_map *, vsize_t, vsize_t)); vaddr_t uvm_km_valloc_wait __P((struct vm_map *, vsize_t)); vaddr_t uvm_km_valloc_prefer_wait __P((struct vm_map *, vsize_t, voff_t)); vaddr_t uvm_km_alloc_poolpage1 __P((struct vm_map *, struct uvm_object *, boolean_t)); void uvm_km_free_poolpage1 __P((struct vm_map *, vaddr_t)); d648 18 a665 19 int uvm_map __P((struct vm_map *, vaddr_t *, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t)); int uvm_map_pageable __P((struct vm_map *, vaddr_t, vaddr_t, boolean_t, int)); int uvm_map_pageable_all __P((struct vm_map *, int, vsize_t)); boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t, vaddr_t, vm_prot_t)); int uvm_map_protect __P((struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, boolean_t)); struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t)); void uvmspace_init __P((struct vmspace *, struct pmap *, vaddr_t, vaddr_t)); void uvmspace_exec __P((struct lwp *, vaddr_t, vaddr_t)); struct vmspace *uvmspace_fork __P((struct vmspace *)); void uvmspace_free __P((struct vmspace *)); void uvmspace_share __P((struct proc *, struct proc *)); void uvmspace_unshare __P((struct lwp *)); d669 3 a671 3 void uvm_meter __P((void)); int uvm_sysctl __P((int *, u_int, void *, size_t *, void *, size_t, struct proc *)); d674 3 a676 3 int uvm_mmap __P((struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, void *, voff_t, vsize_t)); d679 2 a680 2 struct vm_page *uvm_pagealloc_strat __P((struct uvm_object *, voff_t, struct vm_anon *, int, int, int)); d684 4 a687 4 void uvm_pagereplace __P((struct vm_page *, struct vm_page *)); void uvm_pagerealloc __P((struct vm_page *, struct uvm_object *, voff_t)); d689 3 a691 3 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); void uvm_setpagesize __P((void)); d694 3 a696 3 void uvm_aio_biodone1 __P((struct buf *)); void uvm_aio_biodone __P((struct buf *)); void uvm_aio_aiodone __P((struct buf *)); d699 2 a700 2 void uvm_pageout __P((void *)); void uvm_aiodone_daemon __P((void *)); d703 3 a705 3 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int)); void uvm_pglistfree __P((struct pglist *)); d708 1 a708 1 void uvm_swap_init __P((void)); d711 1 a711 1 int uvm_grow __P((struct proc *, vaddr_t)); d714 1 a714 1 void uvm_deallocate __P((struct vm_map *, vaddr_t, vsize_t)); d717 6 a722 6 void uvm_vnp_setsize __P((struct vnode *, voff_t)); void uvm_vnp_sync __P((struct mount *)); struct uvm_object *uvn_attach __P((void *, vm_prot_t)); int uvn_findpages __P((struct uvm_object *, voff_t, int *, struct vm_page **, int)); void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t)); d725 2 a726 2 void kmeminit_nkmempages __P((void)); void kmeminit __P((void)); @ 1.89 log @when breaking a loan from 
uobj, insert the replacement page into the same position as the original page on the object memq so that genfs_putpages (and lfs) won't be confused. noted by Stephan Uphoff (PR/24328) @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.88 2004/01/04 11:33:32 jdolecek Exp $ */ d259 1 a259 1 int execpages; /* number of pages used by cached exec date */ @ 1.88 log @Rearrange process exit path to avoid the need to free resources from different process context ('reaper'). From within the exiting process context: * deactivate pmap and free vmspace while we can still block * introduce MD cpu_lwp_free() - this cleans all MD-specific context (such as FPU state), and is the last potentially blocking operation; all of cpu_wait(), and most of cpu_exit(), is now folded into cpu_lwp_free() * process is now immediately marked as zombie and made available for pickup by parent; the remaining last lwp continues the exit as fully detached * MI (rather than MD) code bumps uvmexp.swtch, cpu_exit() is now the same for both 'process' and 'lwp' exit uvm_lwp_exit() is modified to never block; the u-area memory is now always just linked to the list of available u-areas. Introduce (blocking) uvm_uarea_drain(), which is called to release the excessive u-area memory; this is called by parent within wait4(), or by pagedaemon on memory shortage. uvm_uarea_free() is now a private function within uvm_glue.c. MD process/lwp exit code now always calls lwp_exit2() immediately after switching away from the exiting lwp. g/c now unneeded routines and variables, including the reaper kernel thread @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.87 2003/12/18 15:02:04 pk Exp $ */ d689 2 @ 1.87 log @* Introduce uvm_km_kmemalloc1() which allows alignment and preferred offset to be passed to uvm_map(). * Turn all uvm_km_valloc*() macros back into (inlined) functions to retain binary compatibility with any 3rd party modules. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.86 2003/12/18 08:15:42 pk Exp $ */ d579 1 a579 1 void uvm_uarea_free(vaddr_t); @ 1.86 log @Condense all existing variants of uvm_km_valloc into a single function: uvm_km_valloc1(), and use it to express all of uvm_km_valloc() uvm_km_valloc_wait() uvm_km_valloc_prefer() uvm_km_valloc_prefer_wait() uvm_km_valloc_align() in terms of it by macro expansion. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.85 2003/11/13 03:09:30 chs Exp $ */ d597 2 d605 1 a605 1 vsize_t, voff_t, uvm_flag_t)); d616 29 a644 8 #define uvm_km_valloc(map, size) \ uvm_km_valloc1(map, size, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT) #define uvm_km_valloc_align(map, size, align) \ uvm_km_valloc1(map, size, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT) #define uvm_km_valloc_prefer_wait(map, size, prefer) \ uvm_km_valloc1(map, size, 0, prefer, 0) #define uvm_km_valloc_wait(map, size) \ uvm_km_valloc1(map, size, 0, UVM_UNKNOWN_OFFSET, 0) @ 1.85 log @eliminate uvm_useracc() in favor of checking the return value of copyin() or copyout(). uvm_useracc() tells us whether the mapping permissions allow access to the desired part of an address space, and many callers assume that this is the same as knowing whether an attempt to access that part of the address space will succeed. however, access to user space can fail for reasons other than insufficient permission, most notably that paging in any non-resident data can fail due to i/o errors. most of the callers of uvm_useracc() make the above incorrect assumption. the rest are all misguided optimizations, which optimize for the case where an operation will fail.
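The inlined-wrapper shape the 1.87 log describes (with the 1.86 log's single uvm_km_valloc1() funnel underneath) can be seen verbatim in the 1.98.4.1 delta earlier in this file; as a standalone illustration:

    /*
     * Kept as a real (inline) function rather than a macro so that
     * third-party modules built against the old symbol keep working.
     */
    static __inline vaddr_t
    uvm_km_valloc(struct vm_map *map, vsize_t sz)
    {
            return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET,
                UVM_KMF_NOWAIT);
    }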
we'd rather optimize for operations succeeding, in which case we should just attempt the access and handle failures due to insufficient permissions the same way we handle i/o errors. since there appear to be no good uses of uvm_useracc(), we'll just remove it. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.84 2003/08/11 16:33:30 pk Exp $ */ d602 2 d614 9 @ 1.84 log @Introduce uvm_swapisfull(), which computes the available swap space by taking into account swap devices that are in the process of being removed. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.83 2003/08/07 16:34:48 agc Exp $ */ a579 1 boolean_t uvm_useracc __P((caddr_t, size_t, int)); @ 1.83 log @Move UCB-licensed code from 4-clause to 3-clause licence. Patches provided by Joel Baker in PR 22364, verified by myself. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82 2003/06/29 22:32:49 fvdl Exp $ */ d282 1 @ 1.82 log @Back out the lwp/ktrace changes. They contained a lot of collateral damage, and need to be examined and discussed more. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.80 2003/05/10 21:10:23 thorpej Exp $ */ d49 1 a49 5 * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors @ 1.82.2.1 log @Apply the aborted ktrace-lwp changes to a specific branch. This is just for others to review; I'm concerned that patch fuzziness may have resulted in some errant code being generated, but I'll look at that later by comparing the diff from the base to the branch with the file I attempt to apply to it. This will, at the very least, put the changes in a better context for others to review them and attempt to tinker with removing passing of 'struct lwp' through the kernel. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82 2003/06/29 22:32:49 fvdl Exp $ */ d570 1 a570 1 int uvm_coredump_walkmap __P((struct lwp *, d572 1 a572 1 int (*)(struct lwp *, struct vnode *, d646 1 a646 1 void *, size_t, struct lwp *)); @ 1.82.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.1 2003/07/02 15:27:29 darrenr Exp $ */ d49 5 a53 1 * 3.
Neither the name of the University nor the names of its contributors d263 1 a263 1 int execpages; /* number of pages used by cached exec data */ a285 1 int swpgavail; /* number of swap pages currently available */ d466 1 a466 2 int vm_refcnt; /* number of references * * note: protected by vm_map.ref_lock */ d534 3 a536 3 void vmapbuf(struct buf *, vsize_t); void vunmapbuf(struct buf *, vsize_t); void pagemove(caddr_t, caddr_t, size_t); d538 1 a538 1 void cpu_swapin(struct lwp *); d541 1 a541 1 void cpu_swapout(struct lwp *); d545 5 a549 5 struct uvm_object *uao_create(vsize_t, int); void uao_detach(struct uvm_object *); void uao_detach_locked(struct uvm_object *); void uao_reference(struct uvm_object *); void uao_reference_locked(struct uvm_object *); d552 5 a556 4 void ubc_init(void); void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int); void ubc_release(void *, int); void ubc_flush(struct uvm_object *, voff_t, voff_t); d559 2 a560 2 int uvm_fault(struct vm_map *, vaddr_t, vm_fault_t, vm_prot_t); d565 1 a565 1 void uvm_chgkprot(caddr_t, size_t, int); d567 4 a570 4 void uvm_proc_fork(struct proc *, struct proc *, boolean_t); void uvm_lwp_fork(struct lwp *, struct lwp *, void *, size_t, void (*)(void *), void *); int uvm_coredump_walkmap(struct lwp *, d574 7 a580 7 struct uvm_coredump_state *), void *); void uvm_proc_exit(struct proc *); void uvm_lwp_exit(struct lwp *); void uvm_init_limits(struct proc *); boolean_t uvm_kernacc(caddr_t, size_t, int); __dead void uvm_scheduler(void) __attribute__((noreturn)); void uvm_swapin(struct lwp *); d582 5 a586 3 void uvm_uarea_drain(boolean_t); int uvm_vslock(struct proc *, caddr_t, size_t, vm_prot_t); void uvm_vsunlock(struct proc *, caddr_t, size_t); d590 1 a590 1 void uvm_init(void); d593 1 a593 1 int uvm_io(struct vm_map *, struct uio *); d596 8 a603 8 vaddr_t uvm_km_alloc1(struct vm_map *, vsize_t, boolean_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t); void uvm_km_free_wakeup(struct vm_map *, vaddr_t, vsize_t); vaddr_t uvm_km_kmemalloc1(struct vm_map *, struct uvm_object *, vsize_t, vsize_t, voff_t, int); vaddr_t uvm_km_kmemalloc(struct vm_map *, struct uvm_object *, vsize_t, int); struct vm_map *uvm_km_suballoc(struct vm_map *, vaddr_t *, d605 10 a614 42 struct vm_map *); vaddr_t uvm_km_valloc1(struct vm_map *, vsize_t, vsize_t, voff_t, uvm_flag_t); vaddr_t uvm_km_valloc(struct vm_map *, vsize_t); vaddr_t uvm_km_valloc_align(struct vm_map *, vsize_t, vsize_t); vaddr_t uvm_km_valloc_wait(struct vm_map *, vsize_t); vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *, vsize_t, voff_t); vaddr_t uvm_km_alloc_poolpage1(struct vm_map *, struct uvm_object *, boolean_t); void uvm_km_free_poolpage1(struct vm_map *, vaddr_t); extern __inline__ vaddr_t uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj, vsize_t sz, int flags) { return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags); } extern __inline__ vaddr_t uvm_km_valloc(struct vm_map *map, vsize_t sz) { return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT); } extern __inline__ vaddr_t uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align) { return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT); } extern __inline__ vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer) { return uvm_km_valloc1(map, sz, 0, prefer, 0); } extern __inline__ vaddr_t uvm_km_valloc_wait(struct vm_map *map, vsize_t sz) { return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0); } d622 19 a640 18 int uvm_map(struct 
vm_map *, vaddr_t *, vsize_t, struct uvm_object *, voff_t, vsize_t, uvm_flag_t); int uvm_map_pageable(struct vm_map *, vaddr_t, vaddr_t, boolean_t, int); int uvm_map_pageable_all(struct vm_map *, int, vsize_t); boolean_t uvm_map_checkprot(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t); int uvm_map_protect(struct vm_map *, vaddr_t, vaddr_t, vm_prot_t, boolean_t); struct vmspace *uvmspace_alloc(vaddr_t, vaddr_t); void uvmspace_init(struct vmspace *, struct pmap *, vaddr_t, vaddr_t); void uvmspace_exec(struct lwp *, vaddr_t, vaddr_t); struct vmspace *uvmspace_fork(struct vmspace *); void uvmspace_free(struct vmspace *); void uvmspace_share(struct proc *, struct proc *); void uvmspace_unshare(struct lwp *); d644 3 a646 3 void uvm_meter(void); int uvm_sysctl(int *, u_int, void *, size_t *, void *, size_t, struct lwp *); d649 3 a651 3 int uvm_mmap(struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, void *, voff_t, vsize_t); d654 2 a655 2 struct vm_page *uvm_pagealloc_strat(struct uvm_object *, voff_t, struct vm_anon *, int, int, int); d659 2 a660 4 void uvm_pagereplace(struct vm_page *, struct vm_page *); void uvm_pagerealloc(struct vm_page *, struct uvm_object *, voff_t); d662 3 a664 3 void uvm_page_physload(paddr_t, paddr_t, paddr_t, paddr_t, int); void uvm_setpagesize(void); d667 3 a669 3 void uvm_aio_biodone1(struct buf *); void uvm_aio_biodone(struct buf *); void uvm_aio_aiodone(struct buf *); d672 2 a673 2 void uvm_pageout(void *); void uvm_aiodone_daemon(void *); d676 3 a678 3 int uvm_pglistalloc(psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int); void uvm_pglistfree(struct pglist *); d681 1 a681 1 void uvm_swap_init(void); d684 1 a684 1 int uvm_grow(struct proc *, vaddr_t); d687 1 a687 1 void uvm_deallocate(struct vm_map *, vaddr_t, vsize_t); d690 6 a695 6 void uvm_vnp_setsize(struct vnode *, voff_t); void uvm_vnp_sync(struct mount *); struct uvm_object *uvn_attach(void *, vm_prot_t); int uvn_findpages(struct uvm_object *, voff_t, int *, struct vm_page **, int); void uvm_vnp_zerorange(struct vnode *, off_t, size_t); d698 2 a699 2 void kmeminit_nkmempages(void); void kmeminit(void); @ 1.82.2.3 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.2 2004/08/03 10:57:04 skrll Exp $ */ d534 1 @ 1.82.2.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.93 2004/08/28 22:12:40 thorpej Exp $ */ d566 1 a566 1 int uvm_coredump_walkmap(struct proc *, d568 1 a568 1 int (*)(struct proc *, struct vnode *, d671 1 a671 1 void *, size_t, struct proc *); @ 1.82.2.5 log @Fix the sync with head I botched. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.3 2004/09/03 12:45:55 skrll Exp $ */ d566 1 a566 1 int uvm_coredump_walkmap(struct lwp *, d568 1 a568 1 int (*)(struct lwp *, struct vnode *, d671 1 a671 1 void *, size_t, struct lwp *); @ 1.82.2.6 log @Reduce diff to HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.5 2004/09/21 13:39:24 skrll Exp $ */ d671 1 a671 1 void *, size_t, struct proc *); @ 1.82.2.7 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.6 2004/10/31 07:12:40 skrll Exp $ */ d138 1 a138 1 /* bits 0xffff0000: mapping flags */ a145 2 #define UVM_FLAG_QUANTUM 0x800000 /* entry never be splitted later */ #define UVM_FLAG_WAITVA 0x1000000 /* wait for va */ d181 1 a181 1 * flags for ubc_alloc() a187 14 * flags for ubc_release() */ #define UBC_UNMAP 0x01 /* * helpers for calling ubc_release() */ #ifdef PMAP_CACHE_VIVT #define UBC_WANT_UNMAP(vp) (((vp)->v_flag & VTEXT) != 0) #else #define UBC_WANT_UNMAP(vp) FALSE #endif /* a207 1 struct buf; a208 1 struct loadavg; d224 2 a225 2 struct vmspace; struct vmtotal; a438 1 /* we need this before including uvm_page.h on some platforms */ a480 2 extern struct pool *uvm_aiobuf_pool; d512 2 d516 11 d592 1 a592 1 #define uvm_km_free_wakeup(map, start, size) uvm_km_free((map), (start), (size)) d599 1 a599 1 struct vm_map_kernel *); a610 5 vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, struct uvm_object *, boolean_t); void uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t); void uvm_km_vacache_init(struct vm_map *, const char *, size_t); @ 1.82.2.8 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.7 2005/01/17 19:33:11 skrll Exp $ */ a147 1 #define UVM_FLAG_VAONLY 0x2000000 /* unmap: no pages are mapped */ d162 1 a162 1 * the following defines are for uvm_km_alloc/free's flags d164 2 a165 6 #define UVM_KMF_WIRED 0x1 /* allocation type: wired */ #define UVM_KMF_PAGEABLE 0x2 /* allocation type: pageable */ #define UVM_KMF_VAONLY 0x4 /* allocation type: VA only */ #define UVM_KMF_TYPEMASK (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE | UVM_KMF_WIRED) #define UVM_KMF_CANFAIL 0x8 /* caller handles failure */ #define UVM_KMF_ZERO 0x10 /* want zero filled memory */ a167 1 #define UVM_KMF_WAITVA UVM_FLAG_WAITVA /* sleep for va */ d529 4 d598 7 a604 5 vaddr_t uvm_km_alloc(struct vm_map *, vsize_t, vsize_t, uvm_flag_t); void uvm_km_free(struct vm_map *, vaddr_t, vsize_t, uvm_flag_t); d608 13 a620 3 vaddr_t uvm_km_alloc_poolpage(struct vm_map *, boolean_t); void uvm_km_free_poolpage(struct vm_map *, vaddr_t); vaddr_t uvm_km_alloc_poolpage_cache(struct vm_map *, boolean_t); d625 35 a689 1 vaddr_t uvm_default_mapaddr(struct proc *, vaddr_t, vsize_t); @ 1.82.2.9 log @Sync with HEAD. Here we go again... @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.82.2.8 2005/04/01 14:32:12 skrll Exp $ */ d248 1 d310 3 a312 3 int unused1; /* used to be nanon */ int unused2; /* used to be nanonneeded */ int unused3; /* used to be nfreeanon */ d404 3 a406 3 int64_t unused1; /* used to be nanon */ int64_t unused2; /* used to be nanonneeded */ int64_t unused3; /* used to be nfreeanon */ d514 1 a514 2 vaddr_t realend; /* real end of region */ vaddr_t end; /* virtual end of region */ d520 1 d576 4 a579 3 int uvm_coredump_walkmap(struct proc *, void *, int (*)(struct proc *, void *, @ 1.82.2.10 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.107 2005/11/29 22:52:03 yamt Exp $ */ d131 1 a131 1 /* advice: matches MADV_* from sys/mman.h and POSIX_FADV_* from sys/fcntl.h */ d559 1 a559 2 void * ubc_alloc(struct uvm_object *, voff_t, vsize_t *, int, int); @ 1.81 log @Pass lwp pointers throughout the kernel, as required, so that the lwpid can be inserted into ktrace records. The general change has been to replace "struct proc *" with "struct lwp *" in various function prototypes, pass the lwp through and use l_proc to get the process pointer when needed.
Bump the kernel rev up to 1.6V @ text @d570 1 a570 1 int uvm_coredump_walkmap __P((struct lwp *, d572 1 a572 1 int (*)(struct lwp *, struct vnode *, d646 1 a646 1 void *, size_t, struct lwp *)); @ 1.80 log @Back out the following change: http://mail-index.netbsd.org/source-changes/2003/05/08/0068.html There were some side-effects that I didn't anticipate, and fixing them is proving to be more difficult than I thought, so just eject for now. Maybe one day we can look at this again. Fixes PR kern/21517. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.78 2003/05/03 19:01:06 wiz Exp $ */ d570 1 a570 1 int uvm_coredump_walkmap __P((struct proc *, d572 1 a572 1 int (*)(struct proc *, struct vnode *, d646 1 a646 1 void *, size_t, struct proc *)); @ 1.79 log @Simplify the way the bounds of the managed kernel virtual address space are advertised to UVM by making virtual_avail and virtual_end first-class exported variables by UVM. Machine-dependent code is responsible for initializing them before main() is called. Anything that steals KVA must adjust these variables accordingly. This reduces the number of instances of this info from 3 to 1, and simplifies the pmap(9) interface by removing the pmap_virtual_space() function call, and removing two arguments from pmap_steal_memory(). This also eliminates some kludges such as having to burn kernel_map entries on space used by the kernel and stolen KVA. This also eliminates use of VM_{MIN,MAX}_KERNEL_ADDRESS from MI code, thus giving MD code greater flexibility over the bounds of the managed kernel virtual address space if a given port's specific platforms can vary in this regard (this is especially true of the evb* ports). @ text @a506 10 * these variables define the boundaries of the managed kernel virtual * address space. they are initialized by machine-dependent code during * bootstrap. note that before kernel virtual memory is initialized, * some address space might be "stolen" during bootstrap. anything that * steals address space must update these variables accordingly. */ extern vaddr_t virtual_avail; extern vaddr_t virtual_end; /* @ 1.78 log @Misc fixes from jmc@@openbsd. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.77 2003/02/01 06:23:54 thorpej Exp $ */ d505 10 @ 1.77 log @Add extensible malloc types, adapted from FreeBSD. This turns malloc types into a structure, a pointer to which is passed around, instead of an int constant. Allow the limit to be adjusted when the malloc type is defined, or with a function call, as suggested by Jonathan Stone. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.76 2003/01/18 09:42:58 thorpej Exp $ */ d343 1 a343 1 int pdscans; /* number of pages daemon scaned since boot */ @ 1.76 log @Merge the nathanw_sa branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.75 2002/12/11 07:10:20 thorpej Exp $ */ d529 3 @ 1.75 log @Define a UVM_FLAG_NOWAIT, which indicates that we're not allowed to sleep. Define UVM_KMF_NOWAIT in terms of UVM_FLAG_NOWAIT. From Manuel Bouyer. Fixes a problem where any mapping with read protection was created in a "nowait" context, causing spurious failures.
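A hedged reading of the 1.75 change: the delta just below preserves the pre-change defines (UVM_KMF_NOWAIT 0x1), and defining the km flag in terms of the mapping flag would look like the line below; the numeric value of UVM_FLAG_NOWAIT is not visible in these deltas, so take the spelling, not a value, from this sketch:

    /* sketch: the kmem flag becomes an alias for the mapping flag,
     * so uvm_map() itself sees the no-sleep intent */
    #define UVM_KMF_NOWAIT  UVM_FLAG_NOWAIT /* matches M_NOWAIT */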
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.74 2002/11/17 08:32:45 chs Exp $ */ d535 1 a535 1 void cpu_swapin __P((struct proc *)); d538 1 a538 1 void cpu_swapout __P((struct proc *)); d564 3 d572 2 a573 3 void uvm_fork __P((struct proc *, struct proc *, boolean_t, void *, size_t, void (*)(void *), void *)); void uvm_exit __P((struct proc *)); d577 1 a577 1 void uvm_swapin __P((struct proc *)); d633 1 a633 1 void uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t)); d637 1 a637 1 void uvmspace_unshare __P((struct proc *)); @ 1.74 log @change uvm_uarea_alloc() to indicate whether the returned uarea is already backed by physical pages (ie. because it reused a previously-freed one), so that we can skip a bunch of useless work in that case. this fixes the underlying problem behind PR 18543, and also speeds up fork() quite a bit (eg. 7% on my pc, 1% on my ultra2) when we get a cache hit. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.73 2002/09/22 07:20:31 chs Exp $ */ d575 1 a575 1 vaddr_t uvm_uarea_alloc(void); @ 1.73 log @encapsulate knowledge of uarea allocation in some new functions. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.72 2002/09/15 16:54:29 chs Exp $ */ d575 2 @ 1.72 log @add a new km flag UVM_KMF_CANFAIL, which causes uvm_km_kmemalloc() to return failure if swap is full and there are no free physical pages. have malloc() use this flag if M_CANFAIL is passed to it. use M_CANFAIL to allow amap_extend() to fail when memory is scarce. this should prevent most of the remaining hangs in low-memory situations. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.71 2002/05/17 22:00:50 enami Exp $ */ d167 1 @ 1.71 log @Make uvn_findpages return the number of pages found so that the caller can easily check if all requested pages are found or not. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.70 2001/12/10 01:52:26 thorpej Exp $ */ d684 1 a684 1 void uvn_findpages __P((struct uvm_object *, voff_t, @ 1.71.2.1 log @Pull up revision 1.72 (requested by skrll): add a new km flag UVM_KMF_CANFAIL, which causes uvm_km_kmemalloc() to return failure if swap is full and there are no free physical pages. have malloc() use this flag if M_CANFAIL is passed to it. use M_CANFAIL to allow amap_extend() to fail when memory is scarce. this should prevent most of the remaining hangs in low-memory situations. @ text @d1 1 a1 1 /* $NetBSD$ */ a166 1 #define UVM_KMF_CANFAIL 0x4 /* caller handles failure */ @ 1.70 log @Move the code that walks the process's VM map during a coredump into uvm_coredump_walkmap(), and use callbacks into the coredump routine to do something with each section. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.69 2001/12/09 03:07:19 chs Exp $ */ d684 1 a684 1 void uvn_findpages __P((struct uvm_object *, voff_t, @ 1.70.8.1 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.71 2002/05/17 22:00:50 enami Exp $ */ d684 1 a684 1 int uvn_findpages __P((struct uvm_object *, voff_t, @ 1.69 log @add {anon,file,exec}max as an upper bound on the amount of memory that will be allocated for the respective usage types when there is contention for memory. replace "vnode" and "vtext" with "file" and "exec" in uvmexp field names and sysctl names. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.68 2001/12/08 00:35:33 thorpej Exp $ */ d482 14 d562 5 @ 1.68 log @Make the coredump routine exec-format/emulation specific.
Split out traditional NetBSD coredump routines into core_netbsd.c and netbsd32_core.c (for COMPAT_NETBSD32). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.67 2001/09/15 20:36:45 chs Exp $ */ d259 3 a261 3 int anonpages; /* number of pages used by anon pagers */ int vnodepages; /* number of pages used by vnode page cache */ int vtextpages; /* number of pages used by vtext vnodes */ d269 2 a270 2 int vtextmin; /* min threshold for vtext pages */ int vnodemin; /* min threshold for vnode pages */ d272 8 a279 2 int vtextminpct;/* min percent vtext pages */ int vnodeminpct;/* min percent vnode pages */ d349 3 a351 3 int pdreanon; /* anon pages reactivated due to min threshold */ int pdrevnode; /* vnode pages reactivated due to min threshold */ int pdrevtext; /* vtext pages reactivated due to min threshold */ d432 2 a433 2 int64_t vnodepages; int64_t vtextpages; @ 1.67 log @a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). - eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. 
this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.66 2001/08/16 01:37:50 chs Exp $ */ a649 2 int uvm_coredump __P((struct proc *, struct vnode *, struct ucred *, struct core *)); a650 4 /* should only be needed if COMPAT_NETBSD32 is defined */ struct core32; int uvm_coredump32 __P((struct proc *, struct vnode *, struct ucred *, struct core32 *)); @ 1.66 log @user maps are always pageable. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.65 2001/06/02 18:09:26 chs Exp $ */ d159 1 a159 1 /* magic offset value */ a160 1 /* offset not known(obj) or don't care(!obj) */ d185 3 a187 2 #define UBC_READ 0 #define UBC_WRITE 1 d192 7 a198 5 #define UFP_ALL 0x0 #define UFP_NOWAIT 0x1 #define UFP_NOALLOC 0x2 #define UFP_NOCACHE 0x4 #define UFP_NORDONLY 0x8 a247 3 int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ d253 3 a345 4 /* kernel memory objects: managed by uvm_km_kmemalloc() only! */ struct uvm_object *kmem_object; struct uvm_object *mb_object; a495 1 /* XXX clean up later */ d530 1 a530 1 void ubc_release __P((void *, vsize_t)); a556 1 /* init the uvm system */ d582 3 a584 3 #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, (waitok)) #define uvm_km_free_poolpage(addr) \ d628 2 a629 2 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); d642 2 a643 3 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int)); a667 1 void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t)); @ 1.66.2.1 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.67 2001/09/15 20:36:45 chs Exp $ */ d159 1 a159 1 /* magic offset value: offset not known(obj) or don't care(!obj) */ d161 1 d186 2 a187 3 #define UBC_READ 0x01 #define UBC_WRITE 0x02 #define UBC_FAULTBUSY 0x04 d192 5 a196 7 #define UFP_ALL 0x00 #define UFP_NOWAIT 0x01 #define UFP_NOALLOC 0x02 #define UFP_NOCACHE 0x04 #define UFP_NORDONLY 0x08 #define UFP_DIRTYONLY 0x10 #define UFP_BACKWARD 0x20 d246 3 a253 3 int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ d344 4 d498 1 d533 1 a533 1 void ubc_release __P((void *, int)); d560 1 d586 3 a588 3 #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok)) #define uvm_km_free_poolpage(addr) \ d632 2 a633 2 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); d646 3 a648 2 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int)); d673 1 @ 1.65 log @replace vm_map{,_entry}_t with struct vm_map{,_entry} *. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.64 2001/05/26 21:27:21 chs Exp $ */ d603 1 a603 2 struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t, boolean_t)); d605 1 a605 1 vaddr_t, vaddr_t, boolean_t)); @ 1.65.2.1 log @Merge Aug 24 -current into the kqueue branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.66 2001/08/16 01:37:50 chs Exp $ */ d603 2 a604 1 struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t)); d606 1 a606 1 vaddr_t, vaddr_t)); @ 1.65.2.2 log @Sync kqueue branch with -current. 
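The 1.65 change is mechanical; both spellings appear in deltas within this file, e.g. for uvm_fault():

    int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t, vm_prot_t));        /* before */
    int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t, vm_prot_t)); /* after */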
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.65.2.1 2001/08/25 06:17:20 thorpej Exp $ */ d159 1 a159 1 /* magic offset value: offset not known(obj) or don't care(!obj) */ d161 1 d186 2 a187 3 #define UBC_READ 0x01 #define UBC_WRITE 0x02 #define UBC_FAULTBUSY 0x04 d192 5 a196 7 #define UFP_ALL 0x00 #define UFP_NOWAIT 0x01 #define UFP_NOALLOC 0x02 #define UFP_NOCACHE 0x04 #define UFP_NORDONLY 0x08 #define UFP_DIRTYONLY 0x10 #define UFP_BACKWARD 0x20 d246 3 a253 3 int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ d257 3 a259 3 int anonpages; /* number of pages used by anon mappings */ int filepages; /* number of pages used by cached file data */ int execpages; /* number of pages used by cached exec date */ d267 2 a268 2 int execmin; /* min threshold for executable pages */ int filemin; /* min threshold for file pages */ d270 2 a271 8 int execminpct; /* min percent executable pages */ int fileminpct; /* min percent file pages */ int anonmax; /* max threshold for anon pages */ int execmax; /* max threshold for executable pages */ int filemax; /* max threshold for file pages */ int anonmaxpct; /* max percent anon pages */ int execmaxpct; /* max percent executable pages */ int filemaxpct; /* max percent file pages */ d341 7 a347 3 int pdreanon; /* anon pages reactivated due to thresholds */ int pdrefile; /* file pages reactivated due to thresholds */ int pdreexec; /* executable pages reactivated due to thresholds */ d428 2 a429 2 int64_t filepages; int64_t execpages; a477 14 * used to keep state while iterating over the map for a core dump. */ struct uvm_coredump_state { void *cookie; /* opaque for the caller */ vaddr_t start; /* start of region */ vaddr_t end; /* end of region */ vm_prot_t prot; /* protection of region */ int flags; /* flags; see below */ }; #define UVM_COREDUMP_STACK 0x01 /* region is user stack */ #define UVM_COREDUMP_NODUMP 0x02 /* don't actually dump this region */ /* d498 1 d533 1 a533 1 void ubc_release __P((void *, int)); a544 5 int uvm_coredump_walkmap __P((struct proc *, struct vnode *, struct ucred *, int (*)(struct proc *, struct vnode *, struct ucred *, struct uvm_coredump_state *), void *)); d560 1 d586 3 a588 3 #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok)) #define uvm_km_free_poolpage(addr) \ d632 2 a633 2 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); d646 3 a648 2 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int)); d655 2 d658 4 d673 1 @ 1.65.2.3 log @catch up with -current on kqueue branch @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.65.2.2 2002/01/10 20:05:32 thorpej Exp $ */ d684 1 a684 1 int uvn_findpages __P((struct uvm_object *, voff_t, @ 1.65.2.4 log @sync kqueue with -current; this includes merge of gehenna-devsw branch, merge of i386 MP branch, and part of autoconf rototil work @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.65.2.3 2002/06/23 17:52:16 jdolecek Exp $ */ a166 1 #define UVM_KMF_CANFAIL 0x4 /* caller handles failure */ a573 2 vaddr_t uvm_uarea_alloc(void); void uvm_uarea_free(vaddr_t); @ 1.64 log @replace vm_page_t with struct vm_page *. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.63 2001/05/25 04:06:12 chs Exp $ */ d90 1 a90 1 typedef unsigned int uvm_flag_t; a95 11 union vm_map_object; typedef union vm_map_object vm_map_object_t; struct vm_map_entry; typedef struct vm_map_entry *vm_map_entry_t; struct vm_map; typedef struct vm_map *vm_map_t; struct vm_page; d221 3 d480 5 a484 5 extern vm_map_t exec_map; extern vm_map_t kernel_map; extern vm_map_t kmem_map; extern vm_map_t mb_map; extern vm_map_t phys_map; d537 1 a537 1 int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t, d563 1 a563 1 int uvm_io __P((vm_map_t, struct uio *)); d566 24 a589 21 vaddr_t uvm_km_alloc1 __P((vm_map_t, vsize_t, boolean_t)); void uvm_km_free __P((vm_map_t, vaddr_t, vsize_t)); void uvm_km_free_wakeup __P((vm_map_t, vaddr_t, vsize_t)); vaddr_t uvm_km_kmemalloc __P((vm_map_t, struct uvm_object *, vsize_t, int)); struct vm_map *uvm_km_suballoc __P((vm_map_t, vaddr_t *, vaddr_t *, vsize_t, int, boolean_t, vm_map_t)); vaddr_t uvm_km_valloc __P((vm_map_t, vsize_t)); vaddr_t uvm_km_valloc_align __P((vm_map_t, vsize_t, vsize_t)); vaddr_t uvm_km_valloc_wait __P((vm_map_t, vsize_t)); vaddr_t uvm_km_valloc_prefer_wait __P((vm_map_t, vsize_t, voff_t)); vaddr_t uvm_km_alloc_poolpage1 __P((vm_map_t, struct uvm_object *, boolean_t)); void uvm_km_free_poolpage1 __P((vm_map_t, vaddr_t)); #define uvm_km_alloc_poolpage(waitok) uvm_km_alloc_poolpage1(kmem_map, \ uvmexp.kmem_object, (waitok)) #define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr)) d592 1 a592 1 int uvm_map __P((vm_map_t, vaddr_t *, vsize_t, d595 1 a595 1 int uvm_map_pageable __P((vm_map_t, vaddr_t, d597 3 a599 2 int uvm_map_pageable_all __P((vm_map_t, int, vsize_t)); boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t, d601 1 a601 1 int uvm_map_protect __P((vm_map_t, vaddr_t, d620 1 a620 1 int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t, d665 1 a665 1 void uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); @ 1.63 log @remove trailing whitespace. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.62 2001/05/02 01:22:19 thorpej Exp $ */ a105 1 typedef struct vm_page *vm_page_t; @ 1.62 log @Support dynamic sizing of the page color bins. We also support dynamically re-coloring pages; as machine-dependent code discovers the size of the system's caches, it may call uvm_page_recolor() with the new number of colors to use. If the new number of colors is smaller than (or equal to) the current number of colors, then uvm_page_recolor() is a no-op. The system defaults to one bucket if machine-dependent code does not initialize uvmexp.ncolors before uvm_page_init() is called. Note that the number of color bins should be initialized to something reasonable as early as possible -- for many early memory allocations, we live with the consequences of the page choice for the lifetime of the boot. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.61 2001/05/01 19:36:56 thorpej Exp $ */ d258 1 a258 1 /* d568 1 a568 1 void uvm_init __P((void)); d601 1 a601 1 int uvm_map_pageable __P((vm_map_t, vaddr_t, d606 1 a606 1 int uvm_map_protect __P((vm_map_t, vaddr_t, d621 1 a621 1 int uvm_sysctl __P((int *, u_int, void *, size_t *, d626 1 a626 1 vm_prot_t, vm_prot_t, int, d635 1 a635 1 void uvm_pagerealloc __P((struct vm_page *, d654 1 a654 1 struct pglist *, int, int)); d661 1 a661 1 int uvm_coredump __P((struct proc *, struct vnode *, d666 1 a666 1 int uvm_coredump32 __P((struct proc *, struct vnode *, @ 1.61 log @Add the number of page colors to uvmexp.
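Since uvmexp.ncolors above must be a power of two, a page color can be derived and advanced with nothing more than a mask. A sketch of the round-robin ("bin hopping") selection the 1.60 and 1.62 logs describe, assuming only the uvmexp fields shown in these deltas; the nextcolor cursor is hypothetical, not the real uvm_page.c state:

	static int nextcolor;	/* hypothetical round-robin cursor */

	static int
	uvm_nextcolor(void)
	{
		int color = nextcolor;

		/* colormask == ncolors - 1, so wrapping is a mask, not a modulo */
		nextcolor = (nextcolor + 1) & uvmexp.colormask;
		return (color);
	}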
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.60 2001/04/29 04:23:21 thorpej Exp $ */ d255 3 a315 1 int ncolors; /* number of page color buckets */ @ 1.60 log @Implement page coloring, using a round-robin bucket selection algorithm (Solaris calls this "Bin Hopping"). This implementation currently relies on MD code to define a constant defining the number of buckets. This will change reasonably soon (MD code will be able to dynamically size the bucket array). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.59 2001/04/25 18:09:52 thorpej Exp $ */ d313 1 d439 1 @ 1.59 log @pmap_resident_count() always exists. Besides, returning the value of vm_rssize is pointless -- it is never initialized to anything other than 0. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.58 2001/03/15 06:10:56 chs Exp $ */ d311 2 d436 2 @ 1.58 log @eliminate the KERN_* error codes in favor of the traditional E* codes. the mapping is: KERN_SUCCESS 0 KERN_INVALID_ADDRESS EFAULT KERN_PROTECTION_FAILURE EACCES KERN_NO_SPACE ENOMEM KERN_INVALID_ARGUMENT EINVAL KERN_FAILURE various, mostly turn into KASSERTs KERN_RESOURCE_SHORTAGE ENOMEM KERN_NOT_RECEIVER KERN_NO_ACCESS KERN_PAGES_LOCKED @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $ */ d465 1 a465 1 segsz_t vm_rssize; /* current resident set size in pages */ a496 1 #ifdef pmap_resident_count a497 3 #else #define vm_resident_count(vm) ((vm)->vm_rssize) #endif @ 1.57 log @add UBC memory-usage balancing. we track the number of pages in use for each of the basic types (anonymous data, executable image, cached files) and prevent the pagedaemon from reusing a given page if that would reduce the count of that type of page below a sysctl-setable minimum threshold. the thresholds are controlled via three new sysctl tunables: vm.anonmin, vm.vnodemin, and vm.vtextmin. these tunables are the percentages of pageable memory reserved for each usage, and we do not allow the sum of the minimums to be more than 95% so that there's always some memory that can be reused. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56 2001/02/06 17:01:52 eeh Exp $ */ d623 1 a623 1 caddr_t, voff_t, vsize_t)); d666 1 a666 1 int uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); @ 1.56 log @Specify a process' address space limits for uvmspace_exec(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.55 2000/11/30 11:04:43 simonb Exp $ */ d254 4 a257 2 /* XXX: Adding anything before this line will break binary * compatibility with top(1) on NetBSD 1.5. d259 1 d272 6 d345 3 @ 1.56.2.1 log @Initial commit of scheduler activations and lightweight process support. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56 2001/02/06 17:01:52 eeh Exp $ */ d509 1 a509 1 void cpu_swapin __P((struct lwp *)); d512 1 a512 1 void cpu_swapout __P((struct lwp *)); d538 1 a538 2 void uvm_proc_fork __P((struct proc *, struct proc *, boolean_t)); void uvm_lwp_fork __P((struct lwp *, struct lwp *, d540 1 a540 2 void uvm_proc_exit __P((struct proc *)); void uvm_lwp_exit __P((struct lwp *)); d544 1 a544 1 void uvm_swapin __P((struct lwp *)); d596 1 a596 1 void uvmspace_exec __P((struct lwp *, vaddr_t, vaddr_t)); d600 1 a600 1 void uvmspace_unshare __P((struct lwp *)); @ 1.56.2.2 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.58 2001/03/15 06:10:56 chs Exp $ */ d254 2 a255 4 /* * Adding anything before this line will break binary compatibility * with top(1) on NetBSD 1.5. 
a256 1 a268 6 int anonmin; /* min threshold for anon pages */ int vtextmin; /* min threshold for vtext pages */ int vnodemin; /* min threshold for vnode pages */ int anonminpct; /* min percent anon pages */ int vtextminpct;/* min percent vtext pages */ int vnodeminpct;/* min percent vnode pages */ a335 3 int pdreanon; /* anon pages reactivated due to min threshold */ int pdrevnode; /* vnode pages reactivated due to min threshold */ int pdrevtext; /* vtext pages reactivated due to min threshold */ d613 1 a613 1 void *, voff_t, vsize_t)); d656 1 a656 1 void uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); @ 1.56.2.3 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.2 2001/04/09 01:59:12 nathanw Exp $ */ d90 1 a90 1 typedef unsigned int uvm_flag_t; d96 12 a232 3 struct vm_map_entry; struct vm_map; struct vm_page; d255 1 a255 4 int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ /* a310 2 int colorhit; /* pagealloc where we got optimal color */ int colormiss; /* pagealloc where we didn't */ a433 3 int64_t colorhit; int64_t colormiss; int64_t ncolors; d465 1 a465 1 segsz_t vm_rssize; /* current resident set size in pages */ d481 5 a485 5 extern struct vm_map *exec_map; extern struct vm_map *kernel_map; extern struct vm_map *kmem_map; extern struct vm_map *mb_map; extern struct vm_map *phys_map; d497 1 d499 3 d542 1 a542 1 int uvm_fault __P((struct vm_map *, vaddr_t, vm_fault_t, d566 1 a566 1 void uvm_init __P((void)); d570 1 a570 1 int uvm_io __P((struct vm_map *, struct uio *)); d573 21 a593 24 vaddr_t uvm_km_alloc1 __P((struct vm_map *, vsize_t, boolean_t)); void uvm_km_free __P((struct vm_map *, vaddr_t, vsize_t)); void uvm_km_free_wakeup __P((struct vm_map *, vaddr_t, vsize_t)); vaddr_t uvm_km_kmemalloc __P((struct vm_map *, struct uvm_object *, vsize_t, int)); struct vm_map *uvm_km_suballoc __P((struct vm_map *, vaddr_t *, vaddr_t *, vsize_t, int, boolean_t, struct vm_map *)); vaddr_t uvm_km_valloc __P((struct vm_map *, vsize_t)); vaddr_t uvm_km_valloc_align __P((struct vm_map *, vsize_t, vsize_t)); vaddr_t uvm_km_valloc_wait __P((struct vm_map *, vsize_t)); vaddr_t uvm_km_valloc_prefer_wait __P((struct vm_map *, vsize_t, voff_t)); vaddr_t uvm_km_alloc_poolpage1 __P((struct vm_map *, struct uvm_object *, boolean_t)); void uvm_km_free_poolpage1 __P((struct vm_map *, vaddr_t)); #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, (waitok)) #define uvm_km_free_poolpage(addr) \ uvm_km_free_poolpage1(kmem_map, (addr)) d596 1 a596 1 int uvm_map __P((struct vm_map *, vaddr_t *, vsize_t, d599 1 a599 1 int uvm_map_pageable __P((struct vm_map *, vaddr_t, d601 2 a602 3 int uvm_map_pageable_all __P((struct vm_map *, int, vsize_t)); boolean_t uvm_map_checkprot __P((struct vm_map *, vaddr_t, d604 1 a604 1 int uvm_map_protect __P((struct vm_map *, vaddr_t, d619 1 a619 1 int uvm_sysctl __P((int *, u_int, void *, size_t *, d623 2 a624 2 int uvm_mmap __P((struct vm_map *, vaddr_t *, vsize_t, vm_prot_t, vm_prot_t, int, d633 1 a633 1 void uvm_pagerealloc __P((struct vm_page *, d652 1 a652 1 struct pglist *, int, int)); d659 1 a659 1 int uvm_coredump __P((struct proc *, struct vnode *, d664 1 a664 1 int uvm_coredump32 __P((struct proc *, struct vnode *, d668 1 a668 1 void uvm_deallocate __P((struct vm_map *, vaddr_t, vsize_t)); @ 1.56.2.4 log @Catch up with -current. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.3 2001/06/21 20:10:26 nathanw Exp $ */ d605 2 a606 1 struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t)); d608 1 a608 1 vaddr_t, vaddr_t)); @ 1.56.2.5 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.4 2001/08/24 00:13:35 nathanw Exp $ */ d159 1 a159 1 /* magic offset value: offset not known(obj) or don't care(!obj) */ d161 1 d186 2 a187 3 #define UBC_READ 0x01 #define UBC_WRITE 0x02 #define UBC_FAULTBUSY 0x04 d192 5 a196 7 #define UFP_ALL 0x00 #define UFP_NOWAIT 0x01 #define UFP_NOALLOC 0x02 #define UFP_NOCACHE 0x04 #define UFP_NORDONLY 0x08 #define UFP_DIRTYONLY 0x10 #define UFP_BACKWARD 0x20 d246 3 a253 3 int ncolors; /* number of page color buckets: must be p-o-2 */ int colormask; /* color bucket mask */ d344 4 d498 1 d533 1 a533 1 void ubc_release __P((void *, int)); d562 1 d588 3 a590 3 #define uvm_km_alloc_poolpage(waitok) \ uvm_km_alloc_poolpage1(kmem_map, NULL, (waitok)) #define uvm_km_free_poolpage(addr) \ d634 2 a635 2 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); d648 3 a650 2 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, struct pglist *, int, int)); d675 1 @ 1.56.2.6 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.5 2001/09/21 22:37:12 nathanw Exp $ */ d259 3 a261 3 int anonpages; /* number of pages used by anon mappings */ int filepages; /* number of pages used by cached file data */ int execpages; /* number of pages used by cached exec date */ d269 2 a270 2 int execmin; /* min threshold for executable pages */ int filemin; /* min threshold for file pages */ d272 2 a273 8 int execminpct; /* min percent executable pages */ int fileminpct; /* min percent file pages */ int anonmax; /* max threshold for anon pages */ int execmax; /* max threshold for executable pages */ int filemax; /* max threshold for file pages */ int anonmaxpct; /* max percent anon pages */ int execmaxpct; /* max percent executable pages */ int filemaxpct; /* max percent file pages */ d343 3 a345 3 int pdreanon; /* anon pages reactivated due to thresholds */ int pdrefile; /* file pages reactivated due to thresholds */ int pdreexec; /* executable pages reactivated due to thresholds */ d426 2 a427 2 int64_t filepages; int64_t execpages; a475 14 * used to keep state while iterating over the map for a core dump. */ struct uvm_coredump_state { void *cookie; /* opaque for the caller */ vaddr_t start; /* start of region */ vaddr_t end; /* end of region */ vm_prot_t prot; /* protection of region */ int flags; /* flags; see below */ }; #define UVM_COREDUMP_STACK 0x01 /* region is user stack */ #define UVM_COREDUMP_NODUMP 0x02 /* don't actually dump this region */ /* a544 5 int uvm_coredump_walkmap __P((struct proc *, struct vnode *, struct ucred *, int (*)(struct proc *, struct vnode *, struct ucred *, struct uvm_coredump_state *), void *)); d652 2 d655 4 @ 1.56.2.7 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.6 2002/01/08 00:35:00 nathanw Exp $ */ d686 1 a686 1 int uvn_findpages __P((struct uvm_object *, voff_t, @ 1.56.2.8 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD$ */ a166 1 #define UVM_KMF_CANFAIL 0x4 /* caller handles failure */ @ 1.56.2.9 log @Catch up to -current. @ text @a576 2 vaddr_t uvm_uarea_alloc(void); void uvm_uarea_free(vaddr_t); @ 1.56.2.10 log @Sync with HEAD. @ text @d577 1 a577 1 boolean_t uvm_uarea_alloc(vaddr_t *); @ 1.56.2.11 log @Sync with HEAD. 
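The uvm_coredump_walkmap() prototype and struct uvm_coredump_state brought in by these syncs imply one callback invocation per map region. A hedged sketch of such a callback; dump_seg() is an invented helper, not a kernel routine:

	/* sketch: write one region of the core file, or skip it */
	static int
	coredump_seg(struct proc *p, struct vnode *vp, struct ucred *cred,
	    struct uvm_coredump_state *s)
	{
		if (s->flags & UVM_COREDUMP_NODUMP)
			return (0);	/* region flagged as not worth dumping */

		/*
		 * s->start..s->end is the region and s->prot its protection;
		 * UVM_COREDUMP_STACK marks user stack segments so the dump
		 * format can label them.
		 */
		return (dump_seg(s->cookie, vp, cred, s->start, s->end,
		    s->prot));
	}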
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.56.2.10 2002/12/11 06:51:53 thorpej Exp $ */ a148 1 #define UVM_FLAG_NOWAIT 0x400000 /* not allowed to sleep */ d165 3 a167 2 #define UVM_KMF_VALLOC 0x1 /* allocate VA only */ #define UVM_KMF_CANFAIL 0x2 /* caller handles failure */ a168 1 #define UVM_KMF_NOWAIT UVM_FLAG_NOWAIT /* not allowed to sleep */ @ 1.55 log @Move uvm_pgcnt_vnode and uvm_pgcnt_anon into uvmexp (as vnodepages and anonpages), and add vtextpages which is currently unused but will be used to trace the number of pages used by vtext vnodes. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.54 2000/11/29 09:52:18 simonb Exp $ */ d596 1 a596 1 void uvmspace_exec __P((struct proc *)); @ 1.54 log @Add a vm.uvmexp2 sysctl that uses a ABI-safe 'struct uvmexp_sysctl'. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.53 2000/11/27 08:40:03 chs Exp $ */ d257 1 a257 1 int zeropages; /* number of zero'd pages */ d259 4 a262 1 int reserve_kernel; /* number of pages reserved for kernel */ a272 1 int swpguniq; /* number of swap pages in use, not also in RAM */ d336 1 a336 1 a365 1 int64_t swpguniq; d419 3 @ 1.53 log @Initial integration of the Unified Buffer Cache project. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.52 2000/11/27 04:36:40 nisimura Exp $ */ d254 3 d338 80 @ 1.52 log @Introduce uvm_km_valloc_align() and use it to glab process's USPACE aligned on USPACE boundary in kernel virutal address. It's benefitial for MIPS R4000's paired TLB entry design. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.51 2000/09/28 19:05:06 eeh Exp $ */ d196 15 d231 1 d234 2 d435 7 d443 2 a444 2 int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t, vm_prot_t)); d539 5 d546 1 a571 3 void uvm_vnp_terminate __P((struct vnode *)); /* terminate a uvm/uvn object */ boolean_t uvm_vnp_uncache __P((struct vnode *)); d573 4 @ 1.51 log @Add support for variable end of user stacks needed to support COMPAT_NETBSD32: `struct vmspace' has a new field `vm_minsaddr' which is the user TOS. PS_STRINGS is deprecated in favor of curproc->p_pstr which is derived from `vm_minsaddr'. Bump the kernel version number. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.50 2000/09/21 17:46:04 thorpej Exp $ */ d457 1 @ 1.50 log @Make PMAP_PAGEIDLEZERO() return a boolean value. FALSE indidcates that the page being zero'd was not completed and that page zeroing should be aborted. This may be used by machine-dependent code doing slow page access to reduce the latency of running a process that has become runnable while in the middle of doing a slow page zero. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.49 2000/09/13 15:00:25 thorpej Exp $ */ d356 1 @ 1.49 log @Add an align argument to uvm_map() and some callers of that routine. Works similarly fto pmap_prefer(), but allows callers to specify a minimum power-of-two alignment of the region. How we ever got along without this for so long is beyond me. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.48 2000/08/12 22:41:55 thorpej Exp $ */ d277 2 @ 1.48 log @Don't bother with a trampoline to start the pagedaemon and reaper threads. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.47 2000/08/01 00:53:09 wiz Exp $ */ d467 2 a468 1 struct uvm_object *, voff_t, uvm_flag_t)); @ 1.47 log @Rename VM_INHERIT_* to MAP_INHERIT_* and move them to sys/sys/mman.h as discussed on tech-kern. Retire sys/uvm/uvm_inherit.h, update man page for minherit(2). 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.46 2000/07/24 20:10:51 jeffs Exp $ */ d467 2 a468 1 struct uvm_object *, voff_t, uvm_flag_t)); @ 1.46 log @Add uvm_km_valloc_prefer_wait(). Used to valloc with the passed-in voff_t being passed to PMAP_PREFER(), which results in the proper virtual alignment of the allocated space. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.45 2000/06/27 16:16:43 mrg Exp $ */ a328 1 #include @ 1.45 log @move the contents of into . is simply an include of now. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.44 2000/06/27 09:00:14 mrg Exp $ */ d456 2 @ 1.44 log @more vm header file changes: merged into merged into has become this leaves just in NetBSD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.43 2000/06/26 14:21:17 mrg Exp $ */ d38 1 a38 1 * Copyright (c) 1992, 1993 d87 22 d318 2 d321 37 a357 1 extern struct uvmexp uvmexp; d378 5 a382 6 /* * typedefs */ typedef unsigned int uvm_flag_t; typedef int vm_fault_t; @ 1.43 log @remove/move more mach vm header files: -> -> -> into -> nothing -> into also includes a bunch of include removals (due to redundancy with ), and a scattering of other similar headers. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.42 2000/06/08 05:52:34 thorpej Exp $ */ d37 35 d325 11 d337 11 @ 1.42 log @Change UVM_UNLOCK_AND_WAIT() to use ltsleep() (it is now atomic, as advertised). Garbage-collect uvm_sleep(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.41 2000/05/28 05:49:06 thorpej Exp $ */ d263 9 @ 1.41 log @Rather than starting init and creating kthreads by forking and then doing a cpu_set_kpc(), just pass the entry point and argument all the way down the fork path starting with fork1(). In order to avoid special-casing the normal fork in every cpu_fork(), MI code passes down child_return() and the child process pointer explicitly. This fixes a race condition on multiprocessor systems; a CPU could grab the newly created process (which has been placed on a run queue) before cpu_set_kpc() would be performed. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.40 2000/04/24 17:12:00 thorpej Exp $ */ a298 2 void uvm_sleep __P((void *, struct simplelock *, boolean_t, const char *, int)); @ 1.40 log @Changes necessary to implement pre-zero'ing of pages in the idle loop: - Make page free lists have two actual queues: known-zero pages and pages with unknown contents. - Implement uvm_pageidlezero(). This function attempts to zero up to the target number of pages until the target has been reached (currently target is `all free pages') or until whichqs becomes non-zero (indicating that a process is ready to run). - Define a new hook for the pmap module for pre-zero'ing pages. This is used to zero the pages using uncached access. This allows us to zero as many pages as we want without polluting the cache. In order to use this feature, each platform must add the appropriate glue in their idle loop. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.39 2000/04/10 00:28:05 thorpej Exp $ */ d302 1 a302 1 void *, size_t)); @ 1.40.2.1 log @Sync w/ netbsd-1-5-base. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.42 2000/06/08 05:52:34 thorpej Exp $ */ d299 2 d302 1 a302 1 void *, size_t, void (*)(void *), void *)); @ 1.39 log @Add UVM_PGA_ZERO which instructs uvm_pagealloc{,_strat}() to return a zero'd, ! PG_CLEAN page, as if it were uvm_pagezero()'d.
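Taken together with the 1.40 idle-loop work, the 1.39 flag lets a caller consume pre-zeroed pages directly instead of calling uvm_pagezero() itself. A hedged sketch of the calling pattern; obj and off stand for whatever object and offset the caller is filling, and error handling is elided:

	struct vm_page *pg;

	/*
	 * sketch: request a page that is already zeroed; if the idle
	 * loop (uvm_pageidlezero()) has stocked the zero queue, this
	 * costs no explicit zeroing at all.
	 */
	pg = uvm_pagealloc(obj, off, NULL, UVM_PGA_ZERO);
	if (pg == NULL) {
		/* no memory: wait for the pagedaemon, then retry or fail */
	}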
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.38 2000/03/26 20:54:46 kleink Exp $ */ d179 1 d216 4 @ 1.38 log @Merge parts of chs-ubc2 into the trunk: Add a new type voff_t (defined as a synonym for off_t) to describe offsets into uvm objects, and update the appropriate interfaces to use it, the most visible effect being the ability to mmap() file offsets beyond the range of a vaddr_t. Originally by Chuck Silvers; blame me for problems caused by merging this into non-UBC. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.37 2000/02/11 19:22:54 thorpej Exp $ */ d135 2 a136 1 #define UVM_PGA_USERESERVE 0x0001 @ 1.37 log @Add some very simple code to auto-size the kmem_map. We take the amount of physical memory, divide it by 4, and then allow machine dependent code to place upper and lower bounds on the size. Export the computed value to userspace via the new "vm.nkmempages" sysctl. NKMEMCLUSTERS is now deprecated and will generate an error if you attempt to use it. The new option, should you choose to use it, is called NKMEMPAGES, and two new options NKMEMPAGES_MIN and NKMEMPAGES_MAX allow the user to configure the bounds in the kernel config file. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.36 2000/01/11 06:57:49 chs Exp $ */ d115 1 a115 1 #define UVM_UNKNOWN_OFFSET ((vaddr_t) -1) d337 1 a337 1 struct uvm_object *, vaddr_t, uvm_flag_t)); d364 1 a364 1 caddr_t, vaddr_t, vsize_t)); d368 1 a368 1 vaddr_t, struct vm_anon *, int, int, int)); d373 1 a373 1 struct uvm_object *, vaddr_t)); d404 1 a404 1 void uvm_vnp_setsize __P((struct vnode *, u_quad_t)); @ 1.36 log @add support for ``swapctl -d'' (removing swap space). improve handling of i/o errors in swap space. reviewed by: Chuck Cranor @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.35 1999/12/30 16:09:47 eeh Exp $ */ d410 5 @ 1.35 log @I should have made uvm_page_physload() take paddr_t's instead of vaddr_t's. Also, add uvm_coredump32(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.34 1999/07/22 22:58:38 thorpej Exp $ */ d280 1 d282 1 @ 1.34 log @Garbage collect thread_sleep()/thread_wakeup() left over from the old Mach VM code. Also nuke iprintf(), which was no longer used anywhere. Add proclist locking where appropriate. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.33 1999/07/17 21:35:49 thorpej Exp $ */ d373 2 a374 2 void uvm_page_physload __P((vaddr_t, vaddr_t, vaddr_t, vaddr_t, int)); d393 4 @ 1.34.2.1 log @Update thorpej_scsipi to -current as of a month ago @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.51 2000/09/28 19:05:06 eeh Exp $ */ a36 35 /*- * Copyright (c) 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vm_extern.h 8.5 (Berkeley) 5/3/95 */ a51 22 * typedefs, necessary for standard UVM headers. */ typedef unsigned int uvm_flag_t; typedef int vm_fault_t; typedef int vm_inherit_t; /* XXX: inheritance codes */ typedef off_t voff_t; /* XXX: offset within a uvm_object */ union vm_map_object; typedef union vm_map_object vm_map_object_t; struct vm_map_entry; typedef struct vm_map_entry *vm_map_entry_t; struct vm_map; typedef struct vm_map *vm_map_t; struct vm_page; typedef struct vm_page *vm_page_t; /* d115 1 a115 1 #define UVM_UNKNOWN_OFFSET ((voff_t) -1) d135 1 a135 2 #define UVM_PGA_USERESERVE 0x0001 /* ok to use reserve pages */ #define UVM_PGA_ZERO 0x0002 /* returned page must be zero'd */ a177 1 int zeropages; /* number of zero'd pages */ a213 6 int pga_zerohit; /* pagealloc where zero wanted and zero was available */ int pga_zeromiss; /* pagealloc where zero wanted and zero not available */ int zeroaborts; /* number of times page zeroing was aborted */ d255 1 a256 48 #endif /* * Finally, bring in standard UVM headers. */ #include #include #include #include #include #include #include #include #include #include /* * Shareable process virtual address space. * May eventually be merged with vm_map. * Several fields are temporary (text, data stuff). 
*/ struct vmspace { struct vm_map vm_map; /* VM address map */ int vm_refcnt; /* number of references */ caddr_t vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ #define vm_startcopy vm_rssize segsz_t vm_rssize; /* current resident set size in pages */ segsz_t vm_swrss; /* resident set size before last swap */ segsz_t vm_tsize; /* text size (pages) XXX */ segsz_t vm_dsize; /* data size (pages) XXX */ segsz_t vm_ssize; /* stack size (pages) */ caddr_t vm_taddr; /* user virtual address of text XXX */ caddr_t vm_daddr; /* user virtual address of data XXX */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ caddr_t vm_minsaddr; /* user VA at top of stack */ }; #ifdef _KERNEL /* * the various kernel maps, owned by MD code */ extern vm_map_t exec_map; extern vm_map_t kernel_map; extern vm_map_t kmem_map; extern vm_map_t mb_map; extern vm_map_t phys_map; d268 3 a270 5 #ifdef pmap_resident_count #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap)) #else #define vm_resident_count(vm) ((vm)->vm_rssize) #endif d272 2 a273 10 /* XXX clean up later */ struct buf; struct loadavg; struct proc; struct pmap; struct vmspace; struct vmtotal; struct mount; struct vnode; struct core; a276 11 /* vm_machdep.c */ void vmapbuf __P((struct buf *, vsize_t)); void vunmapbuf __P((struct buf *, vsize_t)); void pagemove __P((caddr_t, caddr_t, size_t)); #ifndef cpu_swapin void cpu_swapin __P((struct proc *)); #endif #ifndef cpu_swapout void cpu_swapout __P((struct proc *)); #endif a279 1 void uao_detach_locked __P((struct uvm_object *)); a280 1 void uao_reference_locked __P((struct uvm_object *)); d291 2 d294 1 a294 1 void *, size_t, void (*)(void *), void *)); a324 2 vaddr_t uvm_km_valloc_prefer_wait __P((vm_map_t, vsize_t, voff_t)); d335 1 a335 2 struct uvm_object *, voff_t, vsize_t, uvm_flag_t)); d362 1 a362 1 caddr_t, voff_t, vsize_t)); d366 1 a366 1 voff_t, struct vm_anon *, int, int, int)); d371 1 a371 1 struct uvm_object *, voff_t)); d373 2 a374 2 void uvm_page_physload __P((paddr_t, paddr_t, paddr_t, paddr_t, int)); d378 1 a378 1 void uvm_pageout __P((void *)); a392 4 /* should only be needed if COMPAT_NETBSD32 is defined */ struct core32; int uvm_coredump32 __P((struct proc *, struct vnode *, struct ucred *, struct core32 *)); d398 1 a398 1 void uvm_vnp_setsize __P((struct vnode *, voff_t)); a403 5 /* kern_malloc.c */ void kmeminit_nkmempages __P((void)); void kmeminit __P((void)); extern int nkmempages; @ 1.34.2.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ a195 15 * the following defines are for ubc_alloc's flags */ #define UBC_READ 0 #define UBC_WRITE 1 /* * flags for uvn_findpages(). */ #define UFP_ALL 0x0 #define UFP_NOWAIT 0x1 #define UFP_NOALLOC 0x2 #define UFP_NOCACHE 0x4 #define UFP_NORDONLY 0x8 /* a215 1 struct pool; a217 2 extern struct pool *uvm_aiobuf_pool; d236 1 a236 4 /* XXX: Adding anything before this line will break binary * compatibility with top(1) on NetBSD 1.5. */ int zeropages; /* number of zero'd pages */ d238 1 a238 4 int reserve_kernel; /* number of pages reserved for kernel */ int anonpages; /* number of pages used by anon pagers */ int vnodepages; /* number of pages used by vnode page cache */ int vtextpages; /* number of pages used by vtext vnodes */ d249 1 d313 1 a313 1 a318 82 /* * The following structure is 64-bit alignment safe. New elements * should only be added to the end of this structure so binary * compatibility can be preserved. 
*/ struct uvmexp_sysctl { int64_t pagesize; int64_t pagemask; int64_t pageshift; int64_t npages; int64_t free; int64_t active; int64_t inactive; int64_t paging; int64_t wired; int64_t zeropages; int64_t reserve_pagedaemon; int64_t reserve_kernel; int64_t freemin; int64_t freetarg; int64_t inactarg; int64_t wiredmax; int64_t nswapdev; int64_t swpages; int64_t swpginuse; int64_t swpgonly; int64_t nswget; int64_t nanon; int64_t nanonneeded; int64_t nfreeanon; int64_t faults; int64_t traps; int64_t intrs; int64_t swtch; int64_t softs; int64_t syscalls; int64_t pageins; int64_t swapins; int64_t swapouts; int64_t pgswapin; int64_t pgswapout; int64_t forks; int64_t forks_ppwait; int64_t forks_sharevm; int64_t pga_zerohit; int64_t pga_zeromiss; int64_t zeroaborts; int64_t fltnoram; int64_t fltnoanon; int64_t fltpgwait; int64_t fltpgrele; int64_t fltrelck; int64_t fltrelckok; int64_t fltanget; int64_t fltanretry; int64_t fltamcopy; int64_t fltnamap; int64_t fltnomap; int64_t fltlget; int64_t fltget; int64_t flt_anon; int64_t flt_acow; int64_t flt_obj; int64_t flt_prcopy; int64_t flt_przero; int64_t pdwoke; int64_t pdrevs; int64_t pdswout; int64_t pdfreed; int64_t pdscans; int64_t pdanscan; int64_t pdobscan; int64_t pdreact; int64_t pdbusy; int64_t pdpageouts; int64_t pdpending; int64_t pddeact; int64_t anonpages; int64_t vnodepages; int64_t vtextpages; }; a416 7 /* uvm_bio.c */ void ubc_init __P((void)); void * ubc_alloc __P((struct uvm_object *, voff_t, vsize_t *, int)); void ubc_release __P((void *, vsize_t)); void ubc_flush __P((struct uvm_object *, voff_t, voff_t)); d418 2 a419 2 int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t, vm_prot_t)); a456 1 vaddr_t uvm_km_valloc_align __P((vm_map_t, vsize_t, vsize_t)); a512 5 /* uvm_pager.c */ void uvm_aio_biodone1 __P((struct buf *)); void uvm_aio_biodone __P((struct buf *)); void uvm_aio_aiodone __P((struct buf *)); a514 1 void uvm_aiodone_daemon __P((void *)); d540 3 a543 4 void uvn_findpages __P((struct uvm_object *, voff_t, int *, struct vm_page **, int)); void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t)); void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t)); @ 1.34.2.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.34.2.2 2000/12/08 09:20:53 bouyer Exp $ */ d596 1 a596 1 void uvmspace_exec __P((struct proc *, vaddr_t, vaddr_t)); @ 1.34.2.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.34.2.3 2001/02/11 19:17:48 bouyer Exp $ */ d254 2 a255 4 /* * Adding anything before this line will break binary compatibility * with top(1) on NetBSD 1.5. a256 1 a268 6 int anonmin; /* min threshold for anon pages */ int vtextmin; /* min threshold for vtext pages */ int vnodemin; /* min threshold for vnode pages */ int anonminpct; /* min percent anon pages */ int vtextminpct;/* min percent vtext pages */ int vnodeminpct;/* min percent vnode pages */ a335 3 int pdreanon; /* anon pages reactivated due to min threshold */ int pdrevnode; /* vnode pages reactivated due to min threshold */ int pdrevtext; /* vtext pages reactivated due to min threshold */ @ 1.34.2.5 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ d623 1 a623 1 void *, voff_t, vsize_t)); d666 1 a666 1 void uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); @ 1.33 log @Add a set of "lockflags", which can control the locking behavior of some functions. 
Use these flags in uvm_map_pageable() to determine if the map is locked on entry (replaces an already present boolean_t argument `islocked'), and if the function should return with the map still locked. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.32 1999/07/02 23:20:58 thorpej Exp $ */ d158 1 d291 2 @ 1.32 log @Bring in additional uvmexp members from chs-ubc2, so that VM stats can be read no matter which kernel you're running. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.31 1999/06/21 17:25:11 thorpej Exp $ */ d138 6 d334 1 a334 1 vaddr_t, boolean_t, boolean_t)); @ 1.31 log @Protect prototypes, certain macros, and inlines from userland. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.30 1999/06/18 05:13:46 thorpej Exp $ */ d183 1 d188 1 @ 1.30 log @Add the guts of mlockall(MCL_FUTURE). This requires that a process's "memlock" resource limit be passed to uvm_mmap(). Update all calls accordingly. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.29 1999/06/17 15:47:22 thorpej Exp $ */ d245 1 d257 2 d266 2 d394 2 a396 1 @ 1.29 log @Make uvm_vslock() return the error code from uvm_fault_wire(). All places which use uvm_vslock() should now test the return value. If it's not KERN_SUCCESS, wiring the pages failed, so the operation which is using uvm_vslock() should error out. XXX We currently just EFAULT a failed uvm_vslock(). We may want to do more about translating error codes in the future. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.28 1999/06/15 23:27:47 thorpej Exp $ */ d321 1 a321 1 vaddr_t, boolean_t)); d346 1 a346 1 caddr_t, vaddr_t)); @ 1.28 log @Several changes, developed and tested concurrently: * Provide POSIX 1003.1b mlockall(2) and munlockall(2) system calls. MCL_CURRENT is presently implemented. MCL_FUTURE is not fully implemented. Also, the same one-unlock-for-every-lock caveat currently applies here as it does to mlock(2). This will be addressed in a future commit. * Provide the mincore(2) system call, with the same semantics as Solaris. * Clean up the error recovery in uvm_map_pageable(). * Fix a bug where a process would hang if attempting to mlock a zero-fill region where none of the pages in that region are resident. [ This fix has been submitted for inclusion in 1.4.1 ] @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.27 1999/05/26 19:16:36 thorpej Exp $ */ d285 1 a285 1 void uvm_vslock __P((struct proc *, caddr_t, size_t, @ 1.27 log @Change the vm_map's "entries_pageable" member to a r/o flags member, which has PAGEABLE and INTRSAFE flags. PAGEABLE now really means "pageable", not "allocate vm_map_entry's from non-static pool", so update all map creations to reflect that. INTRSAFE maps are maps that are used in interrupt context (e.g. kmem_map, mb_map), and thus use the static map entry pool (XXX as does kernel_map, for now). This will eventually change how these maps are locked, as well. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.26 1999/05/26 01:05:24 thorpej Exp $ */ d322 1 @ 1.26 log @Pass an access_type to uvm_vslock(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.25 1999/05/13 21:58:38 thorpej Exp $ */ d305 1 a305 1 vaddr_t *, vsize_t, boolean_t, @ 1.25 log @Allow the caller to specify a stack for the child process. If NULL, the child inherits the stack pointer from the parent (traditional behavior). Like the signal stack, the stack area is specified as a low address and a size; machine-dependent code accounts for stack direction. This is required for clone(2).
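The 1.29 log above turns every uvm_vslock() call site into an error path. A sketch of the expected pattern, using the KERN_* codes still current at this point in the history; p, ubuf and len are assumed caller state:

	int rv;

	rv = uvm_vslock(p, ubuf, len, VM_PROT_READ);
	if (rv != KERN_SUCCESS)
		return (EFAULT);	/* per the log, failures become EFAULT */
	/* ... the wired range may now be accessed safely ... */
	uvm_vsunlock(p, ubuf, len);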
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.24 1999/04/11 04:04:11 chs Exp $ */ d285 2 a286 1 void uvm_vslock __P((struct proc *, caddr_t, size_t)); @ 1.24 log @add a `flags' argument to uvm_pagealloc_strat(). define a flag UVM_PGA_USERESERVE to allow non-kernel object allocations to use pages from the reserve. use the new flag for allocations in pmap modules. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23 1999/03/26 17:34:15 chs Exp $ */ d277 2 a278 1 void uvm_fork __P((struct proc *, struct proc *, boolean_t)); @ 1.23 log @add uvmexp.swpgonly and use it to detect out-of-swap conditions. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.22 1999/03/25 18:48:50 mrg Exp $ */ a120 1 d133 5 d347 4 a350 3 vaddr_t, struct vm_anon *, int, int)); #define uvm_pagealloc(obj, off, anon) \ uvm_pagealloc_strat((obj), (off), (anon), UVM_PGA_STRAT_NORMAL, 0) @ 1.23.2.1 log @pull up 1.23 -> 1.24: add a `flags' argument to uvm_pagealloc_strat(). define a flag UVM_PGA_USERESERVE to allow non-kernel object allocations to use pages from the reserve. use the new flag for allocations in pmap modules. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.24 1999/04/11 04:04:11 chs Exp $ */ d121 1 a133 5 * flags for uvm_pagealloc_strat() */ #define UVM_PGA_USERESERVE 0x0001 /* d343 3 a345 4 vaddr_t, struct vm_anon *, int, int, int)); #define uvm_pagealloc(obj, off, anon, flags) \ uvm_pagealloc_strat((obj), (off), (anon), (flags), \ UVM_PGA_STRAT_NORMAL, 0) @ 1.23.2.1.2.1 log @merge everything from chs-ubc branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1 1999/04/16 16:27:36 chs Exp $ */ a137 15 * the following defines are for ubc_alloc's flags */ #define UBC_READ 0 #define UBC_WRITE 1 /* * flags for uvn_findpages(). */ #define UFP_ALL 0x0 #define UFP_NOWAIT 0x1 #define UFP_NOALLOC 0x2 #define UFP_NOCACHE 0x4 #define UFP_NORDONLY 0x8 /* a151 4 struct uvm_aiodesc; struct pool; extern struct pool *uvm_aiobuf_pool; a182 1 int swpguniq; /* number of swap pages in use, not also in RAM */ a186 1 int nanonneeded;/* number of anons currently needed */ a265 1 void uao_detach_locked __P((struct uvm_object *)); a266 7 void uao_reference_locked __P((struct uvm_object *)); /* uvm_bio.c */ void * ubc_alloc __P((struct uvm_object *, vaddr_t, vsize_t *, int)); void ubc_release __P((void *, vsize_t)); void ubc_flush __P((struct uvm_object *, vaddr_t, vaddr_t)); a357 4 /* uvm_pager.c */ void uvm_aio_biodone __P((struct buf *)); void uvm_aio_aiodone __P((struct uvm_aiodesc *)); a359 1 void uvm_aiodone_daemon __P((void)); a384 5 void uvn_findpages __P((struct uvm_object *, vaddr_t, int *, struct vm_page **, int)); void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t)); void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t, size_t)); @ 1.23.2.1.2.2 log @Sync w/ -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.1 1999/06/07 04:25:35 chs Exp $ */ d306 1 a306 2 void uvm_fork __P((struct proc *, struct proc *, boolean_t, void *, size_t)); d313 1 a313 2 int uvm_vslock __P((struct proc *, caddr_t, size_t, vm_prot_t)); d332 1 a332 1 vaddr_t *, vsize_t, int, d348 1 a348 2 vaddr_t, boolean_t, boolean_t)); int uvm_map_pageable_all __P((vm_map_t, int, vsize_t)); d372 1 a372 1 caddr_t, vaddr_t, vsize_t)); @ 1.23.2.1.2.3 log @Sync w/ -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.2 1999/06/21 01:47:19 thorpej Exp $ */ a265 1 #ifdef _KERNEL a276 2 #endif /* _KERNEL */ a283 2 #ifdef _KERNEL d428 1 a428 1 #endif /* _KERNEL */ a429 1 #endif /* _UVM_UVM_EXTERN_H_ */ @ 1.23.2.1.2.4 log @adjust protos. 
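The uvmexp.swpgonly counter added in 1.23 supports an arithmetic out-of-swap test: once every in-use swap page is a swap-only copy and no free slots remain, paging out can free nothing further. A hedged sketch of such a test built only from counters named in the surrounding deltas; the function name is invented:

	static boolean_t
	swap_exhausted(void)
	{
		/* all slots in use, and each holds the only copy of its page */
		return (uvmexp.swpginuse == uvmexp.swpages &&
		    uvmexp.swpgonly == uvmexp.swpginuse);
	}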
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.3 1999/07/01 23:55:15 thorpej Exp $ */ a395 1 void uvm_aio_biodone1 __P((struct buf *)); d397 1 a397 1 void uvm_aio_aiodone __P((struct buf *)); d430 2 a431 1 void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t)); @ 1.23.2.1.2.5 log @remove uvm_vnp_uncache(), it's no longer needed. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.4 1999/07/04 01:57:35 chs Exp $ */ d425 2 @ 1.23.2.1.2.6 log @Update from trunk. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.5 1999/07/11 05:44:00 chs Exp $ */ a152 6 * lockflags that control the locking behavior of various functions. */ #define UVM_LK_ENTER 0x00000001 /* map locked on entry */ #define UVM_LK_EXIT 0x00000002 /* leave map locked on exit */ /* a168 1 struct simplelock; a310 2 void uvm_sleep __P((void *, struct simplelock *, boolean_t, const char *, int)); d355 1 a355 1 vaddr_t, boolean_t, int)); @ 1.23.2.1.2.7 log @create a new type "voff_t" for uvm_object offsets and define it to be "off_t". also, remove pgo_asyncget(). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.23.2.1.2.6 1999/08/02 23:16:14 thorpej Exp $ */ a51 7 * types */ /* byte offset within a uvm_object */ typedef off_t voff_t; /* d304 1 a304 1 void * ubc_alloc __P((struct uvm_object *, voff_t, vsize_t *, d307 1 a307 1 void ubc_flush __P((struct uvm_object *, voff_t, voff_t)); d310 2 a311 2 int uvm_fault __P((vm_map_t, vaddr_t, vm_fault_t, vm_prot_t)); d362 1 a362 1 struct uvm_object *, voff_t, uvm_flag_t)); d389 1 a389 1 caddr_t, voff_t, vsize_t)); d393 1 a393 1 voff_t, struct vm_anon *, int, int, int)); d398 1 a398 1 struct uvm_object *, voff_t)); d431 1 a431 1 void uvm_vnp_setsize __P((struct vnode *, voff_t)); d435 1 a435 1 void uvn_findpages __P((struct uvm_object *, voff_t, @ 1.22 log @remove now >1 year old pre-release message. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21 1998/09/08 23:44:21 thorpej Exp $ */ d180 1 @ 1.21 log @Implement uvm_exit(), which frees VM resources when a process finishes exiting. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.20 1998/08/28 20:05:49 thorpej Exp $ */ a2 4 /* * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE! * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<< */ @ 1.21.2.1 log @initial snapshot. lots left to do. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21 1998/09/08 23:44:21 thorpej Exp $ */ a137 6 * the following defines are for ubc_alloc's flags */ #define UBC_READ 0 #define UBC_WRITE 1 /* a182 1 int swpguniq; /* number of swap pages in use, not also in RAM */ a185 1 int nanonneeded;/* number of anons currently needed */ a264 1 void uao_detach_locked __P((struct uvm_object *)); a265 7 void uao_reference_locked __P((struct uvm_object *)); /* uvm_bio.c */ void * ubc_alloc __P((struct uvm_object *, vaddr_t, vsize_t, int)); void ubc_release __P((void *, vsize_t)); void ubc_flush __P((struct uvm_object *, vaddr_t, vaddr_t)); a382 4 int uvm_vnp_relocate __P((struct vnode *, vaddr_t, vsize_t, daddr_t)); void uvn_findpage __P((struct uvm_object *, vaddr_t, struct vm_page **)); @ 1.21.2.2 log @define UFP_* (uvn_findpages() flags). add uvm_aiobuf pool stuff. add new prototypes. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21.2.1 1998/11/09 06:06:37 chs Exp $ */ a143 8 * flags for uvn_findpage(). 
*/ #define UFP_ALL 0x0 #define UFP_NOWAIT 0x1 #define UFP_NOALLOC 0x2 #define UFP_NOCACHE 0x4 /* a157 4 struct uvm_aiodesc; struct pool; extern struct pool *uvm_aiobuf_pool; a371 4 /* uvm_pager.c */ void uvm_aio_biodone __P((struct buf *)); void uvm_aio_aiodone __P((struct uvm_aiodesc *)); d399 4 a402 5 void uvn_findpages __P((struct uvm_object *, vaddr_t, int *, struct vm_page **, int)); void uvm_vnp_setpageblknos __P((struct vnode *, off_t, off_t, daddr_t, int, boolean_t)); void uvm_vnp_zerorange __P((struct vnode *, off_t, size_t)); @ 1.21.2.3 log @add decl for aiodone daemon. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21.2.2 1999/02/25 04:11:15 chs Exp $ */ a389 1 void uvm_aiodone_daemon __P((void)); @ 1.21.2.4 log @change ubc_alloc()'s length arg to be a pointer instead of the value. the pointed-to value is the total desired length on input, and is updated to the length that will fit in the returned window. this allows callers of ubc_alloc() to be ignorant of the window size. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21.2.3 1999/04/09 04:37:27 chs Exp $ */ d290 1 a290 1 void * ubc_alloc __P((struct uvm_object *, vaddr_t, vsize_t *, @ 1.21.2.5 log @uvm_vnp_setpageblknos() is out, uvm_vnp_asyncget() is in. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21.2.4 1999/04/30 04:32:08 chs Exp $ */ d144 1 a144 1 * flags for uvn_findpages(). d418 2 a420 2 void uvm_vnp_asyncget __P((struct vnode *, off_t, size_t, size_t)); @ 1.21.2.6 log @add a new uvn_findpages() flag, UFP_NORDONLY, which means that PG_RDONLY pages should not be returned. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.21.2.5 1999/05/30 15:17:57 chs Exp $ */ a149 1 #define UFP_NORDONLY 0x8 @ 1.20 log @Add a waitok boolean argument to the VM system's pool page allocator backend. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.19 1998/08/13 02:11:00 eeh Exp $ */ d277 1 @ 1.19 log @Merge paddr_t changes into the main branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.18 1998/08/01 01:39:03 thorpej Exp $ */ d306 1 a306 1 struct uvm_object *)); d309 2 a310 2 #define uvm_km_alloc_poolpage() uvm_km_alloc_poolpage1(kmem_map, \ uvmexp.kmem_object) @ 1.18 log @We need to be able to specify a uvm_object to the pool page allocator, too. 
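These poolpage macros give the pool allocator its page backend, and the 1.20 change threads a waitok flag through so a pool can decide whether the allocation may sleep. A minimal usage sketch:

	vaddr_t va;

	va = uvm_km_alloc_poolpage(TRUE);	/* TRUE: may sleep for memory */
	if (va != 0)
		uvm_km_free_poolpage(va);	/* return the page when done */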
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.17 1998/07/31 20:46:37 thorpej Exp $ */ d119 1 a119 1 #define UVM_UNKNOWN_OFFSET ((vm_offset_t) -1) d263 1 a263 1 struct uvm_object *uao_create __P((vm_size_t, int)); d268 1 a268 1 int uvm_fault __P((vm_map_t, vm_offset_t, d294 8 a301 8 vm_offset_t uvm_km_alloc1 __P((vm_map_t, vm_size_t, boolean_t)); void uvm_km_free __P((vm_map_t, vm_offset_t, vm_size_t)); void uvm_km_free_wakeup __P((vm_map_t, vm_offset_t, vm_size_t)); vm_offset_t uvm_km_kmemalloc __P((vm_map_t, struct uvm_object *, vm_size_t, int)); struct vm_map *uvm_km_suballoc __P((vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t, boolean_t, d303 3 a305 3 vm_offset_t uvm_km_valloc __P((vm_map_t, vm_size_t)); vm_offset_t uvm_km_valloc_wait __P((vm_map_t, vm_size_t)); vm_offset_t uvm_km_alloc_poolpage1 __P((vm_map_t, d307 1 a307 1 void uvm_km_free_poolpage1 __P((vm_map_t, vm_offset_t)); d314 9 a322 9 int uvm_map __P((vm_map_t, vm_offset_t *, vm_size_t, struct uvm_object *, vm_offset_t, uvm_flag_t)); int uvm_map_pageable __P((vm_map_t, vm_offset_t, vm_offset_t, boolean_t)); boolean_t uvm_map_checkprot __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t)); int uvm_map_protect __P((vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t)); struct vmspace *uvmspace_alloc __P((vm_offset_t, vm_offset_t, d325 1 a325 1 vm_offset_t, vm_offset_t, boolean_t)); d339 1 a339 1 int uvm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, d341 1 a341 1 caddr_t, vm_offset_t)); d345 1 a345 1 vm_offset_t, struct vm_anon *, int, int)); d349 4 a352 3 struct uvm_object *, vm_offset_t)); void uvm_page_physload __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t, int)); d359 2 a360 2 int uvm_pglistalloc __P((vm_size_t, vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t, d370 1 a370 1 int uvm_grow __P((struct proc *, vm_offset_t)); d373 1 a373 1 int uvm_deallocate __P((vm_map_t, vm_offset_t, vm_size_t)); d384 1 @ 1.17 log @Allow an alternate splimp-protected map to be specified in the pool page allocator routines. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.16 1998/07/24 20:28:48 thorpej Exp $ */ d305 2 a306 1 vm_offset_t uvm_km_alloc_poolpage1 __P((vm_map_t)); d309 2 a310 1 #define uvm_km_alloc_poolpage() uvm_km_alloc_poolpage1(kmem_map) @ 1.16 log @Implement uvm_km_{alloc,free}_poolpage(). These functions use pmap hooks to map/unmap pool pages if provided by the pmap layer. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.15 1998/07/08 04:28:27 thorpej Exp $ */ d305 2 a306 2 vm_offset_t uvm_km_alloc_poolpage __P((void)); void uvm_km_free_poolpage __P((vm_offset_t)); d308 2 @ 1.16.2.1 log @Split vm_offset_t and vm_size_t into paddr_t, psize_t, vaddr_t, and vsize_t. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.16 1998/07/24 20:28:48 thorpej Exp $ */ d119 1 a119 1 #define UVM_UNKNOWN_OFFSET ((vaddr_t) -1) d263 1 a263 1 struct uvm_object *uao_create __P((vsize_t, int)); d268 1 a268 1 int uvm_fault __P((vm_map_t, vaddr_t, d294 8 a301 8 vaddr_t uvm_km_alloc1 __P((vm_map_t, vsize_t, boolean_t)); void uvm_km_free __P((vm_map_t, vaddr_t, vsize_t)); void uvm_km_free_wakeup __P((vm_map_t, vaddr_t, vsize_t)); vaddr_t uvm_km_kmemalloc __P((vm_map_t, struct uvm_object *, vsize_t, int)); struct vm_map *uvm_km_suballoc __P((vm_map_t, vaddr_t *, vaddr_t *, vsize_t, boolean_t, d303 5 a307 4 vaddr_t uvm_km_valloc __P((vm_map_t, vsize_t)); vaddr_t uvm_km_valloc_wait __P((vm_map_t, vsize_t)); vaddr_t uvm_km_alloc_poolpage __P((void)); void uvm_km_free_poolpage __P((vaddr_t)); d310 9 a318 9 int uvm_map __P((vm_map_t, vaddr_t *, vsize_t, struct uvm_object *, vaddr_t, uvm_flag_t)); int uvm_map_pageable __P((vm_map_t, vaddr_t, vaddr_t, boolean_t)); boolean_t uvm_map_checkprot __P((vm_map_t, vaddr_t, vaddr_t, vm_prot_t)); int uvm_map_protect __P((vm_map_t, vaddr_t, vaddr_t, vm_prot_t, boolean_t)); struct vmspace *uvmspace_alloc __P((vaddr_t, vaddr_t, d321 1 a321 1 vaddr_t, vaddr_t, boolean_t)); d335 1 a335 1 int uvm_mmap __P((vm_map_t, vaddr_t *, vsize_t, d337 1 a337 1 caddr_t, vaddr_t)); d341 1 a341 1 vaddr_t, struct vm_anon *, int, int)); d345 3 a347 4 struct uvm_object *, vaddr_t)); /* Actually, uvm_page_physload takes PF#s which need their own type */ void uvm_page_physload __P((vaddr_t, vaddr_t, vaddr_t, vaddr_t, int)); d354 2 a355 2 int uvm_pglistalloc __P((psize_t, paddr_t, paddr_t, paddr_t, paddr_t, d365 1 a365 1 int uvm_grow __P((struct proc *, vaddr_t)); d368 1 a368 1 int uvm_deallocate __P((vm_map_t, vaddr_t, vsize_t)); a378 1 @ 1.16.2.2 log @Revert cdevsw mmap routines to return int. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.16.2.1 1998/07/30 14:04:10 eeh Exp $ */ d305 2 a306 7 vaddr_t uvm_km_alloc_poolpage1 __P((vm_map_t, struct uvm_object *)); void uvm_km_free_poolpage1 __P((vm_map_t, vaddr_t)); #define uvm_km_alloc_poolpage() uvm_km_alloc_poolpage1(kmem_map, \ uvmexp.kmem_object) #define uvm_km_free_poolpage(addr) uvm_km_free_poolpage1(kmem_map, (addr)) @ 1.15 log @Add support for multiple memory free lists. There is at least one default free list, and 0 - N additional free list, in order of descending priority. A new page allocation function, uvm_pagealloc_strat(), has been added, providing three page allocation strategies: - normal: high -> low priority free list walk, taking the page off the first free list that has one. - only: attempt to allocate a page only from the specified free list, failing if that free list has none available. - fallback: if `only' fails, fall back on `normal'. uvm_pagealloc(...) is provided for normal use (and is a synonym for uvm_pagealloc_strat(..., UVM_PGA_STRAT_NORMAL, 0); the free list argument is ignored for the `normal' case). uvm_page_physload() now specified which free list the pages will be loaded onto. This means that some platforms which have multiple physical memory segments may define additional vm_physsegs if they wish to break individual physical segments into differing priorities. Machine-dependent code must define _at least_ the following constants in : VM_NFREELIST: the number of free lists the system will have VM_FREELIST_DEFAULT: the default freelist (should always be 0, but is defined in machdep code so that it's with all of the other free list-related constants). 
Additional free list names may be defined by machine-dependent code, but they will only be used by machine-dependent code (e.g. for loading the vm_physsegs). @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.14 1998/07/04 22:18:53 jonathan Exp $ */ d305 2 @ 1.14 log @defopt DDB. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.13 1998/05/09 15:04:40 kleink Exp $ */ d131 7 d338 4 a341 2 struct vm_page *uvm_pagealloc __P((struct uvm_object *, vm_offset_t, struct vm_anon *)); d345 1 a345 1 vm_offset_t, vm_offset_t)); @ 1.13 log @Use size_t to pass the length of the memory region to operate on to chgkprot(), kernacc(), useracc(), vslock() and vsunlock(); (unsigned) ints are not adequate on all platforms. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.12 1998/04/30 06:28:59 thorpej Exp $ */ a306 5 #if defined(DDB) void uvm_map_print __P((vm_map_t, boolean_t)); void uvm_map_printit __P((vm_map_t, boolean_t, void (*) __P((const char *, ...)))); #endif a308 8 #if defined(DDB) void uvm_object_print __P((struct uvm_object *, boolean_t)); void uvm_object_printit __P((struct uvm_object *, boolean_t, void (*) __P((const char *, ...)))); void uvm_page_print __P((struct vm_page *, boolean_t)); void uvm_page_printit __P((struct vm_page *, boolean_t, void (*) __P((const char *, ...)))); #endif @ 1.12 log @Pass vslock() and vsunlock() a proc *, rather than implicitly operating on curproc. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.11 1998/03/30 06:24:42 mycroft Exp $ */ d267 1 a267 1 void uvm_chgkprot __P((caddr_t, int, int)); d271 1 a271 1 boolean_t uvm_kernacc __P((caddr_t, int, int)); d274 3 a276 3 boolean_t uvm_useracc __P((caddr_t, int, int)); void uvm_vslock __P((struct proc *, caddr_t, u_int)); void uvm_vsunlock __P((struct proc *, caddr_t, u_int)); @ 1.11 log @Mark scheduler() and uvm_scheduler() as never returning. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.10 1998/03/27 01:47:06 thorpej Exp $ */ d275 2 a276 2 void uvm_vslock __P((caddr_t, u_int)); void uvm_vsunlock __P((caddr_t, u_int)); @ 1.10 log @Split uvmspace_alloc() into uvmspace_alloc() and uvmspace_init(). The latter can be used for initializing a pre-allocated vmspace. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.9 1998/03/09 00:58:56 mrg Exp $ */ d272 1 a272 1 void uvm_scheduler __P((void)); @ 1.9 log @KNF. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.8 1998/02/10 02:34:31 perry Exp $ */ d143 1 d322 4 a325 1 struct vmspace *uvmspace_alloc __P((vm_offset_t, vm_offset_t, int)); @ 1.8 log @add/cleanup multiple inclusion protection. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.7 1998/02/09 13:08:22 mrg Exp $ */ d151 1 a151 1 /* vm_page constants */ d156 1 a156 1 /* vm_page counters */ d166 5 a170 5 /* pageout params */ int freemin; /* min number of free pages */ int freetarg; /* target number of free pages */ int inactarg; /* target number of inactive pages */ int wiredmax; /* max number of wired pages */ d172 1 a172 1 /* swap */ d180 1 a180 1 /* stat counters */ d197 1 a197 1 /* fault subcounters */ d217 1 a217 1 /* daemon counters */ d231 1 a231 1 /* kernel memory objects: managed by uvm_km_kmemalloc() only! */ d261 1 a261 1 vm_fault_t, vm_prot_t)); @ 1.7 log @keep statistics on pageout/pagein, total pages, and total operations. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.6 1998/02/08 06:15:58 thorpej Exp $ */ d41 2 a42 2 #ifndef UVM_UVM_EXTERN_H #define UVM_UVM_EXTERN_H d376 1 a376 1 #endif @ 1.6 log @Allow callers of uvm_km_suballoc() to specify where the base of the submap _must_ begin, by adding a "fixed" boolean argument. 
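The three strategies in the 1.15 log map onto the last two arguments of uvm_pagealloc_strat() (strategy, then free list). A hedged sketch in the five-argument form of this era; obj and off are assumed caller state, and VM_FREELIST_FAST is an invented machine-dependent list name:

	struct vm_page *pg;

	/* normal: walk the free lists from high to low priority */
	pg = uvm_pagealloc_strat(obj, off, NULL, UVM_PGA_STRAT_NORMAL, 0);

	/* only: succeed only from the named free list, else fail */
	pg = uvm_pagealloc_strat(obj, off, NULL, UVM_PGA_STRAT_ONLY,
	    VM_FREELIST_FAST);

	/* fallback: try the named list first, then fall back on "normal" */
	pg = uvm_pagealloc_strat(obj, off, NULL, UVM_PGA_STRAT_FALLBACK,
	    VM_FREELIST_FAST);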
@ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.5 1998/02/07 17:00:36 mrg Exp $ */ d181 13 a193 11 int faults; /* page fault count */ int traps; /* trap count */ int intrs; /* interrupt count */ int swtch; /* context switch count */ int softs; /* software interrupt count */ int syscalls; /* system calls */ int pageins; /* pages paged in */ int pageouts; /* pages paged out */ int swapins; /* swapins */ int swapouts; /* swapouts */ int forks; /* forks */ a195 1 @ 1.5 log @implement counters for pages paged in/out @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.4 1998/02/07 11:08:22 mrg Exp $ */ d293 1 a293 1 vm_map_t)); @ 1.4 log @restore rcsids @ text @d1 1 a1 1 /* $NetBSD$ */ d187 2 @ 1.3 log @prototype for uvm_map_checkprot() moved here. add uvmexp fields for pageouts-in-progress and kernel-reserved pages. @ text @d1 1 a1 1 /* $NetBSD: uvm_extern.h,v 1.2 1998/02/06 22:31:43 thorpej Exp $ */ d37 2 @ 1.2 log @RCS ID police. @ text @d1 1 a1 1 /* $NetBSD$ */ d159 1 d161 2 d173 1 a173 1 int swpginuse; /* swap pages in use */ d299 2 @ 1.1 log @Initial revision @ text @d1 1 a1 1 /* $Id: uvm_extern.h,v 1.1.2.19 1998/02/04 02:31:39 chuck Exp $ */ @ 1.1.1.1 log @initial import of the new virtual memory system, UVM, into -current. UVM was written by chuck cranor , with some minor portions derived from the old Mach code. i provided some help getting swap and paging working, and other bug fixes/ideas. chuck silvers also provided some other fixes. this is the UVM kernel code portion. this will be KNF'd shortly. :-) @ text @@