head 1.120; access; symbols netbsd-10-0-RELEASE:1.118 netbsd-10-0-RC6:1.118 netbsd-10-0-RC5:1.118 netbsd-10-0-RC4:1.118 netbsd-10-0-RC3:1.118 netbsd-10-0-RC2:1.118 thorpej-ifq:1.120.0.4 thorpej-ifq-base:1.120 thorpej-altq-separation:1.120.0.2 thorpej-altq-separation-base:1.120 netbsd-10-0-RC1:1.118 netbsd-10:1.118.0.18 netbsd-10-base:1.118 bouyer-sunxi-drm:1.118.0.16 bouyer-sunxi-drm-base:1.118 netbsd-9-3-RELEASE:1.103 thorpej-i2c-spi-conf2:1.118.0.14 thorpej-i2c-spi-conf2-base:1.118 thorpej-futex2:1.118.0.12 thorpej-futex2-base:1.118 thorpej-cfargs2:1.118.0.10 thorpej-cfargs2-base:1.118 cjep_sun2x-base1:1.118 cjep_sun2x:1.118.0.8 cjep_sun2x-base:1.118 cjep_staticlib_x-base1:1.118 netbsd-9-2-RELEASE:1.103 cjep_staticlib_x:1.118.0.6 cjep_staticlib_x-base:1.118 thorpej-i2c-spi-conf:1.118.0.4 thorpej-i2c-spi-conf-base:1.118 thorpej-cfargs:1.118.0.2 thorpej-cfargs-base:1.118 thorpej-futex:1.117.0.2 thorpej-futex-base:1.118 netbsd-9-1-RELEASE:1.103 bouyer-xenpvh-base2:1.111 phil-wifi-20200421:1.111 bouyer-xenpvh-base1:1.111 phil-wifi-20200411:1.111 bouyer-xenpvh:1.111.0.2 bouyer-xenpvh-base:1.111 is-mlppp:1.107.0.2 is-mlppp-base:1.107 phil-wifi-20200406:1.111 netbsd-8-2-RELEASE:1.102.10.1 ad-namecache-base3:1.107 netbsd-9-0-RELEASE:1.103 netbsd-9-0-RC2:1.103 ad-namecache-base2:1.105 ad-namecache-base1:1.105 ad-namecache:1.104.0.2 ad-namecache-base:1.104 netbsd-9-0-RC1:1.103 phil-wifi-20191119:1.103 netbsd-9:1.103.0.8 netbsd-9-base:1.103 phil-wifi-20190609:1.103 netbsd-8-1-RELEASE:1.102.10.1 netbsd-8-1-RC1:1.102.10.1 isaki-audio2:1.103.0.6 isaki-audio2-base:1.103 pgoyette-compat-merge-20190127:1.103 pgoyette-compat-20190127:1.103 pgoyette-compat-20190118:1.103 pgoyette-compat-1226:1.103 pgoyette-compat-1126:1.103 pgoyette-compat-1020:1.103 pgoyette-compat-0930:1.103 pgoyette-compat-0906:1.103 netbsd-7-2-RELEASE:1.99 pgoyette-compat-0728:1.103 netbsd-8-0-RELEASE:1.102.10.1 phil-wifi:1.103.0.4 phil-wifi-base:1.103 pgoyette-compat-0625:1.103 netbsd-8-0-RC2:1.102.10.1 pgoyette-compat-0521:1.103 pgoyette-compat-0502:1.103 pgoyette-compat-0422:1.103 netbsd-8-0-RC1:1.102.10.1 pgoyette-compat-0415:1.103 pgoyette-compat-0407:1.103 pgoyette-compat-0330:1.103 pgoyette-compat-0322:1.103 pgoyette-compat-0315:1.103 netbsd-7-1-2-RELEASE:1.99 pgoyette-compat:1.103.0.2 pgoyette-compat-base:1.103 netbsd-7-1-1-RELEASE:1.99 tls-maxphys-base-20171202:1.103 matt-nb8-mediatek:1.102.10.1.0.2 matt-nb8-mediatek-base:1.102.10.1 nick-nhusb-base-20170825:1.102 perseant-stdc-iso10646:1.102.0.12 perseant-stdc-iso10646-base:1.102 netbsd-8:1.102.0.10 netbsd-8-base:1.102 prg-localcount2-base3:1.102 prg-localcount2-base2:1.102 prg-localcount2-base1:1.102 prg-localcount2:1.102.0.8 prg-localcount2-base:1.102 pgoyette-localcount-20170426:1.102 bouyer-socketcan-base1:1.102 jdolecek-ncq:1.102.0.6 jdolecek-ncq-base:1.102 pgoyette-localcount-20170320:1.102 netbsd-7-1:1.99.0.22 netbsd-7-1-RELEASE:1.99 netbsd-7-1-RC2:1.99 nick-nhusb-base-20170204:1.102 netbsd-7-nhusb-base-20170116:1.99 bouyer-socketcan:1.102.0.4 bouyer-socketcan-base:1.102 pgoyette-localcount-20170107:1.102 netbsd-7-1-RC1:1.99 nick-nhusb-base-20161204:1.102 pgoyette-localcount-20161104:1.102 netbsd-7-0-2-RELEASE:1.99 nick-nhusb-base-20161004:1.102 localcount-20160914:1.102 netbsd-7-nhusb:1.99.0.20 netbsd-7-nhusb-base:1.99 pgoyette-localcount-20160806:1.102 pgoyette-localcount-20160726:1.102 pgoyette-localcount:1.102.0.2 pgoyette-localcount-base:1.102 nick-nhusb-base-20160907:1.102 nick-nhusb-base-20160529:1.102 netbsd-7-0-1-RELEASE:1.99 nick-nhusb-base-20160422:1.102 
nick-nhusb-base-20160319:1.102 nick-nhusb-base-20151226:1.102 netbsd-7-0:1.99.0.18 netbsd-7-0-RELEASE:1.99 nick-nhusb-base-20150921:1.100 netbsd-7-0-RC3:1.99 netbsd-7-0-RC2:1.99 netbsd-7-0-RC1:1.99 nick-nhusb-base-20150606:1.99 nick-nhusb-base-20150406:1.99 nick-nhusb:1.99.0.16 nick-nhusb-base:1.99 netbsd-5-2-3-RELEASE:1.90 netbsd-5-1-5-RELEASE:1.90 netbsd-6-0-6-RELEASE:1.97.8.1 netbsd-6-1-5-RELEASE:1.97.8.1 netbsd-7:1.99.0.14 netbsd-7-base:1.99 yamt-pagecache-base9:1.99 yamt-pagecache-tag8:1.97.2.9 netbsd-6-1-4-RELEASE:1.97.8.1 netbsd-6-0-5-RELEASE:1.97.8.1 tls-earlyentropy:1.99.0.12 tls-earlyentropy-base:1.99 riastradh-xf86-video-intel-2-7-1-pre-2-21-15:1.99 riastradh-drm2-base3:1.99 netbsd-6-1-3-RELEASE:1.97.8.1 netbsd-6-0-4-RELEASE:1.97.8.1 netbsd-5-2-2-RELEASE:1.90 netbsd-5-1-4-RELEASE:1.90 netbsd-6-1-2-RELEASE:1.97.8.1 netbsd-6-0-3-RELEASE:1.97.8.1 netbsd-5-2-1-RELEASE:1.90 netbsd-5-1-3-RELEASE:1.90 rmind-smpnet-nbase:1.99 netbsd-6-1-1-RELEASE:1.97.8.1 riastradh-drm2-base2:1.99 riastradh-drm2-base1:1.99 riastradh-drm2:1.99.0.10 riastradh-drm2-base:1.99 rmind-smpnet:1.99.0.4 rmind-smpnet-base:1.99 netbsd-6-1:1.97.8.1.0.6 netbsd-6-0-2-RELEASE:1.97.8.1 netbsd-6-1-RELEASE:1.97.8.1 khorben-n900:1.99.0.8 netbsd-6-1-RC4:1.97.8.1 netbsd-6-1-RC3:1.97.8.1 agc-symver:1.99.0.6 agc-symver-base:1.99 netbsd-6-1-RC2:1.97.8.1 netbsd-6-1-RC1:1.97.8.1 yamt-pagecache-base8:1.99 netbsd-5-2:1.90.0.34 netbsd-6-0-1-RELEASE:1.97.8.1 yamt-pagecache-base7:1.99 netbsd-5-2-RELEASE:1.90 netbsd-5-2-RC1:1.90 matt-nb6-plus-nbase:1.97.8.1 yamt-pagecache-base6:1.99 netbsd-6-0:1.97.8.1.0.4 netbsd-6-0-RELEASE:1.97.8.1 netbsd-6-0-RC2:1.97.8.1 tls-maxphys:1.99.0.2 tls-maxphys-base:1.99 matt-nb6-plus:1.97.8.1.0.2 matt-nb6-plus-base:1.97.8.1 netbsd-6-0-RC1:1.97.8.1 jmcneill-usbmp-base10:1.98 yamt-pagecache-base5:1.97 jmcneill-usbmp-base9:1.97 yamt-pagecache-base4:1.97 jmcneill-usbmp-base8:1.97 jmcneill-usbmp-base7:1.97 jmcneill-usbmp-base6:1.97 jmcneill-usbmp-base5:1.97 jmcneill-usbmp-base4:1.97 jmcneill-usbmp-base3:1.97 jmcneill-usbmp-pre-base2:1.97 jmcneill-usbmp-base2:1.97 netbsd-6:1.97.0.8 netbsd-6-base:1.97 netbsd-5-1-2-RELEASE:1.90 netbsd-5-1-1-RELEASE:1.90 jmcneill-usbmp:1.97.0.6 jmcneill-usbmp-base:1.97 jmcneill-audiomp3:1.97.0.4 jmcneill-audiomp3-base:1.97 yamt-pagecache-base3:1.97 yamt-pagecache-base2:1.97 yamt-pagecache:1.97.0.2 yamt-pagecache-base:1.97 rmind-uvmplock-nbase:1.95 cherry-xenmp:1.95.0.2 cherry-xenmp-base:1.95 uebayasi-xip-base7:1.94 bouyer-quota2-nbase:1.94 bouyer-quota2:1.93.0.8 bouyer-quota2-base:1.94 jruoho-x86intr:1.93.0.6 jruoho-x86intr-base:1.93 matt-mips64-premerge-20101231:1.93 matt-nb5-mips64-premerge-20101231:1.90 matt-nb5-pq3:1.90.0.32 matt-nb5-pq3-base:1.90 netbsd-5-1:1.90.0.30 uebayasi-xip-base6:1.93 uebayasi-xip-base5:1.93 netbsd-5-1-RELEASE:1.90 uebayasi-xip-base4:1.93 uebayasi-xip-base3:1.93 yamt-nfs-mp-base11:1.93 netbsd-5-1-RC4:1.90 matt-nb5-mips64-k15:1.90 uebayasi-xip-base2:1.93 yamt-nfs-mp-base10:1.93 netbsd-5-1-RC3:1.90 netbsd-5-1-RC2:1.90 uebayasi-xip-base1:1.93 netbsd-5-1-RC1:1.90 rmind-uvmplock:1.93.0.4 rmind-uvmplock-base:1.95 yamt-nfs-mp-base9:1.93 uebayasi-xip:1.93.0.2 uebayasi-xip-base:1.93 netbsd-5-0-2-RELEASE:1.90 matt-nb5-mips64-premerge-20091211:1.90 matt-premerge-20091211:1.92 yamt-nfs-mp-base8:1.92 matt-nb5-mips64-u2-k2-k4-k7-k8-k9:1.90 matt-nb4-mips64-k7-u2a-k9b:1.90 matt-nb5-mips64-u1-k1-k5:1.90 yamt-nfs-mp-base7:1.92 matt-nb5-mips64:1.90.0.28 netbsd-5-0-1-RELEASE:1.90 jymxensuspend-base:1.90 yamt-nfs-mp-base6:1.90 yamt-nfs-mp-base5:1.90 yamt-nfs-mp-base4:1.90 
jym-xensuspend-nbase:1.92 yamt-nfs-mp-base3:1.90 nick-hppapmap-base4:1.90 nick-hppapmap-base3:1.90 netbsd-5-0:1.90.0.26 netbsd-5-0-RELEASE:1.90 netbsd-5-0-RC4:1.90 netbsd-5-0-RC3:1.90 nick-hppapmap-base2:1.90 netbsd-5-0-RC2:1.90 jym-xensuspend:1.90.0.24 jym-xensuspend-base:1.90 netbsd-5-0-RC1:1.90 haad-dm-base2:1.90 haad-nbase2:1.90 ad-audiomp2:1.90.0.22 ad-audiomp2-base:1.90 netbsd-5:1.90.0.20 netbsd-5-base:1.90 nick-hppapmap:1.90.0.18 nick-hppapmap-base:1.90 matt-mips64-base2:1.90 matt-mips64:1.85.0.6 haad-dm-base1:1.90 wrstuden-revivesa-base-4:1.90 netbsd-4-0-1-RELEASE:1.77.2.1 wrstuden-revivesa-base-3:1.90 wrstuden-revivesa-base-2:1.90 wrstuden-fixsa-newbase:1.77.2.1 nick-csl-alignment-base5:1.85 haad-dm:1.90.0.16 haad-dm-base:1.90 wrstuden-revivesa-base-1:1.90 simonb-wapbl-nbase:1.90 yamt-pf42-base4:1.90 simonb-wapbl:1.90.0.14 simonb-wapbl-base:1.90 yamt-pf42-base3:1.90 hpcarm-cleanup-nbase:1.90 yamt-pf42-baseX:1.90 yamt-pf42-base2:1.90 yamt-nfs-mp-base2:1.90 wrstuden-revivesa:1.90.0.12 wrstuden-revivesa-base:1.90 yamt-nfs-mp:1.90.0.10 yamt-nfs-mp-base:1.90 yamt-pf42:1.90.0.8 yamt-pf42-base:1.90 ad-socklock-base1:1.90 yamt-lazymbuf-base15:1.90 yamt-lazymbuf-base14:1.90 keiichi-mipv6-nbase:1.90 mjf-devfs2:1.90.0.6 mjf-devfs2-base:1.90 nick-net80211-sync:1.90.0.4 nick-net80211-sync-base:1.90 keiichi-mipv6:1.90.0.2 keiichi-mipv6-base:1.90 bouyer-xeni386-merge1:1.89.6.1 matt-armv6-prevmlocking:1.85.2.1 wrstuden-fixsa-base-1:1.77.2.1 vmlocking2-base3:1.89 netbsd-4-0:1.77.2.1.0.6 netbsd-4-0-RELEASE:1.77.2.1 bouyer-xeni386-nbase:1.90 yamt-kmem-base3:1.89 cube-autoconf:1.89.0.8 cube-autoconf-base:1.89 yamt-kmem-base2:1.89 bouyer-xeni386:1.89.0.6 bouyer-xeni386-base:1.90 yamt-kmem:1.89.0.4 yamt-kmem-base:1.89 vmlocking2-base2:1.89 reinoud-bufcleanup-nbase:1.89 vmlocking2:1.89.0.2 vmlocking2-base1:1.89 netbsd-4-0-RC5:1.77.2.1 matt-nb4-arm:1.77.2.1.0.4 matt-nb4-arm-base:1.77.2.1 matt-armv6-nbase:1.90 jmcneill-base:1.87 netbsd-4-0-RC4:1.77.2.1 mjf-devfs:1.87.0.4 mjf-devfs-base:1.90 bouyer-xenamd64-base2:1.87 vmlocking-nbase:1.89 yamt-x86pmap-base4:1.87 bouyer-xenamd64:1.87.0.2 bouyer-xenamd64-base:1.87 netbsd-4-0-RC3:1.77.2.1 yamt-x86pmap-base3:1.87 yamt-x86pmap-base2:1.85 netbsd-4-0-RC2:1.77.2.1 yamt-x86pmap:1.85.0.4 yamt-x86pmap-base:1.85 netbsd-4-0-RC1:1.77.2.1 matt-armv6:1.85.0.2 matt-armv6-base:1.90 matt-mips64-base:1.85 jmcneill-pm:1.84.0.4 jmcneill-pm-base:1.89 hpcarm-cleanup:1.84.0.2 hpcarm-cleanup-base:1.90 nick-csl-alignment:1.83.0.2 nick-csl-alignment-base:1.83 netbsd-3-1-1-RELEASE:1.64 netbsd-3-0-3-RELEASE:1.64 yamt-idlelwp-base8:1.81 wrstuden-fixsa:1.77.2.1.0.2 wrstuden-fixsa-base:1.77.2.1 thorpej-atomic:1.81.0.8 thorpej-atomic-base:1.81 reinoud-bufcleanup:1.81.0.6 reinoud-bufcleanup-base:1.89 mjf-ufs-trans:1.81.0.4 mjf-ufs-trans-base:1.83 vmlocking:1.81.0.2 vmlocking-base:1.87 ad-audiomp:1.80.0.2 ad-audiomp-base:1.80 yamt-idlelwp:1.78.0.2 post-newlock2-merge:1.78 newlock2-nbase:1.78 yamt-splraiseipl-base5:1.78 yamt-splraiseipl-base4:1.78 yamt-splraiseipl-base3:1.78 abandoned-netbsd-4-base:1.72 abandoned-netbsd-4:1.72.0.2 netbsd-3-1:1.64.0.12 netbsd-3-1-RELEASE:1.64 netbsd-3-0-2-RELEASE:1.64 yamt-splraiseipl-base2:1.76 netbsd-3-1-RC4:1.64 yamt-splraiseipl:1.73.0.2 yamt-splraiseipl-base:1.73 netbsd-3-1-RC3:1.64 yamt-pdpolicy-base9:1.72 newlock2:1.72.0.4 newlock2-base:1.78 yamt-pdpolicy-base8:1.72 netbsd-3-1-RC2:1.64 netbsd-3-1-RC1:1.64 yamt-pdpolicy-base7:1.72 netbsd-4:1.77.0.2 netbsd-4-base:1.77 yamt-pdpolicy-base6:1.70 chap-midi-nbase:1.70 netbsd-3-0-1-RELEASE:1.64 
gdamore-uart:1.70.0.4 gdamore-uart-base:1.70 simonb-timcounters-final:1.69.6.1 yamt-pdpolicy-base5:1.70 chap-midi:1.70.0.2 chap-midi-base:1.70 yamt-pdpolicy-base4:1.69 yamt-pdpolicy-base3:1.69 peter-altq-base:1.69 peter-altq:1.69.0.12 yamt-pdpolicy-base2:1.69 elad-kernelauth-base:1.69 elad-kernelauth:1.69.0.10 yamt-pdpolicy:1.69.0.8 yamt-pdpolicy-base:1.69 yamt-uio_vmspace-base5:1.69 simonb-timecounters:1.69.0.6 simonb-timecounters-base:1.70 rpaulo-netinet-merge-pcb:1.69.0.4 rpaulo-netinet-merge-pcb-base:1.72 yamt-uio_vmspace:1.69.0.2 netbsd-3-0:1.64.0.10 netbsd-3-0-RELEASE:1.64 netbsd-3-0-RC6:1.64 yamt-readahead-base3:1.67 netbsd-3-0-RC5:1.64 netbsd-3-0-RC4:1.64 netbsd-3-0-RC3:1.64 yamt-readahead-base2:1.66 netbsd-3-0-RC2:1.64 yamt-readahead-pervnode:1.66 yamt-readahead-perfile:1.66 yamt-readahead:1.66.0.8 yamt-readahead-base:1.66 netbsd-3-0-RC1:1.64 yamt-vop-base3:1.66 netbsd-2-0-3-RELEASE:1.63 netbsd-2-1:1.63.0.8 yamt-vop-base2:1.66 thorpej-vnode-attr:1.66.0.6 thorpej-vnode-attr-base:1.66 netbsd-2-1-RELEASE:1.63 yamt-vop:1.66.0.4 yamt-vop-base:1.66 netbsd-2-1-RC6:1.63 netbsd-2-1-RC5:1.63 netbsd-2-1-RC4:1.63 netbsd-2-1-RC3:1.63 netbsd-2-1-RC2:1.63 netbsd-2-1-RC1:1.63 yamt-lazymbuf:1.66.0.2 yamt-km-base4:1.64 netbsd-2-0-2-RELEASE:1.63 yamt-km-base3:1.64 netbsd-3:1.64.0.8 netbsd-3-base:1.64 yamt-km-base2:1.64 yamt-km:1.64.0.4 yamt-km-base:1.64 kent-audio2:1.64.0.2 kent-audio2-base:1.64 netbsd-2-0-1-RELEASE:1.63 kent-audio1-beforemerge:1.64 netbsd-2:1.63.0.6 netbsd-2-base:1.63 kent-audio1:1.63.0.4 kent-audio1-base:1.63 netbsd-2-0-RELEASE:1.63 netbsd-2-0-RC5:1.63 netbsd-2-0-RC4:1.63 netbsd-2-0-RC3:1.63 netbsd-2-0-RC2:1.63 netbsd-2-0-RC1:1.63 netbsd-2-0:1.63.0.2 netbsd-2-0-base:1.63 netbsd-1-6-PATCH002-RELEASE:1.58 netbsd-1-6-PATCH002:1.58 netbsd-1-6-PATCH002-RC4:1.58 netbsd-1-6-PATCH002-RC3:1.58 netbsd-1-6-PATCH002-RC2:1.58 netbsd-1-6-PATCH002-RC1:1.58 ktrace-lwp:1.62.0.2 ktrace-lwp-base:1.68 netbsd-1-6-PATCH001:1.58 netbsd-1-6-PATCH001-RELEASE:1.58 netbsd-1-6-PATCH001-RC3:1.58 netbsd-1-6-PATCH001-RC2:1.58 netbsd-1-6-PATCH001-RC1:1.58 nathanw_sa_end:1.46.2.13 nathanw_sa_before_merge:1.59 fvdl_fs64_base:1.59 gmcgarry_ctxsw:1.59.0.4 gmcgarry_ctxsw_base:1.59 gmcgarry_ucred:1.59.0.2 gmcgarry_ucred_base:1.59 nathanw_sa_base:1.59 kqueue-aftermerge:1.59 kqueue-beforemerge:1.59 netbsd-1-6-RELEASE:1.58 netbsd-1-6-RC3:1.58 netbsd-1-6-RC2:1.58 netbsd-1-6-RC1:1.58 netbsd-1-6:1.58.0.2 netbsd-1-6-base:1.58 gehenna-devsw:1.57.0.8 gehenna-devsw-base:1.58 netbsd-1-5-PATCH003:1.33.4.1 eeh-devprop:1.57.0.6 eeh-devprop-base:1.57 newlock:1.57.0.4 newlock-base:1.57 ifpoll-base:1.57 thorpej-mips-cache:1.54.0.2 thorpej-mips-cache-base:1.55 thorpej-devvp-base3:1.54 thorpej-devvp-base2:1.54 post-chs-ubcperf:1.52 pre-chs-ubcperf:1.51 thorpej-devvp:1.51.0.2 thorpej-devvp-base:1.51 netbsd-1-5-PATCH002:1.33 kqueue:1.50.0.2 kqueue-base:1.59 netbsd-1-5-PATCH001:1.33 thorpej_scsipi_beforemerge:1.48 nathanw_sa:1.46.0.2 thorpej_scsipi_nbase:1.48 netbsd-1-5-RELEASE:1.33 netbsd-1-5-BETA2:1.33 netbsd-1-5-BETA:1.33 netbsd-1-4-PATCH003:1.22.2.2 netbsd-1-5-ALPHA2:1.33 netbsd-1-5:1.33.0.4 netbsd-1-5-base:1.33 minoura-xpg4dl-base:1.33 minoura-xpg4dl:1.33.0.2 netbsd-1-4-PATCH002:1.22.2.2 chs-ubc2-newbase:1.28 wrstuden-devbsize-19991221:1.27 wrstuden-devbsize:1.26.0.6 wrstuden-devbsize-base:1.27 kame_141_19991130:1.22.2.1 comdex-fall-1999:1.27.0.2 comdex-fall-1999-base:1.27 fvdl-softdep:1.26.0.4 fvdl-softdep-base:1.27 thorpej_scsipi:1.26.0.2 thorpej_scsipi_base:1.48 netbsd-1-4-PATCH001:1.22.2.1 kame_14_19990705:1.22.2.1 
kame_14_19990628:1.22.2.1 kame:1.22.2.1.0.4 chs-ubc2:1.22.2.1.0.2 chs-ubc2-base:1.25 netbsd-1-4-RELEASE:1.22.2.1 netbsd-1-4:1.22.0.2 netbsd-1-4-base:1.22 kenh-if-detach:1.17.0.4 kenh-if-detach-base:1.17 chs-ubc:1.17.0.2 chs-ubc-base:1.17 eeh-paddr_t:1.13.0.2 eeh-paddr_t-base:1.13 uvm980205:1.1.1.1 CDC:1.1.1; locks; strict; comment @ * @; 1.120 date 2023.04.09.12.37.12; author riastradh; state Exp; branches; next 1.119; commitid TvynUv8sFQ9UNqkE; 1.119 date 2023.04.09.09.00.56; author riastradh; state Exp; branches; next 1.118; commitid BDFG6GqMwsZHBpkE; 1.118 date 2021.03.13.15.29.55; author skrll; state Exp; branches; next 1.117; commitid O4zBGCByT4oA5aLC; 1.117 date 2020.08.16.00.24.41; author chs; state Exp; branches 1.117.2.1; next 1.116; commitid Zzhl9pVBhaoJNdkC; 1.116 date 2020.08.14.09.06.15; author chs; state Exp; branches; next 1.115; commitid LOTESSEHlNtCK0kC; 1.115 date 2020.07.09.05.57.15; author skrll; state Exp; branches; next 1.114; commitid YL8rLYSFWcHhRmfC; 1.114 date 2020.05.25.21.15.10; author ad; state Exp; branches; next 1.113; commitid CVpyKL9QSDmTnF9C; 1.113 date 2020.05.19.22.22.15; author ad; state Exp; branches; next 1.112; commitid WQMv6F9qwLT9WT8C; 1.112 date 2020.05.19.21.45.57; author ad; state Exp; branches; next 1.111; commitid DStDfGAMTJiAKT8C; 1.111 date 2020.03.22.18.32.42; author ad; state Exp; branches; next 1.110; commitid GrnnOJcv6kiTxq1C; 1.110 date 2020.03.14.20.45.23; author ad; state Exp; branches; next 1.109; commitid bF939dNKuAUmxp0C; 1.109 date 2020.03.14.20.23.51; author ad; state Exp; branches; next 1.108; commitid GCq6DlGFYKYYpp0C; 1.108 date 2020.03.03.13.32.44; author rjs; state Exp; branches; next 1.107; commitid VwsaJDu8cLvouXYB; 1.107 date 2020.02.27.22.12.54; author ad; state Exp; branches; next 1.106; commitid VzTPAxKUd8O4xmYB; 1.106 date 2020.02.23.15.46.43; author ad; state Exp; branches; next 1.105; commitid DJJO1ciCDgZlwOXB; 1.105 date 2020.01.15.17.55.45; author ad; state Exp; branches; next 1.104; commitid 3y5oCFDwuvhmuOSB; 1.104 date 2019.12.21.14.41.44; author ad; state Exp; branches 1.104.2.1; next 1.103; commitid ygHsIzNcdnHCdAPB; 1.103 date 2017.10.28.00.37.13; author pgoyette; state Exp; branches 1.103.4.1; next 1.102; commitid lLEUvabQ9cCLKKcA; 1.102 date 2015.12.06.09.38.54; author wiz; state Exp; branches 1.102.10.1; next 1.101; 1.101 date 2015.12.06.08.53.22; author mlelstv; state Exp; branches; next 1.100; 1.100 date 2015.08.24.22.50.32; author pooka; state Exp; branches; next 1.99; 1.99 date 2012.07.30.23.56.48; author matt; state Exp; branches 1.99.2.1 1.99.16.1; next 1.98; 1.98 date 2012.06.01.14.52.48; author martin; state Exp; branches; next 1.97; 1.97 date 2011.09.06.16.41.55; author matt; state Exp; branches 1.97.2.1 1.97.6.1 1.97.8.1; next 1.96; 1.96 date 2011.06.12.03.36.04; author rmind; state Exp; branches; next 1.95; 1.95 date 2011.04.23.18.14.13; author rmind; state Exp; branches 1.95.2.1; next 1.94; 1.94 date 2011.02.02.20.07.25; author chuck; state Exp; branches; next 1.93; 1.93 date 2010.01.08.11.35.12; author pooka; state Exp; branches 1.93.2.1 1.93.4.1 1.93.6.1 1.93.8.1; next 1.92; 1.92 date 2009.08.04.23.31.57; author pooka; state Exp; branches; next 1.91; 1.91 date 2009.08.04.23.03.01; author pooka; state Exp; branches; next 1.90; 1.90 date 2008.01.02.11.49.21; author ad; state Exp; branches 1.90.10.1 1.90.28.1; next 1.89; 1.89 date 2007.12.01.10.40.28; author yamt; state Exp; branches 1.89.2.1 1.89.6.1; next 1.88; 1.88 date 2007.12.01.10.18.21; author yamt; state Exp; branches; next 1.87; 1.87 
date 2007.10.11.19.53.43; author ad; state Exp; branches 1.87.4.1; next 1.86; 1.86 date 2007.10.10.20.42.41; author ad; state Exp; branches; next 1.85; 1.85 date 2007.08.04.09.42.58; author pooka; state Exp; branches 1.85.2.1 1.85.4.1 1.85.6.1; next 1.84; 1.84 date 2007.07.22.19.16.06; author pooka; state Exp; branches 1.84.4.1; next 1.83; 1.83 date 2007.07.09.21.11.37; author ad; state Exp; branches 1.83.2.1; next 1.82; 1.82 date 2007.06.05.12.31.36; author yamt; state Exp; branches; next 1.81; 1.81 date 2007.03.04.06.03.49; author christos; state Exp; branches 1.81.2.1 1.81.4.1; next 1.80; 1.80 date 2007.02.22.06.05.02; author thorpej; state Exp; branches; next 1.79; 1.79 date 2007.02.21.23.00.15; author thorpej; state Exp; branches; next 1.78; 1.78 date 2006.12.09.16.11.52; author chs; state Exp; branches 1.78.2.1; next 1.77; 1.77 date 2006.11.01.10.18.27; author yamt; state Exp; branches 1.77.2.1; next 1.76; 1.76 date 2006.10.14.09.20.35; author yamt; state Exp; branches; next 1.75; 1.75 date 2006.10.12.10.14.20; author yamt; state Exp; branches; next 1.74; 1.74 date 2006.10.12.01.32.54; author christos; state Exp; branches; next 1.73; 1.73 date 2006.09.15.15.51.13; author yamt; state Exp; branches 1.73.2.1; next 1.72; 1.72 date 2006.07.22.08.47.56; author yamt; state Exp; branches 1.72.4.1; next 1.71; 1.71 date 2006.07.21.16.48.54; author ad; state Exp; branches; next 1.70; 1.70 date 2006.05.14.21.38.18; author elad; state Exp; branches; next 1.69; 1.69 date 2005.12.11.12.25.29; author christos; state Exp; branches 1.69.4.1 1.69.6.1 1.69.8.1 1.69.10.1 1.69.12.1; next 1.68; 1.68 date 2005.11.29.22.52.03; author yamt; state Exp; branches; next 1.67; 1.67 date 2005.11.29.15.45.28; author yamt; state Exp; branches; next 1.66; 1.66 date 2005.06.27.02.29.32; author thorpej; state Exp; branches 1.66.2.1 1.66.8.1; next 1.65; 1.65 date 2005.06.27.02.19.48; author thorpej; state Exp; branches; next 1.64; 1.64 date 2005.01.09.16.42.44; author chs; state Exp; branches; next 1.63; 1.63 date 2004.03.24.07.55.01; author junyoung; state Exp; branches; next 1.62; 1.62 date 2003.06.29.22.32.52; author fvdl; state Exp; branches 1.62.2.1; next 1.61; 1.61 date 2003.06.28.14.22.30; author darrenr; state Exp; branches; next 1.60; 1.60 date 2003.04.22.14.28.16; author yamt; state Exp; branches; next 1.59; 1.59 date 2002.09.06.13.18.43; author gehenna; state Exp; branches; next 1.58; 1.58 date 2002.05.17.22.00.50; author enami; state Exp; branches; next 1.57; 1.57 date 2001.12.31.07.00.15; author chs; state Exp; branches 1.57.8.1; next 1.56; 1.56 date 2001.12.09.03.07.44; author chs; state Exp; branches; next 1.55; 1.55 date 2001.11.10.07.37.01; author lukem; state Exp; branches; next 1.54; 1.54 date 2001.09.26.07.23.51; author chs; state Exp; branches 1.54.2.1; next 1.53; 1.53 date 2001.09.22.22.33.16; author sommerfeld; state Exp; branches; next 1.52; 1.52 date 2001.09.15.20.36.47; author chs; state Exp; branches; next 1.51; 1.51 date 2001.08.17.05.53.02; author chs; state Exp; branches 1.51.2.1; next 1.50; 1.50 date 2001.05.26.21.27.21; author chs; state Exp; branches 1.50.2.1; next 1.49; 1.49 date 2001.05.25.04.06.18; author chs; state Exp; branches; next 1.48; 1.48 date 2001.03.10.22.46.51; author chs; state Exp; branches; next 1.47; 1.47 date 2001.03.09.01.02.13; author chs; state Exp; branches; next 1.46; 1.46 date 2001.02.22.01.02.09; author enami; state Exp; branches 1.46.2.1; next 1.45; 1.45 date 2001.02.18.19.40.25; author chs; state Exp; branches; next 1.44; 1.44 date 2001.02.08.06.43.05; author 
chs; state Exp; branches; next 1.43; 1.43 date 2001.02.06.10.53.23; author chs; state Exp; branches; next 1.42; 1.42 date 2001.01.28.23.30.47; author thorpej; state Exp; branches; next 1.41; 1.41 date 2001.01.08.06.21.13; author chs; state Exp; branches; next 1.40; 1.40 date 2000.12.16.06.17.09; author chs; state Exp; branches; next 1.39; 1.39 date 2000.12.06.03.37.30; author chs; state Exp; branches; next 1.38; 1.38 date 2000.11.30.11.04.44; author simonb; state Exp; branches; next 1.37; 1.37 date 2000.11.27.08.40.06; author chs; state Exp; branches; next 1.36; 1.36 date 2000.11.24.20.34.01; author chs; state Exp; branches; next 1.35; 1.35 date 2000.06.27.17.29.37; author mrg; state Exp; branches; next 1.34; 1.34 date 2000.06.26.14.21.19; author mrg; state Exp; branches; next 1.33; 1.33 date 2000.05.19.03.45.05; author thorpej; state Exp; branches 1.33.4.1; next 1.32; 1.32 date 2000.04.03.07.35.24; author chs; state Exp; branches; next 1.31; 1.31 date 2000.03.27.16.58.23; author kleink; state Exp; branches; next 1.30; 1.30 date 2000.03.26.20.54.47; author kleink; state Exp; branches; next 1.29; 1.29 date 2000.03.13.23.52.42; author soren; state Exp; branches; next 1.28; 1.28 date 2000.01.28.08.02.48; author chs; state Exp; branches; next 1.27; 1.27 date 99.10.19.16.04.45; author chs; state Exp; branches; next 1.26; 1.26 date 99.09.12.01.17.42; author chs; state Exp; branches 1.26.2.1 1.26.4.1 1.26.6.1; next 1.25; 1.25 date 99.07.22.22.58.39; author thorpej; state Exp; branches; next 1.24; 1.24 date 99.07.22.21.27.32; author thorpej; state Exp; branches; next 1.23; 1.23 date 99.04.11.04.04.11; author chs; state Exp; branches; next 1.22; 1.22 date 99.03.25.18.48.56; author mrg; state Exp; branches 1.22.2.1; next 1.21; 1.21 date 99.03.25.00.20.35; author sommerfe; state Exp; branches; next 1.20; 1.20 date 99.03.24.03.45.28; author cgd; state Exp; branches; next 1.19; 1.19 date 99.03.04.06.48.54; author chs; state Exp; branches; next 1.18; 1.18 date 99.01.29.12.56.17; author bouyer; state Exp; branches; next 1.17; 1.17 date 98.11.04.06.21.40; author chs; state Exp; branches 1.17.2.1; next 1.16; 1.16 date 98.10.18.23.50.01; author chs; state Exp; branches; next 1.15; 1.15 date 98.08.13.02.11.04; author eeh; state Exp; branches; next 1.14; 1.14 date 98.08.09.22.36.39; author perry; state Exp; branches; next 1.13; 1.13 date 98.07.07.23.22.13; author thorpej; state Exp; branches 1.13.2.1; next 1.12; 1.12 date 98.06.24.20.58.49; author sommerfe; state Exp; branches; next 1.11; 1.11 date 98.06.22.22.01.12; author sommerfe; state Exp; branches; next 1.10; 1.10 date 98.05.05.20.51.07; author kleink; state Exp; branches; next 1.9; 1.9 date 98.03.11.01.37.40; author chuck; state Exp; branches; next 1.8; 1.8 date 98.03.09.00.58.59; author mrg; state Exp; branches; next 1.7; 1.7 date 98.03.01.02.25.29; author fvdl; state Exp; branches; next 1.6; 1.6 date 98.02.19.00.55.04; author thorpej; state Exp; branches; next 1.5; 1.5 date 98.02.18.06.35.46; author mrg; state Exp; branches; next 1.4; 1.4 date 98.02.10.14.12.33; author mrg; state Exp; branches; next 1.3; 1.3 date 98.02.07.11.09.57; author mrg; state Exp; branches; next 1.2; 1.2 date 98.02.06.22.32.35; author thorpej; state Exp; branches; next 1.1; 1.1 date 98.02.05.06.25.08; author mrg; state Exp; branches 1.1.1.1; next ; 1.117.2.1 date 2021.04.03.22.29.03; author thorpej; state Exp; branches; next ; commitid 1gqS07EfPjskJTNC; 1.104.2.1 date 2020.01.17.21.47.38; author ad; state Exp; branches; next 1.104.2.2; commitid T9pwLWote7xbI5TB; 1.104.2.2 date 
2020.02.29.20.21.12; author ad; state Exp; branches; next ; commitid OjSb8ro7YQETQBYB; 1.103.4.1 date 2020.04.08.14.09.05; author martin; state Exp; branches; next ; commitid Qli2aW9E74UFuA3C; 1.102.10.1 date 2017.11.02.21.29.53; author snj; state Exp; branches; next ; commitid fkeTzdk6u06twvdA; 1.99.2.1 date 2017.12.03.11.39.22; author jdolecek; state Exp; branches; next ; commitid XcIYRZTAh1LmerhA; 1.99.16.1 date 2015.09.22.12.06.17; author skrll; state Exp; branches; next 1.99.16.2; 1.99.16.2 date 2015.12.27.12.10.19; author skrll; state Exp; branches; next ; 1.97.2.1 date 2011.11.02.21.54.01; author yamt; state Exp; branches; next 1.97.2.2; 1.97.2.2 date 2011.11.26.15.19.06; author yamt; state Exp; branches; next 1.97.2.3; 1.97.2.3 date 2011.12.20.13.46.17; author yamt; state Exp; branches; next 1.97.2.4; 1.97.2.4 date 2012.01.18.02.09.06; author yamt; state Exp; branches; next 1.97.2.5; 1.97.2.5 date 2012.02.17.08.18.57; author yamt; state Exp; branches; next 1.97.2.6; 1.97.2.6 date 2012.08.01.21.12.23; author yamt; state Exp; branches; next 1.97.2.7; 1.97.2.7 date 2012.08.01.22.34.14; author yamt; state Exp; branches; next 1.97.2.8; 1.97.2.8 date 2012.10.30.17.23.03; author yamt; state Exp; branches; next 1.97.2.9; 1.97.2.9 date 2012.11.02.08.26.33; author yamt; state Exp; branches; next ; 1.97.6.1 date 2012.06.02.11.09.42; author mrg; state Exp; branches; next ; 1.97.8.1 date 2012.06.11.21.25.02; author riz; state Exp; branches; next ; 1.95.2.1 date 2011.06.23.14.20.37; author cherry; state Exp; branches; next ; 1.93.2.1 date 2010.02.11.06.26.47; author uebayasi; state Exp; branches; next 1.93.2.2; 1.93.2.2 date 2010.08.25.14.21.23; author uebayasi; state Exp; branches; next 1.93.2.3; 1.93.2.3 date 2010.11.16.07.44.25; author uebayasi; state Exp; branches; next 1.93.2.4; 1.93.2.4 date 2010.11.18.16.16.36; author uebayasi; state Exp; branches; next 1.93.2.5; 1.93.2.5 date 2010.11.20.08.00.53; author uebayasi; state Exp; branches; next 1.93.2.6; 1.93.2.6 date 2010.11.21.12.02.06; author uebayasi; state Exp; branches; next ; 1.93.4.1 date 2010.03.16.15.38.19; author rmind; state Exp; branches; next 1.93.4.2; 1.93.4.2 date 2010.03.17.06.03.19; author rmind; state Exp; branches; next 1.93.4.3; 1.93.4.3 date 2011.03.05.20.56.38; author rmind; state Exp; branches; next 1.93.4.4; 1.93.4.4 date 2011.05.31.03.05.15; author rmind; state Exp; branches; next ; 1.93.6.1 date 2011.06.06.09.10.24; author jruoho; state Exp; branches; next ; 1.93.8.1 date 2011.02.08.16.20.08; author bouyer; state Exp; branches; next ; 1.90.10.1 date 2009.08.19.18.48.36; author yamt; state Exp; branches; next 1.90.10.2; 1.90.10.2 date 2010.03.11.15.04.48; author yamt; state Exp; branches; next ; 1.90.28.1 date 2011.05.25.23.58.50; author matt; state Exp; branches; next 1.90.28.2; 1.90.28.2 date 2011.06.03.02.43.42; author matt; state Exp; branches; next 1.90.28.3; 1.90.28.3 date 2011.06.03.07.52.48; author matt; state Exp; branches; next 1.90.28.4; 1.90.28.4 date 2012.02.29.18.03.40; author matt; state Exp; branches; next ; 1.89.2.1 date 2007.12.04.13.04.07; author ad; state Exp; branches; next 1.89.2.2; 1.89.2.2 date 2007.12.18.15.24.32; author ad; state Exp; branches; next ; 1.89.6.1 date 2008.01.02.21.58.47; author bouyer; state Exp; branches; next ; 1.87.4.1 date 2007.12.08.18.21.47; author mjf; state Exp; branches; next 1.87.4.2; 1.87.4.2 date 2008.02.18.21.07.33; author mjf; state Exp; branches; next ; 1.85.2.1 date 2007.11.06.23.35.33; author matt; state Exp; branches; next 1.85.2.2; 1.85.2.2 date 
2008.01.09.01.58.45; author matt; state Exp; branches; next ; 1.85.4.1 date 2007.10.14.11.49.26; author yamt; state Exp; branches; next ; 1.85.6.1 date 2007.08.04.09.42.58; author pooka; state dead; branches; next 1.85.6.2; 1.85.6.2 date 2007.08.04.09.42.59; author pooka; state Exp; branches; next ; 1.84.4.1 date 2007.08.04.12.33.17; author jmcneill; state Exp; branches; next 1.84.4.2; 1.84.4.2 date 2007.10.26.15.49.44; author joerg; state Exp; branches; next 1.84.4.3; 1.84.4.3 date 2007.12.03.16.15.26; author joerg; state Exp; branches; next ; 1.83.2.1 date 2007.08.15.13.51.24; author skrll; state Exp; branches; next ; 1.81.2.1 date 2007.03.13.17.51.58; author ad; state Exp; branches; next 1.81.2.2; 1.81.2.2 date 2007.03.21.20.09.39; author ad; state Exp; branches; next 1.81.2.3; 1.81.2.3 date 2007.04.13.15.49.51; author ad; state Exp; branches; next 1.81.2.4; 1.81.2.4 date 2007.04.13.20.56.19; author ad; state Exp; branches; next 1.81.2.5; 1.81.2.5 date 2007.06.09.23.58.21; author ad; state Exp; branches; next 1.81.2.6; 1.81.2.6 date 2007.06.17.21.32.23; author ad; state Exp; branches; next 1.81.2.7; 1.81.2.7 date 2007.07.15.13.28.22; author ad; state Exp; branches; next 1.81.2.8; 1.81.2.8 date 2007.07.15.15.53.08; author ad; state Exp; branches; next 1.81.2.9; 1.81.2.9 date 2007.08.20.21.28.34; author ad; state Exp; branches; next 1.81.2.10; 1.81.2.10 date 2007.10.09.15.22.28; author ad; state Exp; branches; next ; 1.81.4.1 date 2007.07.11.20.12.58; author mjf; state Exp; branches; next ; 1.78.2.1 date 2007.02.27.16.55.30; author yamt; state Exp; branches; next 1.78.2.2; 1.78.2.2 date 2007.03.12.06.01.13; author rmind; state Exp; branches; next ; 1.77.2.1 date 2007.02.17.23.27.53; author tron; state Exp; branches; next ; 1.73.2.1 date 2006.10.22.06.07.54; author yamt; state Exp; branches; next 1.73.2.2; 1.73.2.2 date 2006.12.10.07.19.34; author yamt; state Exp; branches; next ; 1.72.4.1 date 2006.11.18.21.39.50; author ad; state Exp; branches; next 1.72.4.2; 1.72.4.2 date 2007.01.12.01.04.25; author ad; state Exp; branches; next ; 1.69.4.1 date 2006.09.09.03.00.13; author rpaulo; state Exp; branches; next ; 1.69.6.1 date 2006.06.01.22.39.45; author kardel; state Exp; branches; next ; 1.69.8.1 date 2006.03.12.09.38.56; author yamt; state Exp; branches; next 1.69.8.2; 1.69.8.2 date 2006.05.24.10.59.30; author yamt; state Exp; branches; next 1.69.8.3; 1.69.8.3 date 2006.08.11.15.47.46; author yamt; state Exp; branches; next ; 1.69.10.1 date 2006.03.08.00.31.57; author elad; state Exp; branches; next ; 1.69.12.1 date 2006.05.24.15.50.49; author tron; state Exp; branches; next ; 1.66.2.1 date 2006.06.21.15.12.40; author yamt; state Exp; branches; next 1.66.2.2; 1.66.2.2 date 2006.12.30.20.51.06; author yamt; state Exp; branches; next 1.66.2.3; 1.66.2.3 date 2007.02.26.09.12.33; author yamt; state Exp; branches; next 1.66.2.4; 1.66.2.4 date 2007.09.03.14.47.13; author yamt; state Exp; branches; next 1.66.2.5; 1.66.2.5 date 2007.10.27.11.36.56; author yamt; state Exp; branches; next 1.66.2.6; 1.66.2.6 date 2007.12.07.17.35.29; author yamt; state Exp; branches; next 1.66.2.7; 1.66.2.7 date 2008.01.21.09.48.26; author yamt; state Exp; branches; next ; 1.66.8.1 date 2005.11.19.17.37.00; author yamt; state Exp; branches; next 1.66.8.2; 1.66.8.2 date 2005.11.29.21.23.34; author yamt; state Exp; branches; next ; 1.62.2.1 date 2003.07.02.15.27.30; author darrenr; state Exp; branches; next 1.62.2.2; 1.62.2.2 date 2004.08.03.10.57.09; author skrll; state Exp; branches; next 1.62.2.3; 1.62.2.3 date 
2004.09.18.14.57.12; author skrll; state Exp; branches; next 1.62.2.4; 1.62.2.4 date 2004.09.21.13.39.31; author skrll; state Exp; branches; next 1.62.2.5; 1.62.2.5 date 2005.01.17.19.33.11; author skrll; state Exp; branches; next 1.62.2.6; 1.62.2.6 date 2005.11.10.14.12.40; author skrll; state Exp; branches; next 1.62.2.7; 1.62.2.7 date 2005.12.11.10.29.42; author christos; state Exp; branches; next ; 1.57.8.1 date 2002.05.16.03.45.49; author gehenna; state Exp; branches; next 1.57.8.2; 1.57.8.2 date 2002.05.30.13.52.45; author gehenna; state Exp; branches; next ; 1.54.2.1 date 2001.11.12.21.19.58; author thorpej; state Exp; branches; next ; 1.51.2.1 date 2001.09.07.04.45.46; author thorpej; state Exp; branches; next 1.51.2.2; 1.51.2.2 date 2001.10.01.12.48.48; author fvdl; state Exp; branches; next ; 1.50.2.1 date 2001.08.25.06.17.23; author thorpej; state Exp; branches; next 1.50.2.2; 1.50.2.2 date 2002.01.10.20.05.49; author thorpej; state Exp; branches; next 1.50.2.3; 1.50.2.3 date 2002.06.23.17.52.20; author jdolecek; state Exp; branches; next 1.50.2.4; 1.50.2.4 date 2002.10.10.18.45.11; author jdolecek; state Exp; branches; next ; 1.46.2.1 date 2001.03.05.22.50.12; author nathanw; state Exp; branches; next 1.46.2.2; 1.46.2.2 date 2001.04.09.01.59.24; author nathanw; state Exp; branches; next 1.46.2.3; 1.46.2.3 date 2001.06.21.20.10.51; author nathanw; state Exp; branches; next 1.46.2.4; 1.46.2.4 date 2001.08.24.00.13.45; author nathanw; state Exp; branches; next 1.46.2.5; 1.46.2.5 date 2001.09.21.22.37.18; author nathanw; state Exp; branches; next 1.46.2.6; 1.46.2.6 date 2001.09.26.19.55.16; author nathanw; state Exp; branches; next 1.46.2.7; 1.46.2.7 date 2001.11.14.19.19.10; author nathanw; state Exp; branches; next 1.46.2.8; 1.46.2.8 date 2002.01.08.00.35.08; author nathanw; state Exp; branches; next 1.46.2.9; 1.46.2.9 date 2002.06.20.03.50.47; author nathanw; state Exp; branches; next 1.46.2.10; 1.46.2.10 date 2002.06.24.22.13.01; author nathanw; state Exp; branches; next 1.46.2.11; 1.46.2.11 date 2002.07.12.01.40.46; author nathanw; state Exp; branches; next 1.46.2.12; 1.46.2.12 date 2002.07.16.14.07.43; author nathanw; state Exp; branches; next 1.46.2.13; 1.46.2.13 date 2002.09.17.21.24.12; author nathanw; state Exp; branches; next ; 1.33.4.1 date 2001.11.15.23.09.22; author he; state Exp; branches; next ; 1.26.2.1 date 99.10.20.22.57.34; author thorpej; state Exp; branches; next 1.26.2.2; 1.26.2.2 date 2000.11.20.18.12.08; author bouyer; state Exp; branches; next 1.26.2.3; 1.26.2.3 date 2000.12.08.09.21.06; author bouyer; state Exp; branches; next 1.26.2.4; 1.26.2.4 date 2001.01.05.17.37.04; author bouyer; state Exp; branches; next 1.26.2.5; 1.26.2.5 date 2001.01.18.09.24.07; author bouyer; state Exp; branches; next 1.26.2.6; 1.26.2.6 date 2001.02.11.19.17.51; author bouyer; state Exp; branches; next 1.26.2.7; 1.26.2.7 date 2001.03.12.13.32.15; author bouyer; state Exp; branches; next 1.26.2.8; 1.26.2.8 date 2001.03.23.11.38.28; author bouyer; state Exp; branches; next ; 1.26.4.1 date 99.11.15.00.42.31; author fvdl; state Exp; branches; next ; 1.26.6.1 date 99.12.27.18.36.44; author wrstuden; state Exp; branches; next ; 1.22.2.1 date 99.04.16.16.29.56; author chs; state Exp; branches 1.22.2.1.2.1; next 1.22.2.2; 1.22.2.2 date 2000.01.31.20.36.12; author he; state Exp; branches; next ; 1.22.2.1.2.1 date 99.06.07.04.25.38; author chs; state Exp; branches; next 1.22.2.1.2.2; 1.22.2.1.2.2 date 99.07.04.02.08.14; author chs; state Exp; branches; next 1.22.2.1.2.3; 1.22.2.1.2.3 
date 99.07.11.05.47.13; author chs; state Exp; branches; next 1.22.2.1.2.4; 1.22.2.1.2.4 date 99.07.31.19.04.49; author chs; state Exp; branches; next 1.22.2.1.2.5; 1.22.2.1.2.5 date 99.08.02.23.39.29; author thorpej; state Exp; branches; next 1.22.2.1.2.6; 1.22.2.1.2.6 date 99.08.06.12.47.28; author chs; state Exp; branches; next 1.22.2.1.2.7; 1.22.2.1.2.7 date 99.08.09.00.05.56; author chs; state Exp; branches; next 1.22.2.1.2.8; 1.22.2.1.2.8 date 99.08.11.05.40.13; author chs; state Exp; branches; next 1.22.2.1.2.9; 1.22.2.1.2.9 date 99.08.31.21.03.47; author perseant; state Exp; branches; next ; 1.17.2.1 date 98.11.09.06.06.40; author chs; state Exp; branches; next 1.17.2.2; 1.17.2.2 date 98.11.16.08.29.12; author chs; state Exp; branches; next 1.17.2.3; 1.17.2.3 date 99.02.25.04.44.38; author chs; state Exp; branches; next 1.17.2.4; 1.17.2.4 date 99.04.09.04.48.42; author chs; state Exp; branches; next 1.17.2.5; 1.17.2.5 date 99.04.29.05.36.41; author chs; state Exp; branches; next 1.17.2.6; 1.17.2.6 date 99.04.30.04.29.15; author chs; state Exp; branches; next 1.17.2.7; 1.17.2.7 date 99.05.30.15.41.44; author chs; state Exp; branches; next 1.17.2.8; 1.17.2.8 date 99.06.02.05.02.13; author chs; state Exp; branches; next ; 1.13.2.1 date 98.07.30.14.04.17; author eeh; state Exp; branches; next ; 1.1.1.1 date 98.02.05.06.25.08; author mrg; state Exp; branches; next ; desc @@ 1.120 log @uvm: Simplify assertion in uvn_get. No functional change intended. @ text @/* $NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993 * The Regents of the University of California. * Copyright (c) 1990 University of Utah. * * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp */ /* * uvm_vnode.c: the vnode pager. 
*/ #include __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.119 2023/04/09 09:00:56 riastradh Exp $"); #ifdef _KERNEL_OPT #include "opt_uvmhist.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef UVMHIST UVMHIST_DEFINE(ubchist); #endif /* * functions */ static void uvn_alloc_ractx(struct uvm_object *); static void uvn_detach(struct uvm_object *); static int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int); static void uvn_markdirty(struct uvm_object *); static int uvn_put(struct uvm_object *, voff_t, voff_t, int); static void uvn_reference(struct uvm_object *); static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, unsigned int, struct uvm_page_array *a, unsigned int); /* * master pager structure */ const struct uvm_pagerops uvm_vnodeops = { .pgo_reference = uvn_reference, .pgo_detach = uvn_detach, .pgo_get = uvn_get, .pgo_put = uvn_put, .pgo_markdirty = uvn_markdirty, }; /* * the ops! */ /* * uvn_reference * * duplicate a reference to a VM object. Note that the reference * count must already be at least one (the passed in reference) so * there is no chance of the uvn being killed or locked out here. * * => caller must call with object unlocked. * => caller must be using the same accessprot as was used at attach time */ static void uvn_reference(struct uvm_object *uobj) { vref((struct vnode *)uobj); } /* * uvn_detach * * remove a reference to a VM object. * * => caller must call with object unlocked and map locked. */ static void uvn_detach(struct uvm_object *uobj) { vrele((struct vnode *)uobj); } /* * uvn_put: flush page data to backing store. * * => object must be locked on entry! VOP_PUTPAGES must unlock it. * => flags: PGO_SYNCIO -- use sync. I/O */ static int uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags) { struct vnode *vp = (struct vnode *)uobj; int error; KASSERT(rw_write_held(uobj->vmobjlock)); error = VOP_PUTPAGES(vp, offlo, offhi, flags); return error; } /* * uvn_get: get pages (synchronously) from backing store * * => prefer map unlocked (not required) * => object must be locked! we will _unlock_ it before starting any I/O. * => flags: PGO_LOCKED: fault data structures are locked * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx] * => NOTE: caller must check for released pages!! */ static int uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps /* IN/OUT */, int *npagesp /* IN (OUT if PGO_LOCKED)*/, int centeridx, vm_prot_t access_type, int advice, int flags) { struct vnode *vp = (struct vnode *)uobj; int error; UVMHIST_FUNC(__func__); UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset, 0, 0); if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0 && vp->v_tag != VT_TMPFS) { uvn_alloc_ractx(uobj); uvm_ra_request(vp->v_ractx, advice, uobj, offset, *npagesp << PAGE_SHIFT); } error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, access_type, advice, flags); if (flags & PGO_LOCKED) KASSERT(rw_lock_held(uobj->vmobjlock)); return error; } /* * uvn_markdirty: called when the object gains first dirty page * * => uobj must be write locked. 
*/ static void uvn_markdirty(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; KASSERT(rw_write_held(uobj->vmobjlock)); mutex_enter(vp->v_interlock); if ((vp->v_iflag & VI_ONWORKLST) == 0) { vn_syncer_add_to_worklist(vp, filedelay); } mutex_exit(vp->v_interlock); } /* * uvn_findpages: * return the page for the uobj and offset requested, allocating if needed. * => uobj must be locked. * => returned pages will be BUSY. */ int uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp, struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags) { unsigned int count, found, npages; int i, rv; struct uvm_page_array a_store; if (a == NULL) { /* * XXX fragile API * note that the array can be the one supplied by the caller of * uvn_findpages. in that case, fillflags used by the caller * might not match strictly with ours. * in particular, the caller might have filled the array * without DENSE but passed us UFP_DIRTYONLY (thus DENSE). */ const unsigned int fillflags = ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | ((flags & UFP_DIRTYONLY) ? (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); a = &a_store; uvm_page_array_init(a, uobj, fillflags); } count = found = 0; npages = *npagesp; if (flags & UFP_BACKWARD) { for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, i + 1); if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; count++; } } else { for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, npages - i); if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; count++; } } if (a == &a_store) { uvm_page_array_fini(a); } *npagesp = count; return (found); } /* * uvn_findpage: find a single page * * if a suitable page was found, put it in *pgp and return 1. * otherwise return 0. */ static int uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, unsigned int flags, struct uvm_page_array *a, unsigned int nleft) { struct vm_page *pg; UVMHIST_FUNC(__func__); UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset, 0, 0); /* * NOBUSY must come with NOWAIT and NOALLOC. if NOBUSY is * specified, this may be called with a reader lock. */ KASSERT(rw_lock_held(uobj->vmobjlock)); KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0); KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0); KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock)); if (*pgp != NULL) { UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); goto skip_offset; } for (;;) { /* * look for an existing page. */ pg = uvm_page_array_fill_and_peek(a, offset, nleft); if (pg != NULL && pg->offset != offset) { struct vm_page __diagused *tpg; KASSERT( ((a->ar_flags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) == (pg->offset < offset)); KASSERT((tpg = uvm_pagelookup(uobj, offset)) == NULL || ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && !uvm_obj_page_dirty_p(tpg))); pg = NULL; if ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { UVMHIST_LOG(ubchist, "dense", 0,0,0,0); return 0; } } /* nope? 
allocate one now */ if (pg == NULL) { if (flags & UFP_NOALLOC) { UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0); return 0; } pg = uvm_pagealloc(uobj, offset, NULL, UVM_FLAG_COLORMATCH); if (pg == NULL) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } rw_exit(uobj->vmobjlock); uvm_wait("uvnfp1"); uvm_page_array_clear(a); rw_enter(uobj->vmobjlock, RW_WRITER); continue; } UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); KASSERTMSG(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg); break; } else if (flags & UFP_NOCACHE) { UVMHIST_LOG(ubchist, "nocache",0,0,0,0); goto skip; } /* page is there, see if we need to wait on it */ if ((pg->flags & PG_BUSY) != 0) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); goto skip; } UVMHIST_LOG(ubchist, "wait %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2"); uvm_page_array_clear(a); rw_enter(uobj->vmobjlock, RW_WRITER); continue; } /* skip PG_RDONLY pages if requested */ if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) { UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); goto skip; } /* stop on clean pages if requested */ if (flags & UFP_DIRTYONLY) { const bool dirty = uvm_pagecheckdirty(pg, false); if (!dirty) { UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0); return 0; } } /* mark the page BUSY and we're done. */ if ((flags & UFP_NOBUSY) == 0) { pg->flags |= PG_BUSY; UVM_PAGE_OWN(pg, "uvn_findpage"); } UVMHIST_LOG(ubchist, "found %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); uvm_page_array_advance(a); break; } *pgp = pg; return 1; skip_offset: /* * skip this offset */ pg = uvm_page_array_peek(a); if (pg != NULL) { if (pg->offset == offset) { uvm_page_array_advance(a); } else { KASSERT((a->ar_flags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); } } return 0; skip: /* * skip this page */ KASSERT(pg != NULL); uvm_page_array_advance(a); return 0; } /* * uvm_vnp_setsize: grow or shrink a vnode uobj * * grow => just update size value * shrink => toss un-needed pages * * => we assume that the caller has a reference of some sort to the * vnode in question so that it will not be yanked out from under * us. */ void uvm_vnp_setsize(struct vnode *vp, voff_t newsize) { struct uvm_object *uobj = &vp->v_uobj; voff_t pgend = round_page(newsize); voff_t oldsize; UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist); rw_enter(uobj->vmobjlock, RW_WRITER); UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx", (uintptr_t)vp, vp->v_size, newsize, 0); /* * now check if the size has changed: if we shrink we had better * toss some pages... 
*/ KASSERT(newsize != VSIZENOTSET); KASSERT(newsize >= 0); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; /* * check whether size shrinks * if old size hasn't been set, there are no pages to drop * if there was an integer overflow in pgend, then this is no shrink */ if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) { (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO); rw_enter(uobj->vmobjlock, RW_WRITER); } mutex_enter(vp->v_interlock); vp->v_size = vp->v_writesize = newsize; mutex_exit(vp->v_interlock); rw_exit(uobj->vmobjlock); } void uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize) { rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); KASSERT(newsize != VSIZENOTSET); KASSERT(newsize >= 0); KASSERT(vp->v_size != VSIZENOTSET); KASSERT(vp->v_writesize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size <= newsize); mutex_enter(vp->v_interlock); vp->v_writesize = newsize; mutex_exit(vp->v_interlock); rw_exit(vp->v_uobj.vmobjlock); } bool uvn_text_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; int iflag; /* * v_interlock is not held here, but VI_EXECMAP is only ever changed * with the vmobjlock held too. */ iflag = atomic_load_relaxed(&vp->v_iflag); return (iflag & VI_EXECMAP) != 0; } static void uvn_alloc_ractx(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; struct uvm_ractx *ra = NULL; KASSERT(rw_write_held(uobj->vmobjlock)); if (vp->v_type != VREG) { return; } if (vp->v_ractx != NULL) { return; } if (vp->v_ractx == NULL) { rw_exit(uobj->vmobjlock); ra = uvm_ra_allocctx(); rw_enter(uobj->vmobjlock, RW_WRITER); if (ra != NULL && vp->v_ractx == NULL) { vp->v_ractx = ra; ra = NULL; } } if (ra != NULL) { uvm_ra_freectx(ra); } } @ 1.119 log @uvm(9): KASSERT(A && B) -> KASSERT(A); KASSERT(B) @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $"); d192 2 a193 2 KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) || (flags & PGO_LOCKED) == 0); @ 1.118 log @Consistently use %#jx instead of 0x%jx or just %jx in UVMHIST_LOG formats @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.117 2020/08/16 00:24:41 chs Exp $"); d452 2 a453 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); d480 2 a481 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); @ 1.117 log @in uvm_findpage(), when uvm_page_array_fill_and_peek() returns a page that is not the one we want and we make an assertion about dirtiness, check the dirty status of the page we wanted rather than the page we got. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.116 2020/08/14 09:06:15 chs Exp $"); d179 1 a179 1 UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset, d293 1 a293 1 UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset, d444 1 a444 1 UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx", @ 1.117.2.1 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.118 2021/03/13 15:29:55 skrll Exp $"); d179 1 a179 1 UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)vp, offset, d293 1 a293 1 UVMHIST_CALLARGS(ubchist, "vp %#jx off %#jx", (uintptr_t)uobj, offset, d444 1 a444 1 UVMHIST_LOG(ubchist, "vp %#jx old %#jx new %#jx", @ 1.116 log @centralize calls from UVM to radixtree into a few functions. in those functions, assert that the object lock is held in the correct mode. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $"); d316 1 d320 1 a320 1 KASSERT(uvm_pagelookup(uobj, offset) == NULL || d322 1 a322 1 !uvm_obj_page_dirty_p(pg))); @ 1.115 log @Consistently use UVMHIST(__func__) Convert UVMHIST_{CALLED,LOG} into UVMHIST_CALLARGS @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $"); d319 3 a321 4 KASSERT(uvm_pagelookup(uobj, offset) == NULL || ((a->ar_flags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && radix_tree_get_tag(&uobj->uo_pages, offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0)); a502 8 bool uvn_clean_p(struct uvm_object *uobj) { return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, UVM_PAGE_DIRTY_TAG); } @ 1.114 log @- Alter the convention for uvm_page_array slightly, so the basic search parameters can't change part way through a search: move the "uobj" and "flags" arguments over to uvm_page_array_init() and store those with the array. - With that, detect when it's not possible to find any more pages in the tree with the given search parameters, and avoid repeated tree lookups if the caller loops over uvm_page_array_fill_and_peek(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.113 2020/05/19 22:22:15 ad Exp $"); d178 2 a179 3 UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset, d292 2 a293 2 UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset, d441 1 a441 1 UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist); @ 1.113 log @PR kern/32166: pgo_get protocol is ambiguous Also problems with tmpfs+nfs noted by hannken@@. Don't pass PGO_ALLPAGES to pgo_get, and ignore PGO_DONTCARE in the !PGO_LOCKED case. In uao_get() have uvm_pagealloc() take care of page zeroing and release busy pages on error. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.112 2020/05/19 21:45:57 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.112 2020/05/19 21:45:57 ad Exp $"); d234 12 d247 1 a247 1 uvm_page_array_init(a); a292 4 const unsigned int fillflags = ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | ((flags & UFP_DIRTYONLY) ? (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); a313 7 * * XXX fragile API * note that the array can be the one supplied by the caller of * uvn_findpages. in that case, fillflags used by the caller * might not match strictly with ours. * in particular, the caller might have filled the array * without DENSE but passed us UFP_DIRTYONLY (thus DENSE). 
d315 1 a315 2 pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft, fillflags); d318 1 a318 1 ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) d321 2 a322 2 || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && radix_tree_get_tag(&uobj->uo_pages, d325 1 a325 1 if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { d411 1 a411 1 KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); @ 1.112 log @Don't try to do readahead on tmpfs. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.111 2020/03/22 18:32:42 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.111 2020/03/22 18:32:42 ad Exp $"); d164 1 a164 2 * => flags: PGO_ALLPAGES: get all of the pages * PGO_LOCKED: fault data structures are locked @ 1.111 log @Process concurrent page faults on individual uvm_objects / vm_amaps in parallel, where the relevant pages are already in-core. Proposed on tech-kern. Temporarily disabled on MP architectures with __HAVE_UNLOCKED_PMAP until adjustments are made to their pmaps. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.110 2020/03/14 20:45:23 ad Exp $"); d185 1 a185 1 && (flags & PGO_LOCKED) == 0) { @ 1.110 log @Make uvm_pagemarkdirty() responsible for putting vnodes onto the syncer work list. Proposed on tech-kern@@. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.109 2020/03/14 20:23:51 ad Exp $"); d290 9 a298 1 KASSERT(rw_write_held(uobj->vmobjlock)); d391 4 a394 2 pg->flags |= PG_BUSY; UVM_PAGE_OWN(pg, "uvn_findpage"); @ 1.109 log @Make page waits (WANTED vs BUSY) interlocked by pg->interlock. Gets RW locks out of the equation for sleep/wakeup, and allows observing+waiting for busy pages when holding only a read lock. Proposed on tech-kern. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.108 2020/03/03 13:32:44 rjs Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.108 2020/03/03 13:32:44 rjs Exp $"); d54 1 d84 1 d101 1 a158 1 d199 19 d486 1 d492 2 a493 1 return (vp->v_iflag & VI_EXECMAP) != 0; a503 14 bool uvn_needs_writefault_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; /* * v_interlock is not held here, but VI_WRMAP and VI_WRMAPDIRTY are * only ever changed with the vmobjlock held too, or when it's known * the uvm_object contains no pages (VI_PAGES clear). */ return uvn_clean_p(uobj) || (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; } @ 1.108 log @Make some wait channel names unique to six characters. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.107 2020/02/27 22:12:54 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.107 2020/02/27 22:12:54 ad Exp $"); a337 1 pg->flags |= PG_WANTED; d340 1 a340 2 UVM_UNLOCK_AND_WAIT_RW(pg, uobj->vmobjlock, 0, "uvnfp2", 0); @ 1.107 log @Tighten up the locking around vp->v_iflag a little more after the recent split of vmobjlock & v_interlock. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.106 2020/02/23 15:46:43 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.106 2020/02/23 15:46:43 ad Exp $"); d317 1 a317 1 uvm_wait("uvn_fp1"); d342 1 a342 1 "uvn_fp2", 0); @ 1.106 log @UVM locking changes, proposed on tech-kern: - Change the lock on uvm_object, vm_amap and vm_anon to be a RW lock. - Break v_interlock and vmobjlock apart. v_interlock remains a mutex. - Do partial PV list locking in the x86 pmap. Others to follow later. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $"); d468 4 a477 1 struct vnode *vp = (struct vnode *)uobj; d479 2 a480 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d488 5 @ 1.105 log @Merge from yamt-pagecache (after much testing): - Reduce unnecessary page scan in putpages esp. when an object has a ton of pages cached but only a few of them are dirty. - Reduce the number of pmap operations by tracking page dirtiness more precisely in uvm layer. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.104 2019/12/21 14:41:44 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.104 2019/12/21 14:41:44 ad Exp $"); d79 1 d150 1 a150 1 KASSERT(mutex_owned(vp->v_interlock)); d184 1 a184 1 vn_ra_allocctx(vp); d192 1 a192 1 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) || d269 1 a269 1 KASSERT(mutex_owned(uobj->vmobjlock)); d316 1 a316 1 mutex_exit(uobj->vmobjlock); d319 1 a319 1 mutex_enter(uobj->vmobjlock); d341 2 a342 2 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, "uvn_fp2", 0); d344 1 a344 1 mutex_enter(uobj->vmobjlock); d416 1 a416 1 mutex_enter(uobj->vmobjlock); d439 1 a439 1 mutex_enter(uobj->vmobjlock); d441 1 d443 2 a444 1 mutex_exit(uobj->vmobjlock); d451 1 a451 1 mutex_enter(vp->v_interlock); d457 1 d460 1 d487 28 @ 1.104 log @- Rename VM_PGCOLOR_BUCKET() to VM_PGCOLOR(). I want to reuse "bucket" for something else soon and TBH it matches what this macro does better. - Add inlines to set/get locator values in the unused lower bits of pg->phys_addr. Begin by using it to cache the freelist index, because computing it is expensive and that shows up during profiling. Discussed on tech-kern. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.103 2017/10/28 00:37:13 pgoyette Exp $"); d69 1 d86 2 a87 1 int); a140 1 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed) d205 2 a206 2 uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp, struct vm_page **pgs, int flags) d208 8 a215 2 int i, count, found, npages, rv; d220 2 a221 1 rv = uvn_findpage(uobj, offset, &pgs[i], flags); d231 2 a232 1 rv = uvn_findpage(uobj, offset, &pgs[i], flags); d241 3 d248 7 d257 1 a257 1 int flags) d260 4 a263 1 bool dirty; d272 1 a272 1 return 0; d275 26 a300 2 /* look for an existing page */ pg = uvm_pagelookup(uobj, offset); d317 1 d323 2 d328 1 a328 1 return 0; d335 1 a335 1 return 0; d342 1 d350 1 a350 1 return 0; d355 1 a355 3 dirty = pmap_clear_modify(pg) || (pg->flags & PG_CLEAN) == 0; pg->flags |= PG_CLEAN; d367 1 d372 22 @ 1.104.2.1 log @Sync with head. 
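The 1.104 log above mentions new inlines that cache locator values (initially the freelist index) in the unused low bits of pg->phys_addr. Those inlines are not shown in these deltas; the standalone sketch below only illustrates the bit-packing idea, with hypothetical names and a hypothetical 4 KiB page size:

	#include <stdint.h>

	#define EX_PAGE_SHIFT	12	/* hypothetical page shift */
	#define EX_LOC_MASK	(((uintptr_t)1 << EX_PAGE_SHIFT) - 1)

	/* stash a small index in the low bits of a page-aligned address */
	static inline uintptr_t
	ex_set_locator(uintptr_t pa, unsigned idx)
	{
		return (pa & ~EX_LOC_MASK) | ((uintptr_t)idx & EX_LOC_MASK);
	}

	/* recover the cached index; the address bits are untouched */
	static inline unsigned
	ex_get_locator(uintptr_t word)
	{
		return (unsigned)(word & EX_LOC_MASK);
	}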
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.105 2020/01/15 17:55:45 ad Exp $"); a68 1 #include d85 1 a85 2 unsigned int, struct uvm_page_array *a, unsigned int); d139 1 d204 2 a205 2 uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp, struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags) d207 2 a208 8 unsigned int count, found, npages; int i, rv; struct uvm_page_array a_store; if (a == NULL) { a = &a_store; uvm_page_array_init(a); } d213 1 a213 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, i + 1); d223 1 a223 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, npages - i); a231 3 if (a == &a_store) { uvm_page_array_fini(a); } a235 7 /* * uvn_findpage: find a single page * * if a suitable page was found, put it in *pgp and return 1. * otherwise return 0. */ d238 1 a238 1 unsigned int flags, struct uvm_page_array *a, unsigned int nleft) d241 1 a241 4 const unsigned int fillflags = ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | ((flags & UFP_DIRTYONLY) ? (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); d250 1 a250 1 goto skip_offset; d253 2 a254 26 /* * look for an existing page. * * XXX fragile API * note that the array can be the one supplied by the caller of * uvn_findpages. in that case, fillflags used by the caller * might not match strictly with ours. * in particular, the caller might have filled the array * without DENSE but passed us UFP_DIRTYONLY (thus DENSE). */ pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft, fillflags); if (pg != NULL && pg->offset != offset) { KASSERT( ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) == (pg->offset < offset)); KASSERT(uvm_pagelookup(uobj, offset) == NULL || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && radix_tree_get_tag(&uobj->uo_pages, offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0)); pg = NULL; if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { UVMHIST_LOG(ubchist, "dense", 0,0,0,0); return 0; } } a270 1 uvm_page_array_clear(a); a275 2 KASSERTMSG(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg); d279 1 a279 1 goto skip; d286 1 a286 1 goto skip; a292 1 uvm_page_array_clear(a); d300 1 a300 1 goto skip; d305 3 a307 1 const bool dirty = uvm_pagecheckdirty(pg, false); a318 1 uvm_page_array_advance(a); a322 22 skip_offset: /* * skip this offset */ pg = uvm_page_array_peek(a); if (pg != NULL) { if (pg->offset == offset) { uvm_page_array_advance(a); } else { KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); } } return 0; skip: /* * skip this page */ KASSERT(pg != NULL); uvm_page_array_advance(a); return 0; @ 1.104.2.2 log @Sync with head. 
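A sketch of the gang-lookup pattern visible in the delta above, using the pre-1.114 calling convention in which the object and fill flags are still passed to uvm_page_array_fill_and_peek() (the 1.114 log earlier in this file moved them to uvm_page_array_init()). The function names come from the deltas; the loop shape and the nleft/fillflags variables are assumed:

	struct uvm_page_array a_store, *a = &a_store;
	struct vm_page *pg;

	uvm_page_array_init(a);		/* pre-1.114: no uobj/flags arguments yet */
	for (;;) {
		pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft,
		    fillflags);
		if (pg == NULL)
			break;
		/* ... use pg, then step past it for the next lookup ... */
		uvm_page_array_advance(a);
		offset = pg->offset + PAGE_SIZE;
	}
	uvm_page_array_fini(a);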
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.107 2020/02/27 22:12:54 ad Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.107 2020/02/27 22:12:54 ad Exp $"); a78 1 static void uvn_alloc_ractx(struct uvm_object *); d149 1 a149 1 KASSERT(rw_write_held(uobj->vmobjlock)); d183 1 a183 1 uvn_alloc_ractx(uobj); d191 1 a191 1 KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) || d268 1 a268 1 KASSERT(rw_write_held(uobj->vmobjlock)); d315 1 a315 1 rw_exit(uobj->vmobjlock); d318 1 a318 1 rw_enter(uobj->vmobjlock, RW_WRITER); d340 2 a341 2 UVM_UNLOCK_AND_WAIT_RW(pg, uobj->vmobjlock, 0, "uvn_fp2", 0); d343 1 a343 1 rw_enter(uobj->vmobjlock, RW_WRITER); d415 1 a415 1 rw_enter(uobj->vmobjlock, RW_WRITER); d438 1 a438 1 rw_enter(uobj->vmobjlock, RW_WRITER); a439 1 mutex_enter(vp->v_interlock); d441 1 a441 2 mutex_exit(vp->v_interlock); rw_exit(uobj->vmobjlock); d448 1 a448 1 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); a453 1 mutex_enter(vp->v_interlock); a455 1 rw_exit(vp->v_uobj.vmobjlock); a462 4 /* * v_interlock is not held here, but VI_EXECMAP is only ever changed * with the vmobjlock held too. */ d469 1 d471 1 a471 2 return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, UVM_PAGE_DIRTY_TAG); a478 5 /* * v_interlock is not held here, but VI_WRMAP and VI_WRMAPDIRTY are * only ever changed with the vmobjlock held too, or when it's known * the uvm_object contains no pages (VI_PAGES clear). */ a481 28 static void uvn_alloc_ractx(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; struct uvm_ractx *ra = NULL; KASSERT(rw_write_held(uobj->vmobjlock)); if (vp->v_type != VREG) { return; } if (vp->v_ractx != NULL) { return; } if (vp->v_ractx == NULL) { rw_exit(uobj->vmobjlock); ra = uvm_ra_allocctx(); rw_enter(uobj->vmobjlock, RW_WRITER); if (ra != NULL && vp->v_ractx == NULL) { vp->v_ractx = ra; ra = NULL; } } if (ra != NULL) { uvm_ra_freectx(ra); } } @ 1.103 log @Update the kernhist(9) kernel history code to address issues identified in PR kern/52639, as well as some general cleaning-up... (As proposed on tech-kern@@ with additional changes and enhancements.) Details of changes: * All history arguments are now stored as uintmax_t values[1], both in the kernel and in the structures used for exporting the history data to userland via sysctl(9). This avoids problems on some architectures where passing a 64-bit (or larger) value to printf(3) can cause it to process the value as multiple arguments. (This can be particularly problematic when printf()'s format string is not a literal, since in that case the compiler cannot know how large each argument should be.) * Update the data structures used for exporting kernel history data to include a version number as well as the length of history arguments. * All [2] existing users of kernhist(9) have had their format strings updated. Each format specifier now includes an explicit length modifier 'j' to refer to numeric values of the size of uintmax_t. * All [2] existing users of kernhist(9) have had their format strings updated to replace uses of "%p" with "%#jx", and the pointer arguments are now cast to (uintptr_t) before being subsequently cast to (uintmax_t). This is needed to avoid compiler warnings about casting "pointer to integer of a different size." * All [2] existing users of kernhist(9) have had instances of "%s" or "%c" format strings replaced with numeric formats; several instances of mis-match between format string and argument list have been fixed. 
* vmstat(1) has been modified to handle the new size of arguments in the history data as exported by sysctl(9). * vmstat(1) now provides a warning message if the history requested with the -u option does not exist (previously, this condition was silently ignored, with only a single blank line being printed). * vmstat(1) now checks the version and argument length included in the data exported via sysctl(9) and exits if they do not match the values with which vmstat was built. * The kernhist(9) man-page has been updated to note the additional requirements imposed on the format strings, along with several other minor changes and enhancements. [1] It would have been possible to use an explicit length (for example, uint64_t) for the history arguments. But that would require another "rototill" of all the users in the future when we add support for an architecture that supports a larger size. Also, the printf(3) format specifiers for explicitly-sized values, such as "%"PRIu64, are much more verbose (and less aesthetically appealing, IMHO) than simply using "%ju". [2] I've tried very hard to find "all [the] existing users of kernhist(9)" but it is possible that I've missed some of them. I would be glad to update any stragglers that anyone identifies. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.102 2015/12/06 09:38:54 wiz Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.102 2015/12/06 09:38:54 wiz Exp $"); d275 1 a275 1 (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d290 1 a290 1 (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d318 1 a318 1 (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); @ 1.103.4.1 log @Merge changes from current as of 20200406 @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); a53 1 #include a68 1 #include a77 1 static void uvn_alloc_ractx(struct uvm_object *); a80 1 static void uvn_markdirty(struct uvm_object *); d85 1 a85 2 unsigned int, struct uvm_page_array *a, unsigned int); a95 1 .pgo_markdirty = uvn_markdirty, d139 1 d148 1 a148 1 KASSERT(rw_write_held(uobj->vmobjlock)); d154 1 d182 1 a182 1 uvn_alloc_ractx(uobj); d190 1 a190 1 KASSERT(((flags & PGO_LOCKED) != 0 && rw_lock_held(uobj->vmobjlock)) || a194 19 /* * uvn_markdirty: called when the object gains first dirty page * * => uobj must be write locked. */ static void uvn_markdirty(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; KASSERT(rw_write_held(uobj->vmobjlock)); mutex_enter(vp->v_interlock); if ((vp->v_iflag & VI_ONWORKLST) == 0) { vn_syncer_add_to_worklist(vp, filedelay); } mutex_exit(vp->v_interlock); } d204 2 a205 2 uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp, struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags) d207 2 a208 8 unsigned int count, found, npages; int i, rv; struct uvm_page_array a_store; if (a == NULL) { a = &a_store; uvm_page_array_init(a); } d213 1 a213 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, i + 1); d223 1 a223 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, npages - i); a231 3 if (a == &a_store) { uvm_page_array_fini(a); } a235 7 /* * uvn_findpage: find a single page * * if a suitable page was found, put it in *pgp and return 1. * otherwise return 0. */ d238 1 a238 1 unsigned int flags, struct uvm_page_array *a, unsigned int nleft) d241 1 a241 4 const unsigned int fillflags = ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | ((flags & UFP_DIRTYONLY) ? 
(UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); d246 1 a246 9 /* * NOBUSY must come with NOWAIT and NOALLOC. if NOBUSY is * specified, this may be called with a reader lock. */ KASSERT(rw_lock_held(uobj->vmobjlock)); KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOWAIT) != 0); KASSERT((flags & UFP_NOBUSY) == 0 || (flags & UFP_NOALLOC) != 0); KASSERT((flags & UFP_NOBUSY) != 0 || rw_write_held(uobj->vmobjlock)); d250 1 a250 1 goto skip_offset; d253 2 a254 26 /* * look for an existing page. * * XXX fragile API * note that the array can be the one supplied by the caller of * uvn_findpages. in that case, fillflags used by the caller * might not match strictly with ours. * in particular, the caller might have filled the array * without DENSE but passed us UFP_DIRTYONLY (thus DENSE). */ pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft, fillflags); if (pg != NULL && pg->offset != offset) { KASSERT( ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) == (pg->offset < offset)); KASSERT(uvm_pagelookup(uobj, offset) == NULL || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && radix_tree_get_tag(&uobj->uo_pages, offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0)); pg = NULL; if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { UVMHIST_LOG(ubchist, "dense", 0,0,0,0); return 0; } } d269 3 a271 4 rw_exit(uobj->vmobjlock); uvm_wait("uvnfp1"); uvm_page_array_clear(a); rw_enter(uobj->vmobjlock, RW_WRITER); d275 1 a275 3 (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); KASSERTMSG(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN, "page %p not clean", pg); d279 1 a279 1 goto skip; d286 1 a286 1 goto skip; d288 1 d290 4 a293 4 (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); uvm_pagewait(pg, uobj->vmobjlock, "uvnfp2"); uvm_page_array_clear(a); rw_enter(uobj->vmobjlock, RW_WRITER); d300 1 a300 1 goto skip; d305 3 a307 1 const bool dirty = uvm_pagecheckdirty(pg, false); d315 2 a316 4 if ((flags & UFP_NOBUSY) == 0) { pg->flags |= PG_BUSY; UVM_PAGE_OWN(pg, "uvn_findpage"); } d318 1 a318 2 (uintptr_t)pg, VM_PGCOLOR(pg), 0, 0); uvm_page_array_advance(a); a322 22 skip_offset: /* * skip this offset */ pg = uvm_page_array_peek(a); if (pg != NULL) { if (pg->offset == offset) { uvm_page_array_advance(a); } else { KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); } } return 0; skip: /* * skip this page */ KASSERT(pg != NULL); uvm_page_array_advance(a); return 0; d344 1 a344 1 rw_enter(uobj->vmobjlock, RW_WRITER); d367 1 a367 1 rw_enter(uobj->vmobjlock, RW_WRITER); a368 1 mutex_enter(vp->v_interlock); d370 1 a370 2 mutex_exit(vp->v_interlock); rw_exit(uobj->vmobjlock); d377 1 a377 1 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER); a382 1 mutex_enter(vp->v_interlock); a384 1 rw_exit(vp->v_uobj.vmobjlock); a390 1 int iflag; d392 1 a392 6 /* * v_interlock is not held here, but VI_EXECMAP is only ever changed * with the vmobjlock held too. */ iflag = atomic_load_relaxed(&vp->v_iflag); return (iflag & VI_EXECMAP) != 0; d398 1 d400 1 a400 2 return radix_tree_empty_tagged_tree_p(&uobj->uo_pages, UVM_PAGE_DIRTY_TAG); d403 2 a404 2 static void uvn_alloc_ractx(struct uvm_object *uobj) a406 1 struct uvm_ractx *ra = NULL; d408 2 a409 20 KASSERT(rw_write_held(uobj->vmobjlock)); if (vp->v_type != VREG) { return; } if (vp->v_ractx != NULL) { return; } if (vp->v_ractx == NULL) { rw_exit(uobj->vmobjlock); ra = uvm_ra_allocctx(); rw_enter(uobj->vmobjlock, RW_WRITER); if (ra != NULL && vp->v_ractx == NULL) { vp->v_ractx = ra; ra = NULL; } } if (ra != NULL) { uvm_ra_freectx(ra); } @ 1.102 log @Fix typo in comment. 
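A before/after sketch of the format-string rework described in the 1.103 log above; both lines are copied verbatim from deltas in this file:

	/* pre-1.103: %p/%x conversions, pointer passed directly */
	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	/* post-1.103: arguments are stored as uintmax_t, so every conversion
	 * carries the 'j' length modifier and pointers go through uintptr_t */
	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset,
	    0, 0);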
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.101 2015/12/06 08:53:22 mlelstv Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.101 2015/12/06 08:53:22 mlelstv Exp $"); d177 2 a178 1 UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0); d243 2 a244 1 UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); d274 2 a275 2 UVMHIST_LOG(ubchist, "alloced %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); d289 2 a290 2 UVMHIST_LOG(ubchist, "wait %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); d317 2 a318 2 UVMHIST_LOG(ubchist, "found %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); d345 2 a346 2 UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x", vp, vp->v_size, newsize, 0); @ 1.102.10.1 log @Pull up following revision(s) (requested by pgoyette in ticket #335): share/man/man9/kernhist.9: 1.5-1.8 sys/arch/acorn26/acorn26/pmap.c: 1.39 sys/arch/arm/arm32/fault.c: 1.105 via patch sys/arch/arm/arm32/pmap.c: 1.350, 1.359 sys/arch/arm/broadcom/bcm2835_bsc.c: 1.7 sys/arch/arm/omap/if_cpsw.c: 1.20 sys/arch/arm/omap/tiotg.c: 1.7 sys/arch/evbarm/conf/RPI2_INSTALL: 1.3 sys/dev/ic/sl811hs.c: 1.98 sys/dev/usb/ehci.c: 1.256 sys/dev/usb/if_axe.c: 1.83 sys/dev/usb/motg.c: 1.18 sys/dev/usb/ohci.c: 1.274 sys/dev/usb/ucom.c: 1.119 sys/dev/usb/uhci.c: 1.277 sys/dev/usb/uhub.c: 1.137 sys/dev/usb/umass.c: 1.160-1.162 sys/dev/usb/umass_quirks.c: 1.100 sys/dev/usb/umass_scsipi.c: 1.55 sys/dev/usb/usb.c: 1.168 sys/dev/usb/usb_mem.c: 1.70 sys/dev/usb/usb_subr.c: 1.221 sys/dev/usb/usbdi.c: 1.175 sys/dev/usb/usbdi_util.c: 1.67-1.70 sys/dev/usb/usbroothub.c: 1.3 sys/dev/usb/xhci.c: 1.75 sys/external/bsd/drm2/dist/drm/i915/i915_gem.c: 1.34 sys/kern/kern_history.c: 1.15 sys/kern/kern_xxx.c: 1.74 sys/kern/vfs_bio.c: 1.275-1.276 sys/miscfs/genfs/genfs_io.c: 1.71 sys/sys/kernhist.h: 1.21 sys/ufs/ffs/ffs_balloc.c: 1.63 sys/ufs/lfs/lfs_vfsops.c: 1.361 sys/ufs/lfs/ulfs_inode.c: 1.21 sys/ufs/lfs/ulfs_vnops.c: 1.52 sys/ufs/ufs/ufs_inode.c: 1.102 sys/ufs/ufs/ufs_vnops.c: 1.239 sys/uvm/pmap/pmap.c: 1.37-1.39 sys/uvm/pmap/pmap_tlb.c: 1.22 sys/uvm/uvm_amap.c: 1.108 sys/uvm/uvm_anon.c: 1.64 sys/uvm/uvm_aobj.c: 1.126 sys/uvm/uvm_bio.c: 1.91 sys/uvm/uvm_device.c: 1.66 sys/uvm/uvm_fault.c: 1.201 sys/uvm/uvm_km.c: 1.144 sys/uvm/uvm_loan.c: 1.85 sys/uvm/uvm_map.c: 1.353 sys/uvm/uvm_page.c: 1.194 sys/uvm/uvm_pager.c: 1.111 sys/uvm/uvm_pdaemon.c: 1.109 sys/uvm/uvm_swap.c: 1.175 sys/uvm/uvm_vnode.c: 1.103 usr.bin/vmstat/vmstat.c: 1.219 Reorder to test for null before null deref in debug code -- Reorder to test for null before null deref in debug code -- KNF -- No need for '\n' in UVMHIST_LOG -- normalise a BIOHIST log message -- Update the kernhist(9) kernel history code to address issues identified in PR kern/52639, as well as some general cleaning-up... (As proposed on tech-kern@@ with additional changes and enhancements.) Details of changes: * All history arguments are now stored as uintmax_t values[1], both in the kernel and in the structures used for exporting the history data to userland via sysctl(9). This avoids problems on some architectures where passing a 64-bit (or larger) value to printf(3) can cause it to process the value as multiple arguments. (This can be particularly problematic when printf()'s format string is not a literal, since in that case the compiler cannot know how large each argument should be.) * Update the data structures used for exporting kernel history data to include a version number as well as the length of history arguments. 
* All [2] existing users of kernhist(9) have had their format strings updated. Each format specifier now includes an explicit length modifier 'j' to refer to numeric values of the size of uintmax_t. * All [2] existing users of kernhist(9) have had their format strings updated to replace uses of "%p" with "%#jx", and the pointer arguments are now cast to (uintptr_t) before being subsequently cast to (uintmax_t). This is needed to avoid compiler warnings about casting "pointer to integer of a different size." * All [2] existing users of kernhist(9) have had instances of "%s" or "%c" format strings replaced with numeric formats; several instances of mis-match between format string and argument list have been fixed. * vmstat(1) has been modified to handle the new size of arguments in the history data as exported by sysctl(9). * vmstat(1) now provides a warning message if the history requested with the -u option does not exist (previously, this condition was silently ignored, with only a single blank line being printed). * vmstat(1) now checks the version and argument length included in the data exported via sysctl(9) and exits if they do not match the values with which vmstat was built. * The kernhist(9) man-page has been updated to note the additional requirements imposed on the format strings, along with several other minor changes and enhancements. [1] It would have been possible to use an explicit length (for example, uint64_t) for the history arguments. But that would require another "rototill" of all the users in the future when we add support for an architecture that supports a larger size. Also, the printf(3) format specifiers for explicitly-sized values, such as "%"PRIu64, are much more verbose (and less aesthetically appealing, IMHO) than simply using "%ju". [2] I've tried very hard to find "all [the] existing users of kernhist(9)" but it is possible that I've missed some of them. I would be glad to update any stragglers that anyone identifies. -- For some reason this single kernel seems to have outgrown its declared size as a result of the kernhist(9) changes. Bump the size. XXX The amount of increase may be excessive - anyone with more detailed XXX knowledge please feel free to further adjust the value appropriately. -- Misssed one cast of pointer --> uintptr_t in previous kernhist(9) commit -- And yet another one. :( -- Use correct mark-up for NetBSD version. -- More improvements in grammar and readability. -- Remove a stray '"' (obvious typo) and add a couple of casts that are probably needed. -- And replace an instance of "%p" conversion with "%#jx" -- Whitespace fix. Give Bl tag table a width. Fix Xr. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.102 2015/12/06 09:38:54 wiz Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.102 2015/12/06 09:38:54 wiz Exp $"); d177 1 a177 2 UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset, 0, 0); d242 1 a242 2 UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset, 0, 0); d272 2 a273 2 UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d287 2 a288 2 UVMHIST_LOG(ubchist, "wait %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d315 2 a316 2 UVMHIST_LOG(ubchist, "found %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d343 2 a344 2 UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx", (uintptr_t)vp, vp->v_size, newsize, 0); @ 1.101 log @Clean up assertions and catch integer overflow. 
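The 1.101 log above ("catch integer overflow") corresponds to the uvm_vnp_setsize() check visible in the deltas below; a condensed sketch, with the surrounding function and locking assumed:

	voff_t pgend = round_page(newsize);	/* can wrap negative for huge sizes */

	/*
	 * check whether size shrinks
	 * if old size hasn't been set, there are no pages to drop
	 * if there was an integer overflow in pgend, then this is no shrink
	 */
	if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		/* ... reacquire the object lock before updating v_size ... */
	}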
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.100 2015/08/24 22:50:32 pooka Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.100 2015/08/24 22:50:32 pooka Exp $"); d359 1 a359 1 * check wether size shrinks @ 1.100 log @to garnish, dust with _KERNEL_OPT @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.99 2012/07/30 23:56:48 matt Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.99 2012/07/30 23:56:48 matt Exp $"); d351 1 a351 1 KASSERT(newsize != VSIZENOTSET); a356 1 KASSERT(oldsize != VSIZENOTSET || pgend > oldsize); d358 6 a363 1 if (oldsize > pgend) { d376 1 a376 1 KASSERT(newsize != VSIZENOTSET); @ 1.99 log @-fno-common broke kernhist since it used commons. Add a KERNHIST_DEFINE which is define the kernel history. Change UVM to deal with the new usage. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.98 2012/06/01 14:52:48 martin Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.98 2012/06/01 14:52:48 martin Exp $"); d50 1 d52 1 @ 1.99.2.1 log @update from HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); a49 1 #ifdef _KERNEL_OPT a50 1 #endif d175 1 a175 2 UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset, 0, 0); d240 1 a240 2 UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset, 0, 0); d270 2 a271 2 UVMHIST_LOG(ubchist, "alloced %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d285 2 a286 2 UVMHIST_LOG(ubchist, "wait %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d313 2 a314 2 UVMHIST_LOG(ubchist, "found %#jx (color %ju)", (uintptr_t)pg, VM_PGCOLOR_BUCKET(pg), 0, 0); d341 2 a342 2 UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx", (uintptr_t)vp, vp->v_size, newsize, 0); d349 1 a349 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); d355 1 d357 1 a357 6 /* * check whether size shrinks * if old size hasn't been set, there are no pages to drop * if there was an integer overflow in pgend, then this is no shrink */ if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) { d370 1 a370 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); @ 1.99.16.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.100 2015/08/24 22:50:32 pooka Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.100 2015/08/24 22:50:32 pooka Exp $"); a49 1 #ifdef _KERNEL_OPT a50 1 #endif @ 1.99.16.2 log @Sync with HEAD (as of 26th Dec) @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.99.16.1 2015/09/22 12:06:17 skrll Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.99.16.1 2015/09/22 12:06:17 skrll Exp $"); d351 1 a351 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); d357 1 d359 1 a359 6 /* * check whether size shrinks * if old size hasn't been set, there are no pages to drop * if there was an integer overflow in pgend, then this is no shrink */ if (oldsize > pgend && oldsize != VSIZENOTSET && pgend >= 0) { d372 1 a372 1 KASSERT(newsize != VSIZENOTSET && newsize >= 0); @ 1.98 log @Only use generic readahead on VREG vnodes, the space used to store the context is not valid on other types. Prevents the crash reported in PR kern/38889, but does not fix the mmap of block devices, more work is needed (no size on VBLK vnodes). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $"); d68 4 @ 1.97 log @Allocate color appropriate pages. 
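The 1.99 log above notes that building with -fno-common broke kernel histories that had been defined as commons, so a definition macro was introduced. The fragment below is copied from the 1.97.2.8 delta later in this file; putting it in exactly one .c file is assumed:

	#ifdef UVMHIST
	UVMHIST_DEFINE(ubchist);	/* the single real definition of the history */
	#endif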
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.96 2011/06/12 03:36:04 rmind Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.96 2011/06/12 03:36:04 rmind Exp $"); d173 2 a174 1 if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { @ 1.97.8.1 log @Pull up following revision(s) (requested by martin in ticket #301): sys/uvm/uvm_vnode.c: revision 1.98 tests/lib/libc/sys/t_mmap.c: revision 1.3 tests/lib/libc/sys/t_mmap.c: revision 1.4 tests/lib/libc/sys/t_mmap.c: revision 1.5 tests/lib/libc/sys/t_mmap.c: revision 1.6 Only use generic readahead on VREG vnodes, the space used to store the context is not valid on other types. Prevents the crash reported in PR kern/38889, but does not fix the mmap of block devices, more work is needed (no size on VBLK vnodes). Do not skip the block device mmap test, as it does not crash the kernel any more. Mark it as expected failure instead. mmap_block: do not use a hardcoded block device list, but query the kernel for attached disks instead, then try to mmap the raw partition. Use atf_tc_skip(). A test case for serious PR kern/38889: crash on open/mmap/close of block device. The test case is skipped for the time being as it replicates the panic described in the PR (tested on NetBSD/amd64 6.0 BETA). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97 2011/09/06 16:41:55 matt Exp $"); d173 1 a173 2 if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { @ 1.97.6.1 log @sync to latest -current. @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); d173 1 a173 2 if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { @ 1.97.2.1 log @page cache related changes - maintain object pages in radix tree rather than rb tree. - reduce unnecessary page scan in putpages. esp. when an object has a ton of pages cached but only a few of them are dirty. - reduce the number of pmap operations by tracking page dirtiness more precisely in uvm layer. - fix nfs commit range tracking. - fix nfs write clustering. XXX hack @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); d133 1 d288 2 a289 3 /* skip PG_RDONLY and PG_HOLE pages if requested */ if ((flags & UFP_NORDONLY) && (pg->flags & (PG_RDONLY|PG_HOLE))) { d296 3 a298 1 dirty = uvm_pagecheckdirty(pg, false); @ 1.97.2.2 log @- uvm_page_array_fill: add some more parameters - uvn_findpages: use gang-lookup - genfs_putpages: re-enable backward clustering - mechanical changes after the recent radixtree.h api changes @ text @a66 1 #include d79 1 a79 1 int, struct uvm_page_array *a, unsigned int); a198 1 struct uvm_page_array a; a199 1 uvm_page_array_init(&a); d204 1 a204 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a, npages - i); d214 1 a214 2 rv = uvn_findpage(uobj, offset, &pgs[i], flags, &a, npages - i); a222 1 uvm_page_array_fini(&a); d229 1 a229 1 int flags, struct uvm_page_array *a, unsigned int nleft) a232 4 const unsigned int fillflags = ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) || ((flags & UFP_DIRTYONLY) ? 
(UVM_PAGE_ARRAY_FILL_DIRTYONLY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); d240 1 a240 1 goto skip; d244 1 a244 9 pg = uvm_page_array_fill_and_peek(a, uobj, offset, nleft, fillflags); if (pg != NULL && pg->offset != offset) { KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); KASSERT( ((fillflags & UVM_PAGE_ARRAY_FILL_BACKWARD) != 0) == (pg->offset < offset)); pg = NULL; } a260 1 uvm_page_array_clear(a); d269 1 a269 1 goto skip; d276 1 a276 1 goto skip; a282 1 uvm_page_array_clear(a); d291 1 a291 1 goto skip; a307 1 uvm_page_array_advance(a); a311 11 skip: pg = uvm_page_array_peek(a); if (pg != NULL) { if (pg->offset == offset) { uvm_page_array_advance(a); } else { KASSERT((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) == 0); } } return 0; @ 1.97.2.3 log @don't inline uvn_findpages in genfs_io. @ text @d80 1 a80 2 unsigned int, struct uvm_page_array *a, unsigned int); d196 2 a197 2 uvn_findpages(struct uvm_object *uobj, voff_t offset, unsigned int *npagesp, struct vm_page **pgs, struct uvm_page_array *a, unsigned int flags) d199 4 a202 8 unsigned int count, found, npages; int i, rv; struct uvm_page_array a_store; if (a == NULL) { a = &a_store; uvm_page_array_init(a); } d207 1 a207 1 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, d218 1 a218 1 rv = uvn_findpage(uobj, offset, &pgs[i], flags, a, d228 1 a228 3 if (a == &a_store) { uvm_page_array_fini(a); } d235 1 a235 1 unsigned int flags, struct uvm_page_array *a, unsigned int nleft) a313 14 /* * check for PG_PAGER1 requests */ if ((flags & UFP_NOPAGER1) != 0 && (pg->flags & PG_PAGER1) != 0) { UVMHIST_LOG(ubchist, "nopager1",0,0,0,0); goto skip; } if ((flags & UFP_ONLYPAGER1) != 0 && (pg->flags & PG_PAGER1) == 0) { UVMHIST_LOG(ubchist, "onlypager1",0,0,0,0); goto skip; } @ 1.97.2.4 log @- bug fixes - minor optimizations - assertions - comments @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.97.2.3 2011/12/20 13:46:17 yamt Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.3 2011/12/20 13:46:17 yamt Exp $"); d213 1 a213 1 i + 1); d245 1 d247 1 a247 1 ((flags & UFP_BACKWARD) ? UVM_PAGE_ARRAY_FILL_BACKWARD : 0) | d257 1 a257 1 goto skip_offset; d260 1 a260 10 /* * look for an existing page. * * XXX fragile API * note that the array can be the one supplied by the caller of * uvm_findpages. in that case, fillflags used by the caller * might not match strictly with ours. * in particular, the caller might have filled the array * without DIRTYONLY or DENSE but passed us UFP_DIRTYONLY. */ d264 1 d337 1 a337 2 const bool dirty = uvm_pagecheckdirty(pg, false); d355 1 a355 4 skip_offset: /* * skip this offset */ a364 8 skip: /* * skip this page */ KASSERT(pg != NULL); uvm_page_array_advance(a); return 0; @ 1.97.2.5 log @byebye PG_HOLE as it turned out to be unnecessary. @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); d321 3 a323 3 /* skip PG_RDONLY pages if requested */ if ((flags & UFP_NORDONLY) != 0 && (pg->flags & PG_RDONLY) != 0) { @ 1.97.2.6 log @fix a typo in a comment. @ text @d264 1 a264 1 * uvn_findpages. in that case, fillflags used by the caller @ 1.97.2.7 log @- fix integrity sync. putpages for integrity sync (fsync, msync with MS_SYNC, etc) should not skip pages being written back by other threads. - adapt to radix tree tag api changes. 
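The radix-tree dirty tag mentioned in the 1.97.2.7 log above is also what the vnode pager uses to decide whether an object is clean; the function below is quoted from the 1.115 delta earlier in this file:

	bool
	uvn_clean_p(struct uvm_object *uobj)
	{

		return radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
		    UVM_PAGE_DIRTY_TAG);
	}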
@ text @d248 1 a248 1 (UVM_PAGE_ARRAY_FILL_DIRTY|UVM_PAGE_ARRAY_FILL_DENSE) : 0); @ 1.97.2.8 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.97.2.7 2012/08/01 22:34:14 yamt Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.97.2.7 2012/08/01 22:34:14 yamt Exp $"); a68 4 #ifdef UVMHIST UVMHIST_DEFINE(ubchist); #endif d174 1 a174 2 if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { @ 1.97.2.9 log @uvn_findpage: fix dense case. add comments. @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); a244 7 /* * uvn_findpage: find a single page * * if a suitable page was found, put it in *pgp and return 1. * otherwise return 0. */ d272 1 a272 1 * without DENSE but passed us UFP_DIRTYONLY (thus DENSE). a279 4 KASSERT(uvm_pagelookup(uobj, offset) == NULL || ((fillflags & UVM_PAGE_ARRAY_FILL_DIRTY) != 0 && radix_tree_get_tag(&uobj->uo_pages, offset >> PAGE_SHIFT, UVM_PAGE_DIRTY_TAG) == 0)); a280 4 if ((fillflags & UVM_PAGE_ARRAY_FILL_DENSE) != 0) { UVMHIST_LOG(ubchist, "dense", 0,0,0,0); return 0; } @ 1.96 log @Welcome to 5.99.53! Merge rmind-uvmplock branch: - Reorganize locking in UVM and provide extra serialisation for pmap(9). New lock order: [vmpage-owner-lock] -> pmap-lock. - Simplify locking in some pmap(9) modules by removing P->V locking. - Use lock object on vmobjlock (and thus vnode_t::v_interlock) to share the locks amongst UVM objects where necessary (tmpfs, layerfs, unionfs). - Rewrite and optimise x86 TLB shootdown code, make it simpler and cleaner. Add TLBSTATS option for x86 to collect statistics about TLB shootdowns. - Unify /dev/mem et al in MI code and provide required locking (removes kernel-lock on some ports). Also, avoid cache-aliasing issues. Thanks to Andrew Doran and Joerg Sonnenberger, as their initial patches formed the core changes of this branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD$"); d253 2 a254 1 pg = uvm_pagealloc(uobj, offset, NULL, 0); d265 2 a266 1 UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0); d280 2 a281 1 UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0); d308 2 a309 1 UVMHIST_LOG(ubchist, "found %p", pg,0,0,0); @ 1.95 log @Replace "malloc" in comments, remove unnecessary header inclusions. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.94 2011/02/02 20:07:25 chuck Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.94 2011/02/02 20:07:25 chuck Exp $"); d142 1 a142 1 KASSERT(mutex_owned(&vp->v_interlock)); d182 1 a182 1 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || d237 2 d259 1 a259 1 mutex_exit(&uobj->vmobjlock); d261 1 a261 1 mutex_enter(&uobj->vmobjlock); d279 1 a279 1 UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0, d281 1 a281 1 mutex_enter(&uobj->vmobjlock); d331 1 a331 1 mutex_enter(&uobj->vmobjlock); d350 1 a350 1 mutex_enter(&uobj->vmobjlock); d353 1 a353 1 mutex_exit(&uobj->vmobjlock); d360 1 a360 1 mutex_enter(&vp->v_interlock); d367 1 a367 1 mutex_exit(&vp->v_interlock); @ 1.95.2.1 log @Catchup with rmind-uvmplock merge. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.96 2011/06/12 03:36:04 rmind Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.96 2011/06/12 03:36:04 rmind Exp $"); d142 1 a142 1 KASSERT(mutex_owned(vp->v_interlock)); d182 1 a182 1 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) || a236 2 KASSERT(mutex_owned(uobj->vmobjlock)); d257 1 a257 1 mutex_exit(uobj->vmobjlock); d259 1 a259 1 mutex_enter(uobj->vmobjlock); d277 1 a277 1 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, d279 1 a279 1 mutex_enter(uobj->vmobjlock); d329 1 a329 1 mutex_enter(uobj->vmobjlock); d348 1 a348 1 mutex_enter(uobj->vmobjlock); d351 1 a351 1 mutex_exit(uobj->vmobjlock); d358 1 a358 1 mutex_enter(vp->v_interlock); d365 1 a365 1 mutex_exit(vp->v_interlock); @ 1.94 log @udpate license clauses on my code to match the new-style BSD licenses. verified with Mike Hibler it is ok to remove clause 3 on utah copyright, as per UCB. based on diff that rmind@@ sent me. no functional change with this commit. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $ */ d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $"); a54 2 #include #include @ 1.93 log @The VATTR_NULL/VREF/VHOLD/HOLDRELE() macros lost their will to live years ago when the kernel was modified to not alter ABI based on DIAGNOSTIC, and now just call the respective function interfaces (in lowercase). Plenty of mix'n match upper/lowercase has creeped into the tree since then. Nuke the macros and convert all callsites to lowercase. no functional change @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.92 2009/08/04 23:31:57 pooka Exp $ */ d23 1 a23 6 * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Charles D. Cranor, * Washington University, the University of California, Berkeley and * its contributors. * 4. Neither the name of the University nor the names of its contributors d48 1 a48 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.92 2009/08/04 23:31:57 pooka Exp $"); @ 1.93.6.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.95 2011/04/23 18:14:13 rmind Exp $ */ d23 6 a28 1 * 3. Neither the name of the University nor the names of its contributors d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.95 2011/04/23 18:14:13 rmind Exp $"); d60 2 @ 1.93.8.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.94 2011/02/02 20:07:25 chuck Exp $ */ d23 6 a28 1 * 3. Neither the name of the University nor the names of its contributors d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.94 2011/02/02 20:07:25 chuck Exp $"); @ 1.93.4.1 log @Change struct uvm_object::vmobjlock to be dynamically allocated with mutex_obj_alloc(). It allows us to share the locks among UVM objects. 
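The 1.93.4.1 log above describes turning vmobjlock into a dynamically allocated lock so several objects can share one. A hedged sketch of that idea using the mutex_obj_alloc(9) interface; the object names and field assignments are illustrative only:

	kmutex_t *lk;

	lk = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);	/* refcounted lock object */
	obj_a->vmobjlock = lk;

	mutex_obj_hold(lk);		/* extra reference so a second object shares it */
	obj_b->vmobjlock = lk;

	/* ... eventually each owner drops its reference ... */
	mutex_obj_free(obj_b->vmobjlock);
	mutex_obj_free(obj_a->vmobjlock);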
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93 2010/01/08 11:35:12 pooka Exp $"); d149 1 a149 1 KASSERT(mutex_owned(vp->v_interlock)); d189 1 a189 1 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(vp->v_interlock)) || d264 1 a264 1 mutex_exit(uobj->vmobjlock); d266 1 a266 1 mutex_enter(uobj->vmobjlock); d284 1 a284 1 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, d286 1 a286 1 mutex_enter(uobj->vmobjlock); d336 1 a336 1 mutex_enter(uobj->vmobjlock); d355 1 a355 1 mutex_enter(uobj->vmobjlock); d358 1 a358 1 mutex_exit(uobj->vmobjlock); d365 1 a365 1 mutex_enter(vp->v_interlock); d372 1 a372 1 mutex_exit(vp->v_interlock); @ 1.93.4.2 log @Reorganise UVM locking to protect P->V state and serialise pmap(9) operations on the same page(s) by always locking their owner. Hence lock order: "vmpage"-lock -> pmap-lock. Patch, proposed on tech-kern@@, from Andrew Doran. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.93.4.1 2010/03/16 15:38:19 rmind Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.93.4.1 2010/03/16 15:38:19 rmind Exp $"); a243 2 KASSERT(mutex_owned(uobj->vmobjlock)); @ 1.93.4.3 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d23 6 a28 1 * 3. Neither the name of the University nor the names of its contributors d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); @ 1.93.4.4 log @sync with head @ text @d55 2 @ 1.93.2.1 log @uvn_get: For XIP vnodes, skip read-ahead, because it's pointless. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); a179 4 #ifdef XIP if ((vp->v_flag & VV_XIP) != 0) goto uvn_get_ra_done; #endif a184 3 #ifdef XIP uvn_get_ra_done: #endif @ 1.93.2.2 log @Actually make this build with options XIP. @ text @a55 1 #include "opt_xip.h" d181 1 a181 1 if ((vp->v_vflag & VV_XIP) != 0) @ 1.93.2.3 log @Factor out the part which lookups physical page "identity" from UVM object, into sys/uvm/uvm_vnode.c:uvn_findpage_xip(). Eventually this will become a call to cdev UVM object pager. @ text @a406 36 /* * uvn_findpage_xip * Lookup a physical page identity (== struct vm_page * in * the current UVM design) within the given vnode, at the * given offset. */ struct vm_page * uvn_findpage_xip(struct uvm_object *uobj, off_t off) { struct vnode *vp = (struct vnode *)uobj; struct vm_physseg *seg; struct vm_page *pg; KASSERT((vp->v_vflag & VV_XIP) != 0); KASSERT((off & PAGE_MASK) == 0); /* * Lookup a physical page identity from the underlying physical * segment. * * Eventually, this will be replaced by a call of character * device pager method, which is a generalized version of * cdev_mmap(). Which means that v_physseg will become struct * uvm_object *, and this will call cdev_page(uobj, off). */ seg = vp->v_physseg; KASSERT(seg != NULL); pg = seg->pgs + (off >> PAGE_SHIFT); KASSERT(pg->phys_addr == (seg->start << PAGE_SHIFT) + off); return pg; } @ 1.93.2.4 log @Make XIP pager use cdev_mmap() instead of struct vm_physseg. @ text @d415 1 a415 1 uvn_findpage_xip(struct vnode *devvp, struct uvm_object *uobj, off_t off) a416 1 #if defined(DIAGNOSTIC) a417 1 #endif a420 2 #if defined(XIP) #if !defined(XIP_CDEV_MMAP) d434 1 a434 1 seg = devvp->v_physseg; a437 31 #else dev_t dev; paddr_t mdpgno, pa, pfn; int segno, segidx; KASSERT(vp != NULL); KASSERT((vp->v_vflag & VV_XIP) != 0); KASSERT((off & PAGE_MASK) == 0); /* * Get an "mmap cookie" from device. 
*/ dev = devsw_blk2chr(devvp->v_rdev); mdpgno = cdev_mmap(dev, off, 0); KASSERT(mdpgno != -1); /* * Index the matching vm_page and return it the vnode pager * (genfs_getpages). */ pa = pmap_phys_address(mdpgno); pfn = atop(pa); segno = vm_physseg_find_device(pfn, &segidx); seg = VM_PHYSDEV_PTR(segno); KASSERT(seg != NULL); KASSERT(segidx == pfn - seg->start); KASSERT(seg->pgs != NULL); pg = seg->pgs + segidx; #endif #endif @ 1.93.2.5 log @uvn_findpage_xip: This is responsible to return a page with marked as "busy". @ text @a475 2 pg->flags |= PG_BUSY; @ 1.93.2.6 log @uvm_findpage_xip: A few UVMHIST logs. @ text @a422 3 UVMHIST_FUNC("uvn_findpage_xip"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "called devvp=%p uobj=%p off=%lx",devvp,uobj,(long)off,0); a477 2 UVMHIST_LOG(ubchist, "done pa=%lx seg=%p pg=%p off=%lx",(long)pa,seg,pg,(long)off); @ 1.92 log @uvm_vnp_zerorange() logically and by implementation more a part of ubc than uvm_vnode, so move it over. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.91 2009/08/04 23:03:01 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.91 2009/08/04 23:03:01 pooka Exp $"); d117 1 a117 1 VREF((struct vnode *)uobj); @ 1.91 log @kernel opt polish: g/c unnecessary fs_nfs.h and opt_ddb.h @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $"); a374 28 /* * uvm_vnp_zerorange: set a range of bytes in a file to zero. */ void uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len) { void *win; int flags; /* * XXXUBC invent kzero() and use it */ while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE); memset(win, 0, bytelen); flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0; ubc_release(win, flags); off += bytelen; len -= bytelen; } } @ 1.90 log @Merge vmlocking2 to head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $"); a54 1 #include "fs_nfs.h" a55 1 #include "opt_ddb.h" @ 1.90.28.1 log @Make uvm_map recognize UVM_FLAG_COLORMATCH which tells uvm_map that the 'align' argument specifies the starting color of the KVA range to be returned. When calling uvm_km_alloc with UVM_KMF_VAONLY, also specify the starting color of the kva range returned (UMV_KMF_COLORMATCH) and pass those to uvm_map. In uvm_pglistalloc, make sure the pages being returned have sequentially advancing colors (so they can be mapped in a contiguous address range). Add a few missing UVM_FLAG_COLORMATCH flags to uvm_pagealloc calls. Make the socket and pipe loan color-safe. Make the mips pmap enforce strict page color (color(VA) == color(PA)). @ text @d1 1 a1 1 /* uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp */ d53 1 a53 1 __KERNEL_RCSID(0, "uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp"); d260 1 a260 2 pg = uvm_pagealloc(uobj, offset, NULL, UVM_FLAG_COLORMATCH); @ 1.90.28.2 log @Rework page free lists to be sorted by color first rather than free_list. Kept per color PGFL_* counter in each page free list. Minor cleanups. 
@ text @d272 1 a272 2 UVMHIST_LOG(ubchist, "alloced %p (%u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); d286 1 a286 2 UVMHIST_LOG(ubchist, "wait %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); d313 1 a313 2 UVMHIST_LOG(ubchist, "found %p (color %u)", pg, VM_PGCOLOR_BUCKET(pg), 0,0); @ 1.90.28.3 log @Restore $NetBSD$ @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); @ 1.90.28.4 log @Improve UVM_PAGE_TRKOWN. Add more asserts to uvm_page. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.90.28.3 2011/06/03 07:52:48 matt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90.28.3 2011/06/03 07:52:48 matt Exp $"); d314 1 a314 1 UVM_PAGE_OWN(pg, "uvn_findpage", NULL); @ 1.90.10.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $"); d55 1 d57 1 d377 28 @ 1.90.10.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.90.10.1 2009/08/19 18:48:36 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90.10.1 2009/08/19 18:48:36 yamt Exp $"); d117 1 a117 1 vref((struct vnode *)uobj); @ 1.89 log @constify pagerops. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d151 1 a151 1 LOCK_ASSERT(simple_lock_held(&vp->v_interlock)); d153 1 a182 1 simple_unlock(&vp->v_interlock); a185 1 simple_lock(&vp->v_interlock); d191 2 a192 3 LOCK_ASSERT(((flags & PGO_LOCKED) != 0 && simple_lock_held(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); d266 1 a266 1 simple_unlock(&uobj->vmobjlock); d268 1 a268 1 simple_lock(&uobj->vmobjlock); d288 1 a288 1 simple_lock(&uobj->vmobjlock); d338 1 a338 1 simple_lock(&uobj->vmobjlock); d357 1 a357 1 simple_lock(&uobj->vmobjlock); d360 1 a360 1 simple_unlock(&uobj->vmobjlock); d367 1 a367 1 simple_lock(&vp->v_interlock); d374 1 a374 1 simple_unlock(&vp->v_interlock); @ 1.89.6.1 log @Sync with HEAD @ text @d151 1 a151 1 KASSERT(mutex_owned(&vp->v_interlock)); a152 1 d182 1 d186 1 d192 3 a194 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); d268 1 a268 1 mutex_exit(&uobj->vmobjlock); d270 1 a270 1 mutex_enter(&uobj->vmobjlock); d290 1 a290 1 mutex_enter(&uobj->vmobjlock); d340 1 a340 1 mutex_enter(&uobj->vmobjlock); d359 1 a359 1 mutex_enter(&uobj->vmobjlock); d362 1 a362 1 mutex_exit(&uobj->vmobjlock); d369 1 a369 1 mutex_enter(&vp->v_interlock); d376 1 a376 1 mutex_exit(&vp->v_interlock); @ 1.89.2.1 log @Pull the vmlocking changes into a new branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $"); d151 1 a151 1 KASSERT(mutex_owned(&vp->v_interlock)); a152 1 d182 1 a182 1 mutex_exit(&vp->v_interlock); d186 1 a186 1 mutex_enter(&vp->v_interlock); d192 3 a194 4 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || ((flags & PGO_LOCKED) == 0 && !mutex_owned(&vp->v_interlock))); d268 1 a268 1 mutex_exit(&uobj->vmobjlock); d270 1 a270 1 mutex_enter(&uobj->vmobjlock); d290 1 a290 1 mutex_enter(&uobj->vmobjlock); d340 1 a340 1 mutex_enter(&uobj->vmobjlock); d359 1 a359 1 mutex_enter(&uobj->vmobjlock); d362 1 a362 1 mutex_exit(&uobj->vmobjlock); d369 1 a369 1 mutex_enter(&vp->v_interlock); d376 1 a376 1 mutex_exit(&vp->v_interlock); @ 1.89.2.2 log @Lock readahead context using the associated object's lock. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.89.2.1 2007/12/04 13:04:07 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89.2.1 2007/12/04 13:04:07 ad Exp $"); d183 1 d187 1 d193 4 a196 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); @ 1.88 log @use designated initiaizers for uvm_pagerops. @ text @d94 1 a94 1 struct uvm_pagerops uvm_vnodeops = { @ 1.87 log @Remove LOCK_ASSERT(!simple_lock_held(&foo)); @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.86 2007/10/10 20:42:41 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.86 2007/10/10 20:42:41 ad Exp $"); d95 4 a98 6 NULL, uvn_reference, uvn_detach, NULL, uvn_get, uvn_put, @ 1.87.4.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $"); d94 7 a100 5 const struct uvm_pagerops uvm_vnodeops = { .pgo_reference = uvn_reference, .pgo_detach = uvn_detach, .pgo_get = uvn_get, .pgo_put = uvn_put, @ 1.87.4.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.90 2008/01/02 11:49:21 ad Exp $"); d151 1 a151 1 KASSERT(mutex_owned(&vp->v_interlock)); a152 1 d182 1 d186 1 d192 3 a194 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); d268 1 a268 1 mutex_exit(&uobj->vmobjlock); d270 1 a270 1 mutex_enter(&uobj->vmobjlock); d290 1 a290 1 mutex_enter(&uobj->vmobjlock); d340 1 a340 1 mutex_enter(&uobj->vmobjlock); d359 1 a359 1 mutex_enter(&uobj->vmobjlock); d362 1 a362 1 mutex_exit(&uobj->vmobjlock); d369 1 a369 1 mutex_enter(&vp->v_interlock); d376 1 a376 1 mutex_exit(&vp->v_interlock); @ 1.86 log @Merge from vmlocking: - Split vnode::v_flag into three fields, depending on field locking. - simple_lock -> kmutex in a few places. - Fix some simple locking problems. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $"); a154 1 LOCK_ASSERT(!simple_lock_held(&vp->v_interlock)); d196 1 a196 2 ((flags & PGO_LOCKED) == 0 && !simple_lock_held(&vp->v_interlock))); @ 1.85 log @Use VSIZENOTSET only in KASSERTs @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.84 2007/07/22 19:16:06 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.84 2007/07/22 19:16:06 pooka Exp $"); d416 1 a416 1 return (vp->v_flag & VEXECMAP) != 0; d424 1 a424 1 return (vp->v_flag & VONWORKLST) == 0; d433 1 a433 1 (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP; @ 1.85.6.1 log @file uvm_vnode.c was added on branch matt-mips64 on 2007-08-04 09:42:59 +0000 @ text @d1 434 @ 1.85.6.2 log @Use VSIZENOTSET only in KASSERTs @ text @a0 434 /* $NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. * Copyright (c) 1991, 1993 * The Regents of the University of California. * Copyright (c) 1990 University of Utah. * * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @@(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
 */

/*
 * uvm_vnode.c: the vnode pager.
 */

#include
__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $");

#include "fs_nfs.h"
#include "opt_uvmhist.h"
#include "opt_ddb.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * functions
 */

static void	uvn_detach(struct uvm_object *);
static int	uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
			int, vm_prot_t, int, int);
static int	uvn_put(struct uvm_object *, voff_t, voff_t, int);
static void	uvn_reference(struct uvm_object *);

static int	uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
			     int);

/*
 * master pager structure
 */

struct uvm_pagerops uvm_vnodeops = {
	NULL,
	uvn_reference,
	uvn_detach,
	NULL,
	uvn_get,
	uvn_put,
};

/*
 * the ops!
 */

/*
 * uvn_reference
 *
 * duplicate a reference to a VM object.  Note that the reference
 * count must already be at least one (the passed in reference) so
 * there is no chance of the uvn being killed or locked out here.
 *
 * => caller must call with object unlocked.
 * => caller must be using the same accessprot as was used at attach time
 */

static void
uvn_reference(struct uvm_object *uobj)
{
	VREF((struct vnode *)uobj);
}

/*
 * uvn_detach
 *
 * remove a reference to a VM object.
 *
 * => caller must call with object unlocked and map locked.
 */

static void
uvn_detach(struct uvm_object *uobj)
{
	vrele((struct vnode *)uobj);
}

/*
 * uvn_put: flush page data to backing store.
 *
 * => object must be locked on entry!  VOP_PUTPAGES must unlock it.
 * => flags: PGO_SYNCIO -- use sync. I/O
 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
 */

static int
uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
	error = VOP_PUTPAGES(vp, offlo, offhi, flags);
	LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
	return error;
}

/*
 * uvn_get: get pages (synchronously) from backing store
 *
 * => prefer map unlocked (not required)
 * => object must be locked!  we will _unlock_ it before starting any I/O.
 * => flags: PGO_ALLPAGES: get all of the pages
 *           PGO_LOCKED: fault data structures are locked
 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
 * => NOTE: caller must check for released pages!!
 */

static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		simple_unlock(&vp->v_interlock);
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
		simple_lock(&vp->v_interlock);
	}

	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
			     access_type, advice, flags);

	LOCK_ASSERT(((flags & PGO_LOCKED) != 0 &&
		     simple_lock_held(&vp->v_interlock)) ||
		    ((flags & PGO_LOCKED) == 0 &&
		     !simple_lock_held(&vp->v_interlock)));
	return error;
}

/*
 * uvn_findpages:
 * return the page for the uobj and offset requested, allocating if needed.
 * => uobj must be locked.
 * => returned pages will be BUSY.
 */

int
uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
    struct vm_page **pgs, int flags)
{
	int i, count, found, npages, rv;

	count = found = 0;
	npages = *npagesp;
	if (flags & UFP_BACKWARD) {
		for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	} else {
		for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
			rv = uvn_findpage(uobj, offset, &pgs[i], flags);
			if (rv == 0) {
				if (flags & UFP_DIRTYONLY)
					break;
			} else
				found++;
			count++;
		}
	}
	*npagesp = count;
	return (found);
}

static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope?  allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				simple_unlock(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				simple_lock(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			simple_lock(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}

/*
 * uvm_vnp_setsize: grow or shrink a vnode uobj
 *
 * grow   => just update size value
 * shrink => toss un-needed pages
 *
 * => we assume that the caller has a reference of some sort to the
 *	vnode in question so that it will not be yanked out from under
 *	us.
 */

void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	simple_lock(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		simple_lock(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	simple_unlock(&uobj->vmobjlock);
}

void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{
	simple_lock(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	simple_unlock(&vp->v_interlock);
}

/*
 * uvm_vnp_zerorange: set a range of bytes in a file to zero.
 */

void
uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
{
	void *win;
	int flags;

	/*
	 * XXXUBC invent kzero() and use it
	 */

	while (len) {
		vsize_t bytelen = len;

		win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
		    UBC_WRITE);
		memset(win, 0, bytelen);
		flags = UBC_WANT_UNMAP(vp) ?
UBC_UNMAP : 0; ubc_release(win, flags); off += bytelen; len -= bytelen; } } bool uvn_text_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VEXECMAP) != 0; } bool uvn_clean_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VONWORKLST) == 0; } bool uvn_needs_writefault_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return uvn_clean_p(uobj) || (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP; } @ 1.85.2.1 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.87 2007/10/11 19:53:43 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.87 2007/10/11 19:53:43 ad Exp $"); d155 1 d197 2 a198 1 (flags & PGO_LOCKED) == 0); d416 1 a416 1 return (vp->v_iflag & VI_EXECMAP) != 0; d424 1 a424 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d433 1 a433 1 (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; @ 1.85.2.2 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.85.2.1 2007/11/06 23:35:33 matt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85.2.1 2007/11/06 23:35:33 matt Exp $"); d94 7 a100 5 const struct uvm_pagerops uvm_vnodeops = { .pgo_reference = uvn_reference, .pgo_detach = uvn_detach, .pgo_get = uvn_get, .pgo_put = uvn_put, d153 1 a153 1 KASSERT(mutex_owned(&vp->v_interlock)); a154 1 d184 1 d188 1 d194 3 a196 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); d270 1 a270 1 mutex_exit(&uobj->vmobjlock); d272 1 a272 1 mutex_enter(&uobj->vmobjlock); d292 1 a292 1 mutex_enter(&uobj->vmobjlock); d342 1 a342 1 mutex_enter(&uobj->vmobjlock); d361 1 a361 1 mutex_enter(&uobj->vmobjlock); d364 1 a364 1 mutex_exit(&uobj->vmobjlock); d371 1 a371 1 mutex_enter(&vp->v_interlock); d378 1 a378 1 mutex_exit(&vp->v_interlock); @ 1.85.4.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $"); d155 1 d197 2 a198 1 (flags & PGO_LOCKED) == 0); d416 1 a416 1 return (vp->v_iflag & VI_EXECMAP) != 0; d424 1 a424 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d433 1 a433 1 (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; @ 1.84 log @Retire uvn_attach() - it abuses VXLOCK and its functionality, setting vnode sizes, is handled elsewhere: file system vnode creation or spec_open() for regular files or block special files, respectively. Add a call to VOP_MMAP() to the pagedvn exec path, since the vnode is being memory mapped. reviewed by tech-kern & wrstuden @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $"); d353 9 a361 9 if (vp->v_writesize != VSIZENOTSET) { KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; } else { oldsize = vp->v_size; } if (oldsize > pgend && oldsize != VSIZENOTSET) { d374 1 @ 1.84.4.1 log @Sync with HEAD. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $"); d353 9 a361 9 KASSERT(newsize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; KASSERT(oldsize != VSIZENOTSET || pgend > oldsize); if (oldsize > pgend) { a373 1 KASSERT(newsize != VSIZENOTSET); @ 1.84.4.2 log @Sync with HEAD. Follow the merge of pmap.c on i386 and amd64 and move pmap_init_tmp_pgtbl into arch/x86/x86/pmap.c. Modify the ACPI wakeup code to restore CR4 before jumping back into kernel space as the large page option might cover that. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.84.4.1 2007/08/04 12:33:17 jmcneill Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.84.4.1 2007/08/04 12:33:17 jmcneill Exp $"); d155 1 d197 2 a198 1 (flags & PGO_LOCKED) == 0); d416 1 a416 1 return (vp->v_iflag & VI_EXECMAP) != 0; d424 1 a424 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d433 1 a433 1 (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; @ 1.84.4.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.84.4.2 2007/10/26 15:49:44 joerg Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.84.4.2 2007/10/26 15:49:44 joerg Exp $"); d94 7 a100 5 const struct uvm_pagerops uvm_vnodeops = { .pgo_reference = uvn_reference, .pgo_detach = uvn_detach, .pgo_get = uvn_get, .pgo_put = uvn_put, @ 1.83 log @Merge some of the less invasive changes from the vmlocking branch: - kthread, callout, devsw API changes - select()/poll() improvements - miscellaneous MT safety improvements @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.82 2007/06/05 12:31:36 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.82 2007/06/05 12:31:36 yamt Exp $"); a107 112 * uvn_attach * * attach a vnode structure to a VM object. if the vnode is already * attached, then just bump the reference count by one and return the * VM object. if not already attached, attach and return the new VM obj. * the "accessprot" tells the max access the attaching thread wants to * our pages. * * => caller must _not_ already be holding the lock on the uvm_object. * => in fact, nothing should be locked so that we can sleep here. * => note that uvm_object is first thing in vnode structure, so their * pointers are equiv. */ struct uvm_object * uvn_attach(void *arg, vm_prot_t accessprot) { struct vnode *vp = arg; struct uvm_object *uobj = &vp->v_uobj; struct vattr vattr; int result; struct partinfo pi; voff_t used_vnode_size; UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0); used_vnode_size = (voff_t)0; /* * first get a lock on the uobj. */ simple_lock(&uobj->vmobjlock); while (vp->v_flag & VXLOCK) { vp->v_flag |= VXWANT; UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0); UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false, "uvn_attach", 0); simple_lock(&uobj->vmobjlock); UVMHIST_LOG(maphist," WOKE UP",0,0,0,0); } /* * if we're mapping a BLK device, make sure it is a disk. */ if (vp->v_type == VBLK) { if (bdev_type(vp->v_rdev) != D_DISK) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); return(NULL); } } KASSERT(vp->v_type == VREG || vp->v_type == VBLK); /* * set up our idea of the size * if this hasn't been done already. 
*/ if (vp->v_size == VSIZENOTSET) { vp->v_flag |= VXLOCK; simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */ /* XXX: curproc? */ if (vp->v_type == VBLK) { /* * We could implement this as a specfs getattr call, but: * * (1) VOP_GETATTR() would get the file system * vnode operation, not the specfs operation. * * (2) All we want is the size, anyhow. */ result = bdev_ioctl(vp->v_rdev, DIOCGPART, (void *)&pi, FREAD, curlwp); if (result == 0) { /* XXX should remember blocksize */ used_vnode_size = (voff_t)pi.disklab->d_secsize * (voff_t)pi.part->p_size; } } else { result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp); if (result == 0) used_vnode_size = vattr.va_size; } /* relock object */ simple_lock(&uobj->vmobjlock); if (vp->v_flag & VXWANT) { wakeup(vp); } vp->v_flag &= ~(VXLOCK|VXWANT); if (result != 0) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); return(NULL); } vp->v_size = vp->v_writesize = used_vnode_size; } simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount, 0, 0, 0); return uobj; } /* @ 1.83.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.85 2007/08/04 09:42:58 pooka Exp $"); d108 112 d465 9 a473 9 KASSERT(newsize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; KASSERT(oldsize != VSIZENOTSET || pgend > oldsize); if (oldsize > pgend) { a485 1 KASSERT(newsize != VSIZENOTSET); @ 1.82 log @improve post-ubc file overwrite performance in common cases. ie. when it's safe, actually overwrite blocks rather than doing read-modify-write. also fixes PR/33152 and PR/36303. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81 2007/03/04 06:03:49 christos Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81 2007/03/04 06:03:49 christos Exp $"); a127 1 const struct bdevsw *bdev; d154 1 a154 2 bdev = bdevsw_lookup(vp->v_rdev); if (bdev == NULL || bdev->d_type != D_DISK) { d182 2 a183 7 bdev = bdevsw_lookup(vp->v_rdev); if (bdev != NULL) { result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (void *)&pi, FREAD, curlwp); } else { result = ENXIO; } @ 1.81 log @Kill caddr_t; there will be some MI fallout, but it will be fixed shortly. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.80 2007/02/22 06:05:02 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.80 2007/02/22 06:05:02 thorpej Exp $"); d215 1 a215 1 vp->v_size = used_vnode_size; d472 8 a479 1 oldsize = vp->v_size; d484 1 a484 1 vp->v_size = newsize; d488 13 @ 1.81.4.1 log @Sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.83 2007/07/09 21:11:37 ad Exp $"); d128 1 d155 2 a156 1 if (bdev_type(vp->v_rdev) != D_DISK) { d184 7 a190 2 result = bdev_ioctl(vp->v_rdev, DIOCGPART, (void *)&pi, FREAD, curlwp); d215 1 a215 1 vp->v_size = vp->v_writesize = used_vnode_size; d472 1 a472 8 if (vp->v_writesize != VSIZENOTSET) { KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; } else { oldsize = vp->v_size; } d477 1 a477 1 vp->v_size = vp->v_writesize = newsize; a480 13 void uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize) { simple_lock(&vp->v_interlock); KASSERT(vp->v_size != VSIZENOTSET); KASSERT(vp->v_writesize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size <= newsize); vp->v_writesize = newsize; simple_unlock(&vp->v_interlock); } @ 1.81.2.1 log @Pull in the initial set of changes for the vmlocking branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81 2007/03/04 06:03:49 christos Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81 2007/03/04 06:03:49 christos Exp $"); d141 1 a141 1 mutex_enter(&uobj->vmobjlock); d147 1 a147 1 mutex_enter(&uobj->vmobjlock); d157 1 a157 1 mutex_exit(&uobj->vmobjlock); d173 1 a173 1 mutex_exit(&uobj->vmobjlock); /* drop lock in case we sleep */ d203 1 a203 1 mutex_enter(&uobj->vmobjlock); d211 1 a211 1 mutex_exit(&uobj->vmobjlock); d219 1 a219 1 mutex_exit(&uobj->vmobjlock); a221 1 d272 1 a272 2 KERNEL_LOCK(1, curlwp); KASSERT(mutex_owned(&vp->v_interlock)); d274 1 a274 3 KASSERT(!mutex_owned(&vp->v_interlock)); KERNEL_UNLOCK_ONE(curlwp); d304 1 a304 1 mutex_exit(&vp->v_interlock); d308 1 a308 1 mutex_enter(&vp->v_interlock); a310 1 KERNEL_LOCK(1, curlwp); a312 1 KERNEL_UNLOCK_ONE(curlwp); d314 2 a315 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || d317 1 a317 1 !mutex_owned(&vp->v_interlock))); d391 1 a391 1 mutex_exit(&uobj->vmobjlock); d393 1 a393 1 mutex_enter(&uobj->vmobjlock); d413 1 a413 1 mutex_enter(&uobj->vmobjlock); d463 1 a463 1 mutex_enter(&uobj->vmobjlock); d475 1 a475 1 mutex_enter(&uobj->vmobjlock); d478 1 a478 1 mutex_exit(&uobj->vmobjlock); @ 1.81.2.2 log @Acquire the kernel lock in the VOP_* wrappers and the socket ops. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.1 2007/03/13 17:51:58 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.1 2007/03/13 17:51:58 ad Exp $"); d273 1 d276 2 d315 1 d318 1 @ 1.81.2.3 log @- Fix a (new) bug where vget tries to acquire freed vnodes' interlocks. - Minor locking fixes. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.2 2007/03/21 20:09:39 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.2 2007/03/21 20:09:39 ad Exp $"); d142 2 a143 1 if (vp->v_flag & VXLOCK) { d145 3 a147 1 vwait(vp, VXLOCK); d204 5 a208 1 vunwait(vp, VXLOCK); @ 1.81.2.4 log @- Make the devsw interface MP safe, and add some comments. - Allow individual block/character drivers to be marked MP safe. - Provide wrappers around the device methods that look up the device, returning ENXIO if it's not found, and acquire the kernel lock if needed. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.3 2007/04/13 15:49:51 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.3 2007/04/13 15:49:51 ad Exp $"); d152 2 a153 1 if (bdev_type(vp->v_rdev) != D_DISK) { @ 1.81.2.5 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.4 2007/04/13 20:56:19 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.4 2007/04/13 20:56:19 ad Exp $"); d207 1 a207 1 vp->v_size = vp->v_writesize = used_vnode_size; d465 1 a465 8 if (vp->v_writesize != VSIZENOTSET) { KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; } else { oldsize = vp->v_size; } d470 1 a470 1 vp->v_size = vp->v_writesize = newsize; a473 13 void uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize) { mutex_enter(&vp->v_interlock); KASSERT(vp->v_size != VSIZENOTSET); KASSERT(vp->v_writesize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size <= newsize); vp->v_writesize = newsize; mutex_exit(&vp->v_interlock); } @ 1.81.2.6 log @- Increase the number of thread priorities from 128 to 256. How the space is set up is to be revisited. - Implement soft interrupts as kernel threads. A generic implementation is provided, with hooks for fast-path MD code that can run the interrupt threads over the top of other threads executing in the kernel. - Split vnode::v_flag into three fields, depending on how the flag is locked (by the interlock, by the vnode lock, by the file system). - Miscellaneous locking fixes and improvements. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.5 2007/06/09 23:58:21 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.5 2007/06/09 23:58:21 ad Exp $"); d142 1 a142 1 if (vp->v_iflag & VI_XLOCK) { d144 1 a144 1 vwait(vp, VI_XLOCK); d168 1 a168 1 vp->v_iflag |= VI_XLOCK; d200 1 a200 1 vunwait(vp, VI_XLOCK); d527 1 a527 1 return (vp->v_iflag & VI_EXECMAP) != 0; d535 1 a535 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d544 1 a544 1 (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; @ 1.81.2.7 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.6 2007/06/17 21:32:23 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.6 2007/06/17 21:32:23 ad Exp $"); d128 1 d180 7 a186 2 result = bdev_ioctl(vp->v_rdev, DIOCGPART, (void *)&pi, FREAD, curlwp); @ 1.81.2.8 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.7 2007/07/15 13:28:22 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.7 2007/07/15 13:28:22 ad Exp $"); d141 1 a141 1 while (vp->v_iflag & VI_XLOCK) { @ 1.81.2.9 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.8 2007/07/15 15:53:08 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.8 2007/07/15 15:53:08 ad Exp $"); d108 106 d467 1 a467 1 if (oldsize > pgend) { @ 1.81.2.10 log @Sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.81.2.9 2007/08/20 21:28:34 ad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.81.2.9 2007/08/20 21:28:34 ad Exp $"); d353 8 a360 8 KASSERT(newsize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; KASSERT(oldsize != VSIZENOTSET || pgend > oldsize); a373 1 KASSERT(newsize != VSIZENOTSET); @ 1.80 log @TRUE -> true, FALSE -> false @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.79 2007/02/21 23:00:15 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.79 2007/02/21 23:00:15 thorpej Exp $"); d187 1 a187 1 (caddr_t)&pi, FREAD, curlwp); @ 1.79 log @Replace the Mach-derived boolean_t type with the C99 bool type. A future commit will replace use of TRUE and FALSE with true and false. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $"); d145 1 a145 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE, @ 1.78 log @a smorgasbord of improvements to vnode locking and path lookup: - LOCKPARENT is no longer relevant for lookup(), relookup() or VOP_LOOKUP(). these now always return the parent vnode locked. namei() works as before. lookup() and various other paths no longer acquire vnode locks in the wrong order via vrele(). fixes PR 32535. as a nice side effect, path lookup is also up to 25% faster. - the above allows us to get rid of PDIRUNLOCK. - also get rid of WANTPARENT (just use LOCKPARENT and unlock it). - remove an assumption in layer_node_find() that all file systems implement a recursive VOP_LOCK() (unionfs doesn't). - require that all file systems supply vfs_vptofh and vfs_fhtovp routines. fill in eopnotsupp() for file systems that don't support being exported and remove the checks for NULL. (layerfs calls these without checking.) - in union_lookup1(), don't change refcounts in the ISDOTDOT case, just adjust which vnode is locked. fixes PR 33374. - apply fixes for ufs_rename() from ufs_vnops.c rev. 1.61 to ext2fs_rename(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $"); d367 1 a367 1 boolean_t dirty; d509 1 a509 1 boolean_t d517 1 a517 1 boolean_t d525 1 a525 1 boolean_t @ 1.78.2.1 log @- sync with head. - move sched_changepri back to kern_synch.c as it doesn't know PPQ anymore. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $"); d145 1 a145 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false, d367 1 a367 1 bool dirty; d509 1 a509 1 bool d517 1 a517 1 bool d525 1 a525 1 bool @ 1.78.2.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.78.2.1 2007/02/27 16:55:30 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.78.2.1 2007/02/27 16:55:30 yamt Exp $"); d187 1 a187 1 (void *)&pi, FREAD, curlwp); @ 1.77 log @remove some __unused from function parameters. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d314 4 @ 1.77.2.1 log @Apply patch (requested by chs in ticket #422): - Fix various deadlock problems with nullfs and unionfs. - Speed up path lookups by upto 25%. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $"); a313 4 LOCK_ASSERT(((flags & PGO_LOCKED) != 0 && simple_lock_held(&vp->v_interlock)) || ((flags & PGO_LOCKED) == 0 && !simple_lock_held(&vp->v_interlock))); @ 1.76 log @uvm_vnp_setsize: put back v_size assignment after uvn_put. PR/34147 from Juergen Hannken-Illjes. @ text @d123 1 a123 1 uvn_attach(void *arg, vm_prot_t accessprot __unused) @ 1.75 log @move some knowledge about vnode into uvm_vnode.c. @ text @a468 1 vp->v_size = newsize; d471 1 a471 2 } else { simple_unlock(&uobj->vmobjlock); d473 2 @ 1.74 log @- sprinkle __unused on function decls. - fix a couple of unused bugs - no more -Wno-unused for i386 @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $"); d504 25 @ 1.73 log @merge yamt-pdpolicy branch. - separate page replacement policy from the rest of kernel - implement an alternative replacement policy @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $"); d123 1 a123 1 uvn_attach(void *arg, vm_prot_t accessprot) @ 1.73.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $"); d123 1 a123 1 uvn_attach(void *arg, vm_prot_t accessprot __unused) d469 1 d472 2 a473 1 simple_lock(&uobj->vmobjlock); a474 2 vp->v_size = newsize; simple_unlock(&uobj->vmobjlock); a503 25 boolean_t uvn_text_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VEXECMAP) != 0; } boolean_t uvn_clean_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VONWORKLST) == 0; } boolean_t uvn_needs_writefault_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return uvn_clean_p(uobj) || (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP; } @ 1.73.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.73.2.1 2006/10/22 06:07:54 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.73.2.1 2006/10/22 06:07:54 yamt Exp $"); d123 1 a123 1 uvn_attach(void *arg, vm_prot_t accessprot) a313 4 LOCK_ASSERT(((flags & PGO_LOCKED) != 0 && simple_lock_held(&vp->v_interlock)) || ((flags & PGO_LOCKED) == 0 && !simple_lock_held(&vp->v_interlock))); @ 1.72 log @- in genfs_getpages, take g_glock earlier so that it can't be intervened by truncation. it also fixes a deadlock. (g_glock vs pages locking order) - uvm_vnp_setsize: modify v_size while holding v_interlock. reviewed by Chuck Silvers. 
@ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); a56 1 #include "opt_readahead.h" a297 3 #if defined(READAHEAD_STATS) int orignpages = *npagesp; #endif /* defined(READAHEAD_STATS) */ a313 25 #if defined(READAHEAD_STATS) if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) || ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) { int i; if ((flags & PGO_LOCKED) == 0) { simple_lock(&uobj->vmobjlock); } for (i = 0; i < orignpages; i++) { struct vm_page *pg = pps[i]; if (pg == NULL || pg == PGO_DONTCARE) { continue; } if ((pg->flags & PG_SPECULATIVE) != 0) { pg->flags &= ~PG_SPECULATIVE; uvm_ra_hit.ev_count++; } } if ((flags & PGO_LOCKED) == 0) { simple_unlock(&uobj->vmobjlock); } } #endif /* defined(READAHEAD_STATS) */ @ 1.72.4.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.77 2006/11/01 10:18:27 yamt Exp $"); d57 1 d299 3 d318 25 d498 1 d501 2 a502 1 simple_lock(&uobj->vmobjlock); a503 2 vp->v_size = newsize; simple_unlock(&uobj->vmobjlock); a532 25 boolean_t uvn_text_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VEXECMAP) != 0; } boolean_t uvn_clean_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VONWORKLST) == 0; } boolean_t uvn_needs_writefault_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return uvn_clean_p(uobj) || (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP; } @ 1.72.4.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.78 2006/12/09 16:11:52 chs Exp $"); a313 4 LOCK_ASSERT(((flags & PGO_LOCKED) != 0 && simple_lock_held(&vp->v_interlock)) || ((flags & PGO_LOCKED) == 0 && !simple_lock_held(&vp->v_interlock))); @ 1.71 log @- Use the LWP cached credentials where sane. - Minor cosmetic changes. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.70 2006/05/14 21:38:18 elad Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.70 2006/05/14 21:38:18 elad Exp $"); d485 1 d497 3 a499 1 if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) { a503 1 vp->v_size = newsize; @ 1.70 log @integrate kauth. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); @ 1.69 log @merge ktrace-lwp. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.68 2005/11/29 22:52:03 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.68 2005/11/29 22:52:03 yamt Exp $"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curlwp); @ 1.69.4.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.72 2006/07/22 08:47:56 yamt Exp $"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp); a484 1 voff_t oldsize; d496 1 a496 3 oldsize = vp->v_size; vp->v_size = newsize; if (oldsize > pgend && oldsize != VSIZENOTSET) { d501 1 @ 1.69.6.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); @ 1.69.12.1 log @Merge 2006-05-24 NetBSD-current into the "peter-altq" branch. 
@ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); @ 1.69.8.1 log @- change the way to account read-ahead stats. - fix UVM_PQFLAGBITS. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d57 1 d299 3 d318 25 @ 1.69.8.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.69.8.1 2006/03/12 09:38:56 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69.8.1 2006/03/12 09:38:56 yamt Exp $"); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); @ 1.69.8.3 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.69.8.2 2006/05/24 10:59:30 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69.8.2 2006/05/24 10:59:30 yamt Exp $"); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp); a455 1 voff_t oldsize; d467 1 a467 3 oldsize = vp->v_size; vp->v_size = newsize; if (oldsize > pgend && oldsize != VSIZENOTSET) { d472 1 @ 1.69.10.1 log @Adapt to kernel authorization changes. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $"); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); @ 1.68 log @merge yamt-readahead branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d188 1 a188 1 (caddr_t)&pi, FREAD, curproc); d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); @ 1.67 log @read-ahead statistics. @ text @d76 1 d306 9 d521 2 a522 1 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); @ 1.66 log @Sprinkle some static. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.65 2005/06/27 02:19:48 thorpej Exp $"); d57 1 d298 4 d307 26 @ 1.66.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $"); a56 1 #include "opt_readahead.h" a74 1 #include d186 1 a186 1 (caddr_t)&pi, FREAD, curlwp); d196 1 a196 1 result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp); a296 4 #if defined(READAHEAD_STATS) int orignpages = *npagesp; #endif /* defined(READAHEAD_STATS) */ a299 9 if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { simple_unlock(&vp->v_interlock); vn_ra_allocctx(vp); uvm_ra_request(vp->v_ractx, advice, uobj, offset, *npagesp << PAGE_SHIFT); simple_lock(&vp->v_interlock); } a301 26 #if defined(READAHEAD_STATS) if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) || ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) { int i; if ((flags & PGO_LOCKED) == 0) { simple_lock(&uobj->vmobjlock); } for (i = 0; i < orignpages; i++) { struct vm_page *pg = pps[i]; if (pg == NULL || pg == PGO_DONTCARE) { continue; } if ((pg->flags & PG_SPECULATIVE) != 0) { pg->flags &= ~PG_SPECULATIVE; uvm_ra_hit.ev_count++; } } if ((flags & PGO_LOCKED) == 0) { simple_unlock(&uobj->vmobjlock); } } #endif /* defined(READAHEAD_STATS) */ d480 1 a480 2 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE); @ 1.66.2.2 log @sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.1 2006/06/21 15:12:40 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.1 2006/06/21 15:12:40 yamt Exp $"); d57 1 d198 1 a198 1 result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp); d299 3 d318 25 a342 4 LOCK_ASSERT(((flags & PGO_LOCKED) != 0 && simple_lock_held(&vp->v_interlock)) || ((flags & PGO_LOCKED) == 0 && !simple_lock_held(&vp->v_interlock))); a484 1 voff_t oldsize; d496 1 a496 2 oldsize = vp->v_size; if (oldsize > pgend && oldsize != VSIZENOTSET) { d498 2 a499 1 simple_lock(&uobj->vmobjlock); a501 1 simple_unlock(&uobj->vmobjlock); a530 25 boolean_t uvn_text_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VEXECMAP) != 0; } boolean_t uvn_clean_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return (vp->v_flag & VONWORKLST) == 0; } boolean_t uvn_needs_writefault_p(struct uvm_object *uobj) { struct vnode *vp = (struct vnode *)uobj; return uvn_clean_p(uobj) || (vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP; } @ 1.66.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.2 2006/12/30 20:51:06 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.2 2006/12/30 20:51:06 yamt Exp $"); d145 1 a145 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, false, d367 1 a367 1 bool dirty; d509 1 a509 1 bool d517 1 a517 1 bool d525 1 a525 1 bool @ 1.66.2.4 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.3 2007/02/26 09:12:33 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.3 2007/02/26 09:12:33 yamt Exp $"); d108 119 d472 2 a473 9 KASSERT(newsize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size == vp->v_writesize || newsize == vp->v_writesize || newsize <= vp->v_size); oldsize = vp->v_writesize; KASSERT(oldsize != VSIZENOTSET || pgend > oldsize); if (oldsize > pgend) { d477 1 a477 1 vp->v_size = vp->v_writesize = newsize; a480 14 void uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize) { simple_lock(&vp->v_interlock); KASSERT(newsize != VSIZENOTSET); KASSERT(vp->v_size != VSIZENOTSET); KASSERT(vp->v_writesize != VSIZENOTSET); KASSERT(vp->v_size <= vp->v_writesize); KASSERT(vp->v_size <= newsize); vp->v_writesize = newsize; simple_unlock(&vp->v_interlock); } @ 1.66.2.5 log @sync with head. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.4 2007/09/03 14:47:13 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.4 2007/09/03 14:47:13 yamt Exp $"); d155 1 d197 2 a198 1 (flags & PGO_LOCKED) == 0); d416 1 a416 1 return (vp->v_iflag & VI_EXECMAP) != 0; d424 1 a424 1 return (vp->v_iflag & VI_ONWORKLST) == 0; d433 1 a433 1 (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP; @ 1.66.2.6 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.5 2007/10/27 11:36:56 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.5 2007/10/27 11:36:56 yamt Exp $"); d94 7 a100 5 const struct uvm_pagerops uvm_vnodeops = { .pgo_reference = uvn_reference, .pgo_detach = uvn_detach, .pgo_get = uvn_get, .pgo_put = uvn_put, @ 1.66.2.7 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.2.6 2007/12/07 17:35:29 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.2.6 2007/12/07 17:35:29 yamt Exp $"); d151 1 a151 1 KASSERT(mutex_owned(&vp->v_interlock)); a152 1 d182 1 d186 1 d192 3 a194 2 KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) || (flags & PGO_LOCKED) == 0); d268 1 a268 1 mutex_exit(&uobj->vmobjlock); d270 1 a270 1 mutex_enter(&uobj->vmobjlock); d290 1 a290 1 mutex_enter(&uobj->vmobjlock); d340 1 a340 1 mutex_enter(&uobj->vmobjlock); d359 1 a359 1 mutex_enter(&uobj->vmobjlock); d362 1 a362 1 mutex_exit(&uobj->vmobjlock); d369 1 a369 1 mutex_enter(&vp->v_interlock); d376 1 a376 1 mutex_exit(&vp->v_interlock); @ 1.66.8.1 log @- as read-ahead context is per-vnode now, there are less reasons to make VOP_READ call uvm_ra_request explicitly. move it to pager (uvn_get) so that it can handle accesses via mmap as well. - pass advice to pager via ubc. - tweak DPRINTF. XXX can be disturbed by PGO_LOCKED. XXX it's controversial where it should be done. (uvm_fault, uvn_get or genfs_getpages.) @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66 2005/06/27 02:29:32 thorpej Exp $"); a74 1 #include a299 9 if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { simple_unlock(&vp->v_interlock); vn_ra_allocctx(vp); uvm_ra_request(vp->v_ractx, advice, uobj, offset, *npagesp << PAGE_SHIFT); simple_lock(&vp->v_interlock); } a301 1 d480 1 a480 2 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE); @ 1.66.8.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.66.8.1 2005/11/19 17:37:00 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.66.8.1 2005/11/19 17:37:00 yamt Exp $"); a56 1 #include "opt_readahead.h" a297 4 #if defined(READAHEAD_STATS) int orignpages = *npagesp; #endif /* defined(READAHEAD_STATS) */ a312 25 #if defined(READAHEAD_STATS) if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) || ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) { int i; if ((flags & PGO_LOCKED) == 0) { simple_lock(&uobj->vmobjlock); } for (i = 0; i < orignpages; i++) { struct vm_page *pg = pps[i]; if (pg == NULL || pg == PGO_DONTCARE) { continue; } if ((pg->flags & PG_SPECULATIVE) != 0) { pg->flags &= ~PG_SPECULATIVE; uvm_ra_hit.ev_count++; } } if ((flags & PGO_LOCKED) == 0) { simple_unlock(&uobj->vmobjlock); } } #endif /* defined(READAHEAD_STATS) */ @ 1.65 log @Use ANSI function decls. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.64 2005/01/09 16:42:44 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.64 2005/01/09 16:42:44 chs Exp $"); d80 5 a84 5 void uvn_detach(struct uvm_object *); int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int); int uvn_put(struct uvm_object *, voff_t, voff_t, int); void uvn_reference(struct uvm_object *); d86 2 a87 1 int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, int); d236 1 a236 1 void d251 1 a251 1 void d265 1 a265 1 int d289 1 a289 1 int d346 1 a346 1 int @ 1.64 log @adjust the UBC mapping code to support non-vnode uvm_objects. this means we can no longer look at the vnode size to determine how many pages to request in a fault, which is good since for NFS the size can change out from under us on the server anyway. there's also a new flag UBC_UNMAP for ubc_release(), so that the file system code can make the decision about whether to cache mappings for files being used as executables. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.63 2004/03/24 07:55:01 junyoung Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.63 2004/03/24 07:55:01 junyoung Exp $"); d121 1 a121 3 uvn_attach(arg, accessprot) void *arg; vm_prot_t accessprot; d236 1 a236 2 uvn_reference(uobj) struct uvm_object *uobj; d251 1 a251 2 uvn_detach(uobj) struct uvm_object *uobj; d265 1 a265 5 uvn_put(uobj, offlo, offhi, flags) struct uvm_object *uobj; voff_t offlo; voff_t offhi; int flags; d289 4 a292 8 uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) struct uvm_object *uobj; voff_t offset; struct vm_page **pps; /* IN/OUT */ int *npagesp; /* IN (OUT if PGO_LOCKED) */ int centeridx; vm_prot_t access_type; int advice, flags; d313 2 a314 6 uvn_findpages(uobj, offset, npagesp, pgs, flags) struct uvm_object *uobj; voff_t offset; int *npagesp; struct vm_page **pgs; int flags; d346 2 a347 5 uvn_findpage(uobj, offset, pgp, flags) struct uvm_object *uobj; voff_t offset; struct vm_page **pgp; int flags; d439 1 a439 3 uvm_vnp_setsize(vp, newsize) struct vnode *vp; voff_t newsize; d467 1 a467 4 uvm_vnp_zerorange(vp, off, len) struct vnode *vp; off_t off; size_t len; @ 1.63 log @Nuke __P(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62 2003/06/29 22:32:52 fvdl Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62 2003/06/29 22:32:52 fvdl Exp $"); d493 6 a498 1 void *win; d500 11 a510 14 /* * XXXUBC invent kzero() and use it */ while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); ubc_release(win, 0); off += bytelen; len -= bytelen; } @ 1.62 log @Back out the lwp/ktrace changes. They contained a lot of colateral damage, and need to be examined and discussed more. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.60 2003/04/22 14:28:16 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.60 2003/04/22 14:28:16 yamt Exp $"); d80 5 a84 5 void uvn_detach __P((struct uvm_object *)); int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); int uvn_put __P((struct uvm_object *, voff_t, voff_t, int)); void uvn_reference __P((struct uvm_object *)); d86 1 a86 1 int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); @ 1.62.2.1 log @Apply the aborted ktrace-lwp changes to a specific branch. 
This is just for others to review, I'm concerned that patch fuziness may have resulted in some errant code being generated but I'll look at that later by comparing the diff from the base to the branch with the file I attempt to apply to it. This will, at the very least, put the changes in a better context for others to review them and attempt to tinker with removing passing of 'struct lwp' through the kernel. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62 2003/06/29 22:32:52 fvdl Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62 2003/06/29 22:32:52 fvdl Exp $"); d187 1 a187 1 (caddr_t)&pi, FREAD, curlwp); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curlwp); @ 1.62.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62.2.1 2003/07/02 15:27:30 darrenr Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62.2.1 2003/07/02 15:27:30 darrenr Exp $"); d80 5 a84 5 void uvn_detach(struct uvm_object *); int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int); int uvn_put(struct uvm_object *, voff_t, voff_t, int); void uvn_reference(struct uvm_object *); d86 1 a86 1 int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, int); @ 1.62.2.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.63 2004/03/24 07:55:01 junyoung Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.63 2004/03/24 07:55:01 junyoung Exp $"); d187 1 a187 1 (caddr_t)&pi, FREAD, curproc); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); @ 1.62.2.4 log @Fix the sync with head I botched. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62.2.2 2004/08/03 10:57:09 skrll Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62.2.2 2004/08/03 10:57:09 skrll Exp $"); d187 1 a187 1 (caddr_t)&pi, FREAD, curlwp); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curlwp); @ 1.62.2.5 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62.2.4 2004/09/21 13:39:31 skrll Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62.2.4 2004/09/21 13:39:31 skrll Exp $"); d493 1 a493 6 void *win; int flags; /* * XXXUBC invent kzero() and use it */ d495 14 a508 11 while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0; ubc_release(win, flags); off += bytelen; len -= bytelen; } @ 1.62.2.6 log @Sync with HEAD. Here we go again... 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.62.2.5 2005/01/17 19:33:11 skrll Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.62.2.5 2005/01/17 19:33:11 skrll Exp $"); d80 5 a84 5 static void uvn_detach(struct uvm_object *); static int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int); static int uvn_put(struct uvm_object *, voff_t, voff_t, int); static void uvn_reference(struct uvm_object *); d86 1 a86 2 static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, int); d121 3 a123 1 uvn_attach(void *arg, vm_prot_t accessprot) d237 3 a239 2 static void uvn_reference(struct uvm_object *uobj) d253 3 a255 2 static void uvn_detach(struct uvm_object *uobj) d268 6 a273 2 static int uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags) d296 9 a304 5 static int uvn_get(struct uvm_object *uobj, voff_t offset, struct vm_page **pps /* IN/OUT */, int *npagesp /* IN (OUT if PGO_LOCKED)*/, int centeridx, vm_prot_t access_type, int advice, int flags) d325 6 a330 2 uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp, struct vm_page **pgs, int flags) d361 6 a366 3 static int uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, int flags) d458 3 a460 1 uvm_vnp_setsize(struct vnode *vp, voff_t newsize) d488 4 a491 1 uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len) @ 1.62.2.7 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.68 2005/11/29 22:52:03 yamt Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.68 2005/11/29 22:52:03 yamt Exp $"); a56 1 #include "opt_readahead.h" a74 1 #include a296 4 #if defined(READAHEAD_STATS) int orignpages = *npagesp; #endif /* defined(READAHEAD_STATS) */ a299 9 if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { simple_unlock(&vp->v_interlock); vn_ra_allocctx(vp); uvm_ra_request(vp->v_ractx, advice, uobj, offset, *npagesp << PAGE_SHIFT); simple_lock(&vp->v_interlock); } a301 26 #if defined(READAHEAD_STATS) if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) || ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) { int i; if ((flags & PGO_LOCKED) == 0) { simple_lock(&uobj->vmobjlock); } for (i = 0; i < orignpages; i++) { struct vm_page *pg = pps[i]; if (pg == NULL || pg == PGO_DONTCARE) { continue; } if ((pg->flags & PG_SPECULATIVE) != 0) { pg->flags &= ~PG_SPECULATIVE; uvm_ra_hit.ev_count++; } } if ((flags & PGO_LOCKED) == 0) { simple_unlock(&uobj->vmobjlock); } } #endif /* defined(READAHEAD_STATS) */ d480 1 a480 2 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL, UBC_WRITE); @ 1.61 log @Pass lwp pointers throughtout the kernel, as required, so that the lwpid can be inserted into ktrace records. The general change has been to replace "struct proc *" with "struct lwp *" in various function prototypes, pass the lwp through and use l_proc to get the process pointer when needed. Bump the kernel rev up to 1.6V @ text @d187 1 a187 1 (caddr_t)&pi, FREAD, curlwp); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curlwp); @ 1.60 log @correct accounting of {exec,file}pages. they are not updated correctly when breaking loan. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.59 2002/09/06 13:24:14 gehenna Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.59 2002/09/06 13:24:14 gehenna Exp $"); d187 1 a187 1 (caddr_t)&pi, FREAD, curproc); d197 1 a197 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); @ 1.59 log @Merge the gehenna-devsw branch into the trunk. This merge changes the device switch tables from static array to dynamically generated by config(8). - All device switches is defined as a constant structure in device drivers. - The new grammer ``device-major'' is introduced to ``files''. device-major char [block ] [] - All device major numbers must be listed up in port dependent majors. by using this grammer. - Added the new naming convention. The name of the device switch must be _[bc]devsw for auto-generation of device switch tables. - The backward compatibility of loading block/character device switch by LKM framework is broken. This is necessary to convert from block/character device major to device name in runtime and vice versa. - The restriction to assign device major by LKM is completely removed. We don't need to reserve LKM entries for dynamic loading of device switch. - In compile time, device major numbers list is packed into the kernel and the LKM framework will refer it to assign device major number dynamically. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.58 2002/05/17 22:00:50 enami Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.58 2002/05/17 22:00:50 enami Exp $"); a396 5 } if (UVM_OBJ_IS_VTEXT(uobj)) { uvmexp.execpages++; } else { uvmexp.filepages++; @ 1.58 log @Make uvn_findpages to return number of pages found so that caller can easily check if all requested pages are found or not. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.57 2001/12/31 07:00:15 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.57 2001/12/31 07:00:15 chs Exp $"); d128 1 d154 8 a161 4 if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); return(NULL); d184 7 a190 2 result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc); @ 1.57 log @in uvm_vnp_setsize(), wait for any i/o in progress on pages that we free. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.56 2001/12/09 03:07:44 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.56 2001/12/09 03:07:44 chs Exp $"); d314 1 a314 1 void d322 1 a322 1 int i, count, npages, rv; d324 1 a324 1 count = 0; d329 5 a333 3 if (flags & UFP_DIRTYONLY && rv == 0) { break; } d339 5 a343 3 if (flags & UFP_DIRTYONLY && rv == 0) { break; } d348 1 d407 1 d426 1 @ 1.57.8.1 log @Replace the direct-access to devsw table with calling devsw APIs. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.57 2001/12/31 07:00:15 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.57 2001/12/31 07:00:15 chs Exp $"); a127 1 const struct bdevsw *bdev; d153 4 a156 8 if (vp->v_type == VBLK) { bdev = bdevsw_lookup(vp->v_rdev); if (bdev == NULL || bdev->d_type != D_DISK) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); return(NULL); } d179 2 a180 7 bdev = bdevsw_lookup(vp->v_rdev); if (bdev != NULL) { result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc); } else { result = ENXIO; } @ 1.57.8.2 log @Catch up with -current. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.57.8.1 2002/05/16 03:45:49 gehenna Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.57.8.1 2002/05/16 03:45:49 gehenna Exp $"); d324 1 a324 1 int d332 1 a332 1 int i, count, found, npages, rv; d334 1 a334 1 count = found = 0; d339 3 a341 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; d347 3 a349 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; a353 1 return (found); a411 1 UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0); a429 1 UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0); @ 1.56 log @replace "vnode" and "vtext" with "file" and "exec" in uvmexp field names. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.55 2001/11/10 07:37:01 lukem Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.55 2001/11/10 07:37:01 lukem Exp $"); d464 1 a464 1 (void) uvn_put(uobj, pgend, 0, PGO_FREE); @ 1.55 log @add RCSIDs, and in some cases, slightly cleanup #include order @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.54 2001/09/26 07:23:51 chs Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); d384 1 a384 1 uvmexp.vtextpages++; d386 1 a386 1 uvmexp.vnodepages++; @ 1.54 log @change the names of the arguments to uvn_put() to match their usage. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.53 2001/09/22 22:33:16 sommerfeld Exp $ */ d48 7 a57 4 /* * uvm_vnode.c: the vnode pager. */ @ 1.54.2.1 log @Sync the thorpej-mips-cache branch with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.55 2001/11/10 07:37:01 lukem Exp $ */ d48 4 a54 7 #include __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.55 2001/11/10 07:37:01 lukem Exp $"); #include "fs_nfs.h" #include "opt_uvmhist.h" #include "opt_ddb.h" @ 1.53 log @VOP_PUTPAGES must release the uobj's lock for us, so ensure it's locked beforehand and unlocked afterwards using LOCK_ASSERT(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.52 2001/09/15 20:36:47 chs Exp $ */ d256 1 a256 1 uvn_put(uobj, off, len, flags) d258 2 a259 2 voff_t off; voff_t len; d266 1 a266 1 error = VOP_PUTPAGES(vp, off, len, flags); @ 1.52 log @a whole bunch of changes to improve performance and robustness under load: - remove special treatment of pager_map mappings in pmaps. this is required now, since I've removed the globals that expose the address range. pager_map now uses pmap_kenter_pa() instead of pmap_enter(), so there's no longer any need to special-case it. - eliminate struct uvm_vnode by moving its fields into struct vnode. - rewrite the pageout path. the pager is now responsible for handling the high-level requests instead of only getting control after a bunch of work has already been done on its behalf. this will allow us to UBCify LFS, which needs tighter control over its pages than other filesystems do. writing a page to disk no longer requires making it read-only, which allows us to write wired pages without causing all kinds of havoc. - use a new PG_PAGEOUT flag to indicate that a page should be freed on behalf of the pagedaemon when it's unlocked. this flag is very similar to PG_RELEASED, but unlike PG_RELEASED, PG_PAGEOUT can be cleared if the pageout fails due to eg. an indirect-block buffer being locked. this allows us to remove the "version" field from struct vm_page, and together with shrinking "loan_count" from 32 bits to 16, struct vm_page is now 4 bytes smaller. - no longer use PG_RELEASED for swap-backed pages. 
if the page is busy because it's being paged out, we can't release the swap slot to be reallocated until that write is complete, but unlike with vnodes we don't keep a count of in-progress writes so there's no good way to know when the write is done. instead, when we need to free a busy swap-backed page, just sleep until we can get it busy ourselves. - implement a fast-path for extending writes which allows us to avoid zeroing new pages. this substantially reduces cpu usage. - encapsulate the data used by the genfs code in a struct genfs_node, which must be the first element of the filesystem-specific vnode data for filesystems which use genfs_{get,put}pages(). - eliminate many of the UVM pagerops, since they aren't needed anymore now that the pager "put" operation is a higher-level operation. - enhance the genfs code to allow NFS to use the genfs_{get,put}pages instead of a modified copy. - clean up struct vnode by removing all the fields that used to be used by the vfs_cluster.c code (which we don't use anymore with UBC). - remove kmem_object and mb_object since they were useless. instead of allocating pages to these objects, we now just allocate pages with no object. such pages are mapped in the kernel until they are freed, so we can use the mapping to find the page to free it. this allows us to remove splvm() protection in several places. The sum of all these changes improves write throughput on my decstation 5000/200 to within 1% of the rate of NetBSD 1.5 and reduces the elapsed time for "make release" of a NetBSD 1.5 source tree on my 128MB pc to 10% less than a 1.5 kernel took. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.51 2001/08/17 05:53:02 chs Exp $ */ d250 1 a250 1 * => object must be locked! we will _unlock_ it before starting I/O. d265 1 d267 1 @ 1.51 log @allow mappings of VBLK vnodes. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.50 2001/05/26 21:27:21 chs Exp $ */ a71 1 #include d77 7 a83 15 static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *, voff_t *)); static void uvn_detach __P((struct uvm_object *)); static int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t, int)); static int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); static int uvn_put __P((struct uvm_object *, struct vm_page **, int, boolean_t)); static void uvn_reference __P((struct uvm_object *)); static boolean_t uvn_releasepg __P((struct vm_page *, struct vm_page **)); a93 1 uvn_flush, a95 3 uvn_cluster, uvm_mk_pcluster, uvn_releasepg, d123 1 a123 1 struct uvm_vnode *uvn = &vp->v_uvm; d134 1 a134 1 * first get a lock on the uvn. 
d136 4 a139 3 simple_lock(&uvn->u_obj.vmobjlock); while (uvn->u_flags & VXLOCK) { uvn->u_flags |= VXWANT; d141 1 a141 1 UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE, d143 1 a143 1 simple_lock(&uvn->u_obj.vmobjlock); d151 1 a151 1 simple_unlock(&uvn->u_obj.vmobjlock); d161 1 a161 1 if (uvn->u_size == VSIZENOTSET) { d163 3 a165 2 uvn->u_flags |= VXLOCK; simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */ d190 1 a190 1 simple_lock(&uvn->u_obj.vmobjlock); d192 4 a195 3 if (uvn->u_flags & VXWANT) wakeup(uvn); uvn->u_flags &= ~(VXLOCK|VXWANT); d198 1 a198 1 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ d202 1 a202 1 uvn->u_size = used_vnode_size; d206 2 a207 3 /* unlock and return */ simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, d209 1 a209 1 return (&uvn->u_obj); d224 1 a224 2 static void d231 1 d239 2 a240 1 static void a247 549 * uvn_releasepg: handled a released page in a uvn * * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need * to dispose of. * => caller must handled PG_WANTED case * => called with page's object locked, pageq's unlocked * => returns TRUE if page's object is still alive, FALSE if we * killed the page's object. if we return TRUE, then we * return with the object locked. * => if (nextpgp != NULL) => we return the next page on the queue, and return * with the page queues locked [for pagedaemon] * => if (nextpgp == NULL) => we return with page queues unlocked [normal case] * => we kill the uvn if it is not referenced and we are suppose to * kill it ("relkill"). */ boolean_t uvn_releasepg(pg, nextpgp) struct vm_page *pg; struct vm_page **nextpgp; /* OUT */ { KASSERT(pg->flags & PG_RELEASED); /* * dispose of the page [caller handles PG_WANTED] */ pmap_page_protect(pg, VM_PROT_NONE); uvm_lock_pageq(); if (nextpgp) *nextpgp = TAILQ_NEXT(pg, pageq); uvm_pagefree(pg); if (!nextpgp) uvm_unlock_pageq(); return (TRUE); } /* * issues to consider: * there are two tailq's in the uvm. structure... one for pending async * i/o and one for "done" async i/o. to do an async i/o one puts * a buf on the "pending" list (protected by splbio()), starts the * i/o and returns 0. when the i/o is done, we expect * some sort of "i/o done" function to be called (at splbio(), interrupt * time). this function should remove the buf from the pending list * and place it on the "done" list and wakeup the daemon. the daemon * will run at normal spl() and will remove all items from the "done" * list and call the iodone hook for each done request (see uvm_pager.c). * * => return KERN_SUCCESS (aio finished, free it). otherwise requeue for * later collection. * => called with pageq's locked by the daemon. * * general outline: * - "try" to lock object. if fail, just return (will try again later) * - drop "u_nio" (this req is done!) * - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio } * - get "page" structures (atop?). * - handle "wanted" pages * - handle "released" pages [using pgo_releasepg] * >>> pgo_releasepg may kill the object * dont forget to look at "object" wanted flag in all cases. */ /* * uvn_flush: flush pages out of a uvm object. * * => "stop == 0" means flush all pages at or after "start". * => object should be locked by caller. we may _unlock_ the object * if (and only if) we need to clean a page (PGO_CLEANIT), or * if PGO_SYNCIO is set and there are pages busy. * we return with the object locked. * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O). 
* thus, a caller might want to unlock higher level resources * (e.g. vm_map) before calling flush. * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither * unlock the object nor block. * => if PGO_ALLPAGES is set, then all pages in the object are valid targets * for flushing. * => NOTE: we rely on the fact that the object's memq is a TAILQ and * that new pages are inserted on the tail end of the list. thus, * we can make a complete pass through the object in one go by starting * at the head and working towards the tail (new pages are put in * front of us). * => NOTE: we are allowed to lock the page queues, so the caller * must not be holding the lock on them [e.g. pagedaemon had * better not call us with the queues locked] * => we return TRUE unless we encountered some sort of I/O error * * comment on "cleaning" object and PG_BUSY pages: * this routine is holding the lock on the object. the only time * that it can run into a PG_BUSY page that it does not own is if * some other process has started I/O on the page (e.g. either * a pagein, or a pageout). if the PG_BUSY page is being paged * in, then it can not be dirty (!PG_CLEAN) because no one has * had a chance to modify it yet. if the PG_BUSY page is being * paged out then it means that someone else has already started * cleaning the page for us (how nice!). in this case, if we * have syncio specified, then after we make our pass through the * object we need to wait for the other PG_BUSY pages to clear * off (i.e. we need to do an iosync). also note that once a * page is PG_BUSY it must stay in its object until it is un-busyed. * * note on page traversal: * we can traverse the pages in an object either by going down the * linked list in "uobj->memq", or we can go over the address range * by page doing hash table lookups for each address. depending * on how many pages are in the object it may be cheaper to do one * or the other. we set "by_list" to true if we are using memq. * if the cost of a hash lookup was equal to the cost of the list * traversal we could compare the number of pages in the start->stop * range to the total number of pages in the object. however, it * seems that a hash table lookup is more expensive than the linked * list traversal, so we multiply the number of pages in the * start->stop range by a penalty which we define below. 
*/ #define UVN_HASH_PENALTY 4 /* XXX: a guess */ static boolean_t uvn_flush(uobj, start, stop, flags) struct uvm_object *uobj; voff_t start, stop; int flags; { struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; struct vnode *vp = (struct vnode *)uobj; struct vm_page *pp, *ppnext, *ptmp; struct vm_page *pps[256], **ppsp; int s; int npages, result, lcv; boolean_t retval, need_iosync, by_list, needs_clean, all, wasclean; boolean_t async = (flags & PGO_SYNCIO) == 0; voff_t curoff; u_short pp_version; UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "uobj %p start 0x%x stop 0x%x flags 0x%x", uobj, start, stop, flags); KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)); if (uobj->uo_npages == 0) { if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL && (vp->v_flag & VONWORKLST)) { vp->v_flag &= ~VONWORKLST; LIST_REMOVE(vp, v_synclist); } return TRUE; } #ifdef DEBUG if (uvn->u_size == VSIZENOTSET) { printf("uvn_flush: size not set vp %p\n", uvn); vprint("uvn_flush VSIZENOTSET", vp); flags |= PGO_ALLPAGES; } #endif /* * get init vals and determine how we are going to traverse object */ if (stop == 0) { stop = trunc_page(LLONG_MAX); } curoff = 0; need_iosync = FALSE; retval = TRUE; wasclean = TRUE; if (flags & PGO_ALLPAGES) { all = TRUE; by_list = TRUE; } else { start = trunc_page(start); stop = round_page(stop); all = FALSE; by_list = (uobj->uo_npages <= ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY); } UVMHIST_LOG(maphist, " flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x", start, stop, by_list, flags); /* * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as * a _hint_ as to how up to date the PG_CLEAN bit is. if the hint * is wrong it will only prevent us from clustering... it won't break * anything. we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster * will set them as it syncs PG_CLEAN. This is only an issue if we * are looking at non-inactive pages (because inactive page's PG_CLEAN * bit is always up to date since there are no mappings). * [borrowed PG_CLEANCHK idea from FreeBSD VM] */ if ((flags & PGO_CLEANIT) != 0 && uobj->pgops->pgo_mk_pcluster != NULL) { if (by_list) { TAILQ_FOREACH(pp, &uobj->memq, listq) { if (!all && (pp->offset < start || pp->offset >= stop)) continue; pp->flags &= ~PG_CLEANCHK; } } else { /* by hash */ for (curoff = start ; curoff < stop; curoff += PAGE_SIZE) { pp = uvm_pagelookup(uobj, curoff); if (pp) pp->flags &= ~PG_CLEANCHK; } } } /* * now do it. note: we must update ppnext in body of loop or we * will get stuck. we need to use ppnext because we may free "pp" * before doing the next loop. */ if (by_list) { pp = TAILQ_FIRST(&uobj->memq); } else { curoff = start; pp = uvm_pagelookup(uobj, curoff); } ppnext = NULL; ppsp = NULL; uvm_lock_pageq(); /* locked: both page queues and uobj */ for ( ; (by_list && pp != NULL) || (!by_list && curoff < stop) ; pp = ppnext) { if (by_list) { if (!all && (pp->offset < start || pp->offset >= stop)) { ppnext = TAILQ_NEXT(pp, listq); continue; } } else { curoff += PAGE_SIZE; if (pp == NULL) { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); continue; } } /* * handle case where we do not need to clean page (either * because we are not clean or because page is not dirty or * is busy): * * NOTE: we are allowed to deactivate a non-wired active * PG_BUSY page, but once a PG_BUSY page is on the inactive * queue it must stay put until it is !PG_BUSY (so as not to * confuse pagedaemon). 
*/ if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) { needs_clean = FALSE; if (!async) need_iosync = TRUE; } else { /* * freeing: nuke all mappings so we can sync * PG_CLEAN bit with no race */ if ((pp->flags & PG_CLEAN) != 0 && (flags & PGO_FREE) != 0 && /* XXX ACTIVE|INACTIVE test unnecessary? */ (pp->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) != 0) pmap_page_protect(pp, VM_PROT_NONE); if ((pp->flags & PG_CLEAN) != 0 && pmap_is_modified(pp)) pp->flags &= ~(PG_CLEAN); pp->flags |= PG_CLEANCHK; needs_clean = ((pp->flags & PG_CLEAN) == 0); } /* * if we don't need a clean... load ppnext and dispose of pp */ if (!needs_clean) { if (by_list) ppnext = TAILQ_NEXT(pp, listq); else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && (pp->flags & PG_BUSY) == 0 && pp->wire_count == 0) { pmap_clear_reference(pp); uvm_pagedeactivate(pp); } } else if (flags & PGO_FREE) { if (pp->flags & PG_BUSY) { pp->flags |= PG_RELEASED; } else { pmap_page_protect(pp, VM_PROT_NONE); uvm_pagefree(pp); } } /* ppnext is valid so we can continue... */ continue; } /* * pp points to a page in the locked object that we are * working on. if it is !PG_CLEAN,!PG_BUSY and we asked * for cleaning (PGO_CLEANIT). we clean it now. * * let uvm_pager_put attempted a clustered page out. * note: locked: uobj and page queues. */ wasclean = FALSE; pp->flags |= PG_BUSY; /* we 'own' page now */ UVM_PAGE_OWN(pp, "uvn_flush"); pmap_page_protect(pp, VM_PROT_READ); pp_version = pp->version; ppsp = pps; npages = sizeof(pps) / sizeof(struct vm_page *); /* locked: page queues, uobj */ result = uvm_pager_put(uobj, pp, &ppsp, &npages, flags | PGO_DOACTCLUST, start, stop); /* unlocked: page queues, uobj */ /* * at this point nothing is locked. if we did an async I/O * it is remotely possible for the async i/o to complete and * the page "pp" be freed or what not before we get a chance * to relock the object. in order to detect this, we have * saved the version number of the page in "pp_version". */ /* relock! */ simple_lock(&uobj->vmobjlock); uvm_lock_pageq(); /* * the cleaning operation is now done. finish up. note that * on error uvm_pager_put drops the cluster for us. * on success uvm_pager_put returns the cluster to us in * ppsp/npages. */ /* * for pending async i/o if we are not deactivating/freeing * we can move on to the next page. */ if (result == 0 && async && (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { /* * no per-page ops: refresh ppnext and continue */ if (by_list) { if (pp->version == pp_version) ppnext = TAILQ_NEXT(pp, listq); else ppnext = TAILQ_FIRST(&uobj->memq); } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } continue; } /* * need to look at each page of the I/O operation. we defer * processing "pp" until the last trip through this "for" loop * so that we can load "ppnext" for the main loop after we * play with the cluster pages [thus the "npages + 1" in the * loop below]. */ for (lcv = 0 ; lcv < npages + 1 ; lcv++) { /* * handle ppnext for outside loop, and saving pp * until the end. 
*/ if (lcv < npages) { if (ppsp[lcv] == pp) continue; /* skip pp until the end */ ptmp = ppsp[lcv]; } else { ptmp = pp; /* set up next page for outer loop */ if (by_list) { if (pp->version == pp_version) ppnext = TAILQ_NEXT(pp, listq); else ppnext = TAILQ_FIRST( &uobj->memq); } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } } /* * verify the page wasn't moved while obj was * unlocked */ if (result == 0 && async && ptmp->uobject != uobj) continue; /* * unbusy the page if I/O is done. note that for * async I/O it is possible that the I/O op * finished before we relocked the object (in * which case the page is no longer busy). */ if (result != 0 || !async) { if (ptmp->flags & PG_WANTED) { /* still holding object lock */ wakeup(ptmp); } ptmp->flags &= ~(PG_WANTED|PG_BUSY); UVM_PAGE_OWN(ptmp, NULL); if (ptmp->flags & PG_RELEASED) { uvm_unlock_pageq(); if (!uvn_releasepg(ptmp, NULL)) { UVMHIST_LOG(maphist, "released %p", ptmp, 0,0,0); return (TRUE); } uvm_lock_pageq(); continue; } else { if ((flags & PGO_WEAK) == 0 && !(result == EIO && curproc == uvm.pagedaemon_proc)) { ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); if ((flags & PGO_FREE) == 0) { pmap_clear_modify(ptmp); } } } } /* * dispose of page */ if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && (pp->flags & PG_BUSY) == 0 && pp->wire_count == 0) { pmap_clear_reference(ptmp); uvm_pagedeactivate(ptmp); } } else if (flags & PGO_FREE) { if (result == 0 && async) { if ((ptmp->flags & PG_BUSY) != 0) /* signal for i/o done */ ptmp->flags |= PG_RELEASED; } else { if (result != 0) { printf("uvn_flush: obj=%p, " "offset=0x%llx. error %d\n", pp->uobject, (long long)pp->offset, result); printf("uvn_flush: WARNING: " "changes to page may be " "lost!\n"); retval = FALSE; } pmap_page_protect(ptmp, VM_PROT_NONE); uvm_pagefree(ptmp); } } } /* end of "lcv" for loop */ } /* end of "pp" for loop */ uvm_unlock_pageq(); if ((flags & PGO_CLEANIT) && all && wasclean && LIST_FIRST(&vp->v_dirtyblkhd) == NULL && (vp->v_flag & VONWORKLST)) { vp->v_flag &= ~VONWORKLST; LIST_REMOVE(vp, v_synclist); } if (need_iosync) { UVMHIST_LOG(maphist," <>",0,0,0,0); /* * XXX this doesn't use the new two-flag scheme, * but to use that, all i/o initiators will have to change. */ s = splbio(); while (vp->v_numoutput != 0) { UVMHIST_LOG(ubchist, "waiting for vp %p num %d", vp, vp->v_numoutput,0,0); vp->v_flag |= VBWAIT; UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); simple_lock(&uvn->u_obj.vmobjlock); } splx(s); } /* return, with object locked! */ UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0); return(retval); } /* * uvn_cluster * * we are about to do I/O in an object at offset. this function is called * to establish a range of offsets around "offset" in which we can cluster * I/O. * * - currently doesn't matter if obj locked or not. */ static void uvn_cluster(uobj, offset, loffset, hoffset) struct uvm_object *uobj; voff_t offset; voff_t *loffset, *hoffset; /* OUT */ { struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; *loffset = offset; *hoffset = MIN(offset + MAXBSIZE, round_page(uvn->u_size)); } /* d255 2 a256 2 static int uvn_put(uobj, pps, npages, flags) d258 3 a260 2 struct vm_page **pps; int npages, flags; d265 1 a265 1 error = VOP_PUTPAGES(vp, pps, npages, flags, NULL); d281 1 a281 1 static int d306 1 a306 1 * => returned page will be BUSY. 
d310 1 a310 1 uvn_findpages(uobj, offset, npagesp, pps, flags) d314 1 a314 1 struct vm_page **pps; d317 1 a317 1 int i, rv, npages; d319 1 a319 1 rv = 0; d321 16 a336 2 for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv += uvn_findpage(uobj, offset, &pps[i], flags); d338 1 a338 1 *npagesp = rv; d341 1 a341 1 static int d349 1 d361 1 a361 1 /* nope? allocate one now */ d383 1 a383 1 UVMHIST_LOG(ubchist, "alloced",0,0,0,0); d391 1 a391 1 if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) { d409 10 d422 1 a422 1 UVMHIST_LOG(ubchist, "found",0,0,0,0); d430 1 a430 1 * uvm_vnp_setsize: grow or shrink a vnode uvn a437 7 * * called from: * => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos]) * => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write) * => ffs_balloc [XXX: why? doesn't WRITE handle?] * => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr * => union fs: union_newsize d445 1 a445 1 struct uvm_vnode *uvn = &vp->v_uvm; d449 3 a451 3 simple_lock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(ubchist, "old 0x%x new 0x%x", uvn->u_size, newsize, 0,0); d458 4 a461 2 if (uvn->u_size > pgend && uvn->u_size != VSIZENOTSET) { (void) uvn_flush(&uvn->u_obj, pgend, 0, PGO_FREE); d463 1 a463 2 uvn->u_size = newsize; simple_unlock(&uvn->u_obj.vmobjlock); d485 1 a485 1 win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE); @ 1.51.2.1 log @Commit my "devvp" changes to the thorpej-devvp branch. This replaces the use of dev_t in most places with a struct vnode *. This will form the basic infrastructure for real cloning device support (besides being architecurally cleaner -- it'll be good to get away from using numbers to represent objects). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.51 2001/08/17 05:53:02 chs Exp $ */ d187 1 a187 1 result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp, @ 1.51.2.2 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.51.2.1 2001/09/07 04:45:46 thorpej Exp $ */ d72 1 d78 15 a92 7 void uvn_detach __P((struct uvm_object *)); int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); int uvn_put __P((struct uvm_object *, voff_t, voff_t, int)); void uvn_reference __P((struct uvm_object *)); int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); d103 1 d106 3 d136 1 a136 1 struct uvm_object *uobj = &vp->v_uobj; d147 1 a147 1 * first get a lock on the uobj. d149 3 a151 4 simple_lock(&uobj->vmobjlock); while (vp->v_flag & VXLOCK) { vp->v_flag |= VXWANT; d153 1 a153 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE, d155 1 a155 1 simple_lock(&uobj->vmobjlock); d163 1 a163 1 simple_unlock(&uobj->vmobjlock); d173 1 a173 1 if (vp->v_size == VSIZENOTSET) { d175 2 a176 3 vp->v_flag |= VXLOCK; simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */ d201 1 a201 1 simple_lock(&uobj->vmobjlock); d203 3 a205 4 if (vp->v_flag & VXWANT) { wakeup(vp); } vp->v_flag &= ~(VXLOCK|VXWANT); d208 1 a208 1 simple_unlock(&uobj->vmobjlock); d212 1 a212 1 vp->v_size = used_vnode_size; d216 3 a218 2 simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount, d220 1 a220 1 return uobj; d235 2 a236 1 void a242 1 d250 1 a250 2 void d258 549 d809 1 a809 1 * => object must be locked on entry! VOP_PUTPAGES must unlock it. 
d814 2 a815 2 int uvn_put(uobj, offlo, offhi, flags) d817 2 a818 3 voff_t offlo; voff_t offhi; int flags; d823 1 a823 3 LOCK_ASSERT(simple_lock_held(&vp->v_interlock)); error = VOP_PUTPAGES(vp, offlo, offhi, flags); LOCK_ASSERT(!simple_lock_held(&vp->v_interlock)); d839 1 a839 1 int d864 1 a864 1 * => returned pages will be BUSY. d868 1 a868 1 uvn_findpages(uobj, offset, npagesp, pgs, flags) d872 1 a872 1 struct vm_page **pgs; d875 1 a875 1 int i, count, npages, rv; d877 1 a877 1 count = 0; d879 2 a880 16 if (flags & UFP_BACKWARD) { for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } } else { for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } d882 1 a882 1 *npagesp = count; d885 1 a885 1 int a892 1 boolean_t dirty; d904 1 a904 1 /* nope? allocate one now */ d926 1 a926 1 UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0); d934 1 a934 1 if ((pg->flags & PG_BUSY) != 0) { a951 10 /* stop on clean pages if requested */ if (flags & UFP_DIRTYONLY) { dirty = pmap_clear_modify(pg) || (pg->flags & PG_CLEAN) == 0; pg->flags |= PG_CLEAN; if (!dirty) { return 0; } } d955 1 a955 1 UVMHIST_LOG(ubchist, "found %p", pg,0,0,0); d963 1 a963 1 * uvm_vnp_setsize: grow or shrink a vnode uobj d971 7 d985 1 a985 1 struct uvm_object *uobj = &vp->v_uobj; d989 3 a991 3 simple_lock(&uobj->vmobjlock); UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x", vp, vp->v_size, newsize, 0); d998 2 a999 4 if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) { (void) uvn_put(uobj, pgend, 0, PGO_FREE); } else { simple_unlock(&uobj->vmobjlock); d1001 2 a1002 1 vp->v_size = newsize; d1024 1 a1024 1 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); @ 1.50 log @replace vm_page_t with struct vm_page *. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.49 2001/05/25 04:06:18 chs Exp $ */ d167 1 a167 6 #ifdef DIAGNOSTIC if (vp->v_type != VREG) { panic("uvn_attach: vp %p not VREG", vp); } #endif @ 1.50.2.1 log @Merge Aug 24 -current into the kqueue branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.51 2001/08/17 05:53:02 chs Exp $ */ d167 6 a172 1 KASSERT(vp->v_type == VREG || vp->v_type == VBLK); @ 1.50.2.2 log @Sync kqueue branch with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.50.2.1 2001/08/25 06:17:23 thorpej Exp $ */ d48 4 a55 7 #include __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.57 2001/12/31 07:00:15 chs Exp $"); #include "fs_nfs.h" #include "opt_uvmhist.h" #include "opt_ddb.h" d72 1 d78 15 a92 7 void uvn_detach __P((struct uvm_object *)); int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); int uvn_put __P((struct uvm_object *, voff_t, voff_t, int)); void uvn_reference __P((struct uvm_object *)); int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); d103 1 d106 3 d136 1 a136 1 struct uvm_object *uobj = &vp->v_uobj; d147 1 a147 1 * first get a lock on the uobj. 
d149 3 a151 4 simple_lock(&uobj->vmobjlock); while (vp->v_flag & VXLOCK) { vp->v_flag |= VXWANT; d153 1 a153 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE, d155 1 a155 1 simple_lock(&uobj->vmobjlock); d163 1 a163 1 simple_unlock(&uobj->vmobjlock); d173 1 a173 2 if (vp->v_size == VSIZENOTSET) { d175 2 a176 2 vp->v_flag |= VXLOCK; simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */ d201 1 a201 1 simple_lock(&uobj->vmobjlock); d203 3 a205 4 if (vp->v_flag & VXWANT) { wakeup(vp); } vp->v_flag &= ~(VXLOCK|VXWANT); d208 1 a208 1 simple_unlock(&uobj->vmobjlock); d212 1 a212 1 vp->v_size = used_vnode_size; d216 3 a218 2 simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount, d220 1 a220 1 return uobj; d235 2 a236 1 void a242 1 d250 1 a250 2 void d258 549 d809 1 a809 1 * => object must be locked on entry! VOP_PUTPAGES must unlock it. d814 2 a815 2 int uvn_put(uobj, offlo, offhi, flags) d817 2 a818 3 voff_t offlo; voff_t offhi; int flags; d823 1 a823 3 LOCK_ASSERT(simple_lock_held(&vp->v_interlock)); error = VOP_PUTPAGES(vp, offlo, offhi, flags); LOCK_ASSERT(!simple_lock_held(&vp->v_interlock)); d839 1 a839 1 int d864 1 a864 1 * => returned pages will be BUSY. d868 1 a868 1 uvn_findpages(uobj, offset, npagesp, pgs, flags) d872 1 a872 1 struct vm_page **pgs; d875 1 a875 1 int i, count, npages, rv; d877 1 a877 1 count = 0; d879 2 a880 16 if (flags & UFP_BACKWARD) { for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } } else { for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } d882 1 a882 1 *npagesp = count; d885 1 a885 1 int a892 1 boolean_t dirty; d904 1 a904 1 /* nope? 
allocate one now */ d922 1 a922 1 uvmexp.execpages++; d924 1 a924 1 uvmexp.filepages++; d926 1 a926 1 UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0); d934 1 a934 1 if ((pg->flags & PG_BUSY) != 0) { a951 10 /* stop on clean pages if requested */ if (flags & UFP_DIRTYONLY) { dirty = pmap_clear_modify(pg) || (pg->flags & PG_CLEAN) == 0; pg->flags |= PG_CLEAN; if (!dirty) { return 0; } } d955 1 a955 1 UVMHIST_LOG(ubchist, "found %p", pg,0,0,0); d963 1 a963 1 * uvm_vnp_setsize: grow or shrink a vnode uobj d971 7 d985 1 a985 1 struct uvm_object *uobj = &vp->v_uobj; d989 3 a991 3 simple_lock(&uobj->vmobjlock); UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x", vp, vp->v_size, newsize, 0); d998 2 a999 4 if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) { (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO); } else { simple_unlock(&uobj->vmobjlock); d1001 2 a1002 1 vp->v_size = newsize; d1024 1 a1024 1 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); @ 1.50.2.3 log @catch up with -current on kqueue branch @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.50.2.2 2002/01/10 20:05:49 thorpej Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.50.2.2 2002/01/10 20:05:49 thorpej Exp $"); d314 1 a314 1 int d322 1 a322 1 int i, count, found, npages, rv; d324 1 a324 1 count = found = 0; d329 3 a331 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; d337 3 a339 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; a343 1 return (found); a401 1 UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0); a419 1 UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0); @ 1.50.2.4 log @sync kqueue with -current; this includes merge of gehenna-devsw branch, merge of i386 MP branch, and part of autoconf rototil work @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.50.2.3 2002/06/23 17:52:20 jdolecek Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.50.2.3 2002/06/23 17:52:20 jdolecek Exp $"); a127 1 const struct bdevsw *bdev; d153 4 a156 8 if (vp->v_type == VBLK) { bdev = bdevsw_lookup(vp->v_rdev); if (bdev == NULL || bdev->d_type != D_DISK) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); return(NULL); } d179 2 a180 7 bdev = bdevsw_lookup(vp->v_rdev); if (bdev != NULL) { result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc); } else { result = ENXIO; } @ 1.49 log @remove trailing whitespace. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.48 2001/03/10 22:46:51 chs Exp $ */ d85 5 a89 4 static int uvn_get __P((struct uvm_object *, voff_t, vm_page_t *, int *, int, vm_prot_t, int, int)); static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); @ 1.48 log @eliminate the VM_PAGER_* error codes in favor of the traditional E* codes. the mapping is: VM_PAGER_OK 0 VM_PAGER_BAD VM_PAGER_FAIL VM_PAGER_PEND 0 (see below) VM_PAGER_ERROR EIO VM_PAGER_AGAIN EAGAIN VM_PAGER_UNLOCK EBUSY VM_PAGER_REFAULT ERESTART for async i/o requests, it used to be possible for the request to be convert to sync, and the pager would return VM_PAGER_OK or VM_PAGER_PEND to indicate whether the caller should perform post-i/o cleanup. this is no longer allowed; pagers must now return 0 to indicate that the async i/o was successfully started, and the caller never needs to worry about doing the post-i/o cleanup. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.47 2001/03/09 01:02:13 chs Exp $ */ d6 1 a6 1 * The Regents of the University of California. 
d26 1 a26 1 * Washington University, the University of California, Berkeley and d232 1 a232 1 * count must already be at least one (the passed in reference) so d235 1 a235 1 * => caller must call with object unlocked. d284 1 a284 1 d360 1 a360 1 * cleaning the page for us (how nice!). in this case, if we d362 1 a362 1 * object we need to wait for the other PG_BUSY pages to clear d370 1 a370 1 * on how many pages are in the object it may be cheaper to do one d376 1 a376 1 * list traversal, so we multiply the number of pages in the d438 1 a438 1 by_list = (uobj->uo_npages <= d495 1 a495 1 for ( ; (by_list && pp != NULL) || d516 1 a516 1 * d533 1 a533 1 if ((pp->flags & PG_CLEAN) != 0 && d594 1 a594 1 result = uvm_pager_put(uobj, pp, &ppsp, &npages, d600 2 a601 2 * it is remotely possible for the async i/o to complete and * the page "pp" be freed or what not before we get a chance d641 2 a642 2 * need to look at each page of the I/O operation. we defer * processing "pp" until the last trip through this "for" loop d644 1 a644 1 * play with the cluster pages [thus the "npages + 1" in the d718 1 a718 1 d776 1 a776 1 &uvn->u_obj.vmobjlock, d842 1 a842 1 d949 1 a949 1 d972 1 a972 1 * => we assume that the caller has a reference of some sort to the @ 1.47 log @add UBC memory-usage balancing. we track the number of pages in use for each of the basic types (anonymous data, executable image, cached files) and prevent the pagedaemon from reusing a given page if that would reduce the count of that type of page below a sysctl-setable minimum threshold. the thresholds are controlled via three new sysctl tunables: vm.anonmin, vm.vnodemin, and vm.vtextmin. these tunables are the percentages of pageable memory reserved for each usage, and we do not allow the sum of the minimums to be more than 95% so that there's always some memory that can be reused. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46 2001/02/22 01:02:09 enami Exp $ */ a252 2 * => this starts the detach process, but doesn't have to finish it * (async i/o could still be pending). d304 1 a304 1 * i/o and returns VM_PAGER_PEND. when the i/o is done, we expect d395 1 d525 1 a525 1 if (flags & PGO_SYNCIO) a589 1 ReTry: d611 4 a614 28 * VM_PAGER_AGAIN: given the structure of this pager, this * can only happen when we are doing async I/O and can't * map the pages into kernel memory (pager_map) due to lack * of vm space. if this happens we drop back to sync I/O. */ if (result == VM_PAGER_AGAIN) { /* * it is unlikely, but page could have been released * while we had the object lock dropped. we ignore * this now and retry the I/O. we will detect and * handle the released page after the syncio I/O * completes. */ #ifdef DIAGNOSTIC if (flags & PGO_SYNCIO) panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)"); #endif flags |= PGO_SYNCIO; goto ReTry; } /* * the cleaning operation is now done. finish up. note that * on error (!OK, !PEND) uvm_pager_put drops the cluster for us. * if success (OK, PEND) then uvm_pager_put returns the cluster * to us in ppsp/npages. 
d622 1 a622 1 if (result == VM_PAGER_PEND && d679 1 a679 1 if (result == VM_PAGER_PEND && ptmp->uobject != uobj) d684 1 a684 1 * pending I/O it is possible that the I/O op d689 1 a689 1 if (result != VM_PAGER_PEND) { d708 1 a708 1 !(result == VM_PAGER_ERROR && d731 1 a731 1 if (result == VM_PAGER_PEND) { d736 1 a736 1 if (result != VM_PAGER_OK) { d828 1 a828 1 return uvm_errno2vmerror(error); d860 1 a860 1 return uvm_errno2vmerror(error); @ 1.46 log @When shrinking file size, don't dispose of a page still in use. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.45 2001/02/18 19:40:25 chs Exp $ */ d833 1 a833 1 *hoffset = min(offset + MAXBSIZE, round_page(uvn->u_size)); d940 1 a940 7 if (uvmexp.vnodepages > (uvmexp.active + uvmexp.inactive + uvmexp.wired + uvmexp.free) * 7 / 8) { pg = NULL; } else { pg = uvm_pagealloc(uobj, offset, NULL, 0); } d951 5 a955 1 uvmexp.vnodepages++; @ 1.46.2.1 log @Initial commit of scheduler activations and lightweight process support. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46 2001/02/22 01:02:09 enami Exp $ */ a57 1 #include d192 1 a192 1 DIOCGPART, (caddr_t)&pi, FREAD, curproc->l_proc); d199 1 a199 2 result = VOP_GETATTR(vp, &vattr, curproc->l_proc->p_ucred, curproc->l_proc); @ 1.46.2.2 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.48 2001/03/10 22:46:51 chs Exp $ */ d255 2 d308 1 a308 1 * i/o and returns 0. when the i/o is done, we expect a398 1 boolean_t async = (flags & PGO_SYNCIO) == 0; d528 1 a528 1 if (!async) d593 1 d615 28 a642 4 * the cleaning operation is now done. finish up. note that * on error uvm_pager_put drops the cluster for us. * on success uvm_pager_put returns the cluster to us in * ppsp/npages. d650 1 a650 1 if (result == 0 && async && d707 1 a707 1 if (result == 0 && async && ptmp->uobject != uobj) d712 1 a712 1 * async I/O it is possible that the I/O op d717 1 a717 1 if (result != 0 || !async) { d736 1 a736 1 !(result == EIO && d759 1 a759 1 if (result == 0 && async) { d764 1 a764 1 if (result != 0) { d835 1 a835 1 *hoffset = MIN(offset + MAXBSIZE, round_page(uvn->u_size)); d856 1 a856 1 return error; d888 1 a888 1 return error; d942 7 a948 1 pg = uvm_pagealloc(uobj, offset, NULL, 0); d959 1 a959 5 if (UVM_OBJ_IS_VTEXT(uobj)) { uvmexp.vtextpages++; } else { uvmexp.vnodepages++; } @ 1.46.2.3 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.2 2001/04/09 01:59:24 nathanw Exp $ */ d6 1 a6 1 * The Regents of the University of California. d26 1 a26 1 * Washington University, the University of California, Berkeley and d86 4 a89 5 static int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); static int uvn_put __P((struct uvm_object *, struct vm_page **, int, boolean_t)); d234 1 a234 1 * count must already be at least one (the passed in reference) so d237 1 a237 1 * => caller must call with object unlocked. d286 1 a286 1 d362 1 a362 1 * cleaning the page for us (how nice!). 
in this case, if we d364 1 a364 1 * object we need to wait for the other PG_BUSY pages to clear d372 1 a372 1 * on how many pages are in the object it may be cheaper to do one d378 1 a378 1 * list traversal, so we multiply the number of pages in the d440 1 a440 1 by_list = (uobj->uo_npages <= d497 1 a497 1 for ( ; (by_list && pp != NULL) || d518 1 a518 1 * d535 1 a535 1 if ((pp->flags & PG_CLEAN) != 0 && d596 1 a596 1 result = uvm_pager_put(uobj, pp, &ppsp, &npages, d602 2 a603 2 * it is remotely possible for the async i/o to complete and * the page "pp" be freed or what not before we get a chance d643 2 a644 2 * need to look at each page of the I/O operation. we defer * processing "pp" until the last trip through this "for" loop d646 1 a646 1 * play with the cluster pages [thus the "npages + 1" in the d720 1 a720 1 d778 1 a778 1 &uvn->u_obj.vmobjlock, d844 1 a844 1 d951 1 a951 1 d974 1 a974 1 * => we assume that the caller has a reference of some sort to the @ 1.46.2.4 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.3 2001/06/21 20:10:51 nathanw Exp $ */ d168 6 a173 1 KASSERT(vp->v_type == VREG || vp->v_type == VBLK); @ 1.46.2.5 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.4 2001/08/24 00:13:45 nathanw Exp $ */ d73 1 d79 15 a93 7 void uvn_detach __P((struct uvm_object *)); int uvn_get __P((struct uvm_object *, voff_t, struct vm_page **, int *, int, vm_prot_t, int, int)); int uvn_put __P((struct uvm_object *, voff_t, voff_t, int)); void uvn_reference __P((struct uvm_object *)); int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); d104 1 d107 3 d137 1 a137 1 struct uvm_object *uobj = &vp->v_uobj; d148 1 a148 1 * first get a lock on the uobj. d150 3 a152 4 simple_lock(&uobj->vmobjlock); while (vp->v_flag & VXLOCK) { vp->v_flag |= VXWANT; d154 1 a154 1 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE, d156 1 a156 1 simple_lock(&uobj->vmobjlock); d164 1 a164 1 simple_unlock(&uobj->vmobjlock); d174 1 a174 1 if (vp->v_size == VSIZENOTSET) { d176 2 a177 3 vp->v_flag |= VXLOCK; simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */ d203 1 a203 1 simple_lock(&uobj->vmobjlock); d205 3 a207 4 if (vp->v_flag & VXWANT) { wakeup(vp); } vp->v_flag &= ~(VXLOCK|VXWANT); d210 1 a210 1 simple_unlock(&uobj->vmobjlock); d214 1 a214 1 vp->v_size = used_vnode_size; d218 3 a220 2 simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount, d222 1 a222 1 return uobj; d237 2 a238 1 void a244 1 d252 1 a252 2 void d260 549 d816 2 a817 2 int uvn_put(uobj, off, len, flags) d819 2 a820 3 voff_t off; voff_t len; int flags; d825 1 a825 1 error = VOP_PUTPAGES(vp, off, len, flags); d841 1 a841 1 int d866 1 a866 1 * => returned pages will be BUSY. d870 1 a870 1 uvn_findpages(uobj, offset, npagesp, pgs, flags) d874 1 a874 1 struct vm_page **pgs; d877 1 a877 1 int i, count, npages, rv; d879 1 a879 1 count = 0; d881 2 a882 16 if (flags & UFP_BACKWARD) { for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } } else { for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv = uvn_findpage(uobj, offset, &pgs[i], flags); if (flags & UFP_DIRTYONLY && rv == 0) { break; } count++; } d884 1 a884 1 *npagesp = count; d887 1 a887 1 int a894 1 boolean_t dirty; d906 1 a906 1 /* nope? 
allocate one now */ d928 1 a928 1 UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0); d936 1 a936 1 if ((pg->flags & PG_BUSY) != 0) { a953 10 /* stop on clean pages if requested */ if (flags & UFP_DIRTYONLY) { dirty = pmap_clear_modify(pg) || (pg->flags & PG_CLEAN) == 0; pg->flags |= PG_CLEAN; if (!dirty) { return 0; } } d957 1 a957 1 UVMHIST_LOG(ubchist, "found %p", pg,0,0,0); d965 1 a965 1 * uvm_vnp_setsize: grow or shrink a vnode uobj d973 7 d987 1 a987 1 struct uvm_object *uobj = &vp->v_uobj; d991 3 a993 3 simple_lock(&uobj->vmobjlock); UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x", vp, vp->v_size, newsize, 0); d1000 2 a1001 4 if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) { (void) uvn_put(uobj, pgend, 0, PGO_FREE); } else { simple_unlock(&uobj->vmobjlock); d1003 2 a1004 1 vp->v_size = newsize; d1026 1 a1026 1 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UBC_WRITE); @ 1.46.2.6 log @Catch up to -current. Again. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.5 2001/09/21 22:37:18 nathanw Exp $ */ d252 1 a252 1 * => object must be locked on entry! VOP_PUTPAGES must unlock it. d258 1 a258 1 uvn_put(uobj, offlo, offhi, flags) d260 2 a261 2 voff_t offlo; voff_t offhi; d267 1 a267 3 LOCK_ASSERT(simple_lock_held(&vp->v_interlock)); error = VOP_PUTPAGES(vp, offlo, offhi, flags); LOCK_ASSERT(!simple_lock_held(&vp->v_interlock)); @ 1.46.2.7 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.6 2001/09/26 19:55:16 nathanw Exp $ */ d48 4 a54 7 #include __KERNEL_RCSID(0, "$NetBSD$"); #include "fs_nfs.h" #include "opt_uvmhist.h" #include "opt_ddb.h" @ 1.46.2.8 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.7 2001/11/14 19:19:10 nathanw Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.7 2001/11/14 19:19:10 nathanw Exp $"); d386 1 a386 1 uvmexp.execpages++; d388 1 a388 1 uvmexp.filepages++; d466 1 a466 1 (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO); @ 1.46.2.9 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.8 2002/01/08 00:35:08 nathanw Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.8 2002/01/08 00:35:08 nathanw Exp $"); d316 1 a316 1 int d324 1 a324 1 int i, count, found, npages, rv; d326 1 a326 1 count = found = 0; d331 3 a333 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; d339 3 a341 5 if (rv == 0) { if (flags & UFP_DIRTYONLY) break; } else found++; a345 1 return (found); a403 1 UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0); a421 1 UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0); @ 1.46.2.10 log @Curproc->curlwp renaming. Change uses of "curproc->l_proc" back to "curproc", which is more like the original use. Bare uses of "curproc" are now "curlwp". "curproc" is now #defined in proc.h as ((curlwp) ? (curlwp)->l_proc) : NULL) so that it is always safe to reference curproc (*de*referencing curproc is another story, but that's always been true). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.9 2002/06/20 03:50:47 nathanw Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.9 2002/06/20 03:50:47 nathanw Exp $"); d170 1 a170 1 /* XXX: curlwp? */ d181 1 a181 1 DIOCGPART, (caddr_t)&pi, FREAD, curproc); d188 2 a189 2 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); @ 1.46.2.11 log @No longer need to pull in lwp.h; proc.h pulls it in for us. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.10 2002/06/24 22:13:01 nathanw Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.10 2002/06/24 22:13:01 nathanw Exp $"); d61 1 @ 1.46.2.12 log @Whitespace. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.46.2.11 2002/07/12 01:40:46 nathanw Exp $ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.46.2.11 2002/07/12 01:40:46 nathanw Exp $"); d169 1 a169 1 /* XXX: curproc? */ d187 2 a188 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); @ 1.46.2.13 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD$ */ d53 1 a53 1 __KERNEL_RCSID(0, "$NetBSD$"); a127 1 const struct bdevsw *bdev; d153 4 a156 8 if (vp->v_type == VBLK) { bdev = bdevsw_lookup(vp->v_rdev); if (bdev == NULL || bdev->d_type != D_DISK) { simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); return(NULL); } d179 2 a180 7 bdev = bdevsw_lookup(vp->v_rdev); if (bdev != NULL) { result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc); } else { result = ENXIO; } @ 1.45 log @in uvn_flush(), add a fast path for the case where the vnode has no pages. update the comment above this function while I'm here. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.44 2001/02/08 06:43:05 chs Exp $ */ d1018 1 d1030 2 a1031 2 if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) { (void) uvn_flush(&uvn->u_obj, newsize, 0, PGO_FREE); @ 1.44 log @remove a debug printf() that has outlived its usefulness. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.43 2001/02/06 10:53:23 chs Exp $ */ a301 6 * NOTE: currently we have to use VOP_READ/VOP_WRITE because they go * through the buffer cache and allow I/O in any size. These VOPs use * synchronous i/o. [vs. VOP_STRATEGY which can be async, but doesn't * go through the buffer cache or allow I/O sizes larger than a * block]. we will eventually want to change this. * a302 1 * uvm provides the uvm_aiodesc structure for async i/o management. d305 1 a305 1 * an aiodesc on the "pending" list (protected by splbio()), starts the d308 1 a308 1 * time). this function should remove the aiodesc from the pending list d311 1 a311 7 * list and call the "aiodone" hook for each done request (see uvm_pager.c). * [in the old vm code, this was done by calling the "put" routine with * null arguments which made the code harder to read and understand because * you had one function ("put") doing two things.] * * so the current pager needs: * int uvn_aiodone(struct uvm_aiodesc *) d403 9 @ 1.43 log @in uvn_flush(), interpret a "stop" value of 0 as meaning all pages at offsets equal to or higher than "start". use this in uvm_vnp_setsize() instead of the vnode's size since there can be pages past EOF. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.42 2001/01/28 23:30:47 thorpej Exp $ */ a441 7 #ifdef DEBUG if (stop > round_page(uvn->u_size)) { printf("uvn_flush: oor vp %p start 0x%x stop 0x%x " "size 0x%x\n", uvn, (int)start, (int)stop, (int)round_page(uvn->u_size)); } #endif @ 1.42 log @Page scanner improvements, behavior is actually a bit more like Mach VM's now. Specific changes: - Pages now need not have all of their mappings removed before being put on the inactive list. They only need to have the "referenced" attribute cleared. This makes putting pages onto the inactive list much more efficient. In order to eliminate redundant clearings of "refrenced", callers of uvm_pagedeactivate() must now do this themselves. 
- When checking the "modified" attribute for a page (for clearing PG_CLEAN), make sure to only do it if PG_CLEAN is currently set on the page (saves a potentially expensive pmap operation). - When scanning the inactive list, if a page is referenced, reactivate it (this part was actually added in uvm_pdaemon.c,v 1.27). This now works properly now that pages on the inactive list are allowed to have mappings. - When scanning the inactive list and considering a page for freeing, remove all mappings, and then check the "modified" attribute if the page is marked PG_CLEAN. - When scanning the active list, if the page was referenced since its last sweep by the scanner, don't deactivate it. (This part was actually added in uvm_pdaemon.c,v 1.28.) These changes greatly improve interactive performance during moderate to high memory and I/O load. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.41 2001/01/08 06:21:13 chs Exp $ */ d345 1 d429 3 d1041 1 a1041 1 (void) uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE); @ 1.41 log @in uvn_flush(), when PGO_SYNCIO is specified then we should wait for pending i/os to complete before returning even if PGO_CLEANIT is not specified. this fixes two races: (1) NFS write rpcs vs. setattr operations which truncate the file. if the truncate doesn't wait for pending writes to complete then a later write rpc completion can undo the effect of the truncate. this problem has been reported by several people. (2) write i/os in disk-based filesystem vs. the disk block being freed by a truncation, allocated to a new file, and written again with different data. if the disk driver reorders the requests and does the second i/o first, the old data will clobber the new, corrupting the new file. I haven't heard of anyone experiencing this problem yet, but it's fixed now anyway. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.40 2000/12/16 06:17:09 chs Exp $ */ d543 2 a544 1 (pp->pqflags & PQ_ACTIVE) != 0) d568 1 a568 1 pmap_page_protect(pp, VM_PROT_NONE); d760 1 a760 1 pmap_page_protect(ptmp, VM_PROT_NONE); @ 1.40 log @in uvn_flush(), don't deactivate busy pages. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.39 2000/12/06 03:37:30 chs Exp $ */ d346 2 a347 1 * if (and only if) we need to clean a page (PGO_CLEANIT). d349 6 a354 6 * => if PGO_CLEANIT is set, we may block (due to I/O). thus, a caller * might want to unlock higher level resources (e.g. vm_map) * before calling flush. * => if PGO_CLEANIT is not set, then we will neither unlock the object * or block. * => if PGO_ALLPAGE is set, then all pages in the object are valid targets d533 1 a533 2 if ((flags & (PGO_CLEANIT|PGO_SYNCIO)) == (PGO_CLEANIT|PGO_SYNCIO)) @ 1.39 log @in uvn_findpage(), only increment the counter of vnode pages if we succeed in allocating a page. from Lars Heidieker in PR 11636. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.38 2000/11/30 11:04:44 simonb Exp $ */ d565 1 d757 1 @ 1.38 log @Move uvm_pgcnt_vnode and uvm_pgcnt_anon into uvmexp (as vnodepages and anonpages), and add vtextpages which is currently unused but will be used to trace the number of pages used by vtext vnodes. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.37 2000/11/27 08:40:06 chs Exp $ */ a949 1 uvmexp.vnodepages++; d961 1 @ 1.37 log @Initial integration of the Unified Buffer Cache project. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */ a73 2 extern u_long uvm_pgcnt_vnode; d944 1 a944 1 if (uvm_pgcnt_vnode > d950 1 a950 1 uvm_pgcnt_vnode++; @ 1.36 log @g/c unused pager ops "asyncget" and "aiodone". 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.35 2000/06/27 17:29:37 mrg Exp $ */ d50 1 d58 1 d66 2 d74 1 a74 14 /* * private global data structure * * we keep a list of writeable active vnode-backed VM objects for sync op. * we keep a simpleq of vnodes that are currently being sync'd. */ LIST_HEAD(uvn_list_struct, uvm_vnode); static struct uvn_list_struct uvn_wlist; /* writeable uvns */ static simple_lock_data_t uvn_wl_lock; /* locks uvn_wlist */ SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode); static struct uvn_sq_struct uvn_sync_q; /* sync'ing uvns */ lock_data_t uvn_sync_lock; /* locks sync operation */ d80 14 a93 16 static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *, voff_t *)); static void uvn_detach __P((struct uvm_object *)); static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t, int)); static int uvn_get __P((struct uvm_object *, voff_t, vm_page_t *, int *, int, vm_prot_t, int, int)); static void uvn_init __P((void)); static int uvn_io __P((struct uvm_vnode *, vm_page_t *, int, int, int)); static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); static void uvn_reference __P((struct uvm_object *)); static boolean_t uvn_releasepg __P((struct vm_page *, struct vm_page **)); d100 1 a100 1 uvn_init, d103 1 a103 1 NULL, /* no specialized fault routine required */ d108 1 a108 1 uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */ a116 16 * uvn_init * * init pager private data structures. */ static void uvn_init() { LIST_INIT(&uvn_wlist); simple_lock_init(&uvn_wl_lock); /* note: uvn_sync_q init'd in uvm_vnp_sync() */ lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0); } /* d139 1 a139 1 int oldflags, result; d141 1 a141 1 u_quad_t used_vnode_size; d145 1 a145 2 used_vnode_size = (u_quad_t)0; /* XXX gcc -Wuninitialized */ d151 2 a152 4 while (uvn->u_flags & UVM_VNODE_BLOCKED) { printf("uvn_attach: blocked at 0x%p flags 0x%x\n", uvn, uvn->u_flags); uvn->u_flags |= UVM_VNODE_WANTED; d164 1 a164 1 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ d169 6 d176 2 a177 4 * now we have lock and uvn must not be in a blocked state. * first check to see if it is already active, in which case * we can bump the reference count, check to see if we need to * add it to the writeable list, and then return. d179 1 a179 19 if (uvn->u_flags & UVM_VNODE_VALID) { /* already active? */ /* regain VREF if we were persisting */ if (uvn->u_obj.uo_refs == 0) { VREF(vp); UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)", 0,0,0,0); } uvn->u_obj.uo_refs++; /* bump uvn ref! */ /* check for new writeable uvn */ if ((accessprot & VM_PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { simple_lock(&uvn_wl_lock); LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); /* we are now on wlist! */ uvn->u_flags |= UVM_VNODE_WRITEABLE; } d181 1 a181 16 /* unlock and return */ simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 0, 0, 0); return (&uvn->u_obj); } /* * need to call VOP_GETATTR() to get the attributes, but that could * block (due to I/O), so we want to unlock the object before calling. * however, we want to keep anyone else from playing with the object * while it is unlocked. to do this we set UVM_VNODE_ALOCK which * prevents anyone from attaching to the vnode until we are done with * it. 
*/ uvn->u_flags = UVM_VNODE_ALOCK; a183 1 d197 2 a198 2 used_vnode_size = (u_quad_t)pi.disklab->d_secsize * (u_quad_t)pi.part->p_size; d207 5 a211 1 simple_lock(&uvn->u_obj.vmobjlock); a213 3 if (uvn->u_flags & UVM_VNODE_WANTED) wakeup(uvn); uvn->u_flags = 0; a217 20 /* * make sure that the newsize fits within a vaddr_t * XXX: need to revise addressing data types */ #ifdef DEBUG if (vp->v_type == VBLK) printf("used_vnode_size = %qu\n", (long long)used_vnode_size); #endif /* * now set up the uvn. */ uvn->u_obj.pgops = &uvm_vnodeops; TAILQ_INIT(&uvn->u_obj.memq); uvn->u_obj.uo_npages = 0; uvn->u_obj.uo_refs = 1; /* just us... */ oldflags = uvn->u_flags; uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST; uvn->u_nio = 0; a219 6 /* if write access, we need to add it to the wlist */ if (accessprot & VM_PROT_WRITE) { simple_lock(&uvn_wl_lock); LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! */ d222 1 a222 6 /* * add a reference to the vnode. this reference will stay as long * as there is a valid mapping of the vnode. dropped when the * reference count goes to zero [and we either free or persist]. */ VREF(vp); d224 3 a226 5 if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0); return(&uvn->u_obj); d246 1 a246 17 #ifdef DEBUG struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; #endif UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist); simple_lock(&uobj->vmobjlock); #ifdef DEBUG if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { printf("uvn_reference: ref=%d, flags=0x%x\n", uvn->u_flags, uobj->uo_refs); panic("uvn_reference: invalid state"); } #endif uobj->uo_refs++; UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)", uobj, uobj->uo_refs,0,0); simple_unlock(&uobj->vmobjlock); d262 1 a262 285 struct uvm_vnode *uvn; struct vnode *vp; int oldflags; UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist); simple_lock(&uobj->vmobjlock); UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0); uobj->uo_refs--; /* drop ref! */ if (uobj->uo_refs) { /* still more refs */ simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0); return; } /* * get other pointers ... */ uvn = (struct uvm_vnode *) uobj; vp = (struct vnode *) uobj; /* * clear VTEXT flag now that there are no mappings left (VTEXT is used * to keep an active text file from being overwritten). */ vp->v_flag &= ~VTEXT; /* * we just dropped the last reference to the uvn. see if we can * let it "stick around". */ if (uvn->u_flags & UVM_VNODE_CANPERSIST) { /* won't block */ uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES); simple_unlock(&uobj->vmobjlock); vrele(vp); /* drop vnode reference */ UVMHIST_LOG(maphist,"<- done/vrele! (persist)", 0,0,0,0); return; } /* * its a goner! */ UVMHIST_LOG(maphist," its a goner (flushing)!", 0,0,0,0); uvn->u_flags |= UVM_VNODE_DYING; /* * even though we may unlock in flush, no one can gain a reference * to us until we clear the "dying" flag [because it blocks * attaches]. we will not do that until after we've disposed of all * the pages with uvn_flush(). note that before the flush the only * pages that could be marked PG_BUSY are ones that are in async * pageout by the daemon. (there can't be any pending "get"'s * because there are no references to the object). 
*/ (void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); UVMHIST_LOG(maphist," its a goner (done flush)!", 0,0,0,0); /* * given the structure of this pager, the above flush request will * create the following state: all the pages that were in the object * have either been free'd or they are marked PG_BUSY|PG_RELEASED. * the PG_BUSY bit was set either by us or the daemon for async I/O. * in either case, if we have pages left we can't kill the object * yet because i/o is pending. in this case we set the "relkill" * flag which will cause pgo_releasepg to kill the object once all * the I/O's are done [pgo_releasepg will be called from the aiodone * routine or from the page daemon]. */ if (uobj->uo_npages) { /* I/O pending. iodone will free */ #ifdef DEBUG /* * XXXCDC: very unlikely to happen until we have async i/o * so print a little info message in case it does. */ printf("uvn_detach: vn %p has pages left after flush - " "relkill mode\n", uobj); #endif uvn->u_flags |= UVM_VNODE_RELKILL; simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0, 0, 0); return; } /* * kill object now. note that we can't be on the sync q because * all references are gone. */ if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); /* protect uvn_wlist */ LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } #ifdef DIAGNOSTIC if (uobj->memq.tqh_first != NULL) panic("uvn_deref: vnode VM object still has pages afer " "syncio/free flush"); #endif oldflags = uvn->u_flags; uvn->u_flags = 0; simple_unlock(&uobj->vmobjlock); /* wake up any sleepers */ if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); /* * drop our reference to the vnode. */ vrele(vp); UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0); return; } /* * uvm_vnp_terminate: external hook to clear out a vnode's VM * * called in two cases: * [1] when a persisting vnode vm object (i.e. one with a zero reference * count) needs to be freed so that a vnode can be reused. this * happens under "getnewvnode" in vfs_subr.c. if the vnode from * the free list is still attached (i.e. not VBAD) then vgone is * called. as part of the vgone trace this should get called to * free the vm object. this is the common case. * [2] when a filesystem is being unmounted by force (MNT_FORCE, * "umount -f") the vgone() function is called on active vnodes * on the mounted file systems to kill their data (the vnodes become * "dead" ones [see src/sys/miscfs/deadfs/...]). that results in a * call here (even if the uvn is still in use -- i.e. has a non-zero * reference count). this case happens at "umount -f" and during a * "reboot/halt" operation. * * => the caller must XLOCK and VOP_LOCK the vnode before calling us * [protects us from getting a vnode that is already in the DYING * state...] * => unlike uvn_detach, this function must not return until all the * uvn's pages are disposed of. * => in case [2] the uvn is still alive after this call, but all I/O * ops will fail (due to the backing vnode now being "dead"). this * will prob. kill any process using the uvn due to pgo_get failing. 
*/ void uvm_vnp_terminate(vp) struct vnode *vp; { struct uvm_vnode *uvn = &vp->v_uvm; int oldflags; UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist); /* * lock object and check if it is valid */ simple_lock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, " vp=0x%x, ref=%d, flag=0x%x", vp, uvn->u_obj.uo_refs, uvn->u_flags, 0); if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0); return; } /* * must be a valid uvn that is not already dying (because XLOCK * protects us from that). the uvn can't in the ALOCK state * because it is valid, and uvn's that are in the ALOCK state haven't * been marked valid yet. */ #ifdef DEBUG /* * debug check: are we yanking the vnode out from under our uvn? */ if (uvn->u_obj.uo_refs) { printf("uvm_vnp_terminate(%p): terminating active vnode " "(refs=%d)\n", uvn, uvn->u_obj.uo_refs); } #endif /* * it is possible that the uvn was detached and is in the relkill * state [i.e. waiting for async i/o to finish so that releasepg can * kill object]. we take over the vnode now and cancel the relkill. * we want to know when the i/o is done so we can recycle right * away. note that a uvn can only be in the RELKILL state if it * has a zero reference count. */ if (uvn->u_flags & UVM_VNODE_RELKILL) uvn->u_flags &= ~UVM_VNODE_RELKILL; /* cancel RELKILL */ /* * block the uvn by setting the dying flag, and then flush the * pages. (note that flush may unlock object while doing I/O, but * it will re-lock it before it returns control here). * * also, note that we tell I/O that we are already VOP_LOCK'd so * that uvn_io doesn't attempt to VOP_LOCK again. * * XXXCDC: setting VNISLOCKED on an active uvn which is being terminated * due to a forceful unmount might not be a good idea. maybe we * need a way to pass in this info to uvn_flush through a * pager-defined PGO_ constant [currently there are none]. */ uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED; (void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); /* * as we just did a flush we expect all the pages to be gone or in * the process of going. sleep to wait for the rest to go [via iosync]. */ while (uvn->u_obj.uo_npages) { #ifdef DEBUG struct vm_page *pp; for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) { if ((pp->flags & PG_BUSY) == 0) panic("uvm_vnp_terminate: detected unbusy pg"); } if (uvn->u_nio == 0) panic("uvm_vnp_terminate: no I/O to wait for?"); printf("uvm_vnp_terminate: waiting for I/O to fin.\n"); /* * XXXCDC: this is unlikely to happen without async i/o so we * put a printf in just to keep an eye on it. */ #endif uvn->u_flags |= UVM_VNODE_IOSYNC; UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, "uvn_term",0); simple_lock(&uvn->u_obj.vmobjlock); } /* * done. now we free the uvn if its reference count is zero * (true if we are zapping a persisting uvn). however, if we are * terminating a uvn with active mappings we let it live ... future * calls down to the vnode layer will fail. */ oldflags = uvn->u_flags; if (uvn->u_obj.uo_refs) { /* * uvn must live on it is dead-vnode state until all references * are gone. restore flags. clear CANPERSIST state. */ uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED| UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST); } else { /* * free the uvn now. note that the VREF reference is already * gone [it is dropped when we enter the persist state]. 
*/ if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) panic("uvm_vnp_terminate: io sync wanted bit set"); if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } uvn->u_flags = 0; /* uvn is history, clear all bits */ } if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); /* object lock still held */ simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0); d275 1 a275 1 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return d287 1 a287 5 struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject; #ifdef DIAGNOSTIC if ((pg->flags & PG_RELEASED) == 0) panic("uvn_releasepg: page not released!"); #endif d295 1 a295 1 *nextpgp = pg->pageq.tqe_next; /* next page for daemon */ a299 26 /* * now see if we need to kill the object */ if (uvn->u_flags & UVM_VNODE_RELKILL) { if (uvn->u_obj.uo_refs) panic("uvn_releasepg: kill flag set on referenced " "object!"); if (uvn->u_obj.uo_npages == 0) { if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } #ifdef DIAGNOSTIC if (uvn->u_obj.memq.tqh_first) panic("uvn_releasepg: pages in object with npages == 0"); #endif if (uvn->u_flags & UVM_VNODE_WANTED) /* still holding object lock */ wakeup(uvn); uvn->u_flags = 0; /* DEAD! */ simple_unlock(&uvn->u_obj.vmobjlock); return (FALSE); } } d403 2 a404 1 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; d406 2 a407 1 struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp; d409 1 a409 1 boolean_t retval, need_iosync, by_list, needs_clean, all; d413 11 a424 1 curoff = 0; /* XXX: shut up gcc */ d429 1 d431 2 a432 1 retval = TRUE; /* return value */ d435 1 a435 1 by_list = TRUE; /* always go by the list */ d440 5 a444 3 if (stop > round_page(uvn->u_size)) printf("uvn_flush: strange, got an out of range " "flush (fixed)\n"); d469 1 a469 2 for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) { d493 1 a493 1 pp = uobj->memq.tqh_first; d499 3 a501 3 ppnext = NULL; /* XXX: shut up gcc */ ppsp = NULL; /* XXX: shut up gcc */ uvm_lock_pageq(); /* page queues locked */ d505 1 a505 2 (!by_list && curoff < stop) ; pp = ppnext) { a506 5 /* * range check */ d509 1 a509 1 ppnext = pp->listq.tqe_next; a511 1 a512 5 /* * null check */ a518 1 d534 1 a534 2 if ((pp->flags & PG_BUSY) != 0 && (flags & (PGO_CLEANIT|PGO_SYNCIO)) == d538 1 d550 1 a550 2 pp->flags |= PG_CLEANCHK; /* update "hint" */ a557 1 /* load ppnext */ d559 1 a559 1 ppnext = pp->listq.tqe_next; a564 1 /* now dispose of pp */ a573 1 /* release busy pages */ a576 1 /* removed page from object */ d593 1 d604 1 a604 1 flags | PGO_DOACTCLUST, start, stop); d627 2 a628 1 /* d655 2 a656 1 if (result == VM_PAGER_PEND) { d658 11 a668 16 if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { /* * no per-page ops: refresh ppnext and continue */ if (by_list) { if (pp->version == pp_version) ppnext = pp->listq.tqe_next; else /* reset */ ppnext = uobj->memq.tqh_first; } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } continue; d670 1 a670 2 /* need to do anything here? 
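 *
 * [editorial note, not part of the original source: uvn_flush's by-list
 *  traversal guards against the object's page list changing while the
 *  object was unlocked for I/O by remembering pp->version before the
 *  operation and re-checking it afterwards.  a condensed sketch of that
 *  pattern, paraphrased from the surrounding code, follows; the save of
 *  pp_version happens earlier in uvn_flush and is shown here only for
 *  completeness.]
 */
#if 0   /* illustrative sketch only; see the by_list handling in uvn_flush */
        pp_version = pp->version;       /* remember before dropping the lock */
        /* ... the pageout I/O may unlock and re-lock the object here ... */
        if (by_list) {
                if (pp->version == pp_version)
                        ppnext = pp->listq.tqe_next;    /* list unchanged */
                else
                        ppnext = uobj->memq.tqh_first;  /* restart from head */
        } else {
                if (curoff < stop)
                        ppnext = uvm_pagelookup(uobj, curoff);
        }
#endif
/*
 * [end of editorial sketch; original text resumes below.]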
*/ d697 1 a697 1 ppnext = pp->listq.tqe_next; d699 2 a700 2 /* reset */ ppnext = uobj->memq.tqh_first; d703 2 a704 1 ppnext = uvm_pagelookup(uobj, curoff); d709 1 a709 1 * verify the page didn't get moved while obj was d723 1 a723 1 if (ptmp->flags & PG_WANTED) d726 1 a726 1 a729 2 /* pgo_releasepg wants this */ d731 4 a734 1 if (!uvn_releasepg(ptmp, NULL)) d736 3 a738 4 uvm_lock_pageq(); /* relock */ continue; /* next page */ d740 9 a748 3 ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); if ((flags & PGO_FREE) == 0) pmap_clear_modify(ptmp); a761 1 d770 1 a770 2 "offset=0x%llx. error " "during pageout.\n", d772 2 a773 1 (long long)pp->offset); a782 1 a783 1 a785 3 /* * done with pagequeues: unlock */ d787 8 d796 4 a799 4 /* * now wait for all I/O if required. */ if (need_iosync) { d801 9 a809 5 UVMHIST_LOG(maphist," <>",0,0,0,0); while (uvn->u_nio != 0) { uvn->u_flags |= UVM_VNODE_IOSYNC; UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); d812 1 a812 3 if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) wakeup(&uvn->u_flags); uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED); d836 2 a837 1 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; d839 1 a839 12 if (*loffset >= uvn->u_size) panic("uvn_cluster: offset out of range"); /* * XXX: old pager claims we could use VOP_BMAP to get maxcontig value. */ *hoffset = *loffset + MAXBSIZE; if (*hoffset > round_page(uvn->u_size)) /* past end? */ *hoffset = round_page(uvn->u_size); return; a844 1 * => prefer map unlocked (not required) a847 2 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. * [thus we never do async i/o! see iodone comment] d856 2 a857 5 int retval; /* note: object locked */ retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE); /* note: object unlocked */ d859 2 a860 1 return(retval); d881 1 a881 1 int centeridx, advice, flags; d883 1 d885 9 a893 6 voff_t current_offset; struct vm_page *ptmp; int lcv, result, gotpages; boolean_t done; UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0); a894 3 /* * step 1: handled the case where fault data structures are locked. */ d896 6 a901 1 if (flags & PGO_LOCKED) { d903 9 a911 4 /* * gotpages is the current number of pages we've gotten (which * we pass back up to caller via *npagesp. */ d913 7 a919 1 gotpages = 0; d921 10 a930 5 /* * step 1a: get pages that are already resident. only do this * if the data structures are locked (i.e. the first time * through). */ d932 21 a952 19 done = TRUE; /* be optimistic */ for (lcv = 0, current_offset = offset ; lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { /* do we care about this page? if not, skip it */ if (pps[lcv] == PGO_DONTCARE) continue; /* lookup page */ ptmp = uvm_pagelookup(uobj, current_offset); /* to be useful must get a non-busy, non-released pg */ if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0) done = FALSE; /* need to do a wait or I/O! */ continue; d954 4 a957 90 /* * useful page: busy/lock it and plug it in our * result array */ ptmp->flags |= PG_BUSY; /* loan up to caller */ UVM_PAGE_OWN(ptmp, "uvn_get1"); pps[lcv] = ptmp; gotpages++; } /* "for" lcv loop */ /* * XXX: given the "advice", should we consider async read-ahead? * XXX: fault current does deactive of pages behind us. is * this good (other callers might now). */ /* * XXX: read-ahead currently handled by buffer cache (bread) * level. * XXX: no async i/o available. * XXX: so we don't do anything now. 
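 *
 * [editorial note, not part of the original source: the PGO_LOCKED /
 *  VM_PAGER_UNLOCK handling above implies a two-pass protocol on the
 *  caller side (e.g. the fault handler).  the sketch below shows that
 *  assumed caller behaviour; the caller itself lives outside this file
 *  and the variable names are placeholders.]
 */
#if 0   /* illustrative, assumed caller-side sketch only */
        /* pass 1: fault data structures locked, take only resident pages */
        result = uobj->pgops->pgo_get(uobj, offset, pgs, &npages,
            centeridx, access_type, advice, PGO_LOCKED);
        if (result == VM_PAGER_UNLOCK) {
                /*
                 * pass 2: drop the higher-level locks, then call again
                 * without PGO_LOCKED so the pager may sleep and do I/O
                 * for the missing pages.
                 */
                result = uobj->pgops->pgo_get(uobj, offset, pgs, &npages,
                    centeridx, access_type, advice, 0);
        }
#endif
/*
 * [end of editorial sketch; original text resumes below.]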
*/ /* * step 1c: now we've either done everything needed or we to * unlock and do some waiting or I/O. */ *npagesp = gotpages; /* let caller know */ if (done) return(VM_PAGER_OK); /* bingo! */ else /* EEK! Need to unlock and I/O */ return(VM_PAGER_UNLOCK); } /* * step 2: get non-resident or busy pages. * object is locked. data structures are unlocked. * * XXX: because we can't do async I/O at this level we get things * page at a time (otherwise we'd chunk). the VOP_READ() will do * async-read-ahead for us at a lower level. */ for (lcv = 0, current_offset = offset ; lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { /* skip over pages we've already gotten or don't want */ /* skip over pages we don't _have_ to get */ if (pps[lcv] != NULL || (lcv != centeridx && (flags & PGO_ALLPAGES) == 0)) continue; /* * we have yet to locate the current page (pps[lcv]). we first * look for a page that is already at the current offset. if * we fine a page, we check to see if it is busy or released. * if that is the case, then we sleep on the page until it is * no longer busy or released and repeat the lookup. if the * page we found is neither busy nor released, then we busy it * (so we own it) and plug it into pps[lcv]. this breaks the * following while loop and indicates we are ready to move on * to the next page in the "lcv" loop above. * * if we exit the while loop with pps[lcv] still set to NULL, * then it means that we allocated a new busy/fake/clean page * ptmp in the object and we need to do I/O to fill in the data. */ while (pps[lcv] == NULL) { /* top of "pps" while loop */ /* look for a current page */ ptmp = uvm_pagelookup(uobj, current_offset); /* nope? allocate one now (if we can) */ if (ptmp == NULL) { ptmp = uvm_pagealloc(uobj, current_offset, NULL, 0); /* out of RAM? */ if (ptmp == NULL) { simple_unlock(&uobj->vmobjlock); uvm_wait("uvn_getpage"); simple_lock(&uobj->vmobjlock); /* goto top of pps while loop */ continue; d959 2 a960 13 /* * got new page ready for I/O. break pps * while loop. pps[lcv] is still NULL. */ break; } /* page is there, see if we need to wait on it */ if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { ptmp->flags |= PG_WANTED; UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, FALSE, "uvn_get",0); d962 1 a962 1 continue; /* goto top of pps while loop */ d964 5 a968 10 /* * if we get here then the page has become resident * and unbusy between steps 1 and 2. we busy it * now (so we own it) and set pps[lcv] (so that we * exit the while loop). */ ptmp->flags |= PG_BUSY; UVM_PAGE_OWN(ptmp, "uvn_get2"); pps[lcv] = ptmp; d971 11 a981 38 /* * if we own the a valid page at the correct offset, pps[lcv] * will point to it. nothing more to do except go to the * next page. */ if (pps[lcv]) continue; /* next lcv */ /* * we have a "fake/busy/clean" page that we just allocated. do * I/O to fill it with valid data. note that object must be * locked going into uvn_io, but will be unlocked afterwards. */ result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1, PGO_SYNCIO, UIO_READ); /* * I/O done. object is unlocked (by uvn_io). because we used * syncio the result can not be PEND or AGAIN. we must relock * and check for errors. */ /* lock object. check for errors. 
*/ simple_lock(&uobj->vmobjlock); if (result != VM_PAGER_OK) { if (ptmp->flags & PG_WANTED) /* object lock still held */ wakeup(ptmp); ptmp->flags &= ~(PG_WANTED|PG_BUSY); UVM_PAGE_OWN(ptmp, NULL); uvm_lock_pageq(); uvm_pagefree(ptmp); uvm_unlock_pageq(); simple_unlock(&uobj->vmobjlock); return(result); d983 5 a987 70 /* * we got the page! clear the fake flag (indicates valid * data now in page) and plug into our result array. note * that page is still busy. * * it is the callers job to: * => check if the page is released * => unbusy the page * => activate the page */ ptmp->flags &= ~PG_FAKE; /* data is valid ... */ pmap_clear_modify(ptmp); /* ... and clean */ pps[lcv] = ptmp; } /* lcv loop */ /* * finally, unlock object and return. */ simple_unlock(&uobj->vmobjlock); return (VM_PAGER_OK); } /* * uvn_io: do I/O to a vnode * * => prefer map unlocked (not required) * => object must be locked! we will _unlock_ it before starting I/O. * => flags: PGO_SYNCIO -- use sync. I/O * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. * [thus we never do async i/o! see iodone comment] */ static int uvn_io(uvn, pps, npages, flags, rw) struct uvm_vnode *uvn; vm_page_t *pps; int npages, flags, rw; { struct vnode *vn; struct uio uio; struct iovec iov; vaddr_t kva; off_t file_offset; int waitf, result, mapinflags; size_t got, wanted; UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "rw=%d", rw,0,0,0); /* * init values */ waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT; vn = (struct vnode *) uvn; file_offset = pps[0]->offset; /* * check for sync'ing I/O. */ while (uvn->u_flags & UVM_VNODE_IOSYNC) { if (waitf == M_NOWAIT) { simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0); return(VM_PAGER_AGAIN); a988 9 uvn->u_flags |= UVM_VNODE_IOSYNCWANTED; UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock, FALSE, "uvn_iosync",0); simple_lock(&uvn->u_obj.vmobjlock); } /* * check size */ d990 5 a994 4 if (file_offset >= uvn->u_size) { simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0); return(VM_PAGER_BAD); d996 2 a997 252 /* * first try and map the pages in (without waiting) */ mapinflags = (rw == UIO_READ) ? UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE; kva = uvm_pagermapin(pps, npages, NULL, mapinflags); if (kva == 0 && waitf == M_NOWAIT) { simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0); return(VM_PAGER_AGAIN); } /* * ok, now bump u_nio up. at this point we are done with uvn * and can unlock it. if we still don't have a kva, try again * (this time with sleep ok). */ uvn->u_nio++; /* we have an I/O in progress! */ simple_unlock(&uvn->u_obj.vmobjlock); /* NOTE: object now unlocked */ if (kva == 0) kva = uvm_pagermapin(pps, npages, NULL, mapinflags | UVMPAGER_MAPIN_WAITOK); /* * ok, mapped in. our pages are PG_BUSY so they are not going to * get touched (so we can look at "offset" without having to lock * the object). set up for I/O. */ /* * fill out uio/iov */ iov.iov_base = (caddr_t) kva; wanted = npages << PAGE_SHIFT; if (file_offset + wanted > uvn->u_size) wanted = uvn->u_size - file_offset; /* XXX: needed? */ iov.iov_len = wanted; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = file_offset; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = rw; uio.uio_resid = wanted; uio.uio_procp = NULL; /* * do the I/O! (XXX: curproc?) 
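 *
 * [editorial worked example, not part of the original source: with a
 *  hypothetical PAGE_SIZE of 4096, npages = 2, file_offset = 4096 and
 *  uvn->u_size = 6144, the setup above clamps
 *      wanted = (2 << PAGE_SHIFT) = 8192  ->  6144 - 4096 = 2048,
 *  so only 2048 bytes are actually read or written.  on a successful
 *  UIO_READ, got = wanted - uio.uio_resid = 2048, and the code further
 *  below zeroes the remaining (8192 - 2048) = 6144 bytes of the mapped
 *  pages so no stale data is exposed past end-of-file.]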
*/ UVMHIST_LOG(maphist, "calling VOP",0,0,0,0); /* * This process may already have this vnode locked, if we faulted in * copyin() or copyout() on a region backed by this vnode * while doing I/O to the vnode. If this is the case, don't * panic.. instead, return the error to the user. * * XXX this is a stopgap to prevent a panic. * Ideally, this kind of operation *should* work. */ result = 0; if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) result = vn_lock(vn, LK_EXCLUSIVE | LK_RETRY | LK_RECURSEFAIL); if (result == 0) { /* NOTE: vnode now locked! */ if (rw == UIO_READ) result = VOP_READ(vn, &uio, 0, curproc->p_ucred); else result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred); if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) VOP_UNLOCK(vn, 0); } /* NOTE: vnode now unlocked (unless vnislocked) */ UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0); /* * result == unix style errno (0 == OK!) * * zero out rest of buffer (if needed) */ if (result == 0) { got = wanted - uio.uio_resid; if (wanted && got == 0) { result = EIO; /* XXX: error? */ } else if (got < PAGE_SIZE * npages && rw == UIO_READ) { memset((void *) (kva + got), 0, (npages << PAGE_SHIFT) - got); } } /* * now remove pager mapping */ uvm_pagermapout(kva, npages); /* * now clean up the object (i.e. drop I/O count) */ simple_lock(&uvn->u_obj.vmobjlock); /* NOTE: object now locked! */ uvn->u_nio--; /* I/O DONE! */ if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) { wakeup(&uvn->u_nio); } simple_unlock(&uvn->u_obj.vmobjlock); /* NOTE: object now unlocked! */ /* * done! */ UVMHIST_LOG(maphist, "<- done (result %d)", result,0,0,0); if (result == 0) return(VM_PAGER_OK); else return(VM_PAGER_ERROR); } /* * uvm_vnp_uncache: disable "persisting" in a vnode... when last reference * is gone we will kill the object (flushing dirty pages back to the vnode * if needed). * * => returns TRUE if there was no uvm_object attached or if there was * one and we killed it [i.e. if there is no active uvn] * => called with the vnode VOP_LOCK'd [we will unlock it for I/O, if * needed] * * => XXX: given that we now kill uvn's when a vnode is recycled (without * having to hold a reference on the vnode) and given a working * uvm_vnp_sync(), how does that effect the need for this function? * [XXXCDC: seems like it can die?] * * => XXX: this function should DIE once we merge the VM and buffer * cache. * * research shows that this is called in the following places: * ext2fs_truncate, ffs_truncate, detrunc[msdosfs]: called when vnode * changes sizes * ext2fs_write, WRITE [ufs_readwrite], msdosfs_write: called when we * are written to * ex2fs_chmod, ufs_chmod: called if VTEXT vnode and the sticky bit * is off * ffs_realloccg: when we can't extend the current block and have * to allocate a new one we call this [XXX: why?] 
* nfsrv_rename, rename_files: called when the target filename is there * and we want to remove it * nfsrv_remove, sys_unlink: called on file we are removing * nfsrv_access: if VTEXT and we want WRITE access and we don't uncache * then return "text busy" * nfs_open: seems to uncache any file opened with nfs * vn_writechk: if VTEXT vnode and can't uncache return "text busy" */ boolean_t uvm_vnp_uncache(vp) struct vnode *vp; { struct uvm_vnode *uvn = &vp->v_uvm; /* * lock uvn part of the vnode and check to see if we need to do anything */ simple_lock(&uvn->u_obj.vmobjlock); if ((uvn->u_flags & UVM_VNODE_VALID) == 0 || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { simple_unlock(&uvn->u_obj.vmobjlock); return(TRUE); } /* * we have a valid, non-blocked uvn. clear persist flag. * if uvn is currently active we can return now. */ uvn->u_flags &= ~UVM_VNODE_CANPERSIST; if (uvn->u_obj.uo_refs) { simple_unlock(&uvn->u_obj.vmobjlock); return(FALSE); } /* * uvn is currently persisting! we have to gain a reference to * it so that we can call uvn_detach to kill the uvn. */ VREF(vp); /* seems ok, even with VOP_LOCK */ uvn->u_obj.uo_refs++; /* value is now 1 */ simple_unlock(&uvn->u_obj.vmobjlock); #ifdef DEBUG /* * carry over sanity check from old vnode pager: the vnode should * be VOP_LOCK'd, and we confirm it here. */ if (!VOP_ISLOCKED(vp)) { boolean_t is_ok_anyway = FALSE; #ifdef NFS extern int (**nfsv2_vnodeop_p) __P((void *)); extern int (**spec_nfsv2nodeop_p) __P((void *)); extern int (**fifo_nfsv2nodeop_p) __P((void *)); /* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */ if (vp->v_op == nfsv2_vnodeop_p || vp->v_op == spec_nfsv2nodeop_p) { is_ok_anyway = TRUE; } if (vp->v_op == fifo_nfsv2nodeop_p) { is_ok_anyway = TRUE; } #endif /* NFS */ if (!is_ok_anyway) panic("uvm_vnp_uncache: vnode not locked!"); } #endif /* DEBUG */ /* * now drop our reference to the vnode. if we have the sole * reference to the vnode then this will cause it to die [as we * just cleared the persist flag]. we have to unlock the vnode * while we are doing this as it may trigger I/O. * * XXX: it might be possible for uvn to get reclaimed while we are * unlocked causing us to return TRUE when we should not. we ignore * this as a false-positive return value doesn't hurt us. */ VOP_UNLOCK(vp, 0); uvn_detach(&uvn->u_obj); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* * and return... */ return(TRUE); d1024 5 d1031 2 a1032 1 * lock uvn and check for valid object, and if valid: do it! a1033 2 simple_lock(&uvn->u_obj.vmobjlock); if (uvn->u_flags & UVM_VNODE_VALID) { d1035 2 a1036 10 /* * now check if the size has changed: if we shrink we had better * toss some pages... */ if (uvn->u_size > newsize) { (void)uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE); } uvn->u_size = newsize; d1038 1 a1039 5 /* * done */ return; d1043 1 a1043 8 * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes. * * => called from sys_sync with no VM structures locked * => only one process can do a sync at a time (because the uvn * structure only has one queue for sync'ing). we ensure this * by holding the uvn_sync_lock while the sync is in progress. * other processes attempting a sync will sleep on this lock * until we are done. d1047 4 a1050 2 uvm_vnp_sync(mp) struct mount *mp; d1052 1 a1052 30 struct uvm_vnode *uvn; struct vnode *vp; boolean_t got_lock; /* * step 1: ensure we are only ones using the uvn_sync_q by locking * our lock... 
*/ lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0); /* * step 2: build up a simpleq of uvns of interest based on the * write list. we gain a reference to uvns of interest. must * be careful about locking uvn's since we will be holding uvn_wl_lock * in the body of the loop. */ SIMPLEQ_INIT(&uvn_sync_q); simple_lock(&uvn_wl_lock); for (uvn = uvn_wlist.lh_first ; uvn != NULL ; uvn = uvn->u_wlist.le_next) { vp = (struct vnode *) uvn; if (mp && vp->v_mount != mp) continue; /* attempt to gain reference */ while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) == FALSE && (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) /* spin */ ; d1054 14 a1067 74 /* * we will exit the loop if either if the following are true: * - we got the lock [always true if NCPU == 1] * - we failed to get the lock but noticed the vnode was * "blocked" -- in this case the vnode must be a dying * vnode, and since dying vnodes are in the process of * being flushed out, we can safely skip this one * * we want to skip over the vnode if we did not get the lock, * or if the vnode is already dying (due to the above logic). * * note that uvn must already be valid because we found it on * the wlist (this also means it can't be ALOCK'd). */ if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { if (got_lock) simple_unlock(&uvn->u_obj.vmobjlock); continue; /* skip it */ } /* * gain reference. watch out for persisting uvns (need to * regain vnode REF). */ if (uvn->u_obj.uo_refs == 0) VREF(vp); uvn->u_obj.uo_refs++; simple_unlock(&uvn->u_obj.vmobjlock); /* * got it! */ SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq); } simple_unlock(&uvn_wl_lock); /* * step 3: we now have a list of uvn's that may need cleaning. * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock * (so we can now safely lock uvn's again). */ for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) { simple_lock(&uvn->u_obj.vmobjlock); #ifdef DEBUG if (uvn->u_flags & UVM_VNODE_DYING) { printf("uvm_vnp_sync: dying vnode on sync list\n"); } #endif uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST); /* * if we have the only reference and we just cleaned the uvn, * then we can pull it out of the UVM_VNODE_WRITEABLE state * thus allowing us to avoid thinking about flushing it again * on later sync ops. */ if (uvn->u_obj.uo_refs == 1 && (uvn->u_flags & UVM_VNODE_WRITEABLE)) { LIST_REMOVE(uvn, u_wlist); uvn->u_flags &= ~UVM_VNODE_WRITEABLE; } simple_unlock(&uvn->u_obj.vmobjlock); /* now drop our reference to the uvn */ uvn_detach(&uvn->u_obj); } /* * done! release sync lock */ lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0); @ 1.35 log @remove include of @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.34 2000/06/26 14:21:19 mrg Exp $ */ a88 2 static int uvn_asyncget __P((struct uvm_object *, voff_t, int)); a116 1 uvn_asyncget, a119 1 NULL, /* AIO-DONE function (not until we have asyncio) */ a1543 22 } /* * uvn_asyncget: start async I/O to bring pages into ram * * => caller must lock object(???XXX: see if this is best) * => could be called from uvn_get or a madvise() fault-ahead. * => if it fails, it doesn't matter. */ static int uvn_asyncget(uobj, offset, npages) struct uvm_object *uobj; voff_t offset; int npages; { /* * XXXCDC: we can't do async I/O yet */ printf("uvn_asyncget called\n"); return (KERN_SUCCESS); @ 1.34 log @remove/move more mach vm header files: -> -> -> into -> nothing -> into also includes a bunch of include removals (due to redudancy with ), and a scattering of other similar headers. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.33 2000/05/19 03:45:05 thorpej Exp $ */ a65 2 #include @ 1.33 log @Tell uvm_pagermapin() the direction of the I/O so that it can map with only the protection that it needs. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.32 2000/04/03 07:35:24 chs Exp $ */ a67 2 #include #include @ 1.33.4.1 log @Apply patch (requested by chs): Make sure to initialize uio_procp in uvn_io(). Fixes kernel crash problem, reported in PR#14185. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.33 2000/05/19 03:45:05 thorpej Exp $ */ d1685 1 a1685 1 uio.uio_procp = curproc; @ 1.32 log @remove the "shareprot" pagerop. it's not needed anymore since share maps are long gone. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.31 2000/03/27 16:58:23 kleink Exp $ */ d1597 1 a1597 1 int waitf, result; d1641 5 a1645 2 kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT); if (kva == NULL && waitf == M_NOWAIT) { d1660 3 a1662 3 if (kva == NULL) { kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK); } @ 1.31 log @Kill duplicate uvn_attach() prototype (public, already in uvm_vnode.h). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.30 2000/03/26 20:54:47 kleink Exp $ */ a126 1 uvm_shareprot, /* !NULL: allow us in share maps */ @ 1.30 log @Merge parts of chs-ubc2 into the trunk: Add a new type voff_t (defined as a synonym for off_t) to describe offsets into uvm objects, and update the appropriate interfaces to use it, the most visible effect being the ability to mmap() file offsets beyond the range of a vaddr_t. Originally by Chuck Silvers; blame me for problems caused by merging this into non-UBC. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.29 2000/03/13 23:52:42 soren Exp $ */ a94 1 struct uvm_object *uvn_attach __P((void *, vm_prot_t)); @ 1.29 log @Fix doubled 'the's in comments. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.28 2000/01/28 08:02:48 chs Exp $ */ d93 1 a93 1 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, d96 2 a97 2 static void uvn_cluster __P((struct uvm_object *, vaddr_t, vaddr_t *, vaddr_t *)); d99 3 a101 3 static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, vaddr_t, int)); static int uvn_get __P((struct uvm_object *, vaddr_t, d190 2 a295 7 if (used_vnode_size > (vaddr_t) -PAGE_SIZE) { #ifdef DEBUG printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, (long long)used_vnode_size, -PAGE_SIZE); #endif used_vnode_size = (vaddr_t) -PAGE_SIZE; } d832 1 a832 1 vaddr_t start, stop; d839 2 a840 2 boolean_t retval, need_iosync, by_list, needs_clean; vaddr_t curoff; d852 1 a852 2 start = 0; stop = round_page(uvn->u_size); d862 1 d887 2 a888 1 if (pp->offset < start || pp->offset >= stop) d930 2 a931 1 if (pp->offset < start || pp->offset >= stop) { d1202 1 a1202 1 "offset=0x%lx. 
error " d1204 2 a1205 1 pp->uobject, pp->offset); d1260 2 a1261 2 vaddr_t offset; vaddr_t *loffset, *hoffset; /* OUT */ d1320 1 a1320 1 vaddr_t offset; d1326 1 a1326 1 vaddr_t current_offset; d1567 1 a1567 1 vaddr_t offset; d1597 4 a1600 2 vaddr_t kva, file_offset; int waitf, result, got, wanted; d1910 1 a1910 1 u_quad_t newsize; a1920 14 * make sure that the newsize fits within a vaddr_t * XXX: need to revise addressing data types */ if (newsize > (vaddr_t) -PAGE_SIZE) { #ifdef DEBUG printf("uvm_vnp_setsize: vn %p size truncated " "%qx->%lx\n", vp, (long long)newsize, (vaddr_t)-PAGE_SIZE); #endif newsize = (vaddr_t)-PAGE_SIZE; } /* d1926 1 a1926 1 (void)uvn_flush(&uvn->u_obj, (vaddr_t) newsize, d1929 1 a1929 1 uvn->u_size = (vaddr_t)newsize; @ 1.28 log @remove a debug printf that has outlived its usefulness. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.27 1999/10/19 16:04:45 chs Exp $ */ d558 1 a558 1 * protects us from that). the uvn can't in the the ALOCK state @ 1.27 log @put various debugging printfs under #ifdef DEBUG. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.26 1999/09/12 01:17:42 chs Exp $ */ a1635 3 #ifdef DEBUG printf("uvn_io: note: size check fired\n"); #endif @ 1.26 log @eliminate the PMAP_NEW option by making it required for all ports. ports which previously had no support for PMAP_NEW now implement the pmap_k* interfaces as wrappers around the non-k versions. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.25 1999/07/22 22:58:39 thorpej Exp $ */ d353 1 a353 1 #ifdef DIAGNOSTIC d359 1 a359 1 #ifdef DIAGNOSTIC d462 1 a462 1 #ifdef DIAGNOSTIC d608 1 a608 1 #ifdef DIAGNOSTIC d863 1 d867 1 a867 1 d1636 1 a1636 1 #ifdef DIAGNOSTIC d2046 1 a2046 1 #ifdef DIAGNOSTIC @ 1.26.6.1 log @Pull up to last week's -current. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.27 1999/10/19 16:04:45 chs Exp $ */ d353 1 a353 1 #ifdef DEBUG d359 1 a359 1 #ifdef DEBUG d462 1 a462 1 #ifdef DEBUG d608 1 a608 1 #ifdef DEBUG a862 1 #ifdef DEBUG d866 1 a866 1 #endif d1635 1 a1635 1 #ifdef DEBUG d2045 1 a2045 1 #ifdef DEBUG @ 1.26.4.1 log @Sync with -current @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.27 1999/10/19 16:04:45 chs Exp $ */ d353 1 a353 1 #ifdef DEBUG d359 1 a359 1 #ifdef DEBUG d462 1 a462 1 #ifdef DEBUG d608 1 a608 1 #ifdef DEBUG a862 1 #ifdef DEBUG d866 1 a866 1 #endif d1635 1 a1635 1 #ifdef DEBUG d2045 1 a2045 1 #ifdef DEBUG @ 1.26.2.1 log @Sync w/ trunk. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.27 1999/10/19 16:04:45 chs Exp $ */ d353 1 a353 1 #ifdef DEBUG d359 1 a359 1 #ifdef DEBUG d462 1 a462 1 #ifdef DEBUG d608 1 a608 1 #ifdef DEBUG a862 1 #ifdef DEBUG d866 1 a866 1 #endif d1635 1 a1635 1 #ifdef DEBUG d2045 1 a2045 1 #ifdef DEBUG @ 1.26.2.2 log @Update thorpej_scsipi to -current as of a month ago @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.35 2000/06/27 17:29:37 mrg Exp $ */ d67 4 d93 1 a93 1 static int uvn_asyncget __P((struct uvm_object *, voff_t, d95 3 a97 2 static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *, voff_t *)); d99 3 a101 3 static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t, int)); static int uvn_get __P((struct uvm_object *, voff_t, d128 1 a189 2 printf("uvn_attach: blocked at 0x%p flags 0x%x\n", uvn, uvn->u_flags); d294 7 d558 1 a558 1 * protects us from that). 
the uvn can't in the ALOCK state d837 1 a837 1 voff_t start, stop; d844 2 a845 2 boolean_t retval, need_iosync, by_list, needs_clean, all; voff_t curoff; d857 2 a858 1 all = TRUE; a867 1 all = FALSE; d892 1 a892 2 if (!all && (pp->offset < start || pp->offset >= stop)) d934 1 a934 2 if (!all && (pp->offset < start || pp->offset >= stop)) { d1205 1 a1205 1 "offset=0x%llx. error " d1207 1 a1207 2 pp->uobject, (long long)pp->offset); d1262 2 a1263 2 voff_t offset; voff_t *loffset, *hoffset; /* OUT */ d1322 1 a1322 1 voff_t offset; d1328 1 a1328 1 voff_t current_offset; d1569 1 a1569 1 voff_t offset; d1599 2 a1600 4 vaddr_t kva; off_t file_offset; int waitf, result, mapinflags; size_t got, wanted; d1636 3 d1646 2 a1647 5 mapinflags = (rw == UIO_READ) ? UVMPAGER_MAPIN_READ : UVMPAGER_MAPIN_WRITE; kva = uvm_pagermapin(pps, npages, NULL, mapinflags); if (kva == 0 && waitf == M_NOWAIT) { d1662 3 a1664 3 if (kva == 0) kva = uvm_pagermapin(pps, npages, NULL, mapinflags | UVMPAGER_MAPIN_WAITOK); d1913 1 a1913 1 voff_t newsize; d1924 14 d1943 1 a1943 1 (void)uvn_flush(&uvn->u_obj, newsize, d1946 1 a1946 1 uvn->u_size = newsize; @ 1.26.2.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ a49 1 #include "opt_ddb.h" a56 1 #include a63 2 #include #include d71 15 d89 18 a106 14 static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *, voff_t *)); static void uvn_detach __P((struct uvm_object *)); static int uvn_findpage __P((struct uvm_object *, voff_t, struct vm_page **, int)); static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t, int)); static int uvn_get __P((struct uvm_object *, voff_t, vm_page_t *, int *, int, vm_prot_t, int, int)); static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); static void uvn_reference __P((struct uvm_object *)); static boolean_t uvn_releasepg __P((struct vm_page *, struct vm_page **)); d113 1 a113 1 NULL, d116 1 a116 1 NULL, d119 1 d122 2 a123 1 uvm_mk_pcluster, d132 16 d170 1 a170 1 int result; d172 1 a172 1 voff_t used_vnode_size; d176 2 a177 1 used_vnode_size = (voff_t)0; d183 4 a186 2 while (uvn->u_flags & VXLOCK) { uvn->u_flags |= VXWANT; d198 1 a198 1 simple_unlock(&uvn->u_obj.vmobjlock); d203 32 a234 5 #ifdef DIAGNOSTIC if (vp->v_type != VREG) { panic("uvn_attach: vp %p not VREG", vp); } #endif d237 6 a242 2 * set up our idea of the size * if this hasn't been done already. 
d244 1 a244 3 if (uvn->u_size == VSIZENOTSET) { uvn->u_flags |= VXLOCK; d247 1 d261 2 a262 2 used_vnode_size = (voff_t)pi.disklab->d_secsize * (voff_t)pi.part->p_size; d271 1 a271 5 simple_lock(&uvn->u_obj.vmobjlock); if (uvn->u_flags & VXWANT) wakeup(uvn); uvn->u_flags &= ~(VXLOCK|VXWANT); d274 3 d281 20 d303 6 d311 6 a316 1 /* unlock and return */ d318 5 a322 3 UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 0, 0, 0); return (&uvn->u_obj); d342 17 a358 1 VREF((struct vnode *)uobj); d374 285 a658 1 vrele((struct vnode *)uobj); d671 1 a671 1 * => if (nextpgp != NULL) => we return the next page on the queue, and return d683 5 a687 1 KASSERT(pg->flags & PG_RELEASED); d695 1 a695 1 *nextpgp = TAILQ_NEXT(pg, pageq); d700 26 d829 1 a829 2 struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; struct vnode *vp = (struct vnode *)uobj; d831 1 a831 2 struct vm_page *pps[256], **ppsp; int s; d833 1 a833 1 boolean_t retval, need_iosync, by_list, needs_clean, all, wasclean; a836 11 UVMHIST_LOG(maphist, "uobj %p start 0x%x stop 0x%x flags 0x%x", uobj, start, stop, flags); KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE)); #ifdef DEBUG if (uvn->u_size == VSIZENOTSET) { printf("uvn_flush: size not set vp %p\n", uvn); vprint("uvn_flush VSIZENOTSET", vp); flags |= PGO_ALLPAGES; } #endif d838 1 a842 1 curoff = 0; d844 1 a844 2 retval = TRUE; wasclean = TRUE; d847 1 a847 1 by_list = TRUE; d852 3 a854 5 if (stop > round_page(uvn->u_size)) { printf("uvn_flush: oor vp %p start 0x%x stop 0x%x " "size 0x%x\n", uvn, (int)start, (int)stop, (int)round_page(uvn->u_size)); } d879 2 a880 1 TAILQ_FOREACH(pp, &uobj->memq, listq) { d904 1 a904 1 pp = TAILQ_FIRST(&uobj->memq); d910 3 a912 3 ppnext = NULL; ppsp = NULL; uvm_lock_pageq(); d916 2 a917 1 (!by_list && curoff < stop) ; pp = ppnext) { d919 5 d926 1 a926 1 ppnext = TAILQ_NEXT(pp, listq); d929 1 d931 5 d942 1 d958 2 a959 1 if ((flags & (PGO_CLEANIT|PGO_SYNCIO)) == a962 1 d974 2 a975 1 pp->flags |= PG_CLEANCHK; d983 1 d985 1 a985 1 ppnext = TAILQ_NEXT(pp, listq); d991 1 d1001 1 d1005 1 a1021 1 wasclean = FALSE; d1032 1 a1032 1 flags | PGO_DOACTCLUST, start, stop); d1055 1 a1055 2 /* d1082 1 a1082 2 if (result == VM_PAGER_PEND && (flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { d1084 16 a1099 11 /* * no per-page ops: refresh ppnext and continue */ if (by_list) { if (pp->version == pp_version) ppnext = TAILQ_NEXT(pp, listq); else ppnext = TAILQ_FIRST(&uobj->memq); } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); d1101 2 a1102 1 continue; d1129 1 a1129 1 ppnext = TAILQ_NEXT(pp, listq); d1131 2 a1132 2 ppnext = TAILQ_FIRST( &uobj->memq); d1135 1 a1135 2 ppnext = uvm_pagelookup(uobj, curoff); d1140 1 a1140 1 * verify the page wasn't moved while obj was d1154 1 a1154 1 if (ptmp->flags & PG_WANTED) { d1157 1 a1157 1 } d1161 2 d1164 1 a1164 4 if (!uvn_releasepg(ptmp, NULL)) { UVMHIST_LOG(maphist, "released %p", ptmp, 0,0,0); d1166 4 a1169 3 } uvm_lock_pageq(); continue; d1171 3 a1173 9 if ((flags & PGO_WEAK) == 0 && !(result == VM_PAGER_ERROR && curproc == uvm.pagedaemon_proc)) { ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); if ((flags & PGO_FREE) == 0) { pmap_clear_modify(ptmp); } } d1187 1 d1196 2 a1197 1 "offset=0x%llx. 
error %d\n", d1199 1 a1199 2 (long long)pp->offset, result); d1209 1 d1211 1 d1214 3 d1218 4 a1221 6 if ((flags & PGO_CLEANIT) && all && wasclean && LIST_FIRST(&vp->v_dirtyblkhd) == NULL && (vp->v_flag & VONWORKLST)) { vp->v_flag &= ~VONWORKLST; LIST_REMOVE(vp, v_synclist); } d1223 1 d1225 4 a1228 15 /* * XXX this doesn't use the new two-flag scheme, * but to use that, all i/o initiators will have to change. */ s = splbio(); while (vp->v_numoutput != 0) { UVMHIST_LOG(ubchist, "waiting for vp %p num %d", vp, vp->v_numoutput,0,0); vp->v_flag |= VBWAIT; UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); d1231 3 a1233 1 splx(s); d1257 2 a1258 1 struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; d1260 11 a1270 2 *loffset = offset; *hoffset = min(offset + MAXBSIZE, round_page(uvn->u_size)); d1276 1 d1280 2 d1290 1 a1290 2 struct vnode *vp = (struct vnode *)uobj; int error; d1292 5 a1296 2 error = VOP_PUTPAGES(vp, pps, npages, flags, NULL); return uvm_errno2vmerror(error); d1317 1 a1317 1 int centeridx; a1318 1 int advice, flags; d1320 228 a1547 8 struct vnode *vp = (struct vnode *)uobj; int error; UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0); error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, access_type, advice, flags); return uvm_errno2vmerror(error); a1549 1 d1551 5 a1555 4 * uvn_findpages: * return the page for the uobj and offset requested, allocating if needed. * => uobj must be locked. * => returned page will be BUSY. d1558 2 a1559 2 void uvn_findpages(uobj, offset, npagesp, pps, flags) d1562 1 a1562 3 int *npagesp; struct vm_page **pps; int flags; a1563 1 int i, rv, npages; d1565 5 a1569 6 rv = 0; npages = *npagesp; for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv += uvn_findpage(uobj, offset, &pps[i], flags); } *npagesp = rv; d1572 10 d1583 4 a1586 5 uvn_findpage(uobj, offset, pgp, flags) struct uvm_object *uobj; voff_t offset; struct vm_page **pgp; int flags; d1588 138 a1725 3 struct vm_page *pg; UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); d1727 5 a1731 37 if (*pgp != NULL) { UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); return 0; } for (;;) { /* look for an existing page */ pg = uvm_pagelookup(uobj, offset); /* nope? allocate one now */ if (pg == NULL) { if (flags & UFP_NOALLOC) { UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0); return 0; } if (uvmexp.vnodepages > (uvmexp.active + uvmexp.inactive + uvmexp.wired + uvmexp.free) * 7 / 8) { pg = NULL; } else { pg = uvm_pagealloc(uobj, offset, NULL, 0); } if (pg == NULL) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } simple_unlock(&uobj->vmobjlock); uvm_wait("uvn_fp1"); simple_lock(&uobj->vmobjlock); continue; } uvmexp.vnodepages++; UVMHIST_LOG(ubchist, "alloced",0,0,0,0); break; } else if (flags & UFP_NOCACHE) { UVMHIST_LOG(ubchist, "nocache",0,0,0,0); return 0; d1733 1 d1735 120 a1854 11 /* page is there, see if we need to wait on it */ if ((pg->flags & (PG_BUSY|PG_RELEASED)) != 0) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } pg->flags |= PG_WANTED; UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0, "uvn_fp2", 0); simple_lock(&uobj->vmobjlock); continue; d1856 2 a1857 5 /* skip PG_RDONLY pages if requested */ if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) { UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); return 0; d1859 5 d1865 19 a1883 8 /* mark the page BUSY and we're done. 
*/ pg->flags |= PG_BUSY; UVM_PAGE_OWN(pg, "uvn_findpage"); UVMHIST_LOG(ubchist, "found",0,0,0,0); break; } *pgp = pg; return 1; a1909 1 UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist); d1911 3 d1915 6 d1922 7 a1928 1 UVMHIST_LOG(ubchist, "old 0x%x new 0x%x", uvn->u_size, newsize, 0,0); d1931 1 a1931 2 * now check if the size has changed: if we shrink we had better * toss some pages... d1933 1 a1933 6 if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) { (void) uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE); } uvn->u_size = newsize; simple_unlock(&uvn->u_obj.vmobjlock); d1937 8 a1944 1 * uvm_vnp_zerorange: set a range of bytes in a file to zero. d1948 4 a1951 1 uvm_vnp_zerorange(vp, off, len) d1953 98 a2050 4 off_t off; size_t len; { void *win; d2052 4 a2055 14 /* * XXXUBC invent kzero() and use it */ while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); ubc_release(win, 0); off += bytelen; len -= bytelen; } @ 1.26.2.4 log @Sync with HEAD @ text @a564 1 (pp->flags & PG_BUSY) == 0 && a755 1 (pp->flags & PG_BUSY) == 0 && @ 1.26.2.5 log @Sync with head (for UBC+NFS fixes, mostly). @ text @d346 1 a346 2 * if (and only if) we need to clean a page (PGO_CLEANIT), or * if PGO_SYNCIO is set and there are pages busy. d348 6 a353 6 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O). * thus, a caller might want to unlock higher level resources * (e.g. vm_map) before calling flush. * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither * unlock the object nor block. * => if PGO_ALLPAGES is set, then all pages in the object are valid targets d532 2 a533 1 if (flags & PGO_SYNCIO) @ 1.26.2.6 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.26.2.5 2001/01/18 09:24:07 bouyer Exp $ */ a344 1 * => "stop == 0" means flush all pages at or after "start". a427 3 if (stop == 0) { stop = trunc_page(LLONG_MAX); } d438 7 d543 1 a543 2 /* XXX ACTIVE|INACTIVE test unnecessary? */ (pp->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) != 0) d567 1 a567 1 pmap_clear_reference(pp); d759 1 a759 1 pmap_clear_reference(ptmp); d1036 1 a1036 1 (void) uvn_flush(&uvn->u_obj, newsize, 0, PGO_FREE); @ 1.26.2.7 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.26.2.6 2001/02/11 19:17:51 bouyer Exp $ */ d253 2 d302 6 d309 1 d312 2 a313 2 * a buf on the "pending" list (protected by splbio()), starts the * i/o and returns 0. when the i/o is done, we expect d315 1 a315 1 * time). this function should remove the buf from the pending list d318 7 a324 1 * list and call the iodone hook for each done request (see uvm_pager.c). a409 1 boolean_t async = (flags & PGO_SYNCIO) == 0; a416 9 if (uobj->uo_npages == 0) { if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL && (vp->v_flag & VONWORKLST)) { vp->v_flag &= ~VONWORKLST; LIST_REMOVE(vp, v_synclist); } return TRUE; } d530 1 a530 1 if (!async) d595 1 d617 4 a620 4 * the cleaning operation is now done. finish up. note that * on error uvm_pager_put drops the cluster for us. * on success uvm_pager_put returns the cluster to us in * ppsp/npages. 
d652 1 a652 1 if (result == 0 && async && d709 1 a709 1 if (result == 0 && async && ptmp->uobject != uobj) d714 1 a714 1 * async I/O it is possible that the I/O op d719 1 a719 1 if (result != 0 || !async) { d738 1 a738 1 !(result == EIO && d761 1 a761 1 if (result == 0 && async) { d766 1 a766 1 if (result != 0) { d837 1 a837 1 *hoffset = MIN(offset + MAXBSIZE, round_page(uvn->u_size)); d858 1 a858 1 return error; d890 1 a890 1 return error; d944 7 a950 1 pg = uvm_pagealloc(uobj, offset, NULL, 0); d961 1 a961 5 if (UVM_OBJ_IS_VTEXT(uobj)) { uvmexp.vtextpages++; } else { uvmexp.vnodepages++; } a1021 1 voff_t pgend = round_page(newsize); d1033 2 a1034 2 if (uvn->u_size > pgend && uvn->u_size != VSIZENOTSET) { (void) uvn_flush(&uvn->u_obj, pgend, 0, PGO_FREE); @ 1.26.2.8 log @Make sure files that shouldn't change are identical to HEAD. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.48 2001/03/10 22:46:51 chs Exp $ */ d615 24 @ 1.25 log @Garbage collect thread_sleep()/thread_wakeup() left over from the old Mach VM code. Also nuke iprintf(), which was no longer used anywhere. Add proclist locking where appropriate. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.24 1999/07/22 21:27:32 thorpej Exp $ */ d703 1 a703 1 pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); d978 1 a978 1 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); d980 1 a980 1 pmap_is_modified(PMAP_PGARG(pp))) d1003 1 a1003 2 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); d1012 1 a1012 2 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); d1032 1 a1032 1 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ); d1181 1 a1181 2 pmap_clear_modify( PMAP_PGARG(ptmp)); d1192 1 a1192 2 pmap_page_protect(PMAP_PGARG(ptmp), VM_PROT_NONE); d1212 1 a1212 2 pmap_page_protect(PMAP_PGARG(ptmp), VM_PROT_NONE); d1544 1 a1544 1 pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */ @ 1.24 log @0 -> FALSE in a few places. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.23 1999/04/11 04:04:11 chs Exp $ */ d1166 1 a1166 1 thread_wakeup(ptmp); d1526 1 a1526 1 thread_wakeup(ptmp); @ 1.23 log @add a `flags' argument to uvm_pagealloc_strat(). define a flag UVM_PGA_USERESERVE to allow non-kernel object allocations to use pages from the reserve. use the new flag for allocations in pmap modules. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22 1999/03/25 18:48:56 mrg Exp $ */ d1481 1 a1481 1 &uobj->vmobjlock, 0, "uvn_get",0); @ 1.22 log @remove now >1 year old pre-release message. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.21 1999/03/25 00:20:35 sommerfe Exp $ */ d1458 1 a1458 1 NULL); /* alloc */ @ 1.22.2.1 log @pull up 1.22 -> 1.23: add a `flags' argument to uvm_pagealloc_strat(). define a flag UVM_PGA_USERESERVE to allow non-kernel object allocations to use pages from the reserve. use the new flag for allocations in pmap modules. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.23 1999/04/11 04:04:11 chs Exp $ */ d1458 1 a1458 1 NULL, 0); @ 1.22.2.2 log @Pull up revision 1.28 (via patch, requested by chs): Remove a debug printf that has outlived its usefulness. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1 1999/04/16 16:29:56 chs Exp $ */ d1640 3 @ 1.22.2.1.2.1 log @merge everything from chs-ubc branch. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1 1999/04/16 16:29:56 chs Exp $ */ a49 1 #include "opt_ddb.h" a56 1 #include a63 1 #include d93 1 a93 1 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, d95 17 a111 19 struct uvm_object * uvn_attach __P((void *, vm_prot_t)); static void uvn_cluster __P((struct uvm_object *, vaddr_t, vaddr_t *, vaddr_t *)); static void uvn_detach __P((struct uvm_object *)); static int uvn_findpage __P((struct uvm_object *, vaddr_t, struct vm_page **, int)); static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, vaddr_t, int)); static int uvn_get __P((struct uvm_object *, vaddr_t, vm_page_t *, int *, int, vm_prot_t, int, int)); static void uvn_init __P((void)); static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); static void uvn_reference __P((struct uvm_object *)); static boolean_t uvn_releasepg __P((struct vm_page *, struct vm_page **)); static void uvn_doasyncget __P((struct vm_page **, size_t, daddr_t)); d176 1 a176 1 int result; d178 1 a178 1 off_t used_vnode_size; d202 1 a202 1 simple_unlock(&uvn->u_obj.vmobjlock); d207 32 a238 14 /* check for new writeable uvn */ if ((accessprot & VM_PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { simple_lock(&uvn_wl_lock); LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); /* we are now on wlist! */ uvn->u_flags |= UVM_VNODE_WRITEABLE; } #ifdef DIAGNOSTIC if (vp->v_type != VREG) { panic("uvn_attach: vp %p not VREG", vp); } #endif d241 6 a246 2 * set up our idea of the size * if this hasn't been done already. a247 2 if (uvn->u_size == VSIZENOTSET) { d251 1 d274 11 d290 4 d302 10 a311 12 /* relock object */ simple_lock(&uvn->u_obj.vmobjlock); if (uvn->u_flags & UVM_VNODE_WANTED) wakeup(uvn); uvn->u_flags = 0; if (result != 0) { simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); return(NULL); } d314 6 d322 6 a327 1 /* unlock and return */ d329 5 a333 3 UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 0, 0, 0); return (&uvn->u_obj); d353 3 d358 12 a369 1 VREF((struct vnode *)uobj); d385 3 d389 117 a505 1 vrele((struct vnode *)uobj); d541 121 a661 5 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); uvn->u_flags &= ~(UVM_VNODE_WRITEABLE); simple_unlock(&uvn_wl_lock); d663 7 a710 4 #if 1 /* XXX I'm sure we need to do something here. */ uvn = uvn; #else a736 1 #endif a840 1 struct vnode *vp = (struct vnode *)uobj; a848 15 if (uvn->u_size == VSIZENOTSET) { #ifdef DEBUG void vp_name(void *); printf("uvn_flush: size not set vp %p\n", uvn); if ((flags & PGO_ALLPAGES) == 0) printf("... and PGO_ALLPAGES not set: " "start 0x%lx end 0x%lx flags 0x%x\n", start, stop, flags); vprint("uvn_flush VSIZENOTSET", vp); vp_name(uvn); #endif flags |= PGO_ALLPAGES; } d858 1 a858 1 stop = -1; d863 3 a865 3 if (stop > round_page(uvn->u_size)) { printf("uvn_flush: oor vp %p start 0x%x stop 0x%x size 0x%x\n", uvn, (int)start, (int)stop, (int)round_page(uvn->u_size)); } d889 3 a891 5 for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL ; pp = TAILQ_NEXT(pp, listq)) { if (pp->offset < start || (pp->offset >= stop && stop != -1)) d913 1 a913 1 pp = TAILQ_FIRST(&uobj->memq); d934 1 a934 1 ppnext = TAILQ_NEXT(pp, listq); d1042 1 a1042 1 flags | PGO_DOACTCLUST, start, stop); d1166 1 a1166 1 wakeup(ptmp); d1208 3 a1210 3 "offset=0x%lx. 
error %d\n", pp->uobject, pp->offset, result); d1235 1 d1237 4 a1240 11 /* * XXX this doesn't use the new two-flag scheme, * but to use that, all i/o initiators will have to change. */ while (vp->v_numoutput != 0) { vp->v_flag |= VBWAIT; UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); d1243 3 a1269 2 UVMHIST_FUNC("uvn_cluster"); UVMHIST_CALLED(ubchist); d1273 1 a1273 8 { /* XXX nfs writes cause trouble with this */ *loffset = *hoffset = offset; UVMHIST_LOG(ubchist, "uvn_cluster: offset out of range: vp %p loffset 0x%x", uobj, (int)*loffset, 0,0); Debugger(); return; } d1302 1 a1302 3 int retval, sync; sync = (flags & PGO_SYNCIO) ? 1 : 0; d1305 1 a1305 6 simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); /* XXX why would the VOP need it locked? */ /* currently, just to increment vp->v_numoutput (aka uvn->u_nio) */ simple_unlock(&uobj->vmobjlock); retval = VOP_PUTPAGES((struct vnode *)uobj, pps, npages, sync, &retval); a1306 1 simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED); d1332 17 a1348 2 struct vnode *vp = (struct vnode *)uobj; int error; d1350 39 a1388 7 simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, access_type, advice, flags); simple_lock_assert(&uobj->vmobjlock, flags & PGO_LOCKED ? SLOCK_LOCKED : SLOCK_UNLOCKED); return error ? VM_PAGER_ERROR : VM_PAGER_OK; } d1390 11 a1400 6 /* * uvn_findpages: * return the page for the uobj and offset requested, allocating if needed. * => uobj must be locked. * => returned page will be BUSY. */ d1402 4 a1405 9 void uvn_findpages(uobj, offset, npagesp, pps, flags) struct uvm_object *uobj; vaddr_t offset; int *npagesp; struct vm_page **pps; int flags; { int i, rv, npages; d1407 6 a1412 4 rv = 0; npages = *npagesp; for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv += uvn_findpage(uobj, offset, &pps[i], flags); a1413 2 *npagesp = rv; } d1415 8 d1424 8 a1431 10 static int uvn_findpage(uobj, offset, pps, flags) struct uvm_object *uobj; vaddr_t offset; struct vm_page **pps; int flags; { struct vm_page *ptmp; UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); d1433 15 a1447 1 simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); d1449 4 a1452 9 if (*pps == PGO_DONTCARE) { UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); return 0; } #ifdef DIAGNOTISTIC if (*pps != NULL) { panic("uvn_findpage: *pps not NULL"); } #endif d1454 1 a1454 11 for (;;) { /* look for an existing page */ ptmp = uvm_pagelookup(uobj, offset); /* nope? 
allocate one now */ if (ptmp == NULL) { if (flags & UFP_NOALLOC) { UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0); return 0; } ptmp = uvm_pagealloc(uobj, offset, NULL, 0); d1456 12 a1467 3 if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; d1469 13 a1481 2 simple_unlock(&uobj->vmobjlock); uvm_wait("uvn_fp1"); d1483 1 a1483 1 continue; d1485 10 a1494 5 UVMHIST_LOG(ubchist, "alloced",0,0,0,0); break; } else if (flags & UFP_NOCACHE) { UVMHIST_LOG(ubchist, "nocache",0,0,0,0); return 0; d1497 63 a1559 26 /* page is there, see if we need to wait on it */ if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } ptmp->flags |= PG_WANTED; UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0, "uvn_fp2",0); simple_lock(&uobj->vmobjlock); continue; } /* skip PG_RDONLY pages if requested */ if ((flags & UFP_NORDONLY) && (ptmp->flags & PG_RDONLY)) { UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); return 0; } /* BUSY the page and we're done. */ ptmp->flags |= PG_BUSY; UVM_PAGE_OWN(ptmp, "uvn_findpage"); UVMHIST_LOG(ubchist, "found",0,0,0,0); break; } *pps = ptmp; return 1; d1584 227 d1815 78 d1925 1 d1927 4 a1930 4 /* * make sure that the newsize fits within a vaddr_t * XXX: need to revise addressing data types */ d1932 1 a1932 1 if (newsize > (vaddr_t) -PAGE_SIZE) { d1934 3 a1936 3 printf("uvm_vnp_setsize: vn %p size truncated " "%qx->%lx\n", vp, (long long)newsize, (vaddr_t)-PAGE_SIZE); d1938 13 a1950 1 newsize = (vaddr_t)-PAGE_SIZE; d1952 1 d1955 1 a1955 2 * now check if the size has changed: if we shrink we had better * toss some pages... d1957 1 a1957 7 if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) { (void) uvn_flush(&uvn->u_obj, (vaddr_t)newsize, uvn->u_size, PGO_FREE); } uvn->u_size = (vaddr_t)newsize; simple_unlock(&uvn->u_obj.vmobjlock); d1993 2 a1994 2 for (uvn = LIST_FIRST(&uvn_wlist); uvn != NULL; uvn = LIST_NEXT(uvn, u_wlist)) { d2003 1 a2003 1 (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) d2026 8 a2033 1 vget(vp, LK_INTERLOCK); d2050 5 d2056 1 a2056 1 PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST); a2065 1 simple_lock(&uvn_wl_lock); a2067 1 simple_unlock(&uvn_wl_lock); a2079 185 } /* * uvm_vnp_zerorange: set a range of bytes in a file to zero. * this is called from fs-specific code when truncating a file * to zero the part of last block that is past the new end-of-file. */ void uvm_vnp_zerorange(vp, off, len) struct vnode *vp; off_t off; size_t len; { void *win; /* * XXX invent kzero() and use it */ while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); ubc_release(win, 0); off += bytelen; len -= bytelen; } } /* * uvn_doasyncget: start one readahead i/o. 
*/ static void uvn_doasyncget(pgs, bytes, blkno) struct vm_page **pgs; size_t bytes; daddr_t blkno; { struct uvm_aiobuf *abp; struct buf *bp; struct vnode *vp = (struct vnode *)pgs[0]->uobject; int pages = roundup(bytes, PAGE_SIZE) >> PAGE_SHIFT; UVMHIST_FUNC("uvn_doasyncget"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p offset 0x%x bytes 0x%x blkno 0x%x", vp, (int)pgs[0]->offset, (int)bytes, (int)blkno); abp = pool_get(uvm_aiobuf_pool, PR_WAITOK); abp->aio.aiodone = uvm_aio_aiodone; abp->aio.kva = uvm_pagermapin(pgs, pages, NULL, M_WAITOK); abp->aio.npages = pages; abp->aio.pd_ptr = abp; bp = &abp->buf; bzero(bp, sizeof *bp); bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC; bp->b_iodone = uvm_aio_biodone; bp->b_lblkno = 0; bp->b_blkno = blkno; bp->b_bufsize = pages << PAGE_SHIFT; bp->b_bcount = bytes; bp->b_vp = vp; bp->b_data = (void *)abp->aio.kva; VOP_STRATEGY(bp); } #define MAXRAPAGES 16 /* * asynchronously create pages for a vnode and read their data. */ void uvm_vnp_asyncget(vp, off, len, bsize) struct vnode *vp; off_t off; size_t len; size_t bsize; { off_t filesize = vp->v_uvm.u_size; struct vm_page *pgs[MAXRAPAGES]; struct uvm_object *uobj = &vp->v_uvm.u_obj; daddr_t lbn, blkno; int i, npages, npgs, startidx, run, bytes, startpage, endpage; int count; UVMHIST_FUNC("uvn_asyncget"); UVMHIST_CALLED(ubchist); if (off != trunc_page(off)) { panic("off 0x%x not page-aligned", (int)off); } UVMHIST_LOG(ubchist, "asyncget off 0x%x len 0x%x", (int)off, (int)len,0,0); count = round_page(len) >> PAGE_SHIFT; while (count > 0) { if (off >= filesize) { return; } lbn = off / bsize; if (VOP_BMAP(vp, lbn, NULL, &blkno, &run) != 0) { return; } UVMHIST_LOG(ubchist, "bmap lbn 0x%x bn 0x%x", (int)lbn, (int)blkno,0,0); /* don't do readahead past file holes... */ if (blkno == (daddr_t)-1) { return; } startpage = off >> PAGE_SHIFT; endpage = min(roundup(off + 1 + run * bsize, bsize), round_page(filesize)) >> PAGE_SHIFT; npages = min(endpage - startpage, min(count, MAXRAPAGES)); UVMHIST_LOG(ubchist, "off 0x%x run 0x%x " "startpage %d endpage %d", (int)off, run, startpage, endpage); UVMHIST_LOG(ubchist, "runend 0x%x fileend 0x%x sum 0x%x", (int)roundup(off + 1 + run * bsize, bsize), (int)round_page(filesize), (int)(off + 1 + run * bsize), 0); if (npages == 0) { return; } memset(pgs, 0, npages * sizeof(pgs[0])); simple_lock(&uobj->vmobjlock); npgs = npages; uvn_findpages(uobj, off, &npgs, pgs, UFP_NOWAIT | UFP_NOCACHE); simple_unlock(&uobj->vmobjlock); blkno += (off - lbn * bsize) >> DEV_BSHIFT; /* * activate any pages we just allocated. */ for (i = 0; i < npages; i++) { if (pgs[i] == NULL) { continue; } uvm_pageactivate(pgs[i]); } /* * start i/os on the pages. */ for (i = 0; i < npages; i++) { for (startidx = i; i < npages; i++) { if (pgs[i] == NULL) { break; } } if (i > startidx) { bytes = min((i - startidx) << PAGE_SHIFT, filesize - pgs[startidx]->offset); bytes = roundup(bytes, DEV_BSIZE); UVMHIST_LOG(ubchist, "bytes i %d startidx %d " "filesize 0x%x pgoff 0x%x", i, startidx, (int)filesize, (int)pgs[startidx]->offset); uvn_doasyncget(&pgs[startidx], bytes, blkno + startidx * (PAGE_SIZE >> DEV_BSHIFT)); } } off += npages << PAGE_SHIFT; count -= npages; return; } @ 1.22.2.1.2.2 log @remove UVM_VNODE_* flags in favor of V* vnode flags. rewrite uvm_vnp_zerorange(). it's still wrong, but it's closer. update stuff to use buf instead of uvm_aiobuf. uvm_vnp_asyncget() can now determine the blocksize from the vnode rather than needing it to be passed in. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.1 1999/06/07 04:25:38 chs Exp $ */ a66 1 #include d194 2 a195 2 while (uvn->u_flags & VXLOCK) { uvn->u_flags |= VXWANT; d214 1 a214 1 (uvn->u_flags & VDIRTY) == 0) { d219 1 a219 1 uvn->u_flags |= VDIRTY; d233 1 a233 1 uvn->u_flags = VXLOCK; d274 1 a274 1 if (uvn->u_flags & VXWANT) d366 1 a366 1 if (uvn->u_flags & VDIRTY) { d369 1 a369 1 uvn->u_flags &= ~(VDIRTY); d425 1 a425 1 if (uvn->u_flags & VDIRTY) { d434 1 a434 1 if (uvn->u_flags & VXWANT) a550 1 int s; d587 1 a587 3 printf("uvn_flush: oor vp %p start 0x%x stop 0x%x " "size 0x%x\n", uvn, (int)start, (int)stop, (int)round_page(uvn->u_size)); a966 1 s = splbio(); a973 1 splx(s); d997 2 a998 1 struct uvm_vnode *uvn = (struct uvm_vnode *)uobj; d1001 19 a1019 1 *hoffset = min(offset + MAXBSIZE, round_page(uvn->u_size)); d1029 2 d1043 1 d1045 3 d1050 1 a1078 2 UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0); d1322 1 a1322 1 (uvn->u_flags & VXLOCK) == 0) d1339 1 a1339 1 if (!got_lock || (uvn->u_flags & VXLOCK) != 0) { d1367 1 a1367 1 * then we can pull it out of the VDIRTY state d1371 2 a1372 1 if (uvn->u_obj.uo_refs == 1 && (uvn->u_flags & VDIRTY)) { d1375 1 a1375 1 uvn->u_flags &= ~VDIRTY; d1394 2 a1396 1 d1403 16 a1418 17 struct uvm_object *uobj = &vp->v_uvm.u_obj; off_t pagestart = trunc_page(off); off_t pageend = round_page(off + len); int npages = (pageend - pagestart) >> PAGE_SHIFT; struct vm_page *pgs[npages]; char *cp; memset(pgs, 0, sizeof(pgs)); simple_lock(&uobj->vmobjlock); uvn_findpages(uobj, (vaddr_t)pagestart, &npages, pgs, 0); simple_unlock(&uobj->vmobjlock); cp = (char *)uvm_pagermapin(pgs, npages, M_WAITOK); memset(cp + (off - pagestart), 0, len); uvm_pagermapout((vaddr_t)cp, npages); simple_lock(&uobj->vmobjlock); uvm_pager_dropcluster(uobj, NULL, pgs, &npages, 0, 0); simple_unlock(&uobj->vmobjlock); d1431 1 a1434 1 int s; d1440 8 a1447 4 s = splbio(); bp = pool_get(&bufpool, PR_WAITOK); splx(s); bp->b_data = (void *)uvm_pagermapin(pgs, pages, M_WAITOK); d1455 1 a1455 1 UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0); d1467 1 a1467 1 uvm_vnp_asyncget(vp, off, len) d1471 1 a1476 2 int bshift = vp->v_mount->mnt_fs_bshift; int dev_bshift = vp->v_mount->mnt_dev_bshift; d1494 1 a1494 1 lbn = off >> bshift; d1508 1 a1508 1 endpage = min(roundup(off + 1 + (run << bshift), 1 << bshift), d1516 1 a1516 2 (int)roundup(off + 1 + (run << bshift), (1 << bshift)), d1518 1 a1518 1 (int)(off + 1 + (run << bshift)), 0); d1531 1 a1531 1 blkno += (off - (lbn << bshift)) >> dev_bshift; d1557 1 a1557 1 bytes = roundup(bytes, 1 << dev_bshift); d1565 2 a1566 2 blkno + startidx * (PAGE_SIZE >> dev_bshift)); a1571 2 /* XXX for now, don't loop */ @ 1.22.2.1.2.3 log @remove uvm_vnp_uncache(), it's not needed anymore. use uvm_errno2vmerror(). put uvm_vnp_zerorange() back the way it was before, it was right the first time. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.2 1999/07/04 02:08:14 chs Exp $ */ d1012 1 d1024 1 a1024 2 struct vnode *vp = (struct vnode *)uobj; int error, sync; d1030 1 a1030 1 error = VOP_PUTPAGES(vp, pps, npages, sync, NULL); d1033 1 a1033 1 return uvm_errno2vmerror(error); d1054 1 a1054 1 int centeridx; a1055 1 int advice, flags; d1067 1 a1067 1 return uvm_errno2vmerror(error); d1197 7 d1383 17 a1399 16 void *win; /* * XXX invent kzero() and use it */ while (len) { vsize_t bytelen = len; win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); ubc_release(win, 0); off += bytelen; len -= bytelen; } @ 1.22.2.1.2.4 log @in uvn_findpage(), ignore any offsets where the return page pointer is non-NULL. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.3 1999/07/11 05:47:13 chs Exp $ */ a972 3 UVMHIST_LOG(ubchist, "waiting for vp %p num %d", vp, vp->v_numoutput,0,0); d1109 2 a1110 1 if (*pps != NULL) { d1114 6 @ 1.22.2.1.2.5 log @Update from trunk. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.4 1999/07/31 19:04:49 chs Exp $ */ d1031 1 d1034 1 d1066 1 d1069 2 d1111 1 @ 1.22.2.1.2.6 log @clean up some leftovers. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.5 1999/08/02 23:39:29 thorpej Exp $ */ a216 1 uvn->u_flags |= VDIRTY; d219 2 d234 1 a234 1 uvn->u_flags |= VXLOCK; d277 1 a277 1 uvn->u_flags &= ~(VXLOCK|VXWANT); d397 1 d414 31 @ 1.22.2.1.2.7 log @create a new type "voff_t" for uvm_object offsets and define it to be "off_t". also, remove pgo_asyncget(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.6 1999/08/06 12:47:28 chs Exp $ */ d97 5 a101 2 static void uvn_cluster __P((struct uvm_object *, voff_t, voff_t *, voff_t *)); d103 1 a103 1 static int uvn_findpage __P((struct uvm_object *, voff_t, d105 5 a109 4 static boolean_t uvn_flush __P((struct uvm_object *, voff_t, voff_t, int)); static int uvn_get __P((struct uvm_object *, voff_t, vm_page_t *, int *, int, vm_prot_t, int, int)); d111 2 a112 2 static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); d130 1 d258 13 d512 1 a512 1 voff_t start, stop; d522 1 a522 1 voff_t curoff; d533 2 a534 2 "start 0x%llx end 0x%llx flags 0x%x\n", (long long)start, (long long)stop, flags); d904 2 a905 3 "offset=0x%llx. error %d\n", pp->uobject, (long long)pp->offset, d970 2 a971 2 voff_t offset; voff_t *loffset, *hoffset; /* OUT */ d1019 1 a1019 1 voff_t offset; d1046 1 a1046 1 voff_t offset; d1065 1 a1065 1 voff_t offset; d1134 22 d1176 1 a1176 1 voff_t newsize; d1186 14 d1205 2 a1206 1 (void) uvn_flush(&uvn->u_obj, newsize, uvn->u_size, PGO_FREE); d1208 1 a1208 1 uvn->u_size = newsize; @ 1.22.2.1.2.8 log @fix uvn_flush() to work now that vnode offsets are signed. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.7 1999/08/09 00:05:56 chs Exp $ */ d503 1 a503 1 boolean_t retval, need_iosync, by_list, needs_clean, all; d531 2 a532 1 all = TRUE; a542 1 all = FALSE; d568 2 a569 2 if (!all && (pp->offset < start || pp->offset >= stop)) d611 1 a611 2 if (!all && (pp->offset < start || pp->offset >= stop)) { @ 1.22.2.1.2.9 log @Rudimentary support for LFS under UBC: - LFS-specific VOP_BALLOC and VOP_PUTPAGES vnode ops. - getblk VREG panic #ifdef'd out (can be reinstated when Ifile is internalized and Ifile can be made another type from VREG) - interface to VOP_PUTPAGES changed to pass all pager flags, not just sync. FS putpages routines must know about the pager flags. 
- new LFS magic disk address, -2 ("unwritten"), meaning accounted for but not assigned to a fixed disk location (since LFS does these two things separately, and the previous accounting method using buffer headers no longer will work). Changed references to (foo == (daddr_t)-1) to (foo < 0). Since disk drivers reject all addresses < 0, this should not present a problem for other FSs. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.22.2.1.2.8 1999/08/11 05:40:13 chs Exp $ */ d978 3 a980 1 int error; d983 1 a983 1 error = VOP_PUTPAGES(vp, pps, npages, flags, NULL); @ 1.21 log @Prevent deadlock cited in PR4629 from crashing the system. (copyout and system call now just return EFAULT). A complete fix will presumably have to wait for UBC and/or for vnode locking protocols to be revamped to allow use of shared locks. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.20 1999/03/24 03:45:28 cgd Exp $ */ a2 4 /* * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE! * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<< */ @ 1.20 log @after discussion with chuck, nuke pgo_attach from uvm_pagerops @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.19 1999/03/04 06:48:54 chs Exp $ */ d1703 10 d1714 4 a1717 2 vn_lock(vn, LK_EXCLUSIVE | LK_RETRY); /* NOTE: vnode now locked! */ d1719 4 a1722 4 if (rw == UIO_READ) result = VOP_READ(vn, &uio, 0, curproc->p_ucred); else result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred); d1724 4 a1727 2 if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) VOP_UNLOCK(vn, 0); @ 1.19 log @fix printf arg types. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.18 1999/01/29 12:56:17 bouyer Exp $ */ a122 1 uvn_attach, @ 1.18 log @A small typo fix, + enclose "used_vnode_size = %qu" debug printf inside #ifdef DEBUG/#endif @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17 1998/11/04 06:21:40 chs Exp $ */ d297 1 a297 1 printf("used_vnode_size = %qu\n", used_vnode_size); d302 1 a302 1 used_vnode_size, -PAGE_SIZE); d1926 2 a1927 1 "%qx->%lx\n", vp, newsize, (vaddr_t)-PAGE_SIZE); @ 1.17 log @we must unlock a vp's object's lock before calling vrele(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.16 1998/10/18 23:50:01 chs Exp $ */ d204 1 a204 1 * if we're maping a BLK device, make sure it is a disk. d295 4 a298 1 if (vp->v_type == VBLK) printf("used_vnode_size = %qu\n", used_vnode_size); @ 1.17.2.1 log @initial snapshot. lots left to do. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17 1998/11/04 06:21:40 chs Exp $ */ a60 1 #include d109 2 d183 1 a183 1 off_t used_vnode_size; d204 1 a204 1 * if we're mapping a BLK device, make sure it is a disk. d207 1 a207 1 simple_unlock(&uvn->u_obj.vmobjlock); a211 92 #ifdef UBC oldflags = 0; #ifdef DIAGNOSTIC if (vp->v_type != VREG) { panic("uvn_attach: vp %p not VREG", vp); } #endif /* * set up our idea of the size * if this hasn't been done already. */ if (uvn->u_size == VSIZENOTSET) { uvn->u_flags = UVM_VNODE_ALOCK; simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */ /* XXX: curproc? */ if (vp->v_type == VBLK) { /* * We could implement this as a specfs getattr call, but: * * (1) VOP_GETATTR() would get the file system * vnode operation, not the specfs operation. * * (2) All we want is the size, anyhow. 
*/ result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&pi, FREAD, curproc); if (result == 0) { /* XXX should remember blocksize */ used_vnode_size = (u_quad_t)pi.disklab->d_secsize * (u_quad_t)pi.part->p_size; } } else { result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); if (result == 0) used_vnode_size = vattr.va_size; } /* * make sure that the newsize fits within a vaddr_t * XXX: need to revise addressing data types */ if (used_vnode_size > (vaddr_t) -PAGE_SIZE) { #ifdef DEBUG printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, used_vnode_size, -PAGE_SIZE); #endif used_vnode_size = (vaddr_t) -PAGE_SIZE; } /* relock object */ simple_lock(&uvn->u_obj.vmobjlock); if (uvn->u_flags & UVM_VNODE_WANTED) wakeup(uvn); uvn->u_flags = 0; if (result != 0) { simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); return(NULL); } uvn->u_size = used_vnode_size; } /* check for new writeable uvn */ if ((accessprot & VM_PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { simple_lock(&uvn_wl_lock); if (uvn->u_wlist.le_next != NULL) { printf("already on wlist vp %p\n", uvn); Debugger(); } LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); /* we are now on wlist! */ uvn->u_flags |= UVM_VNODE_WRITEABLE; } /* unlock and return */ simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 0, 0, 0); return (&uvn->u_obj); #else a335 1 #endif a354 2 #ifdef UBC #else a357 1 #endif a359 3 #ifdef UBC VREF((struct vnode *)uobj); #else a371 1 #endif a386 2 #ifdef UBC #else a389 1 #endif a391 3 #ifdef UBC vrele((struct vnode *)uobj); #else a485 1 XXXwlist a507 1 #endif a542 9 #ifdef UBC if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); uvn->u_wlist.le_next = NULL; uvn->u_flags &= ~(UVM_VNODE_WRITEABLE); simple_unlock(&uvn_wl_lock); } #else d671 1 a671 1 #endif a712 4 #ifdef UBC /* XXX I'm sure we need to do something here. */ uvn = uvn; #else a738 1 #endif a850 21 #ifdef UBC if (uvn->u_size == VSIZENOTSET) { void vp_name(void *); printf("uvn_flush: size not set vp %p\n", uvn); if ((flags & PGO_ALLPAGES) == 0) printf("... and PGO_ALLPAGES not set: " "start 0x%lx end 0x%lx flags 0x%x\n", start, stop, flags); vp_name(uvn); flags |= PGO_ALLPAGES; } #if 0 /* XXX unfortunately this is legitimate */ if (flags & PGO_FREE && uobj->uo_refs) { printf("uvn_flush: PGO_FREE on ref'd vp %p\n", uobj); Debugger(); } #endif #endif a859 3 #ifdef UBC stop = -1; #else a860 1 #endif d865 3 a867 4 if (stop > round_page(uvn->u_size)) { printf("uvn_flush: out of range flush (fixed)\n"); printf(" vp %p stop 0x%x\n", uvn, (int)stop); } d891 3 a893 5 for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL ; pp = TAILQ_NEXT(pp, listq)) { if (pp->offset < start || (pp->offset >= stop && stop != -1)) d915 1 a915 1 pp = TAILQ_FIRST(&uobj->memq); d936 1 a936 1 ppnext = TAILQ_NEXT(pp, listq); d1210 3 a1212 3 "offset=0x%lx. error %d\n", pp->uobject, pp->offset, result); a1235 6 #ifdef UBC /* * XXX currently not needed since all i/o is sync. * merge this with VBWAIT. 
*/ #else a1248 1 #endif d1275 1 a1275 12 #ifdef UBC { /* XXX nfs writes cause trouble with this */ *loffset = *hoffset = offset; printf("uvn_cluster: offset out of range: vp %p loffset 0x%x\n", uobj, (int) *loffset); return; } #else panic("uvn_cluster: offset out of range: vp %p loffset 0x%x", uobj, (int) *loffset); #endif d1307 1 a1307 2 simple_lock_assert(&uobj->vmobjlock, 1); retval = VOP_PUTPAGES((struct vnode *)uobj, pps, npages, 1, &retval); a1308 1 simple_lock_assert(&uobj->vmobjlock, 0); d1334 17 a1350 2 struct vnode *vp = (struct vnode *)uobj; int error; d1352 1 a1352 4 simple_lock_assert(&uobj->vmobjlock, 1); error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, access_type, advice, flags); simple_lock_assert(&uobj->vmobjlock, flags & PGO_LOCKED ? 1 : 0); d1354 10 a1363 2 return error ? VM_PAGER_ERROR : VM_PAGER_OK; } d1365 3 a1367 6 /* * uvn_findpage: * return the page for the uobj and offset requested, allocating if needed. * => uobj must be locked. * => returned page will be BUSY. */ d1369 2 a1370 8 void uvn_findpage(uobj, offset, pps) struct uvm_object *uobj; vaddr_t offset; struct vm_page **pps; { struct vm_page *ptmp; UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(maphist); d1372 6 a1377 11 for (;;) { /* look for a current page */ ptmp = uvm_pagelookup(uobj, offset); /* nope? allocate one now */ if (ptmp == NULL) { ptmp = uvm_pagealloc(uobj, offset, NULL); if (ptmp == NULL) { simple_unlock(&uobj->vmobjlock); uvm_wait("uvn_fp1"); simple_lock(&uobj->vmobjlock); d1382 2 a1383 1 * XXX for now, always zero new pages. d1385 40 a1424 1 pmap_zero_page(VM_PAGE_TO_PHYS(ptmp)); d1426 71 a1496 1 break; d1499 38 a1536 7 /* page is there, see if we need to wait on it */ if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { ptmp->flags |= PG_WANTED; UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0, "uvn_fp2",0); simple_lock(&uobj->vmobjlock); continue; d1538 24 a1561 7 /* BUSY the page and we're done. */ ptmp->flags |= PG_BUSY; UVM_PAGE_OWN(ptmp, "uvn_findpage"); break; } *pps = ptmp; d1587 177 a1802 2 #ifdef UBC #else d1880 1 a1880 1 #endif a1912 2 #ifdef UBC #else d1914 1 a1914 1 #endif a1932 4 #ifdef UBC if (uvn->u_size > newsize && uvn->u_size != VSIZENOTSET) { #else /* d1934 2 a1935 4 */ #endif (void)uvn_flush(&uvn->u_obj, (vaddr_t)newsize, uvn->u_size, PGO_FREE); a1936 2 #ifdef DEBUGxx printf("uvm_vnp_setsize: vp %p newsize 0x%x\n", vp, (int)newsize); a1937 3 #endif #ifdef UBC #else a1938 1 #endif d1940 5 d1990 1 a1990 1 (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) a2016 3 #ifdef UBC /* XXX should be using a vref-like function here */ #else a2018 1 #endif a2036 2 #ifdef UBC #else a2041 1 #endif a2066 83 } /* * uvm_vnp_relocate: update pages' blknos */ int uvm_vnp_relocate(vp, off, len, blkno) struct vnode *vp; vaddr_t off; vsize_t len; daddr_t blkno; { int npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; struct vm_page *pgs[npages], *pg; int i, rv; printf("relocate: vp %p off 0x%lx npages 0x%x blkno 0x%x\n", vp, off, npages, blkno); #ifdef DIAGNOSTIC if (off & (PAGE_SIZE - 1)) { panic("uvm_vnp_relocate: vp %p bad off 0x%lx", vp, off); } #endif /* * get all the pages in the range, change their blknos. * XXX access_type? advice? 
*/ bzero(pgs, sizeof pgs); again: simple_lock(&vp->v_uvm.u_obj.vmobjlock); rv = (vp->v_uvm.u_obj.pgops->pgo_get)(&vp->v_uvm.u_obj, off, pgs, &npages, 0, 0, 0, PGO_ALLPAGES); switch (rv) { case VM_PAGER_OK: break; #ifdef DIAGNOSTIC case VM_PAGER_PEND: panic("ubc_fault: pgo_get got PENDing on non-async I/O"); #endif case VM_PAGER_AGAIN: tsleep(&lbolt, PVM, "uvn_relocate", 0); goto again; default: return rv; } for (i = 0; i < npages; i++) { pg = pgs[i]; #ifdef DIAGNOSTIC if (pg == NULL) { panic("uvm_vnp_relocate: NULL pg"); } #endif pg->blkno = blkno; blkno += PAGE_SIZE >> DEV_BSHIFT; if (pg->flags & PG_WANTED) { wakeup(pg); } #ifdef DIAGNOSTIC if (pg->flags & PG_RELEASED) { panic("uvm_vnp_relocate: " "pgo_get gave us a RELEASED page"); } #endif pg->flags &= ~PG_BUSY; UVM_PAGE_OWN(pg, NULL); } return 0; @ 1.17.2.2 log @uvn_put() now unlocks the uobj before calling VOP_PUTPAGES(). move the important line of uvm_vnp_setsize() outside the debug ifdef. adjust other debugging code. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.1 1998/11/09 06:06:40 chs Exp $ */ a52 1 #include "opt_uvm.h" a78 4 #ifdef UBC UVMHIST_DECL(ubchist); #endif d1012 2 a1013 1 printf("uvn_flush: oor vp %p start 0x%x stop 0x%x size 0x%x\n", uvn, (int)start, (int)stop, (int)round_page(uvn->u_size)); a1427 2 UVMHIST_FUNC("uvn_cluster"); UVMHIST_CALLED(ubchist); d1435 2 a1436 3 UVMHIST_LOG(ubchist, "uvn_cluster: offset out of range: vp %p loffset 0x%x", uobj, (int)*loffset, 0,0); Debugger(); a1474 3 /* XXX why would the VOP need it locked? */ simple_unlock(&uobj->vmobjlock); d1777 1 a1778 1 uvn->u_size = (vaddr_t)newsize; @ 1.17.2.3 log @delete non-UBC parts of uvn_attach(), uvn_reference(), uvn_detach(), uvm_vnp_terminate(), uvm_vnp_uncache(). add uvn_findpages(), for looking-up/allocating multiple pages. allow async vnode pageouts. lock the writeable list when remove vnodes from it too. rename uvm_vnp_relocate() to uvm_vnp_setpageblknos() and expand its functionality to optionally zero the pages. add uvm_vnp_zerorange(), incomplete but does enough for the moment. use LIST_* macros and SLOCK_{,UN}LOCKED. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.2 1998/11/16 08:29:12 chs Exp $ */ d80 4 d103 1 a103 1 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, d105 15 a119 17 struct uvm_object * uvn_attach __P((void *, vm_prot_t)); static void uvn_cluster __P((struct uvm_object *, vaddr_t, vaddr_t *, vaddr_t *)); static void uvn_detach __P((struct uvm_object *)); static int uvn_findpage __P((struct uvm_object *, vaddr_t, struct vm_page **, int)); static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, vaddr_t, int)); static int uvn_get __P((struct uvm_object *, vaddr_t, vm_page_t *, int *, int, vm_prot_t, int, int)); static void uvn_init __P((void)); static int uvn_put __P((struct uvm_object *, vm_page_t *, int, boolean_t)); static void uvn_reference __P((struct uvm_object *)); static boolean_t uvn_releasepg __P((struct vm_page *, struct vm_page **)); d216 1 d219 1 d286 128 a413 3 /* check for new writeable uvn */ if ((accessprot & VM_PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { a415 1 uvn->u_flags |= UVM_VNODE_WRITEABLE; d417 1 a417 1 /* we are now on wlist! 
*/ d420 6 a425 1 /* unlock and return */ d427 6 a432 3 UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 0, 0, 0); return (&uvn->u_obj); d452 6 d460 1 d462 14 d491 6 d499 1 d501 119 d655 1 a655 1 d659 1 d663 130 d1197 1 a1197 1 flags | PGO_DOACTCLUST, start, stop); d1321 1 a1321 1 wakeup(ptmp); d1478 1 a1478 3 int retval, sync; sync = (flags & PGO_SYNCIO) ? 1 : 0; d1481 1 a1481 1 simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); a1483 1 /* currently, just to increment vp->v_numoutput (aka uvn->u_nio) */ d1485 1 a1485 1 retval = VOP_PUTPAGES((struct vnode *)uobj, pps, npages, sync, &retval); d1487 1 a1487 1 simple_lock_assert(&uobj->vmobjlock, SLOCK_UNLOCKED); d1516 1 a1516 1 simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); d1519 1 a1519 2 simple_lock_assert(&uobj->vmobjlock, flags & PGO_LOCKED ? SLOCK_LOCKED : SLOCK_UNLOCKED); d1525 1 a1525 1 * uvn_findpages: d1532 1 a1532 20 uvn_findpages(uobj, offset, npagesp, pps, flags) struct uvm_object *uobj; vaddr_t offset; int *npagesp; struct vm_page **pps; int flags; { int i, rv, npages; rv = 0; npages = *npagesp; for (i = 0; i < npages; i++, offset += PAGE_SIZE) { rv += uvn_findpage(uobj, offset, &pps[i], flags); } *npagesp = rv; } static int uvn_findpage(uobj, offset, pps, flags) a1535 1 int flags; d1538 1 a1538 14 UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); simple_lock_assert(&uobj->vmobjlock, SLOCK_LOCKED); if (*pps == PGO_DONTCARE) { UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); return 0; } #ifdef DIAGNOTISTIC if (*pps != NULL) { panic("uvn_findpage: *pps not NULL"); } #endif d1541 1 a1541 1 /* look for an existing page */ a1545 4 if (flags & UFP_NOALLOC) { UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0); return 0; } a1547 4 if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } d1553 6 a1558 1 UVMHIST_LOG(ubchist, "alloced",0,0,0,0); a1559 3 } else if (flags & UFP_NOCACHE) { UVMHIST_LOG(ubchist, "nocache",0,0,0,0); return 0; a1563 4 if (flags & UFP_NOWAIT) { UVMHIST_LOG(ubchist, "nowait",0,0,0,0); return 0; } a1573 1 UVMHIST_LOG(ubchist, "found",0,0,0,0); a1576 1 return 1; d1641 80 d1829 2 a1830 2 for (uvn = LIST_FIRST(&uvn_wlist); uvn != NULL; uvn = LIST_NEXT(uvn, u_wlist)) { a1908 1 simple_lock(&uvn_wl_lock); a1910 1 simple_unlock(&uvn_wl_lock); d1927 1 a1927 6 * uvm_vnp_setpageblknos: find pages and set their blknos. * this is used for two purposes: updating blknos in existing pages * when the data is relocated on disk, and preallocating pages when * those pages are about to be completely overwritten. * * => vp's uobj should not be locked, and is returned not locked. d1930 2 a1931 2 void uvm_vnp_setpageblknos(vp, off, len, blkno, ufp_flags, zero) d1933 2 a1934 1 off_t off, len; a1935 2 int ufp_flags; boolean_t zero; a1936 1 int i; d1938 40 a1977 2 struct vm_page *pgs[npages]; struct uvm_object *uobj = &vp->v_uvm.u_obj; a1978 3 memset(pgs, 0, npages); simple_lock(&uobj->vmobjlock); uvn_findpages(uobj, trunc_page(off), &npages, pgs, ufp_flags); d1980 5 a1984 2 if (pgs[i] == NULL) { continue; d1986 3 a1988 1 pgs[i]->blkno = blkno; d1990 9 a1998 2 if (zero) { uvm_pagezero(pgs[i]); d2000 3 a2003 22 uvm_pager_dropcluster(uobj, NULL, pgs, &npages, PGO_PDFREECLUST, 0); simple_unlock(&uobj->vmobjlock); } /* * uvm_vnp_zerorange: set a range of bytes in a file to zero. * this is called from fs-specific code when truncating a file * to zero the part of last block that is past the new end-of-file. 
*/ void uvm_vnp_zerorange(vp, off, len) struct vnode *vp; off_t off; size_t len; { void *win; /* * XXX deal with multiple windows * XXX invent kzero() and use it */ d2005 1 a2005 3 win = ubc_alloc(&vp->v_uvm.u_obj, off, len, UBC_WRITE); memset(win + (off & (MAXBSIZE - 1)), 0, len); ubc_release(win, 0); @ 1.17.2.4 log @fix vnode reference-counting in uvm_vnp_sync(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.3 1999/02/25 04:44:38 chs Exp $ */ d577 1 a577 1 if ((flags & PGO_FREE) && uobj->uo_refs) { d1422 1 a1422 1 vget(vp, LK_INTERLOCK); d1426 1 a1428 1 #endif @ 1.17.2.5 log @temporarily make uvm_vnp_sync() use sync io. make uvm_vnp_zerorange() deal with ranges larger than 1 ubc window. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.4 1999/04/09 04:48:42 chs Exp $ */ a566 1 #ifdef DEBUG a572 1 #endif a1452 5 /* * XXX use PGO_SYNCIO for now to avoid problems with * uvmexp.paging. */ d1454 1 a1454 1 PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST|PGO_SYNCIO); d1537 1 d1541 3 a1543 8 while (len) { int byteoff = off & (MAXBSIZE - 1); int bytelen = min(len, MAXBSIZE - byteoff); win = ubc_alloc(&vp->v_uvm.u_obj, off, bytelen, UBC_WRITE); memset(win + (off & (MAXBSIZE - 1)), 0, bytelen); ubc_release(win, 0); } @ 1.17.2.6 log @fix uvn_flush() to actually wait for sync i/os. fix uvm_vnp_setpageblknos() to deal with big ranges. fix uvm_vnp_zerorange() to not be just totally wrong. also, use the new ubc_alloc() interface. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.5 1999/04/29 05:36:41 chs Exp $ */ a554 1 struct vnode *vp = (struct vnode *)uobj; d977 6 a985 14 #ifdef UBC /* * XXX this doesn't use the new two-flag scheme, * but to use that, all i/o initiators will have to change. */ while (vp->v_numoutput != 0) { vp->v_flag |= VBWAIT; UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); simple_lock(&uvn->u_obj.vmobjlock); } #else d995 1 a996 1 } d1509 1 a1509 1 struct vm_page *pgs[16]; d1512 1 d1514 9 a1522 14 while (npages > 0) { int pages = min(npages, 16); memset(pgs, 0, pages); uvn_findpages(uobj, trunc_page(off), &pages, pgs, ufp_flags); for (i = 0; i < pages; i++) { if (pgs[i] == NULL) { continue; } pgs[i]->blkno = blkno; blkno += PAGE_SIZE >> DEV_BSHIFT; if (zero) { uvm_pagezero(pgs[i]); } a1523 5 uvm_pager_dropcluster(uobj, NULL, pgs, &pages, PGO_PDFREECLUST, 0); off += pages << PAGE_SHIFT; npages -= pages; d1525 1 d1548 2 a1549 1 vsize_t bytelen = len; d1551 2 a1552 2 win = ubc_alloc(&vp->v_uvm.u_obj, off, &bytelen, UBC_WRITE); memset(win, 0, bytelen); a1553 1 len -= bytelen; @ 1.17.2.7 log @add uvm_vnp_asyncget() and uvn_doasyncget() for doing readahead. in uvm_vnp_sync(), use an async uvn_flush() again. remove uvm_vnp_setpageblknos(). @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.6 1999/04/30 04:29:15 chs Exp $ */ a69 1 #include a117 2 static void uvn_doasyncget __P((struct vm_page **, size_t, daddr_t)); a573 1 vprint("uvn_flush VSIZENOTSET", vp); d1464 5 d1470 1 a1470 1 PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST); d1500 48 a1569 2 off += bytelen; a1570 155 } } /* * uvn_doasyncget: start one readahead i/o. 
*/ static void uvn_doasyncget(pgs, bytes, blkno) struct vm_page **pgs; size_t bytes; daddr_t blkno; { struct uvm_aiobuf *abp; struct buf *bp; struct vnode *vp = (struct vnode *)pgs[0]->uobject; int pages = roundup(bytes, PAGE_SIZE) >> PAGE_SHIFT; UVMHIST_FUNC("uvn_doasyncget"); UVMHIST_CALLED(ubchist); UVMHIST_LOG(ubchist, "vp %p offset 0x%x bytes 0x%x blkno 0x%x", vp, (int)pgs[0]->offset, (int)bytes, (int)blkno); abp = pool_get(uvm_aiobuf_pool, PR_WAITOK); abp->aio.aiodone = uvm_aio_aiodone; abp->aio.kva = uvm_pagermapin(pgs, pages, NULL, M_WAITOK); abp->aio.npages = pages; abp->aio.pd_ptr = abp; bp = &abp->buf; bzero(bp, sizeof *bp); bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC; bp->b_iodone = uvm_aio_biodone; bp->b_lblkno = 0; bp->b_blkno = blkno; bp->b_bufsize = pages << PAGE_SHIFT; bp->b_bcount = bytes; bp->b_vp = vp; bp->b_data = (void *)abp->aio.kva; VOP_STRATEGY(bp); } #define MAXRAPAGES 16 /* * asynchronously create pages for a vnode and read their data. */ void uvm_vnp_asyncget(vp, off, len, bsize) struct vnode *vp; off_t off; size_t len; size_t bsize; { off_t filesize = vp->v_uvm.u_size; struct vm_page *pgs[MAXRAPAGES]; struct uvm_object *uobj = &vp->v_uvm.u_obj; daddr_t lbn, blkno; int i, npages, npgs, startidx, run, bytes, startpage, endpage; int count; UVMHIST_FUNC("uvn_asyncget"); UVMHIST_CALLED(ubchist); if (off != trunc_page(off)) { panic("off 0x%x not page-aligned", (int)off); } UVMHIST_LOG(ubchist, "asyncget off 0x%x len 0x%x", (int)off, (int)len,0,0); count = round_page(len) >> PAGE_SHIFT; while (count > 0) { if (off >= filesize) { return; } lbn = off / bsize; if (VOP_BMAP(vp, lbn, NULL, &blkno, &run) != 0) { return; } UVMHIST_LOG(ubchist, "bmap lbn 0x%x bn 0x%x", (int)lbn, (int)blkno,0,0); /* don't do readahead past file holes... */ if (blkno == (daddr_t)-1) { return; } startpage = off >> PAGE_SHIFT; endpage = min(roundup(off + 1 + run * bsize, bsize), round_page(filesize)) >> PAGE_SHIFT; npages = min(endpage - startpage, min(count, MAXRAPAGES)); UVMHIST_LOG(ubchist, "off 0x%x run 0x%x " "startpage %d endpage %d", (int)off, run, startpage, endpage); UVMHIST_LOG(ubchist, "runend 0x%x fileend 0x%x sum 0x%x", (int)roundup(off + 1 + run * bsize, bsize), (int)round_page(filesize), (int)(off + 1 + run * bsize), 0); if (npages == 0) { return; } memset(pgs, 0, npages * sizeof(pgs[0])); simple_lock(&uobj->vmobjlock); npgs = npages; uvn_findpages(uobj, off, &npgs, pgs, UFP_NOWAIT | UFP_NOCACHE); simple_unlock(&uobj->vmobjlock); blkno += (off - lbn * bsize) >> DEV_BSHIFT; /* * activate any pages we just allocated. */ for (i = 0; i < npages; i++) { if (pgs[i] == NULL) { continue; } uvm_pageactivate(pgs[i]); } /* * start i/os on the pages. */ for (i = 0; i < npages; i++) { for (startidx = i; i < npages; i++) { if (pgs[i] == NULL) { break; } } if (i > startidx) { bytes = min((i - startidx) << PAGE_SHIFT, filesize - pgs[startidx]->offset); bytes = roundup(bytes, DEV_BSIZE); UVMHIST_LOG(ubchist, "bytes i %d startidx %d " "filesize 0x%x pgoff 0x%x", i, startidx, (int)filesize, (int)pgs[startidx]->offset); uvn_doasyncget(&pgs[startidx], bytes, blkno + startidx * (PAGE_SIZE >> DEV_BSHIFT)); } } off += npages << PAGE_SHIFT; count -= npages; return; @ 1.17.2.8 log @implement UFP_NORDONLY. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.17.2.7 1999/05/30 15:41:44 chs Exp $ */ a1218 6 /* skip PG_RDONLY pages if requested */ if ((flags & UFP_NORDONLY) && (ptmp->flags & PG_RDONLY)) { UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); return 0; } @ 1.16 log @shift by PAGE_SHIFT instead of multiplying or dividing by PAGE_SIZE. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.15 1998/08/13 02:11:04 eeh Exp $ */ d423 1 a424 1 simple_unlock(&uobj->vmobjlock); @ 1.15 log @Merge paddr_t changes into the main branch. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.14 1998/08/09 22:36:39 perry Exp $ */ d844 1 a844 1 struct vm_page *pps[MAXBSIZE/PAGE_SIZE], **ppsp; d870 1 a870 1 ((stop - start) / PAGE_SIZE) * UVN_HASH_PENALTY); d1683 1 a1683 1 wanted = npages * PAGE_SIZE; d1728 2 a1729 1 memset((void *) (kva + got), 0, (PAGE_SIZE * npages) - got); @ 1.14 log @bzero->memset, bcopy->memcpy, bcmp->memcmp @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.13 1998/07/07 23:22:13 thorpej Exp $ */ d97 1 a97 1 static int uvn_asyncget __P((struct uvm_object *, vm_offset_t, d100 2 a101 2 static void uvn_cluster __P((struct uvm_object *, vm_offset_t, vm_offset_t *, vm_offset_t *)); d103 3 a105 3 static boolean_t uvn_flush __P((struct uvm_object *, vm_offset_t, vm_offset_t, int)); static int uvn_get __P((struct uvm_object *, vm_offset_t, d292 1 a292 1 * make sure that the newsize fits within a vm_offset_t d296 1 a296 1 if (used_vnode_size > (vm_offset_t) -PAGE_SIZE) { d301 1 a301 1 used_vnode_size = (vm_offset_t) -PAGE_SIZE; d839 1 a839 1 vm_offset_t start, stop; d847 1 a847 1 vm_offset_t curoff; d1268 2 a1269 2 vm_offset_t offset; vm_offset_t *loffset, *hoffset; /* OUT */ d1328 1 a1328 1 vm_offset_t offset; d1334 1 a1334 1 vm_offset_t current_offset; d1575 1 a1575 1 vm_offset_t offset; d1605 1 a1605 1 vm_offset_t kva, file_offset; d1915 1 a1915 1 * make sure that the newsize fits within a vm_offset_t d1919 1 a1919 1 if (newsize > (vm_offset_t) -PAGE_SIZE) { d1922 1 a1922 1 "%qx->%lx\n", vp, newsize, (vm_offset_t)-PAGE_SIZE); d1924 1 a1924 1 newsize = (vm_offset_t)-PAGE_SIZE; d1933 1 a1933 1 (void)uvn_flush(&uvn->u_obj, (vm_offset_t) newsize, d1936 1 a1936 1 uvn->u_size = (vm_offset_t)newsize; @ 1.13 log @Add support for mmap'ing disk block devices. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.12 1998/06/24 20:58:49 sommerfe Exp $ */ d1728 1 a1728 1 bzero((void *) (kva + got), (PAGE_SIZE * npages) - got); @ 1.13.2.1 log @Split vm_offset_t and vm_size_t into paddr_t, psize_t, vaddr_t, and vsize_t. 
@ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.13 1998/07/07 23:22:13 thorpej Exp $ */ d97 1 a97 1 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, d100 2 a101 2 static void uvn_cluster __P((struct uvm_object *, vaddr_t, vaddr_t *, vaddr_t *)); d103 3 a105 3 static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, vaddr_t, int)); static int uvn_get __P((struct uvm_object *, vaddr_t, d292 1 a292 1 * make sure that the newsize fits within a vaddr_t d296 1 a296 1 if (used_vnode_size > (vaddr_t) -PAGE_SIZE) { d301 1 a301 1 used_vnode_size = (vaddr_t) -PAGE_SIZE; d839 1 a839 1 vaddr_t start, stop; d847 1 a847 1 vaddr_t curoff; d1268 2 a1269 2 vaddr_t offset; vaddr_t *loffset, *hoffset; /* OUT */ d1328 1 a1328 1 vaddr_t offset; d1334 1 a1334 1 vaddr_t current_offset; d1575 1 a1575 1 vaddr_t offset; d1605 1 a1605 1 vaddr_t kva, file_offset; d1915 1 a1915 1 * make sure that the newsize fits within a vaddr_t d1919 1 a1919 1 if (newsize > (vaddr_t) -PAGE_SIZE) { d1922 1 a1922 1 "%qx->%lx\n", vp, newsize, (vaddr_t)-PAGE_SIZE); d1924 1 a1924 1 newsize = (vaddr_t)-PAGE_SIZE; d1933 1 a1933 1 (void)uvn_flush(&uvn->u_obj, (vaddr_t) newsize, d1936 1 a1936 1 uvn->u_size = (vaddr_t)newsize; @ 1.12 log @Always include fifos; "not an option any more". @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.11 1998/06/22 22:01:12 sommerfe Exp $ */ d64 6 d182 1 d188 2 d204 9 a255 1 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); d257 20 a276 11 /* * make sure that the newsize fits within a vm_offset_t * XXX: need to revise addressing data types */ used_vnode_size = vattr.va_size; if (used_vnode_size > (vm_offset_t) -PAGE_SIZE) { #ifdef DEBUG printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, used_vnode_size, -PAGE_SIZE); #endif used_vnode_size = (vm_offset_t) -PAGE_SIZE; d290 14 a303 1 @ 1.11 log @defopt for options FIFO @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.10 1998/05/05 20:51:07 kleink Exp $ */ a53 1 #include "opt_fifo.h" a1806 1 #ifdef FIFO a1807 1 #endif /* FIFO */ a1813 1 #ifdef FIFO a1816 1 #endif /* FIFO */ @ 1.10 log @Remove inclusions of syscall (and syscall argument) related header files; we don't need them here. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.9 1998/03/11 01:37:40 chuck Exp $ */ d54 1 @ 1.9 log @bug fix: when doing uvm_vnp_sync() actually skip over blocked uvn's so that we don't try and sync them later. should get rid of the "uvm_vnp_sync: dying vnode on sync list" related warnings that were occuring during a "make install." @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.8 1998/03/09 00:58:59 mrg Exp $ */ a60 1 #include a67 2 #include @ 1.8 log @KNF. @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.7 1998/03/01 02:25:29 fvdl Exp $ */ d1956 2 a1957 1 FALSE && (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) d1961 9 a1969 5 * we will exit the loop if we were unable to get the lock and * we detected that the vnode was "blocked" ... if it is * blocked then it must be a dying vnode. since dying vnodes * are in the process of being flushed out we can safely skip * it. 
d1974 5 a1978 2 if (!got_lock) continue; @ 1.7 log @Merge with Lite2 + local changes @ text @d1 1 a1 1 /* $NetBSD: uvm_vnode.c,v 1.6 1998/02/19 00:55:04 thorpej Exp $ */ d119 14 a132 14 uvn_init, uvn_attach, uvn_reference, uvn_detach, NULL, /* no specialized fault routine required */ uvn_flush, uvn_get, uvn_asyncget, uvn_put, uvn_cluster, uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */ uvm_shareprot, /* !NULL: allow us in share maps */ NULL, /* AIO-DONE function (not until we have asyncio) */ uvn_releasepg, d145 3 a147 1 static void uvn_init() d149 4 a152 5 { LIST_INIT(&uvn_wlist); simple_lock_init(&uvn_wl_lock); /* note: uvn_sync_q init'd in uvm_vnp_sync() */ lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0); d170 26 a195 1 struct uvm_object *uvn_attach(arg, accessprot) d197 7 a203 2 void *arg; vm_prot_t accessprot; d205 44 a248 71 { struct vnode *vp = arg; struct uvm_vnode *uvn = &vp->v_uvm; struct vattr vattr; int oldflags, result; u_quad_t used_vnode_size; UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0); /* * first get a lock on the uvn. */ simple_lock(&uvn->u_obj.vmobjlock); while (uvn->u_flags & UVM_VNODE_BLOCKED) { uvn->u_flags |= UVM_VNODE_WANTED; UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0); UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE, "uvn_attach",0); simple_lock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist," WOKE UP",0,0,0,0); } /* * now we have lock and uvn must not be in a blocked state. * first check to see if it is already active, in which case * we can bump the reference count, check to see if we need to * add it to the writeable list, and then return. */ if (uvn->u_flags & UVM_VNODE_VALID) { /* already active? */ /* regain VREF if we were persisting */ if (uvn->u_obj.uo_refs == 0) { VREF(vp); UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)", 0,0,0,0); } uvn->u_obj.uo_refs++; /* bump uvn ref! */ /* check for new writeable uvn */ if ((accessprot & VM_PROT_WRITE) != 0 && (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { simple_lock(&uvn_wl_lock); LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are now on wlist! */ } /* unlock and return */ simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs,0,0,0); return(&uvn->u_obj); } /* * need to call VOP_GETATTR() to get the attributes, but that could * block (due to I/O), so we want to unlock the object before calling. * however, we want to keep anyone else from playing with the object * while it is unlocked. to do this we set UVM_VNODE_ALOCK which * prevents anyone from attaching to the vnode until we are done with * it. */ uvn->u_flags = UVM_VNODE_ALOCK; simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */ /* XXX: curproc? 
*/ result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); /* * make sure that the newsize fits within a vm_offset_t * XXX: need to revise addressing data types */ used_vnode_size = vattr.va_size; if (used_vnode_size > (vm_offset_t) -PAGE_SIZE) { d250 2 a251 2 printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, used_vnode_size, -PAGE_SIZE); d253 2 a254 2 used_vnode_size = (vm_offset_t) -PAGE_SIZE; } d256 2 a257 2 /* relock object */ simple_lock(&uvn->u_obj.vmobjlock); d259 38 a296 38 if (result != 0) { if (uvn->u_flags & UVM_VNODE_WANTED) wakeup(uvn); uvn->u_flags = 0; simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); return(NULL); } /* * now set up the uvn. */ uvn->u_obj.pgops = &uvm_vnodeops; TAILQ_INIT(&uvn->u_obj.memq); uvn->u_obj.uo_npages = 0; uvn->u_obj.uo_refs = 1; /* just us... */ oldflags = uvn->u_flags; uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST; uvn->u_nio = 0; uvn->u_size = used_vnode_size; /* if write access, we need to add it to the wlist */ if (accessprot & VM_PROT_WRITE) { simple_lock(&uvn_wl_lock); LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); simple_unlock(&uvn_wl_lock); uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! */ } /* * add a reference to the vnode. this reference will stay as long * as there is a valid mapping of the vnode. dropped when the reference * count goes to zero [and we either free or persist]. */ VREF(vp); simple_unlock(&uvn->u_obj.vmobjlock); if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); d298 2 a299 2 UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0); return(&uvn->u_obj); d315 3 a317 4 static void uvn_reference(uobj) struct uvm_object *uobj; d320 1 a320 1 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; d322 1 a322 1 UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist); d324 1 a324 1 simple_lock(&uobj->vmobjlock); d326 5 a330 4 if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { printf("uvn_reference: ref=%d, flags=0x%x\n", uvn->u_flags, uobj->uo_refs); panic("uvn_reference: invalid state"); } d332 2 a333 2 uobj->uo_refs++; UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)", d335 1 a335 1 simple_unlock(&uobj->vmobjlock); d347 18 d366 44 a409 1 static void uvn_detach(uobj) d411 1 a411 1 struct uvm_object *uobj; d413 13 a425 75 { struct uvm_vnode *uvn; struct vnode *vp; int oldflags; UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist); simple_lock(&uobj->vmobjlock); UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0); uobj->uo_refs--; /* drop ref! */ if (uobj->uo_refs) { /* still more refs */ simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0); return; } /* * get other pointers ... */ uvn = (struct uvm_vnode *) uobj; vp = (struct vnode *) uobj; /* * clear VTEXT flag now that there are no mappings left (VTEXT is used * to keep an active text file from being overwritten). */ vp->v_flag &= ~VTEXT; /* * we just dropped the last reference to the uvn. see if we can * let it "stick around". */ if (uvn->u_flags & UVM_VNODE_CANPERSIST) { uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES); /* won't block */ vrele(vp); /* drop vnode reference */ simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done/vrele! (persist)", 0,0,0,0); return; } /* * its a goner! */ UVMHIST_LOG(maphist," its a goner (flushing)!", 0,0,0,0); uvn->u_flags |= UVM_VNODE_DYING; /* * even though we may unlock in flush, no one can gain a reference * to us until we clear the "dying" flag [because it blocks * attaches]. 
we will not do that until after we've disposed of all * the pages with uvn_flush(). note that before the flush the only * pages that could be marked PG_BUSY are ones that are in async * pageout by the daemon. (there can't be any pending "get"'s * because there are no references to the object). */ (void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); UVMHIST_LOG(maphist," its a goner (done flush)!", 0,0,0,0); /* * given the structure of this pager, the above flush request will * create the following state: all the pages that were in the object * have either been free'd or they are marked PG_BUSY|PG_RELEASED. * the PG_BUSY bit was set either by us or the daemon for async I/O. * in either case, if we have pages left we can't kill the object * yet because i/o is pending. in this case we set the "relkill" * flag which will cause pgo_releasepg to kill the object once all * the I/O's are done [pgo_releasepg will be called from the aiodone * routine or from the page daemon]. */ d427 1 a427 1 if (uobj->uo_npages) { /* I/O pending. iodone will free */ d429 6 a434 6 /* * XXXCDC: very unlikely to happen until we have async i/o so print * a little info message in case it does. */ printf("uvn_detach: vn %p has pages left after flush - relkill mode\n", uobj); d436 16 a451 15 uvn->u_flags |= UVM_VNODE_RELKILL; simple_unlock(&uobj->vmobjlock); UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0,0,0,0); return; } /* * kill object now. note that we can't be on the sync q because * all references are gone. */ if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); /* protect uvn_wlist */ LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } d453 3 a455 2 if (uobj->memq.tqh_first != NULL) panic("uvn_deref: vnode VM object still has pages afer syncio/free flush"); d457 13 a469 13 oldflags = uvn->u_flags; uvn->u_flags = 0; simple_unlock(&uobj->vmobjlock); /* wake up any sleepers */ if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); /* * drop our reference to the vnode. */ vrele(vp); UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0); d471 1 a471 1 return; d502 7 a508 1 void uvm_vnp_terminate(vp) d510 11 a520 1 struct vnode *vp; d522 6 a527 23 { struct uvm_vnode *uvn = &vp->v_uvm; int oldflags; UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist); /* * lock object and check if it is valid */ simple_lock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, " vp=0x%x, ref=%d, flag=0x%x", vp, uvn->u_obj.uo_refs, uvn->u_flags, 0); if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0); return; } /* * must be a valid uvn that is not already dying (because XLOCK * protects us from that). the uvn can't in the the ALOCK state * because it is valid, and uvn's that are in the ALOCK state haven't * been marked valid yet. */ d530 7 a536 7 /* * debug check: are we yanking the vnode out from under our uvn? */ if (uvn->u_obj.uo_refs) { printf("uvm_vnp_terminate(%p): terminating active vnode (refs=%d)\n", uvn, uvn->u_obj.uo_refs); } d538 34 a571 34 /* * it is possible that the uvn was detached and is in the relkill * state [i.e. waiting for async i/o to finish so that releasepg can * kill object]. we take over the vnode now and cancel the relkill. * we want to know when the i/o is done so we can recycle right * away. note that a uvn can only be in the RELKILL state if it * has a zero reference count. 
*/ if (uvn->u_flags & UVM_VNODE_RELKILL) uvn->u_flags &= ~UVM_VNODE_RELKILL; /* cancel RELKILL */ /* * block the uvn by setting the dying flag, and then flush the * pages. (note that flush may unlock object while doing I/O, but * it will re-lock it before it returns control here). * * also, note that we tell I/O that we are already VOP_LOCK'd so * that uvn_io doesn't attempt to VOP_LOCK again. * * XXXCDC: setting VNISLOCKED on an active uvn which is being terminated * due to a forceful unmount might not be a good idea. maybe we need * a way to pass in this info to uvn_flush through a pager-defined * PGO_ constant [currently there are none]. */ uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED; (void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); /* * as we just did a flush we expect all the pages to be gone or in * the process of going. sleep to wait for the rest to go [via iosync]. */ d573 1 a573 1 while (uvn->u_obj.uo_npages) { d575 13 a587 13 struct vm_page *pp; for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) { if ((pp->flags & PG_BUSY) == 0) panic("uvm_vnp_terminate: detected unbusy page"); } if (uvn->u_nio == 0) panic("uvm_vnp_terminate: no I/O to wait for?"); printf("uvm_vnp_terminate: waiting for I/O to fin.\n"); /* * XXXCDC: this is unlikely to happen without async i/o so we * put a printf in just to keep an eye on it. */ d589 12 a600 20 uvn->u_flags |= UVM_VNODE_IOSYNC; UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, "uvn_term",0); simple_lock(&uvn->u_obj.vmobjlock); } /* * done. now we free the uvn if its reference count is zero * (true if we are zapping a persisting uvn). however, if we are * terminating a uvn with active mappings we let it live ... future * calls down to the vnode layer will fail. */ oldflags = uvn->u_flags; if (uvn->u_obj.uo_refs) { /* * uvn must live on it is dead-vnode state until all references * are gone. restore flags. clear CANPERSIST state. */ d602 9 a610 1 uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED| d612 2 a613 2 } else { d615 14 a628 14 /* * free the uvn now. note that the VREF reference is already gone * [it is dropped when we enter the persist state]. 
*/ if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) panic("uvm_vnp_terminate: io sync wanted bit set"); if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } uvn->u_flags = 0; /* uvn is history, clear all bits */ } d630 2 a631 2 if (oldflags & UVM_VNODE_WANTED) wakeup(uvn); /* object lock still held */ d633 2 a634 2 simple_unlock(&uvn->u_obj.vmobjlock); UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0); d655 4 a658 5 boolean_t uvn_releasepg(pg, nextpgp) struct vm_page *pg; struct vm_page **nextpgp; /* OUT */ d660 1 a660 1 struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject; d662 2 a663 2 if ((pg->flags & PG_RELEASED) == 0) panic("uvn_releasepg: page not released!"); d665 25 a689 24 /* * dispose of the page [caller handles PG_WANTED] */ pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); uvm_lock_pageq(); if (nextpgp) *nextpgp = pg->pageq.tqe_next; /* next page for daemon */ uvm_pagefree(pg); if (!nextpgp) uvm_unlock_pageq(); /* * now see if we need to kill the object */ if (uvn->u_flags & UVM_VNODE_RELKILL) { if (uvn->u_obj.uo_refs) panic("uvn_releasepg: kill flag set on referenced object!"); if (uvn->u_obj.uo_npages == 0) { if (uvn->u_flags & UVM_VNODE_WRITEABLE) { simple_lock(&uvn_wl_lock); LIST_REMOVE(uvn, u_wlist); simple_unlock(&uvn_wl_lock); } d691 1 a691 1 if (uvn->u_obj.memq.tqh_first) d694 10 a703 8 if (uvn->u_flags & UVM_VNODE_WANTED) wakeup(uvn); /* still holding object lock */ uvn->u_flags = 0; /* DEAD! */ simple_unlock(&uvn->u_obj.vmobjlock); return(FALSE); } } return(TRUE); d798 1 a798 1 #define UVN_HASH_PENALTY 4 /* a guess */ d800 19 a818 1 static boolean_t uvn_flush(uobj, start, stop, flags) d820 12 a831 3 struct uvm_object *uobj; vm_offset_t start, stop; int flags; d833 18 a850 80 { struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; struct vm_page *pp, *ppnext, *ptmp; struct vm_page *pps[MAXBSIZE/PAGE_SIZE], **ppsp; int npages, result, lcv; boolean_t retval, need_iosync, by_list, needs_clean; vm_offset_t curoff; u_short pp_version; UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist); curoff = 0; /* XXX: shut up gcc */ /* * get init vals and determine how we are going to traverse object */ need_iosync = FALSE; retval = TRUE; /* return value */ if (flags & PGO_ALLPAGES) { start = 0; stop = round_page(uvn->u_size); by_list = TRUE; /* always go by the list */ } else { start = trunc_page(start); stop = round_page(stop); if (stop > round_page(uvn->u_size)) printf("uvn_flush: strange, got an out of range flush (fixed)\n"); by_list = (uobj->uo_npages <= ((stop - start) / PAGE_SIZE) * UVN_HASH_PENALTY); } UVMHIST_LOG(maphist," flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x", start, stop, by_list, flags); /* * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as * a _hint_ as to how up to date the PG_CLEAN bit is. if the hint * is wrong it will only prevent us from clustering... it won't break * anything. we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster * will set them as it syncs PG_CLEAN. This is only an issue if we * are looking at non-inactive pages (because inactive page's PG_CLEAN * bit is always up to date since there are no mappings). 
* [borrowed PG_CLEANCHK idea from FreeBSD VM] */ if ((flags & PGO_CLEANIT) != 0 && uobj->pgops->pgo_mk_pcluster != NULL) { if (by_list) { for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = pp->listq.tqe_next) { if (pp->offset < start || pp->offset >= stop) continue; pp->flags &= ~PG_CLEANCHK; } } else { /* by hash */ for (curoff = start ; curoff < stop ; curoff += PAGE_SIZE) { pp = uvm_pagelookup(uobj, curoff); if (pp) pp->flags &= ~PG_CLEANCHK; } } } /* * now do it. note: we must update ppnext in body of loop or we * will get stuck. we need to use ppnext because we may free "pp" * before doing the next loop. */ if (by_list) { pp = uobj->memq.tqh_first; } else { curoff = start; pp = uvm_pagelookup(uobj, curoff); } ppnext = NULL; /* XXX: shut up gcc */ ppsp = NULL; /* XXX: shut up gcc */ uvm_lock_pageq(); /* page queues locked */ d852 19 a870 3 /* locked: both page queues and uobj */ for ( ; (by_list && pp != NULL) || (!by_list && curoff < stop) ; pp = ppnext) { d872 5 a876 1 if (by_list) { d878 2 a879 72 /* * range check */ if (pp->offset < start || pp->offset >= stop) { ppnext = pp->listq.tqe_next; continue; } } else { /* * null check */ curoff += PAGE_SIZE; if (pp == NULL) { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); continue; } } /* * handle case where we do not need to clean page (either because * we are not clean or because page is not dirty or is busy): * * NOTE: we are allowed to deactivate a non-wired active PG_BUSY page, * but once a PG_BUSY page is on the inactive queue it must * stay put until it is !PG_BUSY (so as not to confuse pagedaemon). */ if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) { needs_clean = FALSE; if ((pp->flags & PG_BUSY) != 0 && (flags & (PGO_CLEANIT|PGO_SYNCIO)) == (PGO_CLEANIT|PGO_SYNCIO)) need_iosync = TRUE; } else { /* freeing: nuke all mappings so we can sync PG_CLEAN bit with no race */ if ((pp->flags & PG_CLEAN) != 0 && (flags & PGO_FREE) != 0 && (pp->pqflags & PQ_ACTIVE) != 0) pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); if ((pp->flags & PG_CLEAN) != 0 && pmap_is_modified(PMAP_PGARG(pp))) pp->flags &= ~(PG_CLEAN); pp->flags |= PG_CLEANCHK; /* update "hint" */ needs_clean = ((pp->flags & PG_CLEAN) == 0); } /* * if we don't need a clean... load ppnext and dispose of pp */ if (!needs_clean) { /* load ppnext */ if (by_list) ppnext = pp->listq.tqe_next; else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } /* now dispose of pp */ if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && pp->wire_count == 0) { pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); uvm_pagedeactivate(pp); } } else if (flags & PGO_FREE) { if (pp->flags & PG_BUSY) { pp->flags |= PG_RELEASED; /* release busy pages */ d881 2 a882 2 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); uvm_pagefree(pp); /* removed page from object */ d884 118 a1001 19 } /* ppnext is valid so we can continue... */ continue; } /* * pp points to a page in the locked object that we are working on. * if it is !PG_CLEAN,!PG_BUSY and we asked for cleaning (PGO_CLEANIT). * we clean it now. * * let uvm_pager_put attempted a clustered page out. * note: locked: uobj and page queues. 
*/ pp->flags |= PG_BUSY; /* we 'own' page now */ UVM_PAGE_OWN(pp, "uvn_flush"); pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ); pp_version = pp->version; d1003 2 a1004 2 ppsp = pps; npages = sizeof(pps) / sizeof(struct vm_page *); d1006 2 a1007 2 /* locked: page queues, uobj */ result = uvm_pager_put(uobj, pp, &ppsp, &npages, d1009 1 a1009 1 /* unlocked: page queues, uobj */ d1011 27 a1037 26 /* * at this point nothing is locked. if we did an async I/O * it is remotely possible for the async i/o to complete and * the page "pp" be freed or what not before we get a chance * to relock the object. in order to detect this, we have * saved the version number of the page in "pp_version". */ /* relock! */ simple_lock(&uobj->vmobjlock); uvm_lock_pageq(); /* * VM_PAGER_AGAIN: given the structure of this pager, this * can only happen when we are doing async I/O and can't * map the pages into kernel memory (pager_map) due to lack * of vm space. if this happens we drop back to sync I/O. */ if (result == VM_PAGER_AGAIN) { /* * it is unlikely, but page could have been released while we * had the object lock dropped. we ignore this now and retry * the I/O. we will detect and handle the released page after * the syncio I/O completes. */ d1039 1 a1039 1 if (flags & PGO_SYNCIO) d1042 145 a1186 15 flags |= PGO_SYNCIO; goto ReTry; } /* * the cleaning operation is now done. finish up. note that * on error (!OK, !PEND) uvm_pager_put drops the cluster for us. * if success (OK, PEND) then uvm_pager_put returns the cluster * to us in ppsp/npages. */ /* * for pending async i/o if we are not deactivating/freeing we can * move on to the next page. */ d1188 1 a1188 1 if (result == VM_PAGER_PEND) { d1190 1 a1190 13 if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { /* no per-page ops: refresh ppnext and continue */ if (by_list) { if (pp->version == pp_version) ppnext = pp->listq.tqe_next; else ppnext = uobj->memq.tqh_first; /* reset */ } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } continue; } d1192 4 a1195 2 /* need to do anything here? */ } d1197 4 a1200 19 /* * need to look at each page of the I/O operation. we defer * processing "pp" until the last trip through this "for" loop * so that we can load "ppnext" for the main loop after we * play with the cluster pages [thus the "npages + 1" in the * loop below]. */ for (lcv = 0 ; lcv < npages + 1 ; lcv++) { /* * handle ppnext for outside loop, and saving pp until the end. */ if (lcv < npages) { if (ppsp[lcv] == pp) continue; /* skip pp until the end */ ptmp = ppsp[lcv]; } else { ptmp = pp; d1202 10 a1211 54 /* set up next page for outer loop */ if (by_list) { if (pp->version == pp_version) ppnext = pp->listq.tqe_next; else ppnext = uobj->memq.tqh_first; /* reset */ } else { if (curoff < stop) ppnext = uvm_pagelookup(uobj, curoff); } } /* * verify the page didn't get moved while obj was unlocked */ if (result == VM_PAGER_PEND && ptmp->uobject != uobj) continue; /* * unbusy the page if I/O is done. note that for pending * I/O it is possible that the I/O op finished before we * relocked the object (in which case the page is no longer * busy). 
*/ if (result != VM_PAGER_PEND) { if (ptmp->flags & PG_WANTED) thread_wakeup(ptmp); /* still holding object lock */ ptmp->flags &= ~(PG_WANTED|PG_BUSY); UVM_PAGE_OWN(ptmp, NULL); if (ptmp->flags & PG_RELEASED) { uvm_unlock_pageq(); /* pgo_releasepg wants this */ if (!uvn_releasepg(ptmp, NULL)) { return(TRUE); } uvm_lock_pageq(); /* relock */ continue; /* next page */ } else { ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); if ((flags & PGO_FREE) == 0) pmap_clear_modify(PMAP_PGARG(ptmp)); } } /* * dispose of page */ if (flags & PGO_DEACTIVATE) { if ((pp->pqflags & PQ_INACTIVE) == 0 && pp->wire_count == 0) { pmap_page_protect(PMAP_PGARG(ptmp), VM_PROT_NONE); uvm_pagedeactivate(ptmp); d1214 3 a1216 45 } else if (flags & PGO_FREE) { if (result == VM_PAGER_PEND) { if ((ptmp->flags & PG_BUSY) != 0) ptmp->flags |= PG_RELEASED; /* signal for i/o done */ } else { if (result != VM_PAGER_OK) { printf("uvn_flush: obj=%p, offset=0x%lx. error during pageout.\n", pp->uobject, pp->offset); printf("uvn_flush: WARNING: changes to page may be lost!\n"); retval = FALSE; } pmap_page_protect(PMAP_PGARG(ptmp), VM_PROT_NONE); uvm_pagefree(ptmp); } } } /* end of "lcv" for loop */ } /* end of "pp" for loop */ /* * done with pagequeues: unlock */ uvm_unlock_pageq(); /* * now wait for all I/O if required. */ if (need_iosync) { UVMHIST_LOG(maphist," <>",0,0,0,0); while (uvn->u_nio != 0) { uvn->u_flags |= UVM_VNODE_IOSYNC; UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, "uvn_flush",0); simple_lock(&uvn->u_obj.vmobjlock); } if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) wakeup(&uvn->u_flags); uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED); } /* return, with object locked! */ UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0); return(retval); d1229 5 a1233 6 static void uvn_cluster(uobj, offset, loffset, hoffset) struct uvm_object *uobj; vm_offset_t offset; vm_offset_t *loffset, *hoffset; /* OUT */ d1235 2 a1236 2 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; *loffset = offset; d1238 2 a1239 2 if (*loffset >= uvn->u_size) panic("uvn_cluster: offset out of range"); d1241 6 a1246 6 /* * XXX: old pager claims we could use VOP_BMAP to get maxcontig value. */ *hoffset = *loffset + MAXBSIZE; if (*hoffset > round_page(uvn->u_size)) /* past end? */ *hoffset = round_page(uvn->u_size); d1248 1 a1248 1 return; d1262 5 a1266 6 static int uvn_put(uobj, pps, npages, flags) struct uvm_object *uobj; struct vm_page **pps; int npages, flags; d1268 1 a1268 1 int retval; d1270 3 a1272 3 /* note: object locked */ retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE); /* note: object unlocked */ d1274 1 a1274 1 return(retval); d1289 21 a1309 2 static int uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) d1311 78 a1388 6 struct uvm_object *uobj; vm_offset_t offset; struct vm_page **pps; /* IN/OUT */ int *npagesp; /* IN (OUT if PGO_LOCKED) */ int centeridx, advice, flags; vm_prot_t access_type; d1390 133 a1522 209 { vm_offset_t current_offset; struct vm_page *ptmp; int lcv, result, gotpages; boolean_t done; UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist); UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0); /* * step 1: handled the case where fault data structures are locked. */ if (flags & PGO_LOCKED) { /* * gotpages is the current number of pages we've gotten (which * we pass back up to caller via *npagesp. */ gotpages = 0; /* * step 1a: get pages that are already resident. only do this * if the data structures are locked (i.e. the first time through). 
*/ done = TRUE; /* be optimistic */ for (lcv = 0, current_offset = offset ; lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { /* do we care about this page? if not, skip it */ if (pps[lcv] == PGO_DONTCARE) continue; /* lookup page */ ptmp = uvm_pagelookup(uobj, current_offset); /* to be useful must get a non-busy, non-released page */ if (ptmp == NULL || (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { if (lcv == centeridx || (flags & PGO_ALLPAGES) != 0) done = FALSE; /* need to do a wait or I/O! */ continue; } /* useful page: busy/lock it and plug it in our result array */ ptmp->flags |= PG_BUSY; /* loan up to caller */ UVM_PAGE_OWN(ptmp, "uvn_get1"); pps[lcv] = ptmp; gotpages++; } /* "for" lcv loop */ /* * XXX: given the "advice", should we consider async read-ahead? * XXX: fault current does deactive of pages behind us. is * this good (other callers might now). */ /* * XXX: read-ahead currently handled by buffer cache (bread) level. * XXX: no async i/o available. * XXX: so we don't do anything now. */ /* * step 1c: now we've either done everything needed or we to unlock * and do some waiting or I/O. */ *npagesp = gotpages; /* let caller know */ if (done) return(VM_PAGER_OK); /* bingo! */ else return(VM_PAGER_UNLOCK); /* EEK! Need to unlock and I/O */ } /* * step 2: get non-resident or busy pages. * object is locked. data structures are unlocked. * * XXX: because we can't do async I/O at this level we get things * page at a time (otherwise we'd chunk). the VOP_READ() will do * async-read-ahead for us at a lower level. */ for (lcv = 0, current_offset = offset ; lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { /* skip over pages we've already gotten or don't want */ /* skip over pages we don't _have_ to get */ if (pps[lcv] != NULL || (lcv != centeridx && (flags & PGO_ALLPAGES) == 0)) continue; /* * we have yet to locate the current page (pps[lcv]). we first * look for a page that is already at the current offset. if we * fine a page, we check to see if it is busy or released. if that * is the case, then we sleep on the page until it is no longer busy * or released and repeat the lookup. if the page we found is * neither busy nor released, then we busy it (so we own it) and * plug it into pps[lcv]. this breaks the following while loop * and indicates we are ready to move on to the next page in the * "lcv" loop above. * * if we exit the while loop with pps[lcv] still set to NULL, then * it means that we allocated a new busy/fake/clean page ptmp in the * object and we need to do I/O to fill in the data. */ while (pps[lcv] == NULL) { /* top of "pps" while loop */ /* look for a current page */ ptmp = uvm_pagelookup(uobj, current_offset); /* nope? allocate one now (if we can) */ if (ptmp == NULL) { ptmp = uvm_pagealloc(uobj, current_offset, NULL); /* alloc */ /* out of RAM? */ if (ptmp == NULL) { simple_unlock(&uobj->vmobjlock); uvm_wait("uvn_getpage"); simple_lock(&uobj->vmobjlock); continue; /* goto top of pps while loop */ } /* * got new page ready for I/O. break pps while loop. pps[lcv] is * still NULL. */ break; } /* page is there, see if we need to wait on it */ if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { ptmp->flags |= PG_WANTED; UVM_UNLOCK_AND_WAIT(ptmp,&uobj->vmobjlock,0,"uvn_get",0); simple_lock(&uobj->vmobjlock); continue; /* goto top of pps while loop */ } /* * if we get here then the page has become resident and unbusy * between steps 1 and 2. we busy it now (so we own it) and set * pps[lcv] (so that we exit the while loop). 
            ptmp->flags |= PG_BUSY;
            UVM_PAGE_OWN(ptmp, "uvn_get2");
            pps[lcv] = ptmp;

        }

        /*
         * if we own a valid page at the correct offset, pps[lcv] will
         * point to it.   nothing more to do except go to the next page.
         */

        if (pps[lcv])
            continue;                   /* next lcv */

        /*
         * we have a "fake/busy/clean" page that we just allocated.  do
         * I/O to fill it with valid data.   note that object must be
         * locked going into uvn_io, but will be unlocked afterwards.
         */

        result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1, PGO_SYNCIO, UIO_READ);

        /*
         * I/O done.   object is unlocked (by uvn_io).   because we used
         * syncio the result can not be PEND or AGAIN.   we must relock
         * and check for errors.
         */

        /* lock object.   check for errors. */
        simple_lock(&uobj->vmobjlock);
        if (result != VM_PAGER_OK) {
            if (ptmp->flags & PG_WANTED)
                thread_wakeup(ptmp);    /* object lock still held */
            ptmp->flags &= ~(PG_WANTED|PG_BUSY);
            UVM_PAGE_OWN(ptmp, NULL);
            uvm_lock_pageq();
            uvm_pagefree(ptmp);
            uvm_unlock_pageq();
            simple_unlock(&uobj->vmobjlock);
            return(result);
        }

        /*
         * we got the page!   clear the fake flag (indicates valid data
         * now in page) and plug into our result array.   note that page
         * is still busy.
         *
         * it is the caller's job to:
         * => check if the page is released
         * => unbusy the page
         * => activate the page
         */

        ptmp->flags &= ~PG_FAKE;                /* data is valid ... */
        pmap_clear_modify(PMAP_PGARG(ptmp));    /* ... and clean */
        pps[lcv] = ptmp;

    }   /* lcv loop */

    /*
     * finally, unlock object and return.
     */

d1524 2
a1525 2
    simple_unlock(&uobj->vmobjlock);
    return(VM_PAGER_OK);
d1536 6
a1541 1
static int uvn_asyncget(uobj, offset, npages)
d1543 5
a1547 10
struct uvm_object *uobj;
vm_offset_t offset;
int npages;

{
    /*
     * XXXCDC: we can't do async I/O yet
     */
    printf("uvn_asyncget called\n");
    return(KERN_SUCCESS);
d1560 12
a1571 1
static int uvn_io(uvn, pps, npages, flags, rw)
d1573 5
a1577 3
struct uvm_vnode *uvn;
vm_page_t *pps;
int npages, flags, rw;
d1579 16
a1594 30
{
    struct vnode *vn;
    struct uio uio;
    struct iovec iov;
    vm_offset_t kva, file_offset;
    int waitf, result, got, wanted;
    UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist);

    UVMHIST_LOG(maphist, "rw=%d", rw,0,0,0);

    /*
     * init values
     */

    waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT;
    vn = (struct vnode *) uvn;
    file_offset = pps[0]->offset;

    /*
     * check for sync'ing I/O.
     */

    while (uvn->u_flags & UVM_VNODE_IOSYNC) {
        if (waitf == M_NOWAIT) {
            simple_unlock(&uvn->u_obj.vmobjlock);
            UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0);
            return(VM_PAGER_AGAIN);
        }
        uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
        UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock,
d1596 2
a1597 2
        simple_lock(&uvn->u_obj.vmobjlock);
    }
d1599 7
a1605 7
    /*
     * check size
     */

    if (file_offset >= uvn->u_size) {
        simple_unlock(&uvn->u_obj.vmobjlock);
        UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0);
d1607 1
a1607 1
        printf("uvn_io: note: size check fired\n");
d1609 49
a1657 2
        return(VM_PAGER_BAD);
    }
d1659 65
a1723 112
    /*
     * first try and map the pages in (without waiting)
     */

    kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT);
    if (kva == NULL && waitf == M_NOWAIT) {
        simple_unlock(&uvn->u_obj.vmobjlock);
        UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0);
        return(VM_PAGER_AGAIN);
    }

    /*
     * ok, now bump u_nio up.   at this point we are done with uvn
     * and can unlock it.   if we still don't have a kva, try again
     * (this time with sleep ok).
     */

    uvn->u_nio++;                       /* we have an I/O in progress! */
    simple_unlock(&uvn->u_obj.vmobjlock);
    /* NOTE: object now unlocked */
    if (kva == NULL) {
        kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK);
    }
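    /*
     * note: from here until the VOP call finishes the object stays
     * unlocked.  the elevated u_nio count is what marks this I/O as in
     * progress; code that needs all I/O finished (e.g. the iosync path
     * in uvn_flush above) waits for u_nio to drain back to zero.
     */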
    /*
     * ok, mapped in.   our pages are PG_BUSY so they are not going to
     * get touched (so we can look at "offset" without having to lock
     * the object).  set up for I/O.
     */

    /*
     * fill out uio/iov
     */

    iov.iov_base = (caddr_t) kva;
    wanted = npages * PAGE_SIZE;
    if (file_offset + wanted > uvn->u_size)
        wanted = uvn->u_size - file_offset;     /* XXX: needed? */
    iov.iov_len = wanted;
    uio.uio_iov = &iov;
    uio.uio_iovcnt = 1;
    uio.uio_offset = file_offset;
    uio.uio_segflg = UIO_SYSSPACE;
    uio.uio_rw = rw;
    uio.uio_resid = wanted;
    uio.uio_procp = NULL;

    /*
     * do the I/O!  (XXX: curproc?)
     */

    UVMHIST_LOG(maphist, "calling VOP",0,0,0,0);

    if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
        vn_lock(vn, LK_EXCLUSIVE | LK_RETRY);
    /* NOTE: vnode now locked! */

    if (rw == UIO_READ)
        result = VOP_READ(vn, &uio, 0, curproc->p_ucred);
    else
        result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred);

    if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0)
        VOP_UNLOCK(vn, 0);
    /* NOTE: vnode now unlocked (unless vnislocked) */

    UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0);

    /*
     * result == unix style errno (0 == OK!)
     *
     * zero out rest of buffer (if needed)
     */

    if (result == 0) {
        got = wanted - uio.uio_resid;

        if (wanted && got == 0) {
            result = EIO;               /* XXX: error? */
        } else if (got < PAGE_SIZE * npages && rw == UIO_READ) {
            bzero((void *) (kva + got), (PAGE_SIZE * npages) - got);
        }
    }

    /*
     * now remove pager mapping
     */

    uvm_pagermapout(kva, npages);

    /*
     * now clean up the object (i.e. drop I/O count)
     */

    simple_lock(&uvn->u_obj.vmobjlock);
    /* NOTE: object now locked! */

    uvn->u_nio--;                       /* I/O DONE! */
    if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) {
        wakeup(&uvn->u_nio);
    }
    simple_unlock(&uvn->u_obj.vmobjlock);
    /* NOTE: object now unlocked! */

    /*
     * done!
     */

    UVMHIST_LOG(maphist, "<- done (result %d)", result,0,0,0);
    if (result == 0)
        return(VM_PAGER_OK);
    else
        return(VM_PAGER_ERROR);
d1762 5
a1766 1
boolean_t uvm_vnp_uncache(vp)
d1768 3
a1770 1
struct vnode *vp;
d1772 22
a1793 2
{
    struct uvm_vnode *uvn = &vp->v_uvm;
d1795 3
a1797 30
    /*
     * lock uvn part of the vnode and check to see if we need to do anything
     */

    simple_lock(&uvn->u_obj.vmobjlock);
    if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
        (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
        simple_unlock(&uvn->u_obj.vmobjlock);
        return(TRUE);
    }

    /*
     * we have a valid, non-blocked uvn.   clear persist flag.
     * if uvn is currently active we can return now.
     */

    uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
    if (uvn->u_obj.uo_refs) {
        simple_unlock(&uvn->u_obj.vmobjlock);
        return(FALSE);
    }

    /*
     * uvn is currently persisting!   we have to gain a reference to
     * it so that we can call uvn_detach to kill the uvn.
     */

    VREF(vp);                   /* seems ok, even with VOP_LOCK */
    uvn->u_obj.uo_refs++;       /* value is now 1 */
    simple_unlock(&uvn->u_obj.vmobjlock);
d1801 6
a1806 6
    /*
     * carry over sanity check from old vnode pager: the vnode should
     * be VOP_LOCK'd, and we confirm it here.
     */

    if (!VOP_ISLOCKED(vp)) {
        boolean_t is_ok_anyway = FALSE;
d1808 2
a1809 2
        extern int (**nfsv2_vnodeop_p) __P((void *));
        extern int (**spec_nfsv2nodeop_p) __P((void *));
d1811 1
a1811 1
        extern int (**fifo_nfsv2nodeop_p) __P((void *));
d1814 5
a1818 4
        /* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */
        if (vp->v_op == nfsv2_vnodeop_p || vp->v_op == spec_nfsv2nodeop_p) {
            is_ok_anyway = TRUE;
        }
d1820 3
a1822 3
        if (vp->v_op == fifo_nfsv2nodeop_p) {
            is_ok_anyway = TRUE;
        }
d1825 3
a1827 3
        if (!is_ok_anyway)
            panic("uvm_vnp_uncache: vnode not locked!");
    }
d1830 19
a1848 19
    /*
     * now drop our reference to the vnode.
     * if we have the sole reference to the vnode then this will cause
     * it to die [as we just cleared the persist flag].   we have to
     * unlock the vnode while we are doing this as it may trigger I/O.
     *
     * XXX: it might be possible for uvn to get reclaimed while we are
     * unlocked causing us to return TRUE when we should not.   we ignore
     * this as a false-positive return value doesn't hurt us.
     */

    VOP_UNLOCK(vp, 0);
    uvn_detach(&uvn->u_obj);
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

    /*
     * and return...
     */

    return(TRUE);
d1869 6
a1874 1
void uvm_vnp_setsize(vp, newsize)
d1876 5
a1880 2
struct vnode *vp;
u_quad_t newsize;
d1882 4
a1885 13
{
    struct uvm_vnode *uvn = &vp->v_uvm;

    /*
     * lock uvn and check for valid object, and if valid: do it!
     */

    simple_lock(&uvn->u_obj.vmobjlock);
    if (uvn->u_flags & UVM_VNODE_VALID) {

        /*
         * make sure that the newsize fits within a vm_offset_t
         * XXX: need to revise addressing data types
         */
d1887 1
a1887 1
        if (newsize > (vm_offset_t) -PAGE_SIZE) {
d1889 2
a1890 2
            printf("uvm_vnp_setsize: vn %p size truncated %qx->%lx\n",
                vp, newsize, (vm_offset_t) -PAGE_SIZE);
d1892 15
a1906 2
            newsize = (vm_offset_t) -PAGE_SIZE;
        }
d1908 4
a1911 17
        /*
         * now check if the size has changed: if we shrink we had better
         * toss some pages...
         */

        if (uvn->u_size > newsize) {
            (void) uvn_flush(&uvn->u_obj, (vm_offset_t) newsize,
                uvn->u_size, PGO_FREE);
        }
        uvn->u_size = (vm_offset_t)newsize;
    }
    simple_unlock(&uvn->u_obj.vmobjlock);

    /*
     * done
     */
    return;
d1925 24
a1948 1
void uvm_vnp_sync(mp)
d1950 37
a1986 1
struct mount *mp;
d1988 5
a1992 63
{
    struct uvm_vnode *uvn;
    struct vnode *vp;
    boolean_t got_lock;

    /*
     * step 1: ensure we are the only ones using the uvn_sync_q by
     * locking our lock...
     */

    lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0);

    /*
     * step 2: build up a simpleq of uvns of interest based on the
     * write list.   we gain a reference to uvns of interest.   must
     * be careful about locking uvn's since we will be holding uvn_wl_lock
     * in the body of the loop.
     */

    SIMPLEQ_INIT(&uvn_sync_q);
    simple_lock(&uvn_wl_lock);
    for (uvn = uvn_wlist.lh_first ; uvn != NULL ;
        uvn = uvn->u_wlist.le_next) {

        vp = (struct vnode *) uvn;
        if (mp && vp->v_mount != mp)
            continue;

        /* attempt to gain reference */
        while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) == FALSE &&
            (uvn->u_flags & UVM_VNODE_BLOCKED) == 0)
            /*spin*/;

        /*
         * we will exit the loop if we were unable to get the lock and we
         * detected that the vnode was "blocked" ... if it is blocked then
         * it must be a dying vnode.   since dying vnodes are in the process
         * of being flushed out we can safely skip it.
         *
         * note that uvn must already be valid because we found it on the
         * wlist (this also means it can't be ALOCK'd).
         */

        if (!got_lock)
            continue;

        /*
         * gain reference.   watch out for persisting uvns (need to regain
         * vnode REF).
         */

        if (uvn->u_obj.uo_refs == 0)
            VREF(vp);
        uvn->u_obj.uo_refs++;
        simple_unlock(&uvn->u_obj.vmobjlock);

        /*
         * got it!
         */

        SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq);
    }
    simple_unlock(&uvn_wl_lock);

    /*
     * step 3: we now have a list of uvn's that may need cleaning.
     * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock
     * (so we can now safely lock uvn's again).
     */
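    /*
     * note: each uvn on uvn_sync_q holds the reference we gained in
     * step 2, so it cannot go away while we flush it; that reference
     * is dropped again (via uvn_detach) at the bottom of the loop below.
     */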
d1994 2
a1995 2
    for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) {
        simple_lock(&uvn->u_obj.vmobjlock);
d1997 3
a1999 3
        if (uvn->u_flags & UVM_VNODE_DYING) {
            printf("uvm_vnp_sync: dying vnode on sync list\n");
        }
d2001 16
a2016 1
        uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST);
d2018 8
a2025 20
        /*
         * if we have the only reference and we just cleaned the uvn, then
         * we can pull it out of the UVM_VNODE_WRITEABLE state thus allowing
         * us to avoid thinking about flushing it again on later sync ops.
         */

        if (uvn->u_obj.uo_refs == 1 &&
            (uvn->u_flags & UVM_VNODE_WRITEABLE)) {
            LIST_REMOVE(uvn, u_wlist);
            uvn->u_flags &= ~UVM_VNODE_WRITEABLE;
        }

        simple_unlock(&uvn->u_obj.vmobjlock);

        /* now drop our reference to the uvn */

        uvn_detach(&uvn->u_obj);
    }

    /*
     * done!   release sync lock
     */

    lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0);
@


1.6
log
@Include the NFS option header.
@
text
@d1 1
a1 1
/* $NetBSD: uvm_vnode.c,v 1.5 1998/02/18 06:35:46 mrg Exp $ */
a83 1
#if NCPU > 1
a84 1
#endif
d1614 1
a1614 1
    VOP_LOCK(vn);
d1623 1
a1623 1
    VOP_UNLOCK(vn);
d1788 1
a1788 1
    VOP_UNLOCK(vp);
d1790 1
a1790 1
    VOP_LOCK(vp);
d1887 1
a1887 1
    lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0, curproc);
d1970 1
a1970 1
    lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0, curproc);
@


1.5
log
@bug fix from chuck: uvm_vnp_terminate panic when /sbin/init was unlinked
@
text
@d1 1
a1 1
/* $NetBSD: uvm_vnode.c,v 1.4 1998/02/10 14:12:33 mrg Exp $ */
d52 1
@


1.4
log
@- add defopt's for UVM, UVMHIST and PMAP_NEW.
- remove unnecessary UVMHIST_DECL's.
@
text
@d1 1
a1 1
/* $NetBSD: uvm_vnode.c,v 1.3 1998/02/07 11:09:57 mrg Exp $ */
d473 23
a495 12
 * called when a persisting vnode vm object (i.e. one with zero
 * reference count) needs to be freed so that a vnode can be reused.
 * this happens under "getnewvnode" in vfs_subr.c.   if the vnode (from
 * the free list) is still attached (i.e. not VBAD) then vgone is
 * called.   as part of the vgone trace this should get called to free
 * the vm object.   note that the vnode must be XLOCK'd before calling
 * here.   the XLOCK protects us from getting a vnode which is already
 * in the DYING state.
 *
 * => the vnode must be XLOCK'd _and_ VOP_LOCK()'d by caller
 * => unlike uvn_detach, this function must not return until the
 *    old uvm_object is dead.
d504 1
d520 5
a524 12
 * must be a valid, persisting vnode that is not already dying.
 * can't be ALOCK because that would require someone else having
 * a valid reference to the underlying vnode (and if that was true
 * then the kernel wouldn't want to terminate it).
 */
    if ((uvn->u_flags & (UVM_VNODE_VALID|UVM_VNODE_CANPERSIST|UVM_VNODE_ALOCK))
        != (UVM_VNODE_VALID|UVM_VNODE_CANPERSIST)) {
        printf("uvm_vnp_terminate: flags = 0x%x, refs=%d\n",
            uvn->u_flags, uvn->u_obj.uo_refs);
        panic("uvm_vnp_terminate: uvn in unexpected state");
    }
d526 1
a526 1
#ifdef DIAGNOSTIC
d528 1
a528 1
 * diagnostic check: is uvn persisting?
d531 1
a531 1
    printf("uvm_vnp_terminate(%p): warning: object still active with %d refs\n",
d541 2
a542 1
 * away.
d555 5
d593 4
a596 2
 * free the uvn now.   note that the VREF reference is already gone
 * [it is dropped when we enter the persist state].
a597 2
    if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED)
        panic("uvm_vnp_terminate: io sync wanted bit set");
d599 26
a624 4
    if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
        simple_lock(&uvn_wl_lock);
        LIST_REMOVE(uvn, u_wlist);
        simple_unlock(&uvn_wl_lock);
d626 2
a627 1
    if (uvn->u_flags & UVM_VNODE_WANTED)
d629 1
a629 1
    uvn->u_flags = 0;
@


1.3
log
@restore rcsids
@
text
@d1 1
a1 1
/* $NetBSD$ */
d52 2
a72 2
UVMHIST_DECL(maphist);
@


1.2
log
@RCS ID police.
@
text
@d49 1
@


1.1
log
@Initial revision
@
text
@d1 1
a1 1
/* $Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp $ */
@


1.1.1.1
log
@initial import of the new virtual memory system, UVM, into -current.

UVM was written by chuck cranor , with some minor portions derived
from the old Mach code.  i provided some help getting swap and paging
working, and other bug fixes/ideas.  chuck silvers also provided some
other fixes.

this is the UVM kernel code portion.

this will be KNF'd shortly.  :-)
@
text
@@
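The clustering policy in uvn_cluster() above is plain arithmetic: start the
cluster at the requested offset and extend it by at most MAXBSIZE, clamped to
the page-rounded object size.  The following standalone sketch restates that
calculation outside the kernel; the PAGE_SIZE and MAXBSIZE values and the
local round_page() helper are illustrative stand-ins for the kernel's own
definitions rather than anything taken from the sources above.

#include <stdio.h>

/* placeholder constants; the kernel supplies its own PAGE_SIZE/MAXBSIZE */
#define PAGE_SIZE   4096UL
#define MAXBSIZE    65536UL

/* round x up to the next page boundary, as the kernel macro does */
static unsigned long
round_page(unsigned long x)
{
    return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

/*
 * compute the cluster [*lo, *hi) for an I/O starting at "offset" in an
 * object of "size" bytes: at most MAXBSIZE long, never extending past
 * the rounded-up end of the object (same shape as uvn_cluster above).
 */
static void
cluster(unsigned long offset, unsigned long size,
    unsigned long *lo, unsigned long *hi)
{
    *lo = offset;
    *hi = *lo + MAXBSIZE;
    if (*hi > round_page(size))         /* past end? */
        *hi = round_page(size);
}

int
main(void)
{
    unsigned long lo, hi;

    cluster(8192, 20000, &lo, &hi);     /* small object: clamp to its end */
    printf("cluster = [%lu, %lu)\n", lo, hi);   /* prints [8192, 20480) */
    return 0;
}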