head 1.252; access; symbols netbsd-10-0-RELEASE:1.249.2.1 netbsd-10-0-RC6:1.249.2.1 netbsd-10-0-RC5:1.249.2.1 netbsd-10-0-RC4:1.249.2.1 netbsd-10-0-RC3:1.249.2.1 netbsd-10-0-RC2:1.249.2.1 thorpej-ifq:1.252.0.4 thorpej-ifq-base:1.252 thorpej-altq-separation:1.252.0.2 thorpej-altq-separation-base:1.252 netbsd-10-0-RC1:1.249.2.1 netbsd-10:1.249.0.2 netbsd-10-base:1.249 bouyer-sunxi-drm:1.247.0.2 bouyer-sunxi-drm-base:1.247 netbsd-9-3-RELEASE:1.229.2.1 thorpej-i2c-spi-conf2:1.241.0.6 thorpej-i2c-spi-conf2-base:1.241 thorpej-futex2:1.241.0.4 thorpej-futex2-base:1.241 thorpej-cfargs2:1.241.0.2 thorpej-cfargs2-base:1.241 cjep_sun2x-base1:1.239 cjep_sun2x:1.239.0.8 cjep_sun2x-base:1.239 cjep_staticlib_x-base1:1.239 netbsd-9-2-RELEASE:1.229.2.1 cjep_staticlib_x:1.239.0.6 cjep_staticlib_x-base:1.239 thorpej-i2c-spi-conf:1.239.0.4 thorpej-i2c-spi-conf-base:1.241 thorpej-cfargs:1.239.0.2 thorpej-cfargs-base:1.239 thorpej-futex:1.238.0.2 thorpej-futex-base:1.239 netbsd-9-1-RELEASE:1.229.2.1 bouyer-xenpvh-base2:1.236 phil-wifi-20200421:1.236 bouyer-xenpvh-base1:1.236 phil-wifi-20200411:1.236 bouyer-xenpvh:1.236.0.4 bouyer-xenpvh-base:1.236 is-mlppp:1.236.0.2 is-mlppp-base:1.236 phil-wifi-20200406:1.236 netbsd-8-2-RELEASE:1.216.6.7 ad-namecache-base3:1.235 netbsd-9-0-RELEASE:1.229.2.1 netbsd-9-0-RC2:1.229.2.1 ad-namecache-base2:1.233 ad-namecache-base1:1.232 ad-namecache:1.232.0.2 ad-namecache-base:1.232 netbsd-9-0-RC1:1.229.2.1 phil-wifi-20191119:1.231 netbsd-9:1.229.0.2 netbsd-9-base:1.229 phil-wifi-20190609:1.228 netbsd-8-1-RELEASE:1.216.6.6 netbsd-8-1-RC1:1.216.6.6 isaki-audio2:1.228.0.2 isaki-audio2-base:1.228 pgoyette-compat-merge-20190127:1.223.2.4 pgoyette-compat-20190127:1.228 pgoyette-compat-20190118:1.228 pgoyette-compat-1226:1.228 pgoyette-compat-1126:1.228 pgoyette-compat-1020:1.228 pgoyette-compat-0930:1.228 pgoyette-compat-0906:1.228 netbsd-7-2-RELEASE:1.187.2.1 pgoyette-compat-0728:1.227 netbsd-8-0-RELEASE:1.216.6.6 phil-wifi:1.226.0.2 phil-wifi-base:1.226 pgoyette-compat-0625:1.225 netbsd-8-0-RC2:1.216.6.6 pgoyette-compat-0521:1.224 pgoyette-compat-0502:1.223 pgoyette-compat-0422:1.223 netbsd-8-0-RC1:1.216.6.5 pgoyette-compat-0415:1.223 pgoyette-compat-0407:1.223 pgoyette-compat-0330:1.223 pgoyette-compat-0322:1.223 pgoyette-compat-0315:1.223 netbsd-7-1-2-RELEASE:1.187.2.1 pgoyette-compat:1.223.0.2 pgoyette-compat-base:1.223 netbsd-7-1-1-RELEASE:1.187.2.1 tls-maxphys-base-20171202:1.220 matt-nb8-mediatek:1.216.6.1.0.2 matt-nb8-mediatek-base:1.216.6.1 nick-nhusb-base-20170825:1.216 perseant-stdc-iso10646:1.216.0.8 perseant-stdc-iso10646-base:1.216 netbsd-8:1.216.0.6 netbsd-8-base:1.216 prg-localcount2-base3:1.216 prg-localcount2-base2:1.216 prg-localcount2-base1:1.216 prg-localcount2:1.216.0.4 prg-localcount2-base:1.216 pgoyette-localcount-20170426:1.216 bouyer-socketcan-base1:1.216 jdolecek-ncq:1.216.0.2 jdolecek-ncq-base:1.216 pgoyette-localcount-20170320:1.216 netbsd-7-1:1.187.2.1.0.6 netbsd-7-1-RELEASE:1.187.2.1 netbsd-7-1-RC2:1.187.2.1 nick-nhusb-base-20170204:1.212 netbsd-7-nhusb-base-20170116:1.187.2.1 bouyer-socketcan:1.203.0.2 bouyer-socketcan-base:1.203 pgoyette-localcount-20170107:1.203 netbsd-7-1-RC1:1.187.2.1 nick-nhusb-base-20161204:1.203 pgoyette-localcount-20161104:1.203 netbsd-7-0-2-RELEASE:1.187.2.1 nick-nhusb-base-20161004:1.203 localcount-20160914:1.203 netbsd-7-nhusb:1.187.2.1.0.4 netbsd-7-nhusb-base:1.187.2.1 pgoyette-localcount-20160806:1.203 pgoyette-localcount-20160726:1.203 pgoyette-localcount:1.199.0.2 pgoyette-localcount-base:1.199 nick-nhusb-base-20160907:1.199 nick-nhusb-base-20160529:1.195 netbsd-7-0-1-RELEASE:1.187.2.1 nick-nhusb-base-20160422:1.195 nick-nhusb-base-20160319:1.195 nick-nhusb-base-20151226:1.193 netbsd-7-0:1.187.2.1.0.2 netbsd-7-0-RELEASE:1.187.2.1 nick-nhusb-base-20150921:1.191 netbsd-7-0-RC3:1.187.2.1 netbsd-7-0-RC2:1.187.2.1 netbsd-7-0-RC1:1.187.2.1 nick-nhusb-base-20150606:1.191 nick-nhusb-base-20150406:1.190 nick-nhusb:1.189.0.2 nick-nhusb-base:1.189 netbsd-5-2-3-RELEASE:1.141.6.2.2.1 netbsd-5-1-5-RELEASE:1.141.6.1.6.1 netbsd-6-0-6-RELEASE:1.168.6.1 netbsd-6-1-5-RELEASE:1.168.8.1 netbsd-7:1.187.0.2 netbsd-7-base:1.187 yamt-pagecache-base9:1.182 yamt-pagecache-tag8:1.166.2.2 netbsd-6-1-4-RELEASE:1.168.8.1 netbsd-6-0-5-RELEASE:1.168.6.1 tls-earlyentropy:1.182.0.2 tls-earlyentropy-base:1.187 riastradh-xf86-video-intel-2-7-1-pre-2-21-15:1.182 riastradh-drm2-base3:1.182 netbsd-6-1-3-RELEASE:1.168.8.1 netbsd-6-0-4-RELEASE:1.168.6.1 netbsd-5-2-2-RELEASE:1.141.6.2.2.1 netbsd-5-1-4-RELEASE:1.141.6.1.6.1 netbsd-6-1-2-RELEASE:1.168.8.1 netbsd-6-0-3-RELEASE:1.168.6.1 netbsd-5-2-1-RELEASE:1.141.6.2.2.1 netbsd-5-1-3-RELEASE:1.141.6.1.6.1 rmind-smpnet-nbase:1.182 netbsd-6-1-1-RELEASE:1.168 riastradh-drm2-base2:1.173 riastradh-drm2-base1:1.173 riastradh-drm2:1.173.0.8 riastradh-drm2-base:1.173 rmind-smpnet:1.173.0.2 rmind-smpnet-base:1.182 netbsd-6-1:1.168.0.8 netbsd-6-0-2-RELEASE:1.168 netbsd-6-1-RELEASE:1.168 khorben-n900:1.173.0.6 netbsd-6-1-RC4:1.168 netbsd-6-1-RC3:1.168 agc-symver:1.173.0.4 agc-symver-base:1.173 netbsd-6-1-RC2:1.168 netbsd-6-1-RC1:1.168 yamt-pagecache-base8:1.173 netbsd-5-2:1.141.6.2.0.2 netbsd-6-0-1-RELEASE:1.168 yamt-pagecache-base7:1.173 netbsd-5-2-RELEASE:1.141.6.2 netbsd-5-2-RC1:1.141.6.2 matt-nb6-plus-nbase:1.168 yamt-pagecache-base6:1.173 netbsd-6-0:1.168.0.6 netbsd-6-0-RELEASE:1.168 netbsd-6-0-RC2:1.168 tls-maxphys:1.171.0.2 tls-maxphys-base:1.187 matt-nb6-plus:1.168.0.4 matt-nb6-plus-base:1.168 netbsd-6-0-RC1:1.168 jmcneill-usbmp-base10:1.168 yamt-pagecache-base5:1.168 jmcneill-usbmp-base9:1.168 yamt-pagecache-base4:1.168 jmcneill-usbmp-base8:1.168 jmcneill-usbmp-base7:1.168 jmcneill-usbmp-base6:1.168 jmcneill-usbmp-base5:1.168 jmcneill-usbmp-base4:1.168 jmcneill-usbmp-base3:1.168 jmcneill-usbmp-pre-base2:1.166 jmcneill-usbmp-base2:1.168 netbsd-6:1.168.0.2 netbsd-6-base:1.168 netbsd-5-1-2-RELEASE:1.141.6.1 netbsd-5-1-1-RELEASE:1.141.6.1 jmcneill-usbmp:1.166.0.6 jmcneill-usbmp-base:1.166 jmcneill-audiomp3:1.166.0.4 jmcneill-audiomp3-base:1.166 yamt-pagecache-base3:1.166 yamt-pagecache-base2:1.166 yamt-pagecache:1.166.0.2 yamt-pagecache-base:1.166 rmind-uvmplock-nbase:1.165 cherry-xenmp:1.164.0.2 cherry-xenmp-base:1.164 bouyer-quota2-nbase:1.162 bouyer-quota2:1.160.0.4 bouyer-quota2-base:1.162 jruoho-x86intr:1.160.0.2 jruoho-x86intr-base:1.160 matt-mips64-premerge-20101231:1.159 matt-nb5-mips64-premerge-20101231:1.141.6.1 matt-nb5-pq3:1.141.6.1.0.8 matt-nb5-pq3-base:1.141.6.1 netbsd-5-1:1.141.6.1.0.6 netbsd-5-1-RELEASE:1.141.6.1 uebayasi-xip-base4:1.158 uebayasi-xip-base3:1.158 yamt-nfs-mp-base11:1.158 netbsd-5-1-RC4:1.141.6.1 matt-nb5-mips64-k15:1.141.6.1 uebayasi-xip-base2:1.158 yamt-nfs-mp-base10:1.158 netbsd-5-1-RC3:1.141.6.1 netbsd-5-1-RC2:1.141.6.1 uebayasi-xip-base1:1.158 netbsd-5-1-RC1:1.141.6.1 rmind-uvmplock:1.156.0.2 rmind-uvmplock-base:1.165 yamt-nfs-mp-base9:1.155 uebayasi-xip:1.155.0.2 uebayasi-xip-base:1.155 netbsd-5-0-2-RELEASE:1.141.6.1 matt-nb5-mips64-premerge-20091211:1.141.6.1 matt-premerge-20091211:1.149 yamt-nfs-mp-base8:1.146 matt-nb5-mips64-u2-k2-k4-k7-k8-k9:1.141.6.1 matt-nb4-mips64-k7-u2a-k9b:1.141.6.1 matt-nb5-mips64-u1-k1-k5:1.141.6.1 yamt-nfs-mp-base7:1.146 matt-nb5-mips64:1.141.6.1.0.4 netbsd-5-0-1-RELEASE:1.141.6.1 jymxensuspend-base:1.146 yamt-nfs-mp-base6:1.146 yamt-nfs-mp-base5:1.146 yamt-nfs-mp-base4:1.146 jym-xensuspend-nbase:1.147 yamt-nfs-mp-base3:1.146 nick-hppapmap-base4:1.146 nick-hppapmap-base3:1.146 netbsd-5-0:1.141.6.1.0.2 netbsd-5-0-RELEASE:1.141.6.1 netbsd-5-0-RC4:1.141.6.1 netbsd-5-0-RC3:1.141 nick-hppapmap-base2:1.142 netbsd-5-0-RC2:1.141 jym-xensuspend:1.142.0.2 jym-xensuspend-base:1.146 netbsd-5-0-RC1:1.141 haad-dm-base2:1.141 haad-nbase2:1.141 ad-audiomp2:1.141.0.8 ad-audiomp2-base:1.141 christos-time_t-nbase:1.141 netbsd-5:1.141.0.6 netbsd-5-base:1.141 nick-hppapmap:1.141.0.4 nick-hppapmap-base:1.146 matt-mips64-base2:1.141 matt-mips64:1.130.0.18 haad-dm-base1:1.141 wrstuden-revivesa-base-4:1.141 netbsd-4-0-1-RELEASE:1.125 wrstuden-revivesa-base-3:1.141 wrstuden-revivesa-base-2:1.141 wrstuden-fixsa-newbase:1.125 nick-csl-alignment-base5:1.130 haad-dm:1.141.0.2 haad-dm-base:1.141 wrstuden-revivesa-base-1:1.141 simonb-wapbl-nbase:1.141 yamt-pf42-base4:1.141 simonb-wapbl:1.140.0.2 simonb-wapbl-base:1.141 yamt-pf42-base3:1.140 hpcarm-cleanup-nbase:1.139 yamt-pf42-baseX:1.137 yamt-pf42-base2:1.139 yamt-nfs-mp-base2:1.139 wrstuden-revivesa:1.139.0.4 wrstuden-revivesa-base:1.141 yamt-nfs-mp:1.139.0.2 yamt-nfs-mp-base:1.139 yamt-pf42:1.137.0.4 yamt-pf42-base:1.137 christos-time_t:1.137.0.2 christos-time_t-base:1.141 ad-socklock-base1:1.137 yamt-lazymbuf-base15:1.135 yamt-lazymbuf-base14:1.135 keiichi-mipv6-nbase:1.134 mjf-devfs2:1.133.0.6 mjf-devfs2-base:1.142 nick-net80211-sync:1.133.0.4 nick-net80211-sync-base:1.133 keiichi-mipv6:1.133.0.2 keiichi-mipv6-base:1.134 bouyer-xeni386-merge1:1.131.4.1 matt-armv6-prevmlocking:1.130 wrstuden-fixsa-base-1:1.125 vmlocking2-base3:1.132 netbsd-4-0:1.125.0.12 netbsd-4-0-RELEASE:1.125 bouyer-xeni386-nbase:1.132 yamt-kmem-base3:1.131 cube-autoconf:1.131.0.6 cube-autoconf-base:1.131 yamt-kmem-base2:1.131 bouyer-xeni386:1.131.0.4 bouyer-xeni386-base:1.132 yamt-kmem:1.131.0.2 yamt-kmem-base:1.131 vmlocking2-base2:1.131 reinoud-bufcleanup-nbase:1.131 vmlocking2:1.130.0.16 vmlocking2-base1:1.130 netbsd-4-0-RC5:1.125 matt-nb4-arm:1.125.0.10 matt-nb4-arm-base:1.125 matt-armv6-nbase:1.134 jmcneill-base:1.130 netbsd-4-0-RC4:1.125 mjf-devfs:1.130.0.14 mjf-devfs-base:1.132 bouyer-xenamd64-base2:1.130 vmlocking-nbase:1.130 yamt-x86pmap-base4:1.130 bouyer-xenamd64:1.130.0.12 bouyer-xenamd64-base:1.130 netbsd-4-0-RC3:1.125 yamt-x86pmap-base3:1.130 yamt-x86pmap-base2:1.130 netbsd-4-0-RC2:1.125 yamt-x86pmap:1.130.0.10 yamt-x86pmap-base:1.130 netbsd-4-0-RC1:1.125 matt-armv6:1.130.0.8 matt-armv6-base:1.132 matt-mips64-base:1.130 jmcneill-pm:1.130.0.6 jmcneill-pm-base:1.131 hpcarm-cleanup:1.130.0.4 hpcarm-cleanup-base:1.133 nick-csl-alignment:1.130.0.2 nick-csl-alignment-base:1.130 netbsd-3-1-1-RELEASE:1.107 netbsd-3-0-3-RELEASE:1.107 yamt-idlelwp-base8:1.126 wrstuden-fixsa:1.125.0.8 wrstuden-fixsa-base:1.125 thorpej-atomic:1.126.0.8 thorpej-atomic-base:1.126 reinoud-bufcleanup:1.126.0.6 reinoud-bufcleanup-base:1.131 mjf-ufs-trans:1.126.0.4 mjf-ufs-trans-base:1.129 vmlocking:1.126.0.2 vmlocking-base:1.130 ad-audiomp:1.125.0.6 ad-audiomp-base:1.125 yamt-idlelwp:1.125.0.4 post-newlock2-merge:1.125 newlock2-nbase:1.125 yamt-splraiseipl-base5:1.125 yamt-splraiseipl-base4:1.125 yamt-splraiseipl-base3:1.125 abandoned-netbsd-4-base:1.121 abandoned-netbsd-4:1.121.0.2 netbsd-3-1:1.107.0.6 netbsd-3-1-RELEASE:1.107 netbsd-3-0-2-RELEASE:1.107 yamt-splraiseipl-base2:1.123 netbsd-3-1-RC4:1.107 yamt-splraiseipl:1.122.0.4 yamt-splraiseipl-base:1.122 netbsd-3-1-RC3:1.107 yamt-pdpolicy-base9:1.122 newlock2:1.122.0.2 newlock2-base:1.125 yamt-pdpolicy-base8:1.122 netbsd-3-1-RC2:1.107 netbsd-3-1-RC1:1.107 yamt-pdpolicy-base7:1.121 netbsd-4:1.125.0.2 netbsd-4-base:1.125 yamt-pdpolicy-base6:1.117 chap-midi-nbase:1.117 netbsd-3-0-1-RELEASE:1.107 gdamore-uart:1.117.0.4 gdamore-uart-base:1.117 simonb-timcounters-final:1.115.6.1 yamt-pdpolicy-base5:1.117 chap-midi:1.117.0.2 chap-midi-base:1.117 yamt-pdpolicy-base4:1.115 yamt-pdpolicy-base3:1.115 peter-altq-base:1.115 peter-altq:1.115.0.12 yamt-pdpolicy-base2:1.115 elad-kernelauth-base:1.116 elad-kernelauth:1.115.0.10 yamt-pdpolicy:1.115.0.8 yamt-pdpolicy-base:1.115 yamt-uio_vmspace-base5:1.115 simonb-timecounters:1.115.0.6 simonb-timecounters-base:1.117 rpaulo-netinet-merge-pcb:1.115.0.4 rpaulo-netinet-merge-pcb-base:1.122 yamt-uio_vmspace:1.115.0.2 netbsd-3-0:1.107.0.4 netbsd-3-0-RELEASE:1.107 netbsd-3-0-RC6:1.107 yamt-readahead-base3:1.111 netbsd-3-0-RC5:1.107 netbsd-3-0-RC4:1.107 netbsd-3-0-RC3:1.107 yamt-readahead-base2:1.111 netbsd-3-0-RC2:1.107 yamt-readahead-pervnode:1.111 yamt-readahead-perfile:1.111 yamt-readahead:1.111.0.6 yamt-readahead-base:1.111 netbsd-3-0-RC1:1.107 yamt-vop-base3:1.111 netbsd-2-0-3-RELEASE:1.90.2.2 netbsd-2-1:1.90.2.2.0.4 yamt-vop-base2:1.111 thorpej-vnode-attr:1.111.0.4 thorpej-vnode-attr-base:1.111 netbsd-2-1-RELEASE:1.90.2.2 yamt-vop:1.111.0.2 yamt-vop-base:1.111 netbsd-2-1-RC6:1.90.2.2 netbsd-2-1-RC5:1.90.2.2 netbsd-2-1-RC4:1.90.2.2 netbsd-2-1-RC3:1.90.2.2 netbsd-2-1-RC2:1.90.2.2 netbsd-2-1-RC1:1.90.2.2 yamt-lazymbuf:1.109.0.2 yamt-km-base4:1.107 netbsd-2-0-2-RELEASE:1.90.2.2 yamt-km-base3:1.107 netbsd-3:1.107.0.2 netbsd-3-base:1.107 yamt-km-base2:1.105 yamt-km:1.105.0.6 yamt-km-base:1.105 kent-audio2:1.105.0.4 kent-audio2-base:1.107 netbsd-2-0-1-RELEASE:1.90.2.2 kent-audio1-beforemerge:1.105 netbsd-2:1.90.2.2.0.2 netbsd-2-base:1.90.2.2 kent-audio1:1.105.0.2 kent-audio1-base:1.105 netbsd-2-0-RELEASE:1.90.2.2 netbsd-2-0-RC5:1.90.2.2 netbsd-2-0-RC4:1.90.2.2 netbsd-2-0-RC3:1.90.2.2 netbsd-2-0-RC2:1.90.2.2 netbsd-2-0-RC1:1.90.2.2 netbsd-2-0:1.90.0.2 netbsd-2-0-base:1.90 netbsd-1-6-PATCH002-RELEASE:1.64 netbsd-1-6-PATCH002:1.64 netbsd-1-6-PATCH002-RC4:1.64 netbsd-1-6-PATCH002-RC3:1.64 netbsd-1-6-PATCH002-RC2:1.64 netbsd-1-6-PATCH002-RC1:1.64 ktrace-lwp:1.82.0.2 ktrace-lwp-base:1.111 netbsd-1-6-PATCH001:1.64 netbsd-1-6-PATCH001-RELEASE:1.64 netbsd-1-6-PATCH001-RC3:1.64 netbsd-1-6-PATCH001-RC2:1.64 netbsd-1-6-PATCH001-RC1:1.64 nathanw_sa_end:1.60.2.9 nathanw_sa_before_merge:1.76 fvdl_fs64_base:1.76 gmcgarry_ctxsw:1.76.0.4 gmcgarry_ctxsw_base:1.76 gmcgarry_ucred:1.76.0.2 gmcgarry_ucred_base:1.76 nathanw_sa_base:1.76 kqueue-aftermerge:1.75 kqueue-beforemerge:1.74 netbsd-1-6-RELEASE:1.64 netbsd-1-6-RC3:1.64 netbsd-1-6-RC2:1.64 netbsd-1-6-RC1:1.64 netbsd-1-6:1.64.0.4 netbsd-1-6-base:1.64 gehenna-devsw:1.64.0.2 gehenna-devsw-base:1.66 netbsd-1-5-PATCH003:1.57.2.1 eeh-devprop:1.63.0.6 eeh-devprop-base:1.63 newlock:1.63.0.4 newlock-base:1.63 ifpoll-base:1.63 thorpej-mips-cache:1.62.0.2 thorpej-mips-cache-base:1.62 thorpej-devvp-base3:1.62 thorpej-devvp-base2:1.62 post-chs-ubcperf:1.62 pre-chs-ubcperf:1.62 thorpej-devvp:1.61.0.4 thorpej-devvp-base:1.61 netbsd-1-5-PATCH002:1.57.2.1 kqueue:1.61.0.2 kqueue-base:1.74 netbsd-1-5-PATCH001:1.57.2.1 thorpej_scsipi_beforemerge:1.61 nathanw_sa:1.60.0.2 thorpej_scsipi_nbase:1.61 netbsd-1-5-RELEASE:1.57 netbsd-1-5-BETA2:1.57 netbsd-1-5-BETA:1.57 netbsd-1-4-PATCH003:1.46 netbsd-1-5-ALPHA2:1.57 netbsd-1-5:1.57.0.2 netbsd-1-5-base:1.57 minoura-xpg4dl-base:1.55 minoura-xpg4dl:1.55.0.2 netbsd-1-4-PATCH002:1.46 chs-ubc2-newbase:1.51 wrstuden-devbsize-19991221:1.47 wrstuden-devbsize:1.47.0.8 wrstuden-devbsize-base:1.47 kame_141_19991130:1.46 comdex-fall-1999:1.47.0.6 comdex-fall-1999-base:1.47 fvdl-softdep:1.47.0.4 fvdl-softdep-base:1.47 thorpej_scsipi:1.47.0.2 thorpej_scsipi_base:1.61 netbsd-1-4-PATCH001:1.46 kame_14_19990705:1.46 kame_14_19990628:1.46 kame:1.46.0.8 chs-ubc2:1.46.0.6 chs-ubc2-base:1.47 netbsd-1-4-RELEASE:1.46 netbsd-1-4:1.46.0.4 netbsd-1-4-base:1.46 netbsd-1-3-PATCH003:1.38 netbsd-1-3-PATCH003-CANDIDATE2:1.38 kenh-if-detach:1.46.0.2 kenh-if-detach-base:1.46 netbsd-1-3-PATCH003-CANDIDATE1:1.38 netbsd-1-3-PATCH003-CANDIDATE0:1.38 chs-ubc:1.45.0.2 chs-ubc-base:1.45 eeh-paddr_t:1.40.0.2 eeh-paddr_t-base:1.40 netbsd-1-3-PATCH002:1.38 lite-2:1.1.1.3 lite-1:1.1.1.2 CSRG:1.1.1 netbsd-1-3-PATCH001:1.38 netbsd-1-3-RELEASE:1.38 netbsd-1-3-BETA:1.38 netbsd-1-3:1.38.0.2 netbsd-1-3-base:1.38 thorpej-signal:1.35.0.6 thorpej-signal-base:1.35 marc-pcmcia:1.35.0.4 marc-pcmcia-bp:1.35 marc-pcmcia-base:1.38 bouyer-scsipi:1.35.0.2 is-newarp-before-merge:1.33 netbsd-1-2-PATCH001:1.27 mrg-vm-swap:1.32.0.6 is-newarp:1.32.0.4 is-newarp-base:1.32 thorpej-setroot:1.32.0.2 netbsd-1-2-RELEASE:1.27 netbsd-1-2-BETA:1.27 netbsd-1-2:1.27.0.4 netbsd-1-2-base:1.27 date-03-may-96:1.1.1.2 netbsd-1-1-PATCH001:1.23 netbsd-1-1-RELEASE:1.23 netbsd-1-1:1.23.0.2 netbsd-1-1-base:1.23 netbsd-1-0-PATCH06:1.14.2.1 netbsd-1-0-PATCH05:1.14.2.1 netbsd-1-0-PATCH04:1.14.2.1 netbsd-1-0-PATCH03:1.14.2.1 netbsd-1-0-PATCH02:1.14.2.1 netbsd-1-0-PATCH1:1.14.2.1 netbsd-1-0-PATCH0:1.14.2.1 netbsd-1-0-RELEASE:1.14.2.1 netbsd-1-0:1.14.0.2 netbsd-1-0-base:1.14 magnum-base:1.5 magnum:1.5.0.4 netbsd-0-9-patch-001:1.5 netbsd-0-9-RELEASE:1.5 netbsd-0-9-BETA:1.5 netbsd-0-9-ALPHA2:1.5 netbsd-0-9-ALPHA:1.5 netbsd-0-9:1.5.0.2 netbsd-0-9-base:1.5 netbsd-0-8:1.4 netbsd-alpha-1:1.4 patchkit-0-2-2:1.1.1.1 WFJ-386bsd-01:1.1.1.1 WFJ-920714:1.1.1; locks; strict; comment @ * @; 1.252 date 2023.07.31.17.41.18; author christos; state Exp; branches; next 1.251; commitid 0aRu2QCZUkKGQYyE; 1.251 date 2023.02.08.01.37.53; author gutteridge; state Exp; branches; next 1.250; commitid A9CYjlAYrdBN3FcE; 1.250 date 2023.02.07.01.46.37; author gutteridge; state Exp; branches; next 1.249; commitid CWPrhWpgqeq0axcE; 1.249 date 2022.11.30.06.02.37; author ozaki-r; state Exp; branches 1.249.2.1; next 1.248; commitid UM0txtVLE6bCNG3E; 1.248 date 2022.11.19.08.53.06; author yamt; state Exp; branches; next 1.247; commitid WcuJL6gnWy6Q5i2E; 1.247 date 2022.09.03.10.03.20; author riastradh; state Exp; branches; next 1.246; commitid eixuDW6ugFFyXoSD; 1.246 date 2022.03.15.13.00.44; author riastradh; state Exp; branches; next 1.245; commitid mUS5HSda79tctjwD; 1.245 date 2022.03.12.17.23.32; author riastradh; state Exp; branches; next 1.244; commitid C5vHR9knjxUk1XvD; 1.244 date 2022.03.12.16.19.08; author riastradh; state Exp; branches; next 1.243; commitid z28Tv7O6gWbfFWvD; 1.243 date 2021.09.26.01.16.10; author thorpej; state Exp; branches; next 1.242; commitid Tj2MFFOGodel0paD; 1.242 date 2021.09.16.22.19.11; author andvar; state Exp; branches; next 1.241; commitid 3w937jMP2y9ele9D; 1.241 date 2021.07.14.06.50.22; author yamaguchi; state Exp; branches; next 1.240; commitid 3vT6kce6xdTXfV0D; 1.240 date 2021.06.09.15.44.15; author martin; state Exp; branches; next 1.239; commitid 1qEEAxInDUGxktWC; 1.239 date 2020.12.18.01.31.49; author thorpej; state Exp; branches 1.239.4.1; next 1.238; commitid 2lM2ZFwM6irBbaAC; 1.238 date 2020.08.02.07.19.39; author maxv; state Exp; branches 1.238.2.1; next 1.237; commitid ijT1zux3RzqZxsiC; 1.237 date 2020.06.11.13.36.20; author roy; state Exp; branches; next 1.236; commitid 6LXtl6UVe60QgObC; 1.236 date 2020.03.16.21.20.11; author pgoyette; state Exp; branches; next 1.235; commitid 488MBUUFy7ksEF0C; 1.235 date 2020.02.07.12.35.33; author thorpej; state Exp; branches; next 1.234; commitid 94Ksk2mJncCBYJVB; 1.234 date 2020.02.01.02.54.02; author riastradh; state Exp; branches; next 1.233; commitid l5LVDl8J8b2zXUUB; 1.233 date 2020.01.19.05.07.22; author thorpej; state Exp; branches; next 1.232; commitid q9EYpnG8GFb97gTB; 1.232 date 2019.11.29.17.29.31; author ryo; state Exp; branches 1.232.2.1; next 1.231; commitid 2Niqs3h5FBWtRLMB; 1.231 date 2019.09.13.06.39.29; author maxv; state Exp; branches; next 1.230; commitid wB258JwPwyZNJOCB; 1.230 date 2019.09.12.07.38.19; author maxv; state Exp; branches; next 1.229; commitid QlTb1MAQbz306HCB; 1.229 date 2019.07.10.17.55.33; author maxv; state Exp; branches 1.229.2.1; next 1.228; commitid Z6leozMj5Fk4zwuB; 1.228 date 2018.09.03.16.29.35; author riastradh; state Exp; branches; next 1.227; commitid BTC4S53hMH8f3GQA; 1.227 date 2018.07.25.07.55.45; author msaitoh; state Exp; branches; next 1.226; commitid kGvLDBKgppP4vuLA; 1.226 date 2018.06.26.06.48.02; author msaitoh; state Exp; branches 1.226.2.1; next 1.225; commitid BGd0EgCdw1Br3LHA; 1.225 date 2018.06.25.03.22.14; author msaitoh; state Exp; branches; next 1.224; commitid EaBWQVNkI1k1XBHA; 1.224 date 2018.05.14.02.55.03; author ozaki-r; state Exp; branches; next 1.223; commitid lHQkFoOzprft9dCA; 1.223 date 2018.01.25.02.45.02; author ozaki-r; state Exp; branches 1.223.2.1; next 1.222; commitid qq3IzbsKTAxgBcoA; 1.222 date 2017.12.15.07.29.11; author ozaki-r; state Exp; branches; next 1.221; commitid RLMILNhfEw3suXiA; 1.221 date 2017.12.12.06.26.57; author ozaki-r; state Exp; branches; next 1.220; commitid X5JjGzSr1Lw5fziA; 1.220 date 2017.11.30.20.25.55; author christos; state Exp; branches; next 1.219; commitid M7sqSPDExvfIg6hA; 1.219 date 2017.11.17.07.37.12; author ozaki-r; state Exp; branches; next 1.218; commitid 1NQBDXAUtBTZqmfA; 1.218 date 2017.10.25.08.12.40; author maya; state Exp; branches; next 1.217; commitid BzkgGtlAzgiZlpcA; 1.217 date 2017.10.19.01.57.15; author ozaki-r; state Exp; branches; next 1.216; commitid r1lcC4xji3WauBbA; 1.216 date 2017.02.20.03.08.38; author ozaki-r; state Exp; branches 1.216.4.1 1.216.6.1; next 1.215; 1.215 date 2017.02.19.13.58.42; author christos; state Exp; branches; next 1.214; 1.214 date 2017.02.13.03.44.45; author ozaki-r; state Exp; branches; next 1.213; 1.213 date 2017.02.09.09.30.26; author ozaki-r; state Exp; branches; next 1.212; 1.212 date 2017.02.01.08.18.33; author ozaki-r; state Exp; branches; next 1.211; 1.211 date 2017.02.01.08.16.42; author ozaki-r; state Exp; branches; next 1.210; 1.210 date 2017.02.01.08.15.15; author ozaki-r; state Exp; branches; next 1.209; 1.209 date 2017.02.01.08.13.45; author ozaki-r; state Exp; branches; next 1.208; 1.208 date 2017.02.01.08.07.27; author ozaki-r; state Exp; branches; next 1.207; 1.207 date 2017.02.01.08.06.01; author ozaki-r; state Exp; branches; next 1.206; 1.206 date 2017.01.25.01.04.23; author ozaki-r; state Exp; branches; next 1.205; 1.205 date 2017.01.24.09.05.28; author ozaki-r; state Exp; branches; next 1.204; 1.204 date 2017.01.23.10.17.36; author ozaki-r; state Exp; branches; next 1.203; 1.203 date 2016.07.19.02.47.45; author pgoyette; state Exp; branches 1.203.2.1; next 1.202; 1.202 date 2016.07.17.02.49.52; author pgoyette; state Exp; branches; next 1.201; 1.201 date 2016.07.17.01.16.30; author pgoyette; state Exp; branches; next 1.200; 1.200 date 2016.07.17.01.03.46; author pgoyette; state Exp; branches; next 1.199; 1.199 date 2016.06.20.06.46.37; author knakahara; state Exp; branches 1.199.2.1; next 1.198; 1.198 date 2016.06.10.13.31.44; author ozaki-r; state Exp; branches; next 1.197; 1.197 date 2016.06.10.13.27.15; author ozaki-r; state Exp; branches; next 1.196; 1.196 date 2016.06.07.01.06.28; author pgoyette; state Exp; branches; next 1.195; 1.195 date 2016.02.09.08.32.12; author ozaki-r; state Exp; branches; next 1.194; 1.194 date 2016.02.01.16.32.28; author christos; state Exp; branches; next 1.193; 1.193 date 2015.12.16.23.14.42; author christos; state Exp; branches; next 1.192; 1.192 date 2015.10.14.19.40.09; author christos; state Exp; branches; next 1.191; 1.191 date 2015.05.30.19.14.46; author joerg; state Exp; branches; next 1.190; 1.190 date 2014.12.29.13.38.13; author ozaki-r; state Exp; branches; next 1.189; 1.189 date 2014.09.13.17.18.45; author rmind; state Exp; branches 1.189.2.1; next 1.188; 1.188 date 2014.09.05.09.22.22; author matt; state Exp; branches; next 1.187; 1.187 date 2014.08.07.03.40.21; author ozaki-r; state Exp; branches 1.187.2.1; next 1.186; 1.186 date 2014.07.28.07.32.46; author alnsn; state Exp; branches; next 1.185; 1.185 date 2014.07.25.08.10.40; author dholland; state Exp; branches; next 1.184; 1.184 date 2014.07.10.15.32.09; author christos; state Exp; branches; next 1.183; 1.183 date 2014.06.24.10.53.30; author alnsn; state Exp; branches; next 1.182; 1.182 date 2014.03.16.05.20.30; author dholland; state Exp; branches 1.182.2.1; next 1.181; 1.181 date 2014.02.25.18.30.12; author pooka; state Exp; branches; next 1.180; 1.180 date 2013.12.05.15.55.35; author christos; state Exp; branches; next 1.179; 1.179 date 2013.11.16.01.13.52; author rmind; state Exp; branches; next 1.178; 1.178 date 2013.11.15.00.12.44; author rmind; state Exp; branches; next 1.177; 1.177 date 2013.09.18.23.34.55; author rmind; state Exp; branches; next 1.176; 1.176 date 2013.09.09.20.53.51; author christos; state Exp; branches; next 1.175; 1.175 date 2013.08.30.15.00.08; author rmind; state Exp; branches; next 1.174; 1.174 date 2013.08.29.14.25.41; author rmind; state Exp; branches; next 1.173; 1.173 date 2012.10.27.22.36.14; author alnsn; state Exp; branches 1.173.2.1; next 1.172; 1.172 date 2012.09.27.18.28.56; author alnsn; state Exp; branches; next 1.171; 1.171 date 2012.08.15.20.59.51; author alnsn; state Exp; branches 1.171.2.1; next 1.170; 1.170 date 2012.08.02.00.40.51; author rmind; state Exp; branches; next 1.169; 1.169 date 2012.08.01.23.24.29; author rmind; state Exp; branches; next 1.168; 1.168 date 2011.12.16.03.05.23; author christos; state Exp; branches 1.168.2.1 1.168.6.1 1.168.8.1; next 1.167; 1.167 date 2011.12.15.22.20.26; author christos; state Exp; branches; next 1.166; 1.166 date 2011.08.30.14.22.22; author bouyer; state Exp; branches 1.166.2.1 1.166.6.1; next 1.165; 1.165 date 2011.06.10.00.10.35; author christos; state Exp; branches; next 1.164; 1.164 date 2011.03.30.21.34.08; author christos; state Exp; branches 1.164.2.1; next 1.163; 1.163 date 2011.03.30.18.04.27; author bouyer; state Exp; branches; next 1.162; 1.162 date 2011.01.22.19.12.58; author christos; state Exp; branches; next 1.161; 1.161 date 2011.01.22.16.54.48; author christos; state Exp; branches; next 1.160; 1.160 date 2011.01.02.21.03.45; author christos; state Exp; branches 1.160.2.1 1.160.4.1; next 1.159; 1.159 date 2010.12.08.17.10.13; author pooka; state Exp; branches; next 1.158; 1.158 date 2010.04.14.13.31.33; author pooka; state Exp; branches; next 1.157; 1.157 date 2010.04.05.07.22.22; author joerg; state Exp; branches; next 1.156; 1.156 date 2010.03.13.20.38.48; author christos; state Exp; branches 1.156.2.1; next 1.155; 1.155 date 2010.01.26.01.06.23; author pooka; state Exp; branches 1.155.2.1; next 1.154; 1.154 date 2010.01.25.22.18.17; author pooka; state Exp; branches; next 1.153; 1.153 date 2010.01.19.22.08.00; author pooka; state Exp; branches; next 1.152; 1.152 date 2010.01.17.19.45.06; author pooka; state Exp; branches; next 1.151; 1.151 date 2010.01.15.22.16.46; author pooka; state Exp; branches; next 1.150; 1.150 date 2009.12.20.09.36.06; author dsl; state Exp; branches; next 1.149; 1.149 date 2009.12.09.21.32.59; author dsl; state Exp; branches; next 1.148; 1.148 date 2009.11.23.02.13.48; author rmind; state Exp; branches; next 1.147; 1.147 date 2009.10.05.17.58.15; author christos; state Exp; branches; next 1.146; 1.146 date 2009.04.11.23.05.26; author christos; state Exp; branches; next 1.145; 1.145 date 2009.04.11.15.47.33; author christos; state Exp; branches; next 1.144; 1.144 date 2009.04.04.10.12.51; author ad; state Exp; branches; next 1.143; 1.143 date 2009.03.11.05.55.22; author mrg; state Exp; branches; next 1.142; 1.142 date 2009.01.11.02.45.54; author christos; state Exp; branches 1.142.2.1; next 1.141; 1.141 date 2008.06.15.16.37.21; author christos; state Exp; branches 1.141.4.1 1.141.6.1; next 1.140; 1.140 date 2008.05.21.13.48.52; author ad; state Exp; branches 1.140.2.1; next 1.139; 1.139 date 2008.04.24.15.35.30; author ad; state Exp; branches 1.139.2.1 1.139.4.1; next 1.138; 1.138 date 2008.04.20.15.27.10; author scw; state Exp; branches; next 1.137; 1.137 date 2008.03.26.02.21.52; author christos; state Exp; branches 1.137.2.1 1.137.4.1; next 1.136; 1.136 date 2008.03.24.12.24.37; author yamt; state Exp; branches; next 1.135; 1.135 date 2008.03.21.21.55.00; author ad; state Exp; branches; next 1.134; 1.134 date 2008.03.01.14.16.52; author rmind; state Exp; branches; next 1.133; 1.133 date 2008.02.20.17.05.52; author matt; state Exp; branches 1.133.2.1 1.133.6.1; next 1.132; 1.132 date 2007.12.20.18.13.26; author dyoung; state Exp; branches; next 1.131; 1.131 date 2007.12.05.17.20.00; author pooka; state Exp; branches 1.131.4.1; next 1.130; 1.130 date 2007.07.11.21.26.53; author xtraeme; state Exp; branches 1.130.6.1 1.130.8.1 1.130.14.1 1.130.16.1; next 1.129; 1.129 date 2007.07.09.21.10.59; author ad; state Exp; branches; next 1.128; 1.128 date 2007.05.30.21.02.03; author christos; state Exp; branches; next 1.127; 1.127 date 2007.05.29.21.32.29; author christos; state Exp; branches; next 1.126; 1.126 date 2007.03.04.06.03.14; author christos; state Exp; branches 1.126.2.1 1.126.4.1; next 1.125; 1.125 date 2006.11.16.01.33.40; author christos; state Exp; branches 1.125.4.1; next 1.124; 1.124 date 2006.10.25.20.28.45; author elad; state Exp; branches; next 1.123; 1.123 date 2006.10.12.01.32.27; author christos; state Exp; branches; next 1.122; 1.122 date 2006.08.28.00.09.28; author christos; state Exp; branches 1.122.2.1 1.122.4.1; next 1.121; 1.121 date 2006.08.04.23.18.53; author martin; state Exp; branches; next 1.120; 1.120 date 2006.07.26.13.54.13; author christos; state Exp; branches; next 1.119; 1.119 date 2006.07.23.22.06.12; author ad; state Exp; branches; next 1.118; 1.118 date 2006.06.27.10.45.09; author tron; state Exp; branches; next 1.117; 1.117 date 2006.05.14.21.19.33; author elad; state Exp; branches 1.117.4.1; next 1.116; 1.116 date 2006.05.10.21.53.18; author mrg; state Exp; branches; next 1.115; 1.115 date 2005.12.26.15.45.48; author rpaulo; state Exp; branches 1.115.4.1 1.115.6.1 1.115.8.1 1.115.10.1 1.115.12.1; next 1.114; 1.114 date 2005.12.24.20.45.09; author perry; state Exp; branches; next 1.113; 1.113 date 2005.12.14.22.46.52; author rpaulo; state Exp; branches; next 1.112; 1.112 date 2005.12.11.12.24.51; author christos; state Exp; branches; next 1.111; 1.111 date 2005.09.05.18.32.24; author rpaulo; state Exp; branches; next 1.110; 1.110 date 2005.08.04.19.30.47; author rpaulo; state Exp; branches; next 1.109; 1.109 date 2005.06.22.10.36.16; author peter; state Exp; branches 1.109.2.1; next 1.108; 1.108 date 2005.06.20.02.49.19; author atatat; state Exp; branches; next 1.107; 1.107 date 2005.02.26.22.45.09; author perry; state Exp; branches; next 1.106; 1.106 date 2005.02.12.23.14.03; author christos; state Exp; branches; next 1.105; 1.105 date 2004.11.30.04.28.43; author christos; state Exp; branches 1.105.4.1 1.105.6.1; next 1.104; 1.104 date 2004.08.19.20.58.23; author christos; state Exp; branches; next 1.103; 1.103 date 2004.08.19.18.33.24; author christos; state Exp; branches; next 1.102; 1.102 date 2004.08.05.03.58.58; author enami; state Exp; branches; next 1.101; 1.101 date 2004.06.06.04.35.53; author dyoung; state Exp; branches; next 1.100; 1.100 date 2004.05.29.14.18.33; author darrenr; state Exp; branches; next 1.99; 1.99 date 2004.05.29.08.56.19; author darrenr; state Exp; branches; next 1.98; 1.98 date 2004.05.25.04.33.59; author atatat; state Exp; branches; next 1.97; 1.97 date 2004.05.19.13.09.11; author darrenr; state Exp; branches; next 1.96; 1.96 date 2004.04.30.22.07.21; author dyoung; state Exp; branches; next 1.95; 1.95 date 2004.04.20.10.51.09; author darrenr; state Exp; branches; next 1.94; 1.94 date 2004.04.15.14.56.57; author darrenr; state Exp; branches; next 1.93; 1.93 date 2004.04.14.21.34.26; author darrenr; state Exp; branches; next 1.92; 1.92 date 2004.04.11.01.41.01; author darrenr; state Exp; branches; next 1.91; 1.91 date 2004.04.10.23.31.51; author darrenr; state Exp; branches; next 1.90; 1.90 date 2004.03.24.15.34.54; author atatat; state Exp; branches 1.90.2.1; next 1.89; 1.89 date 2004.01.22.00.32.41; author jonathan; state Exp; branches; next 1.88; 1.88 date 2004.01.21.23.59.12; author jonathan; state Exp; branches; next 1.87; 1.87 date 2004.01.21.22.15.16; author jonathan; state Exp; branches; next 1.86; 1.86 date 2003.09.22.13.00.01; author christos; state Exp; branches; next 1.85; 1.85 date 2003.09.21.19.17.13; author jdolecek; state Exp; branches; next 1.84; 1.84 date 2003.08.13.19.44.12; author wrstuden; state Exp; branches; next 1.83; 1.83 date 2003.08.07.16.32.47; author agc; state Exp; branches; next 1.82; 1.82 date 2003.06.29.22.31.49; author fvdl; state Exp; branches 1.82.2.1; next 1.81; 1.81 date 2003.06.28.17.33.02; author darrenr; state Exp; branches; next 1.80; 1.80 date 2003.06.28.14.22.06; author darrenr; state Exp; branches; next 1.79; 1.79 date 2003.06.19.06.25.41; author itojun; state Exp; branches; next 1.78; 1.78 date 2003.03.13.10.18.35; author dsl; state Exp; branches; next 1.77; 1.77 date 2003.02.26.06.31.12; author matt; state Exp; branches; next 1.76; 1.76 date 2002.11.26.18.51.18; author christos; state Exp; branches; next 1.75; 1.75 date 2002.10.23.09.14.41; author jdolecek; state Exp; branches; next 1.74; 1.74 date 2002.09.25.22.21.46; author thorpej; state Exp; branches; next 1.73; 1.73 date 2002.09.24.03.14.43; author itojun; state Exp; branches; next 1.72; 1.72 date 2002.09.19.03.04.32; author atatat; state Exp; branches; next 1.71; 1.71 date 2002.09.19.01.16.58; author darrenr; state Exp; branches; next 1.70; 1.70 date 2002.09.19.00.34.00; author darrenr; state Exp; branches; next 1.69; 1.69 date 2002.09.15.23.44.12; author thorpej; state Exp; branches; next 1.68; 1.68 date 2002.09.11.05.36.26; author itojun; state Exp; branches; next 1.67; 1.67 date 2002.09.06.13.18.43; author gehenna; state Exp; branches; next 1.66; 1.66 date 2002.08.28.09.34.57; author onoe; state Exp; branches; next 1.65; 1.65 date 2002.06.06.23.54.47; author wrstuden; state Exp; branches; next 1.64; 1.64 date 2002.03.23.15.55.21; author darrenr; state Exp; branches 1.64.2.1; next 1.63; 1.63 date 2001.11.12.23.49.33; author lukem; state Exp; branches; next 1.62; 1.62 date 2001.09.10.23.11.06; author bjh21; state Exp; branches; next 1.61; 1.61 date 2001.04.13.23.30.11; author thorpej; state Exp; branches 1.61.2.1 1.61.4.1; next 1.60; 1.60 date 2000.12.29.01.55.49; author thorpej; state Exp; branches 1.60.2.1; next 1.59; 1.59 date 2000.12.12.17.55.21; author thorpej; state Exp; branches; next 1.58; 1.58 date 2000.07.04.18.46.49; author thorpej; state Exp; branches; next 1.57; 1.57 date 2000.05.28.18.17.09; author jhawk; state Exp; branches 1.57.2.1; next 1.56; 1.56 date 2000.05.28.02.49.35; author matt; state Exp; branches; next 1.55; 1.55 date 2000.05.12.05.58.01; author jonathan; state Exp; branches 1.55.2.1; next 1.54; 1.54 date 2000.04.12.04.20.47; author chs; state Exp; branches; next 1.53; 1.53 date 2000.03.30.09.45.33; author augustss; state Exp; branches; next 1.52; 1.52 date 2000.03.13.23.52.39; author soren; state Exp; branches; next 1.51; 1.51 date 2000.02.02.09.03.41; author enami; state Exp; branches; next 1.50; 1.50 date 2000.02.02.08.36.02; author enami; state Exp; branches; next 1.49; 1.49 date 2000.02.02.07.45.13; author enami; state Exp; branches; next 1.48; 1.48 date 2000.01.31.23.06.12; author thorpej; state Exp; branches; next 1.47; 1.47 date 99.05.11.02.11.08; author thorpej; state Exp; branches 1.47.2.1; next 1.46; 1.46 date 98.12.04.11.04.37; author bouyer; state Exp; branches 1.46.2.1 1.46.6.1; next 1.45; 1.45 date 98.11.05.22.50.15; author jonathan; state Exp; branches; next 1.44; 1.44 date 98.08.18.06.32.13; author thorpej; state Exp; branches; next 1.43; 1.43 date 98.08.06.04.37.57; author perry; state Exp; branches; next 1.42; 1.42 date 98.08.06.04.25.55; author perry; state Exp; branches; next 1.41; 1.41 date 98.08.06.04.24.25; author perry; state Exp; branches; next 1.40; 1.40 date 98.04.30.00.08.19; author thorpej; state Exp; branches; next 1.39; 1.39 date 98.03.01.02.25.04; author fvdl; state Exp; branches; next 1.38; 1.38 date 97.10.12.16.35.10; author mycroft; state Exp; branches; next 1.37; 1.37 date 97.10.09.18.58.08; author christos; state Exp; branches; next 1.36; 1.36 date 97.10.09.18.17.19; author christos; state Exp; branches; next 1.35; 1.35 date 97.03.17.06.45.20; author scottr; state Exp; branches 1.35.4.1; next 1.34; 1.34 date 97.03.15.18.12.18; author is; state Exp; branches; next 1.33; 1.33 date 97.02.21.23.59.35; author thorpej; state Exp; branches; next 1.32; 1.32 date 96.10.13.02.10.56; author christos; state Exp; branches 1.32.4.1; next 1.31; 1.31 date 96.10.10.22.59.41; author christos; state Exp; branches; next 1.30; 1.30 date 96.09.07.12.41.25; author mycroft; state Exp; branches; next 1.29; 1.29 date 96.06.14.22.21.54; author cgd; state Exp; branches; next 1.28; 1.28 date 96.05.22.13.41.54; author mycroft; state Exp; branches; next 1.27; 1.27 date 96.05.07.05.26.02; author thorpej; state Exp; branches; next 1.26; 1.26 date 96.05.07.02.40.22; author thorpej; state Exp; branches; next 1.25; 1.25 date 96.03.30.21.57.30; author christos; state Exp; branches; next 1.24; 1.24 date 96.02.13.21.59.53; author christos; state Exp; branches; next 1.23; 1.23 date 95.09.27.18.30.37; author thorpej; state Exp; branches; next 1.22; 1.22 date 95.08.13.04.15.38; author mycroft; state Exp; branches; next 1.21; 1.21 date 95.08.12.23.59.17; author mycroft; state Exp; branches; next 1.20; 1.20 date 95.07.23.16.29.47; author mycroft; state Exp; branches; next 1.19; 1.19 date 95.04.22.13.26.20; author cgd; state Exp; branches; next 1.18; 1.18 date 95.03.22.16.08.32; author mycroft; state Exp; branches; next 1.17; 1.17 date 95.02.23.07.19.49; author glass; state Exp; branches; next 1.16; 1.16 date 94.10.30.21.48.43; author cgd; state Exp; branches; next 1.15; 1.15 date 94.07.15.22.29.32; author cgd; state Exp; branches; next 1.14; 1.14 date 94.06.29.21.23.15; author cgd; state Exp; branches 1.14.2.1; next 1.13; 1.13 date 94.06.29.06.35.52; author cgd; state Exp; branches; next 1.12; 1.12 date 94.05.13.06.02.14; author mycroft; state Exp; branches; next 1.11; 1.11 date 94.01.25.06.10.08; author deraadt; state Exp; branches; next 1.10; 1.10 date 94.01.12.02.45.11; author mycroft; state Exp; branches; next 1.9; 1.9 date 94.01.12.00.38.50; author deraadt; state Exp; branches; next 1.8; 1.8 date 93.12.18.00.40.49; author mycroft; state Exp; branches; next 1.7; 1.7 date 93.11.23.04.51.25; author cgd; state Exp; branches; next 1.6; 1.6 date 93.11.15.09.56.46; author deraadt; state Exp; branches; next 1.5; 1.5 date 93.05.18.18.19.50; author cgd; state Exp; branches 1.5.4.1; next 1.4; 1.4 date 93.04.09.11.02.51; author glass; state Exp; branches; next 1.3; 1.3 date 93.04.05.22.04.09; author deraadt; state Exp; branches; next 1.2; 1.2 date 93.03.25.00.27.49; author cgd; state Exp; branches; next 1.1; 1.1 date 93.03.21.09.45.37; author cgd; state Exp; branches 1.1.1.1; next ; 1.249.2.1 date 2023.02.24.13.10.53; author martin; state Exp; branches; next ; commitid KfCBf0dxSo19pMeE; 1.239.4.1 date 2021.06.17.04.46.34; author thorpej; state Exp; branches; next 1.239.4.2; commitid d7CrUzY34skBrrXC; 1.239.4.2 date 2021.08.01.22.42.41; author thorpej; state Exp; branches; next ; commitid NihqK3haIgTUWj3D; 1.238.2.1 date 2021.01.03.16.35.04; author thorpej; state Exp; branches; next ; commitid hSJGvbJZNH5wFiCC; 1.232.2.1 date 2020.01.25.22.38.51; author ad; state Exp; branches; next 1.232.2.2; commitid ethRERRmx7bMJ7UB; 1.232.2.2 date 2020.02.29.20.21.06; author ad; state Exp; branches; next ; commitid OjSb8ro7YQETQBYB; 1.229.2.1 date 2019.10.16.09.46.55; author martin; state Exp; branches; next 1.229.2.2; commitid 63hblZLmbUFsI4HB; 1.229.2.2 date 2023.02.22.19.50.33; author martin; state Exp; branches; next 1.229.2.3; commitid cBCCpJ0xY6neGyeE; 1.229.2.3 date 2023.08.04.14.57.52; author martin; state Exp; branches; next ; commitid Ylyu8zQTNiTzOtzE; 1.226.2.1 date 2019.06.10.22.09.45; author christos; state Exp; branches; next 1.226.2.2; commitid jtc8rnCzWiEEHGqB; 1.226.2.2 date 2020.04.08.14.08.57; author martin; state Exp; branches; next 1.226.2.3; commitid Qli2aW9E74UFuA3C; 1.226.2.3 date 2020.04.13.08.05.15; author martin; state Exp; branches; next ; commitid X01YhRUPVUDaec4C; 1.223.2.1 date 2018.05.21.04.36.15; author pgoyette; state Exp; branches; next 1.223.2.2; commitid X5L8kSrBWQcDt7DA; 1.223.2.2 date 2018.06.25.07.26.06; author pgoyette; state Exp; branches; next 1.223.2.3; commitid 8PtAu9af7VvhiDHA; 1.223.2.3 date 2018.07.28.04.38.09; author pgoyette; state Exp; branches; next 1.223.2.4; commitid 1UP1xAIUxv1ZgRLA; 1.223.2.4 date 2018.09.06.06.56.44; author pgoyette; state Exp; branches; next ; commitid HCi1bXD317XIK0RA; 1.216.4.1 date 2017.04.27.05.36.38; author pgoyette; state Exp; branches; next 1.216.4.2; 1.216.4.2 date 2017.04.29.11.12.15; author pgoyette; state Exp; branches; next ; 1.216.6.1 date 2017.10.25.07.14.09; author snj; state Exp; branches; next 1.216.6.2; commitid odBJTdevwSqR2pcA; 1.216.6.2 date 2017.12.21.21.38.23; author snj; state Exp; branches; next 1.216.6.3; commitid sA4JExHASBsFZNjA; 1.216.6.3 date 2017.12.21.21.51.37; author snj; state Exp; branches; next 1.216.6.4; commitid D07Y1jumFQ4l4OjA; 1.216.6.4 date 2018.01.02.10.20.33; author snj; state Exp; branches; next 1.216.6.5; commitid 07oy8c4rjfdaRhlA; 1.216.6.5 date 2018.02.05.14.18.00; author martin; state Exp; branches; next 1.216.6.6; commitid 0TA1BHZFEWB45GpA; 1.216.6.6 date 2018.05.15.13.48.37; author martin; state Exp; branches; next 1.216.6.7; commitid 3CFxkgCi1y5HJoCA; 1.216.6.7 date 2019.08.04.11.19.03; author martin; state Exp; branches; next 1.216.6.8; commitid lu2doCwCp7fyzHxB; 1.216.6.8 date 2023.02.22.19.51.47; author martin; state Exp; branches; next 1.216.6.9; commitid uLhv1XKqwvgFGyeE; 1.216.6.9 date 2023.08.04.15.00.28; author martin; state Exp; branches; next ; commitid tlDgfSGTba3QPtzE; 1.203.2.1 date 2017.04.21.16.54.04; author bouyer; state Exp; branches; next ; 1.199.2.1 date 2016.07.17.05.05.10; author pgoyette; state Exp; branches; next 1.199.2.2; 1.199.2.2 date 2016.07.19.06.27.00; author pgoyette; state Exp; branches; next 1.199.2.3; 1.199.2.3 date 2016.07.26.05.54.40; author pgoyette; state Exp; branches; next 1.199.2.4; 1.199.2.4 date 2017.03.20.06.57.49; author pgoyette; state Exp; branches; next ; 1.189.2.1 date 2015.04.06.15.18.22; author skrll; state Exp; branches; next 1.189.2.2; 1.189.2.2 date 2015.06.06.14.40.25; author skrll; state Exp; branches; next 1.189.2.3; 1.189.2.3 date 2015.12.27.12.10.06; author skrll; state Exp; branches; next 1.189.2.4; 1.189.2.4 date 2016.03.19.11.30.32; author skrll; state Exp; branches; next 1.189.2.5; 1.189.2.5 date 2016.07.09.20.25.21; author skrll; state Exp; branches; next 1.189.2.6; 1.189.2.6 date 2016.10.05.20.56.08; author skrll; state Exp; branches; next 1.189.2.7; 1.189.2.7 date 2017.02.05.13.40.57; author skrll; state Exp; branches; next 1.189.2.8; 1.189.2.8 date 2017.08.28.17.53.11; author skrll; state Exp; branches; next ; commitid UQQpnjvcNkUZn05A; 1.187.2.1 date 2014.09.21.18.41.39; author snj; state Exp; branches; next ; 1.182.2.1 date 2014.08.10.06.56.15; author tls; state Exp; branches; next ; 1.173.2.1 date 2014.05.18.17.46.12; author rmind; state Exp; branches; next ; 1.171.2.1 date 2012.11.20.03.02.46; author tls; state Exp; branches; next 1.171.2.2; 1.171.2.2 date 2014.08.20.00.04.34; author tls; state Exp; branches; next 1.171.2.3; 1.171.2.3 date 2017.12.03.11.39.02; author jdolecek; state Exp; branches; next ; commitid XcIYRZTAh1LmerhA; 1.168.2.1 date 2013.09.11.03.54.35; author msaitoh; state Exp; branches; next ; 1.168.6.1 date 2013.09.11.04.01.10; author msaitoh; state Exp; branches; next ; 1.168.8.1 date 2013.09.11.04.00.54; author msaitoh; state Exp; branches; next ; 1.166.2.1 date 2012.04.17.00.08.37; author yamt; state Exp; branches; next 1.166.2.2; 1.166.2.2 date 2012.10.30.17.22.42; author yamt; state Exp; branches; next 1.166.2.3; 1.166.2.3 date 2014.05.22.11.41.08; author yamt; state Exp; branches; next ; 1.166.6.1 date 2012.02.18.07.35.37; author mrg; state Exp; branches; next ; 1.164.2.1 date 2011.06.23.14.20.25; author cherry; state Exp; branches; next ; 1.160.2.1 date 2011.06.06.09.09.52; author jruoho; state Exp; branches; next ; 1.160.4.1 date 2011.02.08.16.20.01; author bouyer; state Exp; branches; next ; 1.156.2.1 date 2010.05.30.05.18.00; author rmind; state Exp; branches; next 1.156.2.2; 1.156.2.2 date 2011.03.05.20.55.50; author rmind; state Exp; branches; next 1.156.2.3; 1.156.2.3 date 2011.04.21.01.42.13; author rmind; state Exp; branches; next 1.156.2.4; 1.156.2.4 date 2011.06.12.00.24.30; author rmind; state Exp; branches; next ; 1.155.2.1 date 2010.04.30.14.44.18; author uebayasi; state Exp; branches; next ; 1.142.2.1 date 2009.05.13.17.22.19; author jym; state Exp; branches; next ; 1.141.4.1 date 2009.01.19.13.20.11; author skrll; state Exp; branches; next 1.141.4.2; 1.141.4.2 date 2009.04.28.07.37.16; author skrll; state Exp; branches; next ; 1.141.6.1 date 2009.04.04.23.36.28; author snj; state Exp; branches 1.141.6.1.6.1; next 1.141.6.2; 1.141.6.2 date 2011.04.05.06.10.50; author riz; state Exp; branches 1.141.6.2.2.1; next 1.141.6.3; 1.141.6.3 date 2013.09.11.07.02.46; author msaitoh; state Exp; branches; next ; 1.141.6.1.6.1 date 2013.09.11.07.31.20; author msaitoh; state Exp; branches; next ; 1.141.6.2.2.1 date 2013.09.11.07.04.32; author msaitoh; state Exp; branches; next ; 1.140.2.1 date 2008.06.18.16.33.50; author simonb; state Exp; branches; next ; 1.139.2.1 date 2009.05.04.08.14.14; author yamt; state Exp; branches; next 1.139.2.2; 1.139.2.2 date 2010.03.11.15.04.26; author yamt; state Exp; branches; next 1.139.2.3; 1.139.2.3 date 2010.08.11.22.54.53; author yamt; state Exp; branches; next ; 1.139.4.1 date 2008.06.23.04.31.57; author wrstuden; state Exp; branches; next ; 1.137.2.1 date 2008.03.29.20.47.01; author christos; state Exp; branches; next 1.137.2.2; 1.137.2.2 date 2008.11.01.21.22.28; author christos; state Exp; branches; next 1.137.2.3; 1.137.2.3 date 2008.12.28.20.53.44; author christos; state Exp; branches; next ; 1.137.4.1 date 2008.05.18.12.35.26; author yamt; state Exp; branches; next 1.137.4.2; 1.137.4.2 date 2008.06.04.02.05.47; author yamt; state Exp; branches; next 1.137.4.3; 1.137.4.3 date 2008.06.17.09.15.12; author yamt; state Exp; branches; next ; 1.133.2.1 date 2008.03.24.07.16.23; author keiichi; state Exp; branches; next ; 1.133.6.1 date 2008.03.29.16.17.58; author mjf; state Exp; branches; next 1.133.6.2; 1.133.6.2 date 2008.04.03.12.43.07; author mjf; state Exp; branches; next 1.133.6.3; 1.133.6.3 date 2008.06.02.13.24.21; author mjf; state Exp; branches; next 1.133.6.4; 1.133.6.4 date 2008.06.29.09.33.18; author mjf; state Exp; branches; next 1.133.6.5; 1.133.6.5 date 2009.01.17.13.29.30; author mjf; state Exp; branches; next ; 1.131.4.1 date 2008.01.02.21.56.59; author bouyer; state Exp; branches; next ; 1.130.6.1 date 2007.12.09.19.38.33; author jmcneill; state Exp; branches; next ; 1.130.8.1 date 2008.01.09.01.57.07; author matt; state Exp; branches; next 1.130.8.2; 1.130.8.2 date 2008.03.23.02.05.04; author matt; state Exp; branches; next ; 1.130.14.1 date 2007.12.08.18.21.05; author mjf; state Exp; branches; next 1.130.14.2; 1.130.14.2 date 2007.12.27.00.46.26; author mjf; state Exp; branches; next ; 1.130.16.1 date 2007.12.08.17.57.52; author ad; state Exp; branches; next 1.130.16.2; 1.130.16.2 date 2007.12.26.21.39.50; author ad; state Exp; branches; next ; 1.126.2.1 date 2007.04.10.00.22.12; author ad; state Exp; branches; next 1.126.2.2; 1.126.2.2 date 2007.06.09.23.58.09; author ad; state Exp; branches; next 1.126.2.3; 1.126.2.3 date 2007.07.01.21.50.41; author ad; state Exp; branches; next 1.126.2.4; 1.126.2.4 date 2007.07.15.13.27.52; author ad; state Exp; branches; next 1.126.2.5; 1.126.2.5 date 2007.07.15.15.52.58; author ad; state Exp; branches; next ; 1.126.4.1 date 2007.07.11.20.10.51; author mjf; state Exp; branches; next ; 1.125.4.1 date 2007.03.12.05.59.09; author rmind; state Exp; branches; next ; 1.122.2.1 date 2006.11.18.21.39.29; author ad; state Exp; branches; next ; 1.122.4.1 date 2006.10.22.06.07.24; author yamt; state Exp; branches; next 1.122.4.2; 1.122.4.2 date 2006.12.10.07.19.00; author yamt; state Exp; branches; next ; 1.117.4.1 date 2006.07.13.17.49.57; author gdamore; state Exp; branches; next ; 1.115.4.1 date 2006.09.09.02.58.06; author rpaulo; state Exp; branches; next ; 1.115.6.1 date 2006.06.01.22.38.36; author kardel; state Exp; branches; next ; 1.115.8.1 date 2006.05.24.10.58.56; author yamt; state Exp; branches; next 1.115.8.2; 1.115.8.2 date 2006.08.11.15.46.14; author yamt; state Exp; branches; next 1.115.8.3; 1.115.8.3 date 2006.09.03.15.25.35; author yamt; state Exp; branches; next ; 1.115.10.1 date 2006.03.08.01.11.55; author elad; state Exp; branches; next 1.115.10.2; 1.115.10.2 date 2006.03.10.15.05.22; author elad; state Exp; branches; next 1.115.10.3; 1.115.10.3 date 2006.05.06.23.31.58; author christos; state Exp; branches; next 1.115.10.4; 1.115.10.4 date 2006.05.11.23.31.08; author elad; state Exp; branches; next ; 1.115.12.1 date 2006.05.24.15.50.43; author tron; state Exp; branches; next ; 1.109.2.1 date 2005.07.07.12.03.16; author yamt; state Exp; branches; next 1.109.2.2; 1.109.2.2 date 2006.06.21.15.10.26; author yamt; state Exp; branches; next 1.109.2.3; 1.109.2.3 date 2006.12.30.20.50.20; author yamt; state Exp; branches; next 1.109.2.4; 1.109.2.4 date 2007.09.03.14.42.00; author yamt; state Exp; branches; next 1.109.2.5; 1.109.2.5 date 2007.12.07.17.34.14; author yamt; state Exp; branches; next 1.109.2.6; 1.109.2.6 date 2008.01.21.09.46.59; author yamt; state Exp; branches; next 1.109.2.7; 1.109.2.7 date 2008.02.27.08.37.00; author yamt; state Exp; branches; next 1.109.2.8; 1.109.2.8 date 2008.03.17.09.15.41; author yamt; state Exp; branches; next 1.109.2.9; 1.109.2.9 date 2008.03.24.09.39.09; author yamt; state Exp; branches; next ; 1.105.4.1 date 2005.04.29.11.29.31; author kent; state Exp; branches; next ; 1.105.6.1 date 2005.03.19.08.36.31; author yamt; state Exp; branches; next ; 1.90.2.1 date 2004.04.21.03.56.14; author jmc; state Exp; branches; next 1.90.2.2; 1.90.2.2 date 2004.05.28.07.24.55; author tron; state Exp; branches; next ; 1.82.2.1 date 2003.07.02.15.26.55; author darrenr; state Exp; branches; next 1.82.2.2; 1.82.2.2 date 2004.08.03.10.54.11; author skrll; state Exp; branches; next 1.82.2.3; 1.82.2.3 date 2004.08.12.11.42.20; author skrll; state Exp; branches; next 1.82.2.4; 1.82.2.4 date 2004.08.25.06.58.58; author skrll; state Exp; branches; next 1.82.2.5; 1.82.2.5 date 2004.09.18.14.54.15; author skrll; state Exp; branches; next 1.82.2.6; 1.82.2.6 date 2004.09.21.13.36.35; author skrll; state Exp; branches; next 1.82.2.7; 1.82.2.7 date 2004.12.18.09.32.50; author skrll; state Exp; branches; next 1.82.2.8; 1.82.2.8 date 2005.02.15.21.33.29; author skrll; state Exp; branches; next 1.82.2.9; 1.82.2.9 date 2005.03.04.16.52.56; author skrll; state Exp; branches; next 1.82.2.10; 1.82.2.10 date 2005.11.10.14.10.32; author skrll; state Exp; branches; next ; 1.64.2.1 date 2002.05.16.03.58.47; author gehenna; state Exp; branches; next 1.64.2.2; 1.64.2.2 date 2002.06.20.15.52.04; author gehenna; state Exp; branches; next 1.64.2.3; 1.64.2.3 date 2002.08.29.00.56.40; author gehenna; state Exp; branches; next ; 1.61.2.1 date 2001.09.08.03.15.37; author thorpej; state Exp; branches; next 1.61.2.2; 1.61.2.2 date 2001.09.13.01.16.21; author thorpej; state Exp; branches; next 1.61.2.3; 1.61.2.3 date 2002.01.10.20.01.56; author thorpej; state Exp; branches; next 1.61.2.4; 1.61.2.4 date 2002.06.23.17.50.20; author jdolecek; state Exp; branches; next 1.61.2.5; 1.61.2.5 date 2002.09.06.08.48.46; author jdolecek; state Exp; branches; next 1.61.2.6; 1.61.2.6 date 2002.10.02.22.02.30; author jdolecek; state Exp; branches; next 1.61.2.7; 1.61.2.7 date 2002.10.10.18.43.38; author jdolecek; state Exp; branches; next ; 1.61.4.1 date 2001.09.07.04.45.41; author thorpej; state Exp; branches; next 1.61.4.2; 1.61.4.2 date 2001.09.26.15.28.25; author fvdl; state Exp; branches; next 1.61.4.3; 1.61.4.3 date 2001.10.01.12.47.31; author fvdl; state Exp; branches; next ; 1.60.2.1 date 2001.06.21.20.07.53; author nathanw; state Exp; branches; next 1.60.2.2; 1.60.2.2 date 2001.09.21.22.36.43; author nathanw; state Exp; branches; next 1.60.2.3; 1.60.2.3 date 2001.11.14.19.17.18; author nathanw; state Exp; branches; next 1.60.2.4; 1.60.2.4 date 2002.04.01.07.48.18; author nathanw; state Exp; branches; next 1.60.2.5; 1.60.2.5 date 2002.06.20.03.48.07; author nathanw; state Exp; branches; next 1.60.2.6; 1.60.2.6 date 2002.09.17.21.22.44; author nathanw; state Exp; branches; next 1.60.2.7; 1.60.2.7 date 2002.10.18.02.45.09; author nathanw; state Exp; branches; next 1.60.2.8; 1.60.2.8 date 2002.11.11.22.14.54; author nathanw; state Exp; branches; next 1.60.2.9; 1.60.2.9 date 2002.12.11.06.46.31; author thorpej; state Exp; branches; next ; 1.57.2.1 date 2001.01.25.16.29.56; author jhawk; state Exp; branches; next ; 1.55.2.1 date 2000.06.22.17.09.40; author minoura; state Exp; branches; next ; 1.47.2.1 date 2000.11.20.18.09.55; author bouyer; state Exp; branches; next 1.47.2.2; 1.47.2.2 date 2000.12.13.15.50.27; author bouyer; state Exp; branches; next 1.47.2.3; 1.47.2.3 date 2001.01.05.17.36.48; author bouyer; state Exp; branches; next 1.47.2.4; 1.47.2.4 date 2001.04.21.17.46.36; author bouyer; state Exp; branches; next ; 1.46.2.1 date 98.12.11.04.53.04; author kenh; state Exp; branches; next ; 1.46.6.1 date 99.06.21.01.27.31; author thorpej; state Exp; branches; next ; 1.35.4.1 date 97.10.14.10.28.53; author thorpej; state Exp; branches; next ; 1.32.4.1 date 97.02.07.18.06.53; author is; state Exp; branches; next 1.32.4.2; 1.32.4.2 date 97.03.09.20.58.59; author is; state Exp; branches; next 1.32.4.3; 1.32.4.3 date 97.03.12.15.56.55; author is; state Exp; branches; next ; 1.14.2.1 date 94.07.15.22.32.27; author cgd; state Exp; branches; next ; 1.5.4.1 date 93.09.24.08.53.51; author mycroft; state Exp; branches; next 1.5.4.2; 1.5.4.2 date 93.10.09.09.53.23; author mycroft; state Exp; branches; next 1.5.4.3; 1.5.4.3 date 93.11.23.04.52.03; author cgd; state Exp; branches; next 1.5.4.4; 1.5.4.4 date 93.11.27.19.43.01; author mycroft; state Exp; branches; next 1.5.4.5; 1.5.4.5 date 93.12.03.03.35.05; author mycroft; state Exp; branches; next ; 1.1.1.1 date 93.03.21.09.45.37; author cgd; state Exp; branches; next 1.1.1.2; 1.1.1.2 date 98.03.01.02.10.05; author fvdl; state Exp; branches; next 1.1.1.3; 1.1.1.3 date 98.03.01.02.13.22; author fvdl; state Exp; branches; next ; desc @@ 1.252 log @Don't call versioned stuff "old". Follow the naming convention for versioning and name them after the last version of the OS they appeared on. @ text @/* $NetBSD: bpf.c,v 1.251 2023/02/08 01:37:53 gutteridge Exp $ */ /* * Copyright (c) 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from the Stanford/CMU enet packet filter, * (net/enet.c) distributed as part of 4.3BSD, and code contributed * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence * Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @@(#)bpf.c 8.4 (Berkeley) 1/9/95 * static char rcsid[] = * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp "; */ #include __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.251 2023/02/08 01:37:53 gutteridge Exp $"); #if defined(_KERNEL_OPT) #include "opt_bpf.h" #include "sl.h" #include "opt_net_mpsafe.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef BPF_BUFSIZE /* * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k). */ # define BPF_BUFSIZE 32768 #endif #define PRINET 26 /* interruptible */ /* * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able. * XXX the default values should be computed dynamically based * on available memory size and available mbuf clusters. */ static int bpf_bufsize = BPF_BUFSIZE; static int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ static bool bpf_jit = false; struct bpfjit_ops bpfjit_module_ops = { .bj_generate_code = NULL, .bj_free_code = NULL }; /* * Global BPF statistics returned by net.bpf.stats sysctl. */ static struct percpu *bpf_gstats_percpu; /* struct bpf_stat */ #define BPF_STATINC(id) \ { \ struct bpf_stat *__stats = \ percpu_getref(bpf_gstats_percpu); \ __stats->bs_##id++; \ percpu_putref(bpf_gstats_percpu); \ } /* * Locking notes: * - bpf_mtx (adaptive mutex) protects: * - Gobal lists: bpf_iflist and bpf_dlist * - struct bpf_if * - bpf_close * - bpf_psz (pserialize) * - struct bpf_d has two mutexes: * - bd_buf_mtx (spin mutex) protects the buffers that can be accessed * on packet tapping * - bd_mtx (adaptive mutex) protects member variables other than the buffers * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is * never freed because struct bpf_d is only freed in bpf_close and * bpf_close never be called while executing bpf_read and bpf_write * - A filter that is assigned to bpf_d can be replaced with another filter * while tapping packets, so it needs to be done atomically * - struct bpf_d is iterated on bpf_dlist with psz * - struct bpf_if is iterated on bpf_iflist with psz or psref */ /* * Use a mutex to avoid a race condition between gathering the stats/peers * and opening/closing the device. */ static kmutex_t bpf_mtx; static struct psref_class *bpf_psref_class __read_mostly; static pserialize_t bpf_psz; static inline void bpf_if_acquire(struct bpf_if *bp, struct psref *psref) { psref_acquire(psref, &bp->bif_psref, bpf_psref_class); } static inline void bpf_if_release(struct bpf_if *bp, struct psref *psref) { psref_release(psref, &bp->bif_psref, bpf_psref_class); } /* * bpf_iflist is the list of interfaces; each corresponds to an ifnet * bpf_dtab holds the descriptors, indexed by minor device # */ static struct pslist_head bpf_iflist; static struct pslist_head bpf_dlist; /* Macros for bpf_d on bpf_dlist */ #define BPF_DLIST_WRITER_INSERT_HEAD(__d) \ PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry) #define BPF_DLIST_READER_FOREACH(__d) \ PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_FOREACH(__d) \ PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry) /* Macros for bpf_if on bpf_iflist */ #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp) \ PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry) #define BPF_IFLIST_READER_FOREACH(__bp) \ PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_FOREACH(__bp) \ PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_REMOVE(__bp) \ PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_INIT(__bp) \ PSLIST_ENTRY_INIT((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_DESTROY(__bp) \ PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry) /* Macros for bpf_d on bpf_if#bif_dlist_pslist */ #define BPFIF_DLIST_READER_FOREACH(__d, __bp) \ PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d) \ PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d), \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_READER_EMPTY(__bp) \ (PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_WRITER_EMPTY(__bp) \ (PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry) static int bpf_allocbufs(struct bpf_d *); static u_int bpf_xfilter(struct bpf_filter **, void *, u_int, u_int); static void bpf_deliver(struct bpf_if *, void *(*cpfn)(void *, const void *, size_t), void *, u_int, u_int, const u_int); static void bpf_freed(struct bpf_d *); static void bpf_free_filter(struct bpf_filter *); static void bpf_ifname(struct ifnet *, struct ifreq *); static void *bpf_mcpy(void *, const void *, size_t); static int bpf_movein(struct ifnet *, struct uio *, int, uint64_t, struct mbuf **, struct sockaddr *, struct bpf_filter **); static void bpf_attachd(struct bpf_d *, struct bpf_if *); static void bpf_detachd(struct bpf_d *); static int bpf_setif(struct bpf_d *, struct ifreq *); static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long); static void bpf_timed_out(void *); static inline void bpf_wakeup(struct bpf_d *); static int bpf_hdrlen(struct bpf_d *); static void catchpacket(struct bpf_d *, u_char *, u_int, u_int, void *(*)(void *, const void *, size_t), struct timespec *); static void reset_d(struct bpf_d *); static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); static int bpf_setdlt(struct bpf_d *, u_int); static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, int); static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, int); static int bpf_ioctl(struct file *, u_long, void *); static int bpf_poll(struct file *, int); static int bpf_stat(struct file *, struct stat *); static int bpf_close(struct file *); static int bpf_kqfilter(struct file *, struct knote *); static const struct fileops bpf_fileops = { .fo_name = "bpf", .fo_read = bpf_read, .fo_write = bpf_write, .fo_ioctl = bpf_ioctl, .fo_fcntl = fnullop_fcntl, .fo_poll = bpf_poll, .fo_stat = bpf_stat, .fo_close = bpf_close, .fo_kqfilter = bpf_kqfilter, .fo_restart = fnullop_restart, }; dev_type_open(bpfopen); const struct cdevsw bpf_cdevsw = { .d_open = bpfopen, .d_close = noclose, .d_read = noread, .d_write = nowrite, .d_ioctl = noioctl, .d_stop = nostop, .d_tty = notty, .d_poll = nopoll, .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_discard = nodiscard, .d_flag = D_OTHER | D_MPSAFE }; bpfjit_func_t bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size) { struct bpfjit_ops *ops = &bpfjit_module_ops; bpfjit_func_t (*generate_code)(const bpf_ctx_t *, const struct bpf_insn *, size_t); generate_code = atomic_load_acquire(&ops->bj_generate_code); if (generate_code != NULL) { return generate_code(bc, code, size); } return NULL; } void bpf_jit_freecode(bpfjit_func_t jcode) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(jcode); } static int bpf_movein(struct ifnet *ifp, struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp, struct sockaddr *sockp, struct bpf_filter **wfilter) { struct mbuf *m, *m0, *n; int error; size_t len; size_t hlen; size_t align; u_int slen; /* * Build a sockaddr based on the data link layer type. * We do this at this level because the ethernet header * is copied directly into the data field of the sockaddr. * In the case of SLIP, there is no header and the packet * is forwarded as is. * Also, we are careful to leave room at the front of the mbuf * for the link level header. */ switch (linktype) { case DLT_SLIP: sockp->sa_family = AF_INET; hlen = 0; align = 0; break; case DLT_PPP: sockp->sa_family = AF_UNSPEC; hlen = 0; align = 0; break; case DLT_EN10MB: sockp->sa_family = AF_UNSPEC; /* XXX Would MAXLINKHDR be better? */ /* 6(dst)+6(src)+2(type) */ hlen = sizeof(struct ether_header); align = 2; break; case DLT_ARCNET: sockp->sa_family = AF_UNSPEC; hlen = ARC_HDRLEN; align = 5; break; case DLT_FDDI: sockp->sa_family = AF_LINK; /* XXX 4(FORMAC)+6(dst)+6(src) */ hlen = 16; align = 0; break; case DLT_ECONET: sockp->sa_family = AF_UNSPEC; hlen = 6; align = 2; break; case DLT_NULL: sockp->sa_family = AF_UNSPEC; if (ifp->if_type == IFT_LOOP) { /* Set here to apply the following validations */ hlen = sizeof(uint32_t); } else hlen = 0; align = 0; break; default: return (EIO); } len = uio->uio_resid; /* * If there aren't enough bytes for a link level header or the * packet length exceeds the interface mtu, return an error. */ if (len - hlen > mtu) return (EMSGSIZE); m0 = m = m_gethdr(M_WAIT, MT_DATA); m_reset_rcvif(m); m->m_pkthdr.len = (int)(len - hlen); if (len + align > MHLEN) { m_clget(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { error = ENOBUFS; goto bad; } } /* Ensure the data is properly aligned */ if (align > 0) m->m_data += align; for (;;) { len = M_TRAILINGSPACE(m); if (len > uio->uio_resid) len = uio->uio_resid; error = uiomove(mtod(m, void *), len, uio); if (error) goto bad; m->m_len = len; if (uio->uio_resid == 0) break; n = m_get(M_WAIT, MT_DATA); m_clget(n, M_WAIT); /* if fails, there is no problem */ m->m_next = n; m = n; } slen = bpf_xfilter(wfilter, mtod(m, u_char *), len, len); if (slen == 0) { error = EPERM; goto bad; } if (hlen != 0) { if (linktype == DLT_NULL && ifp->if_type == IFT_LOOP) { uint32_t af; /* the link header indicates the address family */ memcpy(&af, mtod(m0, void *), sizeof(af)); sockp->sa_family = af; } else { /* move link level header in the top of mbuf to sa_data */ memcpy(sockp->sa_data, mtod(m0, void *), hlen); } m0->m_data += hlen; m0->m_len -= hlen; } *mp = m0; return (0); bad: m_freem(m0); return (error); } /* * Attach file to the bpf interface, i.e. make d listen on bp. */ static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp) { struct bpf_event_tracker *t; KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); /* * Point d at bp, and add d to the interface's list of listeners. * Finally, point the driver's bpf cookie at the interface so * it will divert packets to bpf. */ d->bd_bif = bp; BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d); *bp->bif_driverp = bp; SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { t->bet_notify(bp, bp->bif_ifp, bp->bif_dlt, BPF_TRACK_EVENT_ATTACH); } } /* * Detach a file from its interface. */ static void bpf_detachd(struct bpf_d *d) { struct bpf_if *bp; struct bpf_event_tracker *t; KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); bp = d->bd_bif; /* * Check if this descriptor had requested promiscuous mode. * If so, turn it off. */ if (d->bd_promisc) { int error __diagused; d->bd_promisc = 0; /* * Take device out of promiscuous mode. Since we were * able to enter promiscuous mode, we should be able * to turn it off. But we can get an error if * the interface was configured down, so only panic * if we don't get an unexpected error. */ KERNEL_LOCK_UNLESS_NET_MPSAFE(); error = ifpromisc(bp->bif_ifp, 0); KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); #ifdef DIAGNOSTIC if (error) printf("%s: ifpromisc failed: %d", __func__, error); #endif } /* Remove d from the interface's descriptor list. */ BPFIF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); if (BPFIF_DLIST_WRITER_EMPTY(bp)) { /* * Let the driver know that there are no more listeners. */ *d->bd_bif->bif_driverp = NULL; } d->bd_bif = NULL; SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { t->bet_notify(bp, bp->bif_ifp, bp->bif_dlt, BPF_TRACK_EVENT_DETACH); } } static void bpf_init(void) { mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE); bpf_psz = pserialize_create(); bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET); PSLIST_INIT(&bpf_iflist); PSLIST_INIT(&bpf_dlist); bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat)); return; } /* * bpfilterattach() is called at boot time. We don't need to do anything * here, since any initialization will happen as part of module init code. */ /* ARGSUSED */ void bpfilterattach(int n) { } /* * Open ethernet device. Clones. */ /* ARGSUSED */ int bpfopen(dev_t dev, int flag, int mode, struct lwp *l) { struct bpf_d *d; struct file *fp; int error, fd; /* falloc() will fill in the descriptor for us. */ if ((error = fd_allocfile(&fp, &fd)) != 0) return error; d = kmem_zalloc(sizeof(*d), KM_SLEEP); d->bd_bufsize = bpf_bufsize; d->bd_direction = BPF_D_INOUT; d->bd_feedback = 0; d->bd_pid = l->l_proc->p_pid; #ifdef _LP64 if (curproc->p_flag & PK_32) d->bd_compat32 = 1; #endif getnanotime(&d->bd_btime); d->bd_atime = d->bd_mtime = d->bd_btime; callout_init(&d->bd_callout, CALLOUT_MPSAFE); selinit(&d->bd_sel); d->bd_jitcode = NULL; d->bd_rfilter = NULL; d->bd_wfilter = NULL; d->bd_locked = 0; BPF_DLIST_ENTRY_INIT(d); BPFIF_DLIST_ENTRY_INIT(d); d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); cv_init(&d->bd_cv, "bpf"); mutex_enter(&bpf_mtx); BPF_DLIST_WRITER_INSERT_HEAD(d); mutex_exit(&bpf_mtx); return fd_clone(fp, fd, flag, &bpf_fileops, d); } /* * Close the descriptor by detaching it from its interface, * deallocating its buffers, and marking it free. */ /* ARGSUSED */ static int bpf_close(struct file *fp) { struct bpf_d *d; mutex_enter(&bpf_mtx); if ((d = fp->f_bpf) == NULL) { mutex_exit(&bpf_mtx); return 0; } /* * Refresh the PID associated with this bpf file. */ d->bd_pid = curproc->p_pid; mutex_enter(d->bd_mtx); if (d->bd_state == BPF_WAITING) callout_halt(&d->bd_callout, d->bd_mtx); d->bd_state = BPF_IDLE; if (d->bd_bif) bpf_detachd(d); mutex_exit(d->bd_mtx); BPF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); mutex_exit(&bpf_mtx); BPFIF_DLIST_ENTRY_DESTROY(d); BPF_DLIST_ENTRY_DESTROY(d); fp->f_bpf = NULL; bpf_freed(d); callout_destroy(&d->bd_callout); seldestroy(&d->bd_sel); mutex_obj_free(d->bd_mtx); mutex_obj_free(d->bd_buf_mtx); cv_destroy(&d->bd_cv); kmem_free(d, sizeof(*d)); return (0); } /* * Rotate the packet buffers in descriptor d. Move the store buffer * into the hold slot, and the free buffer into the store slot. * Zero the length of the new store buffer. */ #define ROTATE_BUFFERS(d) \ (d)->bd_hbuf = (d)->bd_sbuf; \ (d)->bd_hlen = (d)->bd_slen; \ (d)->bd_sbuf = (d)->bd_fbuf; \ (d)->bd_slen = 0; \ (d)->bd_fbuf = NULL; /* * bpfread - read next chunk of packets from buffers */ static int bpf_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) { struct bpf_d *d = fp->f_bpf; int timed_out; int error; /* * Refresh the PID associated with this bpf file. */ d->bd_pid = curproc->p_pid; getnanotime(&d->bd_atime); /* * Restrict application to use a buffer the same size as * the kernel buffers. */ if (uio->uio_resid != d->bd_bufsize) return (EINVAL); mutex_enter(d->bd_mtx); if (d->bd_state == BPF_WAITING) callout_halt(&d->bd_callout, d->bd_mtx); timed_out = (d->bd_state == BPF_TIMED_OUT); d->bd_state = BPF_IDLE; mutex_exit(d->bd_mtx); /* * If the hold buffer is empty, then do a timed sleep, which * ends when the timeout expires or when enough packets * have arrived to fill the store buffer. */ mutex_enter(d->bd_buf_mtx); while (d->bd_hbuf == NULL) { if (fp->f_flag & FNONBLOCK) { if (d->bd_slen == 0) { error = EWOULDBLOCK; goto out; } ROTATE_BUFFERS(d); break; } if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { /* * A packet(s) either arrived since the previous * read or arrived while we were asleep. * Rotate the buffers and return what's here. */ ROTATE_BUFFERS(d); break; } error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout); if (error == EINTR || error == ERESTART) goto out; if (error == EWOULDBLOCK) { /* * On a timeout, return what's in the buffer, * which may be nothing. If there is something * in the store buffer, we can rotate the buffers. */ if (d->bd_hbuf) /* * We filled up the buffer in between * getting the timeout and arriving * here, so we don't need to rotate. */ break; if (d->bd_slen == 0) { error = 0; goto out; } ROTATE_BUFFERS(d); break; } if (error != 0) goto out; } /* * At this point, we know we have something in the hold slot. */ mutex_exit(d->bd_buf_mtx); /* * Move data from hold buffer into user space. * We know the entire buffer is transferred since * we checked above that the read buffer is bpf_bufsize bytes. */ error = uiomove(d->bd_hbuf, d->bd_hlen, uio); mutex_enter(d->bd_buf_mtx); d->bd_fbuf = d->bd_hbuf; d->bd_hbuf = NULL; d->bd_hlen = 0; out: mutex_exit(d->bd_buf_mtx); return (error); } /* * If there are processes sleeping on this descriptor, wake them up. */ static inline void bpf_wakeup(struct bpf_d *d) { mutex_enter(d->bd_buf_mtx); cv_broadcast(&d->bd_cv); mutex_exit(d->bd_buf_mtx); if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); selnotify(&d->bd_sel, 0, 0); } static void bpf_timed_out(void *arg) { struct bpf_d *d = arg; mutex_enter(d->bd_mtx); if (d->bd_state == BPF_WAITING) { d->bd_state = BPF_TIMED_OUT; if (d->bd_slen != 0) bpf_wakeup(d); } mutex_exit(d->bd_mtx); } static int bpf_write(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) { struct bpf_d *d = fp->f_bpf; struct bpf_if *bp; struct ifnet *ifp; struct mbuf *m, *mc; int error; static struct sockaddr_storage dst; struct psref psref; int bound; /* * Refresh the PID associated with this bpf file. */ d->bd_pid = curproc->p_pid; m = NULL; /* XXX gcc */ bound = curlwp_bind(); mutex_enter(d->bd_mtx); bp = d->bd_bif; if (bp == NULL) { mutex_exit(d->bd_mtx); error = ENXIO; goto out_bindx; } bpf_if_acquire(bp, &psref); mutex_exit(d->bd_mtx); getnanotime(&d->bd_mtime); ifp = bp->bif_ifp; if (if_is_deactivated(ifp)) { error = ENXIO; goto out; } if (uio->uio_resid == 0) { error = 0; goto out; } error = bpf_movein(ifp, uio, (int)bp->bif_dlt, ifp->if_mtu, &m, (struct sockaddr *) &dst, &d->bd_wfilter); if (error) goto out; if (m->m_pkthdr.len > ifp->if_mtu) { m_freem(m); error = EMSGSIZE; goto out; } /* * If writing to a loopback interface, the address family has * already been specially computed in bpf_movein(), so don't * clobber it, or the loopback will reject it in looutput(). */ if (d->bd_hdrcmplt && ifp->if_type != IFT_LOOP) dst.ss_family = pseudo_AF_HDRCMPLT; if (d->bd_feedback) { mc = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (mc != NULL) m_set_rcvif(mc, ifp); /* Set M_PROMISC for outgoing packets to be discarded. */ if (1 /*d->bd_direction == BPF_D_INOUT*/) m->m_flags |= M_PROMISC; } else mc = NULL; error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL); if (mc != NULL) { if (error == 0) { int s = splsoftnet(); KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp); ifp->_if_input(ifp, mc); KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp); splx(s); } else m_freem(mc); } /* * The driver frees the mbuf. */ out: bpf_if_release(bp, &psref); out_bindx: curlwp_bindx(bound); return error; } /* * Reset a descriptor by flushing its packet buffer and clearing the * receive and drop counts. */ static void reset_d(struct bpf_d *d) { KASSERT(mutex_owned(d->bd_mtx)); mutex_enter(d->bd_buf_mtx); if (d->bd_hbuf) { /* Free the hold buffer. */ d->bd_fbuf = d->bd_hbuf; d->bd_hbuf = NULL; } d->bd_slen = 0; d->bd_hlen = 0; d->bd_rcount = 0; d->bd_dcount = 0; d->bd_ccount = 0; mutex_exit(d->bd_buf_mtx); } /* * FIONREAD Check for read packet available. * BIOCGBLEN Get buffer len [for read()]. * BIOCSETF Set ethernet read filter. * BIOCFLUSH Flush read packet buffer. * BIOCPROMISC Put interface into promiscuous mode. * BIOCGDLT Get link layer type. * BIOCGETIF Get interface name. * BIOCSETIF Set interface. * BIOCSRTIMEOUT Set read timeout. * BIOCGRTIMEOUT Get read timeout. * BIOCGSTATS Get packet stats. * BIOCIMMEDIATE Set immediate mode. * BIOCVERSION Get filter language version. * BIOCGHDRCMPLT Get "header already complete" flag. * BIOCSHDRCMPLT Set "header already complete" flag. * BIOCSFEEDBACK Set packet feedback mode. * BIOCGFEEDBACK Get packet feedback mode. * BIOCGDIRECTION Get packet direction flag * BIOCSDIRECTION Set packet direction flag */ /* ARGSUSED */ static int bpf_ioctl(struct file *fp, u_long cmd, void *addr) { struct bpf_d *d = fp->f_bpf; int error = 0; /* * Refresh the PID associated with this bpf file. */ d->bd_pid = curproc->p_pid; #ifdef _LP64 if (curproc->p_flag & PK_32) d->bd_compat32 = 1; else d->bd_compat32 = 0; #endif mutex_enter(d->bd_mtx); if (d->bd_state == BPF_WAITING) callout_halt(&d->bd_callout, d->bd_mtx); d->bd_state = BPF_IDLE; mutex_exit(d->bd_mtx); if (d->bd_locked) { switch (cmd) { case BIOCGBLEN: /* FALLTHROUGH */ case BIOCFLUSH: /* FALLTHROUGH */ case BIOCGDLT: /* FALLTHROUGH */ case BIOCGDLTLIST: /* FALLTHROUGH */ case BIOCGETIF: /* FALLTHROUGH */ case BIOCGRTIMEOUT: /* FALLTHROUGH */ case BIOCGSTATS: /* FALLTHROUGH */ case BIOCVERSION: /* FALLTHROUGH */ case BIOCGHDRCMPLT: /* FALLTHROUGH */ case FIONREAD: /* FALLTHROUGH */ case BIOCLOCK: /* FALLTHROUGH */ case BIOCSRTIMEOUT: /* FALLTHROUGH */ case BIOCIMMEDIATE: /* FALLTHROUGH */ case TIOCGPGRP: break; default: return EPERM; } } switch (cmd) { default: error = EINVAL; break; /* * Check for read packet available. */ case FIONREAD: { int n; mutex_enter(d->bd_buf_mtx); n = d->bd_slen; if (d->bd_hbuf) n += d->bd_hlen; mutex_exit(d->bd_buf_mtx); *(int *)addr = n; break; } /* * Get buffer len [for read()]. */ case BIOCGBLEN: *(u_int *)addr = d->bd_bufsize; break; /* * Set buffer length. */ case BIOCSBLEN: /* * Forbid to change the buffer length if buffers are already * allocated. */ mutex_enter(d->bd_mtx); mutex_enter(d->bd_buf_mtx); if (d->bd_bif != NULL || d->bd_sbuf != NULL) error = EINVAL; else { u_int size = *(u_int *)addr; if (size > bpf_maxbufsize) *(u_int *)addr = size = bpf_maxbufsize; else if (size < BPF_MINBUFSIZE) *(u_int *)addr = size = BPF_MINBUFSIZE; d->bd_bufsize = size; } mutex_exit(d->bd_buf_mtx); mutex_exit(d->bd_mtx); break; /* * Set link layer read filter. */ case BIOCSETF: /* FALLTHROUGH */ case BIOCSETWF: error = bpf_setf(d, addr, cmd); break; case BIOCLOCK: d->bd_locked = 1; break; /* * Flush read packet buffer. */ case BIOCFLUSH: mutex_enter(d->bd_mtx); reset_d(d); mutex_exit(d->bd_mtx); break; /* * Put interface into promiscuous mode. */ case BIOCPROMISC: mutex_enter(d->bd_mtx); if (d->bd_bif == NULL) { mutex_exit(d->bd_mtx); /* * No interface attached yet. */ error = EINVAL; break; } if (d->bd_promisc == 0) { KERNEL_LOCK_UNLESS_NET_MPSAFE(); error = ifpromisc(d->bd_bif->bif_ifp, 1); KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); if (error == 0) d->bd_promisc = 1; } mutex_exit(d->bd_mtx); break; /* * Get device parameters. */ case BIOCGDLT: mutex_enter(d->bd_mtx); if (d->bd_bif == NULL) error = EINVAL; else *(u_int *)addr = d->bd_bif->bif_dlt; mutex_exit(d->bd_mtx); break; /* * Get a list of supported device parameters. */ case BIOCGDLTLIST: mutex_enter(d->bd_mtx); if (d->bd_bif == NULL) error = EINVAL; else error = bpf_getdltlist(d, addr); mutex_exit(d->bd_mtx); break; /* * Set device parameters. */ case BIOCSDLT: mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); if (d->bd_bif == NULL) error = EINVAL; else error = bpf_setdlt(d, *(u_int *)addr); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); break; /* * Set interface name. */ #ifdef OBIOCGETIF case OBIOCGETIF: #endif case BIOCGETIF: mutex_enter(d->bd_mtx); if (d->bd_bif == NULL) error = EINVAL; else bpf_ifname(d->bd_bif->bif_ifp, addr); mutex_exit(d->bd_mtx); break; /* * Set interface. */ #ifdef OBIOCSETIF case OBIOCSETIF: #endif case BIOCSETIF: mutex_enter(&bpf_mtx); error = bpf_setif(d, addr); mutex_exit(&bpf_mtx); break; /* * Set read timeout. */ case BIOCSRTIMEOUT: { struct timeval *tv = addr; /* Compute number of ticks. */ if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #ifdef BIOCGORTIMEOUT /* * Get read timeout. */ case BIOCGORTIMEOUT: { struct timeval50 *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } #endif #ifdef BIOCSORTIMEOUT /* * Set read timeout. */ case BIOCSORTIMEOUT: { struct timeval50 *tv = addr; /* Compute number of ticks. */ if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #endif /* * Get read timeout. */ case BIOCGRTIMEOUT: { struct timeval *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } /* * Get packet stats. */ case BIOCGSTATS: { struct bpf_stat *bs = addr; bs->bs_recv = d->bd_rcount; bs->bs_drop = d->bd_dcount; bs->bs_capt = d->bd_ccount; break; } case BIOCGSTATS_30: { struct bpf_stat30 *bs = addr; bs->bs_recv = d->bd_rcount; bs->bs_drop = d->bd_dcount; break; } /* * Set immediate mode. */ case BIOCIMMEDIATE: d->bd_immediate = *(u_int *)addr; break; case BIOCVERSION: { struct bpf_version *bv = addr; bv->bv_major = BPF_MAJOR_VERSION; bv->bv_minor = BPF_MINOR_VERSION; break; } case BIOCGHDRCMPLT: /* get "header already complete" flag */ *(u_int *)addr = d->bd_hdrcmplt; break; case BIOCSHDRCMPLT: /* set "header already complete" flag */ d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; break; /* * Get packet direction flag */ case BIOCGDIRECTION: *(u_int *)addr = d->bd_direction; break; /* * Set packet direction flag */ case BIOCSDIRECTION: { u_int direction; direction = *(u_int *)addr; switch (direction) { case BPF_D_IN: case BPF_D_INOUT: case BPF_D_OUT: d->bd_direction = direction; break; default: error = EINVAL; } } break; /* * Set "feed packets from bpf back to input" mode */ case BIOCSFEEDBACK: d->bd_feedback = *(u_int *)addr; break; /* * Get "feed packets from bpf back to input" mode */ case BIOCGFEEDBACK: *(u_int *)addr = d->bd_feedback; break; case FIONBIO: /* Non-blocking I/O */ /* * No need to do anything special as we use IO_NDELAY in * bpfread() as an indication of whether or not to block * the read. */ break; case FIOASYNC: /* Send signal on receive packets */ mutex_enter(d->bd_mtx); d->bd_async = *(int *)addr; mutex_exit(d->bd_mtx); break; case TIOCSPGRP: /* Process or group to send signals to */ case FIOSETOWN: error = fsetown(&d->bd_pgid, cmd, addr); break; case TIOCGPGRP: case FIOGETOWN: error = fgetown(d->bd_pgid, cmd, addr); break; } return (error); } /* * Set d's packet filter program to fp. If this file already has a filter, * free it and replace it. Returns EINVAL for bogus requests. */ static int bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd) { struct bpf_insn *fcode; bpfjit_func_t jcode; size_t flen, size = 0; struct bpf_filter *oldf, *newf, **storef; jcode = NULL; flen = fp->bf_len; if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) { return EINVAL; } if (flen) { /* * Allocate the buffer, copy the byte-code from * userspace and validate it. */ size = flen * sizeof(*fp->bf_insns); fcode = kmem_alloc(size, KM_SLEEP); if (copyin(fp->bf_insns, fcode, size) != 0 || !bpf_validate(fcode, (int)flen)) { kmem_free(fcode, size); return EINVAL; } if (bpf_jit) jcode = bpf_jit_generate(NULL, fcode, flen); } else { fcode = NULL; } newf = kmem_alloc(sizeof(*newf), KM_SLEEP); newf->bf_insn = fcode; newf->bf_size = size; newf->bf_jitcode = jcode; if (cmd == BIOCSETF) d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ /* Need to hold bpf_mtx for pserialize_perform */ mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); if (cmd == BIOCSETWF) { oldf = d->bd_wfilter; storef = &d->bd_wfilter; } else { oldf = d->bd_rfilter; storef = &d->bd_rfilter; } atomic_store_release(storef, newf); reset_d(d); pserialize_perform(bpf_psz); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); if (oldf != NULL) bpf_free_filter(oldf); return 0; } /* * Detach a file from its current interface (if attached at all) and attach * to the interface indicated by the name stored in ifr. * Return an errno or 0. */ static int bpf_setif(struct bpf_d *d, struct ifreq *ifr) { struct bpf_if *bp; char *cp; int unit_seen, i, error; KASSERT(mutex_owned(&bpf_mtx)); /* * Make sure the provided name has a unit number, and default * it to '0' if not specified. * XXX This is ugly ... do this differently? */ unit_seen = 0; cp = ifr->ifr_name; cp[sizeof(ifr->ifr_name) - 1] = '\0'; /* sanity */ while (*cp++) if (*cp >= '0' && *cp <= '9') unit_seen = 1; if (!unit_seen) { /* Make sure to leave room for the '\0'. */ for (i = 0; i < (IFNAMSIZ - 1); ++i) { if ((ifr->ifr_name[i] >= 'a' && ifr->ifr_name[i] <= 'z') || (ifr->ifr_name[i] >= 'A' && ifr->ifr_name[i] <= 'Z')) continue; ifr->ifr_name[i] = '0'; } } /* * Look through attached interfaces for the named one. */ BPF_IFLIST_WRITER_FOREACH(bp) { struct ifnet *ifp = bp->bif_ifp; if (ifp == NULL || strcmp(ifp->if_xname, ifr->ifr_name) != 0) continue; /* skip additional entry */ if (bp->bif_driverp != &ifp->if_bpf) continue; /* * We found the requested interface. * Allocate the packet buffers if we need to. * If we're already attached to requested interface, * just flush the buffer. */ /* * bpf_allocbufs is called only here. bpf_mtx ensures that * no race condition happen on d->bd_sbuf. */ if (d->bd_sbuf == NULL) { error = bpf_allocbufs(d); if (error != 0) return (error); } mutex_enter(d->bd_mtx); if (bp != d->bd_bif) { if (d->bd_bif) { /* * Detach if attached to something else. */ bpf_detachd(d); BPFIF_DLIST_ENTRY_INIT(d); } bpf_attachd(d, bp); } reset_d(d); mutex_exit(d->bd_mtx); return (0); } /* Not found. */ return (ENXIO); } /* * Copy the interface name to the ifreq. */ static void bpf_ifname(struct ifnet *ifp, struct ifreq *ifr) { memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ); } static int bpf_stat(struct file *fp, struct stat *st) { struct bpf_d *d = fp->f_bpf; (void)memset(st, 0, sizeof(*st)); mutex_enter(d->bd_mtx); st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid); st->st_atimespec = d->bd_atime; st->st_mtimespec = d->bd_mtime; st->st_ctimespec = st->st_birthtimespec = d->bd_btime; st->st_uid = kauth_cred_geteuid(fp->f_cred); st->st_gid = kauth_cred_getegid(fp->f_cred); st->st_mode = S_IFCHR; mutex_exit(d->bd_mtx); return 0; } /* * Support for poll() system call * * Return true iff the specific operation will not block indefinitely - with * the assumption that it is safe to positively acknowledge a request for the * ability to write to the BPF device. * Otherwise, return false but make a note that a selnotify() must be done. */ static int bpf_poll(struct file *fp, int events) { struct bpf_d *d = fp->f_bpf; int revents; /* * Refresh the PID associated with this bpf file. */ mutex_enter(&bpf_mtx); d->bd_pid = curproc->p_pid; revents = events & (POLLOUT | POLLWRNORM); if (events & (POLLIN | POLLRDNORM)) { /* * An imitation of the FIONREAD ioctl code. */ mutex_enter(d->bd_mtx); if (d->bd_hlen != 0 || ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { revents |= events & (POLLIN | POLLRDNORM); } else { selrecord(curlwp, &d->bd_sel); /* Start the read timeout if necessary */ if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { callout_reset(&d->bd_callout, d->bd_rtout, bpf_timed_out, d); d->bd_state = BPF_WAITING; } } mutex_exit(d->bd_mtx); } mutex_exit(&bpf_mtx); return (revents); } static void filt_bpfrdetach(struct knote *kn) { struct bpf_d *d = kn->kn_hook; mutex_enter(d->bd_buf_mtx); selremove_knote(&d->bd_sel, kn); mutex_exit(d->bd_buf_mtx); } static int filt_bpfread(struct knote *kn, long hint) { struct bpf_d *d = kn->kn_hook; int rv; /* * Refresh the PID associated with this bpf file. */ d->bd_pid = curproc->p_pid; mutex_enter(d->bd_buf_mtx); kn->kn_data = d->bd_hlen; if (d->bd_immediate) kn->kn_data += d->bd_slen; rv = (kn->kn_data > 0); mutex_exit(d->bd_buf_mtx); return rv; } static const struct filterops bpfread_filtops = { .f_flags = FILTEROP_ISFD, .f_attach = NULL, .f_detach = filt_bpfrdetach, .f_event = filt_bpfread, }; static int bpf_kqfilter(struct file *fp, struct knote *kn) { struct bpf_d *d = fp->f_bpf; switch (kn->kn_filter) { case EVFILT_READ: kn->kn_fop = &bpfread_filtops; break; default: return (EINVAL); } kn->kn_hook = d; mutex_enter(d->bd_buf_mtx); selrecord_knote(&d->bd_sel, kn); mutex_exit(d->bd_buf_mtx); return (0); } /* * Copy data from an mbuf chain into a buffer. This code is derived * from m_copydata in sys/uipc_mbuf.c. */ static void * bpf_mcpy(void *dst_arg, const void *src_arg, size_t len) { const struct mbuf *m; u_int count; u_char *dst; m = src_arg; dst = dst_arg; while (len > 0) { if (m == NULL) panic("bpf_mcpy"); count = uimin(m->m_len, len); memcpy(dst, mtod(m, const void *), count); m = m->m_next; dst += count; len -= count; } return dst_arg; } static inline u_int bpf_xfilter(struct bpf_filter **filter, void *pkt, u_int pktlen, u_int buflen) { struct bpf_filter *filt; uint32_t mem[BPF_MEMWORDS]; bpf_args_t args = { .pkt = (const uint8_t *)pkt, .wirelen = pktlen, .buflen = buflen, .mem = mem, .arg = NULL }; u_int slen; filt = atomic_load_consume(filter); if (filt == NULL) /* No filter means accept all. */ return (u_int)-1; if (filt->bf_jitcode != NULL) slen = filt->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filt->bf_insn, &args); return slen; } /* * Dispatch a packet to all the listeners on interface bp. * * pkt pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if pkt is a data buffer * cpfn a function that can copy pkt into the listener's buffer * pktlen length of the packet * direction BPF_D_IN or BPF_D_OUT */ static inline void bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t), void *pkt, u_int pktlen, u_int buflen, const u_int direction) { bool gottime = false; struct timespec ts; struct bpf_d *d; int s; u_int slen; KASSERT(!cpu_intr_p()); /* * Note that the IPL does not have to be raised at this point. * The only problem that could arise here is that if two different * interfaces shared any data. This is not the case. */ s = pserialize_read_enter(); BPFIF_DLIST_READER_FOREACH(d, bp) { if (direction == BPF_D_IN) { if (d->bd_direction == BPF_D_OUT) continue; } else { /* BPF_D_OUT */ if (d->bd_direction == BPF_D_IN) continue; } atomic_inc_ulong(&d->bd_rcount); BPF_STATINC(recv); slen = bpf_xfilter(&d->bd_rfilter, pkt, pktlen, buflen); if (slen == 0) continue; if (!gottime) { gottime = true; nanotime(&ts); } /* Assume catchpacket doesn't sleep */ catchpacket(d, pkt, pktlen, slen, cpfn, &ts); } pserialize_read_exit(s); } /* * Incoming linkage from device drivers, when the head of the packet is in * a buffer, and the tail is in an mbuf chain. */ static void _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m, u_int direction) { u_int pktlen; struct mbuf mb; /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { m->m_flags &= ~M_PROMISC; return; } pktlen = m_length(m) + dlen; /* * Craft on-stack mbuf suitable for passing to bpf_filter. * Note that we cut corners here; we only set up what's * absolutely needed--this mbuf should never go anywhere else. */ (void)memset(&mb, 0, sizeof(mb)); mb.m_type = MT_DATA; mb.m_next = m; mb.m_data = data; mb.m_len = dlen; bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, direction); } /* * Incoming linkage from device drivers, when packet is in an mbuf chain. */ static void _bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction) { void *(*cpfn)(void *, const void *, size_t); u_int pktlen, buflen; void *marg; /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { m->m_flags &= ~M_PROMISC; return; } pktlen = m_length(m); /* Skip zero-sized packets. */ if (__predict_false(pktlen == 0)) { return; } if (pktlen == m->m_len) { cpfn = (void *)memcpy; marg = mtod(m, void *); buflen = pktlen; KASSERT(buflen != 0); } else { cpfn = bpf_mcpy; marg = m; buflen = 0; } bpf_deliver(bp, cpfn, marg, pktlen, buflen, direction); } /* * We need to prepend the address family as * a four byte field. Cons up a dummy header * to pacify bpf. This is safe because bpf * will only read from the mbuf (i.e., it won't * try to free it or keep a pointer a to it). */ static void _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m, u_int direction) { struct mbuf m0; m0.m_type = MT_DATA; m0.m_flags = 0; m0.m_next = m; m0.m_nextpkt = NULL; m0.m_owner = NULL; m0.m_len = 4; m0.m_data = (char *)⁡ _bpf_mtap(bp, &m0, direction); } /* * Put the SLIP pseudo-"link header" in place. * Note this M_PREPEND() should never fail, * since we know we always have enough space * in the input buffer. */ static void _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) { u_char *hp; M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT); if (*m == NULL) return; hp = mtod(*m, u_char *); hp[SLX_DIR] = SLIPDIR_IN; (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN); _bpf_mtap(bp, *m, BPF_D_IN); m_adj(*m, SLIP_HDRLEN); } /* * Put the SLIP pseudo-"link header" in * place. The compressed header is now * at the beginning of the mbuf. */ static void _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) { struct mbuf m0; u_char *hp; m0.m_type = MT_DATA; m0.m_flags = 0; m0.m_next = m; m0.m_nextpkt = NULL; m0.m_owner = NULL; m0.m_data = m0.m_dat; m0.m_len = SLIP_HDRLEN; hp = mtod(&m0, u_char *); hp[SLX_DIR] = SLIPDIR_OUT; (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN); _bpf_mtap(bp, &m0, BPF_D_OUT); m_freem(m); } static struct mbuf * bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m) { struct mbuf *dup; dup = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (dup == NULL) return NULL; if (bp->bif_mbuf_tail != NULL) { bp->bif_mbuf_tail->m_nextpkt = dup; } else { bp->bif_mbuf_head = dup; } bp->bif_mbuf_tail = dup; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n", __func__, dup, bp->bif_ifp->if_xname); #endif return dup; } static struct mbuf * bpf_mbuf_dequeue(struct bpf_if *bp) { struct mbuf *m; int s; /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); m = bp->bif_mbuf_head; if (m != NULL) { bp->bif_mbuf_head = m->m_nextpkt; m->m_nextpkt = NULL; if (bp->bif_mbuf_head == NULL) bp->bif_mbuf_tail = NULL; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n", __func__, m, bp->bif_ifp->if_xname); #endif } splx(s); return m; } static void bpf_mtap_si(void *arg) { struct bpf_if *bp = arg; struct mbuf *m; while ((m = bpf_mbuf_dequeue(bp)) != NULL) { #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n", __func__, m, bp->bif_ifp->if_xname); #endif bpf_ops->bpf_mtap(bp, m, BPF_D_IN); m_freem(m); } } static void _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) { struct bpf_if *bp = ifp->if_bpf; struct mbuf *dup; KASSERT(cpu_intr_p()); /* To avoid extra invocations of the softint */ if (BPFIF_DLIST_READER_EMPTY(bp)) return; KASSERT(bp->bif_si != NULL); dup = bpf_mbuf_enqueue(bp, m); if (dup != NULL) softint_schedule(bp->bif_si); } static int bpf_hdrlen(struct bpf_d *d) { int hdrlen = d->bd_bif->bif_hdrlen; /* * Compute the length of the bpf header. This is not necessarily * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for * performance reasons and to alleviate alignment restrictions). */ #ifdef _LP64 if (d->bd_compat32) return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen); else #endif return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen); } /* * Move the packet data from interface memory (pkt) into the * store buffer. Call the wakeup functions if it's time to wake up * a listener (buffer full), "cpfn" is the routine called to do the * actual data transfer. memcpy is passed in to copy contiguous chunks, * while bpf_mcpy is passed in to copy mbuf chains. In the latter case, * pkt is really an mbuf. */ static void catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, void *(*cpfn)(void *, const void *, size_t), struct timespec *ts) { char *h; int totlen, curlen, caplen; int hdrlen = bpf_hdrlen(d); int do_wakeup = 0; atomic_inc_ulong(&d->bd_ccount); BPF_STATINC(capt); /* * Figure out how many bytes to move. If the packet is * greater or equal to the snapshot length, transfer that * much. Otherwise, transfer the whole packet (unless * we hit the buffer size limit). */ totlen = hdrlen + uimin(snaplen, pktlen); if (totlen > d->bd_bufsize) totlen = d->bd_bufsize; /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; mutex_enter(d->bd_buf_mtx); /* * Round up the end of the previous packet to the next longword. */ #ifdef _LP64 if (d->bd_compat32) curlen = BPF_WORDALIGN32(d->bd_slen); else #endif curlen = BPF_WORDALIGN(d->bd_slen); if (curlen + totlen > d->bd_bufsize) { /* * This packet will overflow the storage buffer. * Rotate the buffers if we can, then wakeup any * pending reads. */ if (d->bd_fbuf == NULL) { mutex_exit(d->bd_buf_mtx); /* * We haven't completed the previous read yet, * so drop the packet. */ atomic_inc_ulong(&d->bd_dcount); BPF_STATINC(drop); return; } ROTATE_BUFFERS(d); do_wakeup = 1; curlen = 0; } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ do_wakeup = 1; } /* * Append the bpf header. */ h = (char *)d->bd_sbuf + curlen; #ifdef _LP64 if (d->bd_compat32) { struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; hp32->bh_tstamp.tv_sec = ts->tv_sec; hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000; hp32->bh_datalen = pktlen; hp32->bh_hdrlen = hdrlen; hp32->bh_caplen = caplen; } else #endif { struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; hp->bh_datalen = pktlen; hp->bh_hdrlen = hdrlen; hp->bh_caplen = caplen; } /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); d->bd_slen = curlen + totlen; mutex_exit(d->bd_buf_mtx); /* * Call bpf_wakeup after bd_slen has been updated so that kevent(2) * will cause filt_bpfread() to be called with it adjusted. */ if (do_wakeup) bpf_wakeup(d); } /* * Initialize all nonzero fields of a descriptor. */ static int bpf_allocbufs(struct bpf_d *d) { d->bd_fbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); if (!d->bd_fbuf) return (ENOBUFS); d->bd_sbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); if (!d->bd_sbuf) { kmem_free(d->bd_fbuf, d->bd_bufsize); return (ENOBUFS); } d->bd_slen = 0; d->bd_hlen = 0; return (0); } static void bpf_free_filter(struct bpf_filter *filter) { KASSERT(filter != NULL); if (filter->bf_insn != NULL) kmem_free(filter->bf_insn, filter->bf_size); if (filter->bf_jitcode != NULL) bpf_jit_freecode(filter->bf_jitcode); kmem_free(filter, sizeof(*filter)); } /* * Free buffers currently in use by a descriptor. * Called on close. */ static void bpf_freed(struct bpf_d *d) { /* * We don't need to lock out interrupts since this descriptor has * been detached from its interface and it yet hasn't been marked * free. */ if (d->bd_sbuf != NULL) { kmem_free(d->bd_sbuf, d->bd_bufsize); if (d->bd_hbuf != NULL) kmem_free(d->bd_hbuf, d->bd_bufsize); if (d->bd_fbuf != NULL) kmem_free(d->bd_fbuf, d->bd_bufsize); } if (d->bd_rfilter != NULL) { bpf_free_filter(d->bd_rfilter); d->bd_rfilter = NULL; } if (d->bd_wfilter != NULL) { bpf_free_filter(d->bd_wfilter); d->bd_wfilter = NULL; } d->bd_jitcode = NULL; } /* * Attach an interface to bpf. dlt is the link layer type; * hdrlen is the fixed size of the link header for the specified dlt * (variable length headers not yet supported). */ static void _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) { struct bpf_if *bp; bp = kmem_alloc(sizeof(*bp), KM_SLEEP); mutex_enter(&bpf_mtx); bp->bif_driverp = driverp; bp->bif_ifp = ifp; bp->bif_dlt = dlt; bp->bif_si = NULL; BPF_IFLIST_ENTRY_INIT(bp); PSLIST_INIT(&bp->bif_dlist_head); psref_target_init(&bp->bif_psref, bpf_psref_class); SLIST_INIT(&bp->bif_trackers); BPF_IFLIST_WRITER_INSERT_HEAD(bp); *bp->bif_driverp = NULL; bp->bif_hdrlen = hdrlen; mutex_exit(&bpf_mtx); #if 0 printf("bpf: %s attached with dlt %x\n", ifp->if_xname, dlt); #endif } static void _bpf_mtap_softint_init(struct ifnet *ifp) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; bp->bif_mbuf_head = NULL; bp->bif_mbuf_tail = NULL; bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp); if (bp->bif_si == NULL) panic("%s: softint_establish() failed", __func__); break; } mutex_exit(&bpf_mtx); if (bp == NULL) panic("%s: no bpf_if found for %s", __func__, ifp->if_xname); } /* * Remove an interface from bpf. */ static void _bpfdetach(struct ifnet *ifp) { struct bpf_if *bp; struct bpf_d *d; int s; mutex_enter(&bpf_mtx); /* Nuke the vnodes for any open instances */ again_d: BPF_DLIST_WRITER_FOREACH(d) { mutex_enter(d->bd_mtx); if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) { /* * Detach the descriptor from an interface now. * It will be free'ed later by close routine. */ bpf_detachd(d); mutex_exit(d->bd_mtx); goto again_d; } mutex_exit(d->bd_mtx); } again: BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp == ifp) { BPF_IFLIST_WRITER_REMOVE(bp); pserialize_perform(bpf_psz); psref_target_destroy(&bp->bif_psref, bpf_psref_class); while (!SLIST_EMPTY(&bp->bif_trackers)) { struct bpf_event_tracker *t = SLIST_FIRST(&bp->bif_trackers); SLIST_REMOVE_HEAD(&bp->bif_trackers, bet_entries); kmem_free(t, sizeof(*t)); } BPF_IFLIST_ENTRY_DESTROY(bp); if (bp->bif_si != NULL) { /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); while (bp->bif_mbuf_head != NULL) { struct mbuf *m = bp->bif_mbuf_head; bp->bif_mbuf_head = m->m_nextpkt; m_freem(m); } splx(s); softint_disestablish(bp->bif_si); } kmem_free(bp, sizeof(*bp)); goto again; } } mutex_exit(&bpf_mtx); } /* * Change the data link type of a interface. */ static void _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_driverp == &ifp->if_bpf) break; } if (bp == NULL) panic("bpf_change_type"); bp->bif_dlt = dlt; bp->bif_hdrlen = hdrlen; mutex_exit(&bpf_mtx); } /* * Get a list of available data link type of the interface. */ static int bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) { int n, error; struct ifnet *ifp; struct bpf_if *bp; int s, bound; KASSERT(mutex_owned(d->bd_mtx)); ifp = d->bd_bif->bif_ifp; n = 0; error = 0; bound = curlwp_bind(); s = pserialize_read_enter(); BPF_IFLIST_READER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; if (bfl->bfl_list != NULL) { struct psref psref; if (n >= bfl->bfl_len) { pserialize_read_exit(s); return ENOMEM; } bpf_if_acquire(bp, &psref); pserialize_read_exit(s); error = copyout(&bp->bif_dlt, bfl->bfl_list + n, sizeof(u_int)); s = pserialize_read_enter(); bpf_if_release(bp, &psref); } n++; } pserialize_read_exit(s); curlwp_bindx(bound); bfl->bfl_len = n; return error; } /* * Set the data link type of a BPF instance. */ static int bpf_setdlt(struct bpf_d *d, u_int dlt) { int error, opromisc; struct ifnet *ifp; struct bpf_if *bp; KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); if (d->bd_bif->bif_dlt == dlt) return 0; ifp = d->bd_bif->bif_ifp; BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) break; } if (bp == NULL) return EINVAL; opromisc = d->bd_promisc; bpf_detachd(d); BPFIF_DLIST_ENTRY_INIT(d); bpf_attachd(d, bp); reset_d(d); if (opromisc) { KERNEL_LOCK_UNLESS_NET_MPSAFE(); error = ifpromisc(bp->bif_ifp, 1); KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); if (error) printf("%s: bpf_setdlt: ifpromisc failed (%d)\n", bp->bif_ifp->if_xname, error); else d->bd_promisc = 1; } return 0; } static int sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS) { int newsize, error; struct sysctlnode node; node = *rnode; node.sysctl_data = &newsize; newsize = bpf_maxbufsize; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error || newp == NULL) return (error); if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE) return (EINVAL); bpf_maxbufsize = newsize; return (0); } #if defined(MODULAR) || defined(BPFJIT) static int sysctl_net_bpf_jit(SYSCTLFN_ARGS) { bool newval; int error; struct sysctlnode node; node = *rnode; node.sysctl_data = &newval; newval = bpf_jit; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; bpf_jit = newval; if (newval && bpfjit_module_ops.bj_generate_code == NULL) { printf("JIT compilation is postponed " "until after bpfjit module is loaded\n"); } return 0; } #endif static int sysctl_net_bpf_peers(SYSCTLFN_ARGS) { int error, elem_count; struct bpf_d *dp; struct bpf_d_ext dpe; size_t len, needed, elem_size, out_size; char *sp; if (namelen == 1 && name[0] == CTL_QUERY) return (sysctl_query(SYSCTLFN_CALL(rnode))); if (namelen != 2) return (EINVAL); /* BPF peers is privileged information. */ error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE, KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL); if (error) return (EPERM); len = (oldp != NULL) ? *oldlenp : 0; sp = oldp; elem_size = name[0]; elem_count = name[1]; out_size = MIN(sizeof(dpe), elem_size); needed = 0; if (elem_size < 1 || elem_count < 0) return (EINVAL); mutex_enter(&bpf_mtx); BPF_DLIST_WRITER_FOREACH(dp) { if (len >= elem_size && elem_count > 0) { #define BPF_EXT(field) dpe.bde_ ## field = dp->bd_ ## field BPF_EXT(bufsize); BPF_EXT(promisc); BPF_EXT(state); BPF_EXT(immediate); BPF_EXT(hdrcmplt); BPF_EXT(direction); BPF_EXT(pid); BPF_EXT(rcount); BPF_EXT(dcount); BPF_EXT(ccount); #undef BPF_EXT mutex_enter(dp->bd_mtx); if (dp->bd_bif) (void)strlcpy(dpe.bde_ifname, dp->bd_bif->bif_ifp->if_xname, IFNAMSIZ - 1); else dpe.bde_ifname[0] = '\0'; dpe.bde_locked = dp->bd_locked; mutex_exit(dp->bd_mtx); error = copyout(&dpe, sp, out_size); if (error) break; sp += elem_size; len -= elem_size; } needed += elem_size; if (elem_count > 0 && elem_count != INT_MAX) elem_count--; } mutex_exit(&bpf_mtx); *oldlenp = needed; return (error); } static void bpf_stats(void *p, void *arg, struct cpu_info *ci __unused) { struct bpf_stat *const stats = p; struct bpf_stat *sum = arg; int s = splnet(); sum->bs_recv += stats->bs_recv; sum->bs_drop += stats->bs_drop; sum->bs_capt += stats->bs_capt; splx(s); } static int bpf_sysctl_gstats_handler(SYSCTLFN_ARGS) { struct sysctlnode node; int error; struct bpf_stat sum; memset(&sum, 0, sizeof(sum)); node = *rnode; percpu_foreach_xcall(bpf_gstats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET), bpf_stats, &sum); node.sysctl_data = ∑ node.sysctl_size = sizeof(sum); error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; return 0; } SYSCTL_SETUP(sysctl_net_bpf_setup, "bpf sysctls") { const struct sysctlnode *node; node = NULL; sysctl_createv(clog, 0, NULL, &node, CTLFLAG_PERMANENT, CTLTYPE_NODE, "bpf", SYSCTL_DESCR("BPF options"), NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL); if (node != NULL) { #if defined(MODULAR) || defined(BPFJIT) sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "jit", SYSCTL_DESCR("Toggle Just-In-Time compilation"), sysctl_net_bpf_jit, 0, &bpf_jit, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); #endif sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "maxbufsize", SYSCTL_DESCR("Maximum size for data capture buffer"), sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "stats", SYSCTL_DESCR("BPF stats"), bpf_sysctl_gstats_handler, 0, NULL, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "peers", SYSCTL_DESCR("BPF peers"), sysctl_net_bpf_peers, 0, NULL, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); } } static int _bpf_register_track_event(struct bpf_if **driverp, void (*_fun)(struct bpf_if *, struct ifnet *, int, int)) { struct bpf_if *bp; struct bpf_event_tracker *t; int ret = ENOENT; t = kmem_zalloc(sizeof(*t), KM_SLEEP); if (!t) return ENOMEM; t->bet_notify = _fun; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_driverp != driverp) continue; SLIST_INSERT_HEAD(&bp->bif_trackers, t, bet_entries); ret = 0; break; } mutex_exit(&bpf_mtx); return ret; } static int _bpf_deregister_track_event(struct bpf_if **driverp, void (*_fun)(struct bpf_if *, struct ifnet *, int, int)) { struct bpf_if *bp; struct bpf_event_tracker *t = NULL; int ret = ENOENT; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_driverp != driverp) continue; SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { if (t->bet_notify == _fun) { ret = 0; break; } } if (ret == 0) break; } if (ret == 0 && t && t->bet_notify == _fun) { SLIST_REMOVE(&bp->bif_trackers, t, bpf_event_tracker, bet_entries); } mutex_exit(&bpf_mtx); if (ret == 0) kmem_free(t, sizeof(*t)); return ret; } struct bpf_ops bpf_ops_kernel = { .bpf_attach = _bpfattach, .bpf_detach = _bpfdetach, .bpf_change_type = _bpf_change_type, .bpf_register_track_event = _bpf_register_track_event, .bpf_deregister_track_event = _bpf_deregister_track_event, .bpf_mtap = _bpf_mtap, .bpf_mtap2 = _bpf_mtap2, .bpf_mtap_af = _bpf_mtap_af, .bpf_mtap_sl_in = _bpf_mtap_sl_in, .bpf_mtap_sl_out = _bpf_mtap_sl_out, .bpf_mtap_softint = _bpf_mtap_softint, .bpf_mtap_softint_init = _bpf_mtap_softint_init, }; MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter"); static int bpf_modcmd(modcmd_t cmd, void *arg) { #ifdef _MODULE devmajor_t bmajor, cmajor; #endif int error = 0; switch (cmd) { case MODULE_CMD_INIT: bpf_init(); #ifdef _MODULE bmajor = cmajor = NODEVMAJOR; error = devsw_attach("bpf", NULL, &bmajor, &bpf_cdevsw, &cmajor); if (error) break; #endif bpf_ops_handover_enter(&bpf_ops_kernel); atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel); bpf_ops_handover_exit(); break; case MODULE_CMD_FINI: /* * While there is no reference counting for bpf callers, * unload could at least in theory be done similarly to * system call disestablishment. This should even be * a little simpler: * * 1) replace op vector with stubs * 2) post update to all cpus with xc * 3) check that nobody is in bpf anymore * (it's doubtful we'd want something like l_sysent, * but we could do something like *signed* percpu * counters. if the sum is 0, we're good). * 4) if fail, unroll changes * * NOTE: change won't be atomic to the outside. some * packets may be not captured even if unload is * not successful. I think packet capture not working * is a perfectly logical consequence of trying to * disable packet capture. */ error = EOPNOTSUPP; break; default: error = ENOTTY; break; } return error; } @ 1.251 log @bpf.c: support loopback writes when BIOCSHDRCMPLT is set Following changes in r. 1.249 "bpf: support sending packets on loopback interfaces", also allow for this to succeed when the "header complete" flag is set, which is the practice of some tools, e.g., tcpreplay and Scapy. With this change, both of those example tools now work, e.g., Scapy passes "L3bpfSocket - send and sniff on loopback" in its test suite. There are several ways of addressing this issue; this commit is intended to be the most conservative and consistent with the previous changes. (E.g., FreeBSD instead has special handling of this condition in its if_loop.c.) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.250 2023/02/07 01:46:37 gutteridge Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.250 2023/02/07 01:46:37 gutteridge Exp $"); d1260 1 a1260 1 case BIOCGSTATSOLD: d1262 1 a1262 1 struct bpf_stat_old *bs = addr; @ 1.250 log @bpf.c: fix a few typos and grammatical issues in comments @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.249 2022/11/30 06:02:37 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.249 2022/11/30 06:02:37 ozaki-r Exp $"); d879 6 a884 1 if (d->bd_hdrcmplt) @ 1.249 log @bpf: support sending packets on loopback interfaces Previously sending packets on a loopback interface via bpf failed because the packets are treated as AF_UNSPEC by bpf and the loopback interface couldn't handle such packets. This fix enables user programs to prepend a protocol family (AF_INET or AF_INET6) to a payload. bpf interprets it and treats a packet as so, not just AF_UNSPEC. The protocol family is encoded as 4 bytes, host byte order as per DLT_NULL in the specification(*). (*) https://www.tcpdump.org/linktypes.html Proposed on tech-net and tech-kern @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.248 2022/11/19 08:53:06 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.248 2022/11/19 08:53:06 yamt Exp $"); d420 1 a420 1 /* Insure the data is properly aligned */ d1762 1 a1762 1 * Note that we cut corners here; we only setup what's d1837 1 a1837 1 * swince we know we always have enough space d1988 1 a1988 1 * store buffer. Call the wakeup functions if it's time to wakeup @ 1.249.2.1 log @Pull up following revision(s) (requested by gutteridge in ticket #103): sys/net/bpf.c: revision 1.251 bpf.c: support loopback writes when BIOCSHDRCMPLT is set Following changes in r. 1.249 "bpf: support sending packets on loopback interfaces", also allow for this to succeed when the "header complete" flag is set, which is the practice of some tools, e.g., tcpreplay and Scapy. With this change, both of those example tools now work, e.g., Scapy passes "L3bpfSocket - send and sniff on loopback" in its test suite. There are several ways of addressing this issue; this commit is intended to be the most conservative and consistent with the previous changes. (E.g., FreeBSD instead has special handling of this condition in its if_loop.c.) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.249 2022/11/30 06:02:37 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.249 2022/11/30 06:02:37 ozaki-r Exp $"); d879 1 a879 6 /* * If writing to a loopback interface, the address family has * already been specially computed in bpf_movein(), so don't * clobber it, or the loopback will reject it in looutput(). */ if (d->bd_hdrcmplt && ifp->if_type != IFT_LOOP) @ 1.248 log @bpf: refresh bd_pid in a few more places as well This made "netstat -B" show hostapd and wpa_supplicant for me. kingcrab# netstat -B Active BPF peers PID Int Recv Drop Capt Flags Bufsize Comm 433 urtwn0 102 0 2 I-RSH 524288 hostapd 211 urtwn0 102 0 4 I-RS- 32768 dhcpd 670 bwfm0 295 0 2 I-RSH 524288 wpa_supplicant kingcrab# @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.247 2022/09/03 10:03:20 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.247 2022/09/03 10:03:20 riastradh Exp $"); d92 1 d248 1 a248 1 static int bpf_movein(struct uio *, int, uint64_t, d327 1 a327 1 bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp, d389 5 a393 1 hlen = 0; d449 9 a457 2 /* move link level header in the top of mbuf to sa_data */ memcpy(sockp->sa_data, mtod(m0, void *), hlen); d868 1 a868 1 error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m, @ 1.247 log @bpf(4): Reject bogus timeout values before arithmetic overflows. Reported-by: syzbot+fbd86bdf579944b64a98@@syzkaller.appspotmail.com https://syzkaller.appspot.com/bug?id=60d46fd4863952897cbf67c6b1bcc8b20ec7bde6 XXX pullup-8 XXX pullup-9 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.246 2022/03/15 13:00:44 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.246 2022/03/15 13:00:44 riastradh Exp $"); d685 5 d825 5 d1582 5 @ 1.246 log @bpf(4): Handle null bf_insn on free. This is not guaranteed by bpf_setf to be nonnull. Reported-by: syzbot+de1ec9471dfc2f283dda@@syzkaller.appspotmail.com @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.245 2022/03/12 17:23:32 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.245 2022/03/12 17:23:32 riastradh Exp $"); d1155 5 a1159 1 if (tv->tv_sec > INT_MAX/hz - 1) { d1193 5 a1197 1 if (tv->tv_sec > INT_MAX/hz - 1) { @ 1.245 log @bpf(4): Nix KM_NOSLEEP and prune dead branch. https://syzkaller.appspot.com/bug?id=0fa7029d5565d9670a24c364d44bd116c76d7e7f @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.244 2022/03/12 16:19:08 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.244 2022/03/12 16:19:08 riastradh Exp $"); a2091 1 KASSERT(filter->bf_insn != NULL); d2093 2 a2094 1 kmem_free(filter->bf_insn, filter->bf_size); @ 1.244 log @bpf(4): Clamp read timeout to INT_MAX ticks to avoid overflow. Reported-by: syzbot+c543d35064d3492b9091@@syzkaller.appspotmail.com @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.243 2021/09/26 01:16:10 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.243 2021/09/26 01:16:10 thorpej Exp $"); d2139 2 a2140 3 bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP); if (bp == NULL) panic("%s: out of memory", __func__); @ 1.243 log @Change the kqueue filterops::f_isfd field to filterops::f_flags, and define a flag FILTEROP_ISFD that has the meaning of the prior f_isfd. Field and flag name aligned with OpenBSD. This does not constitute a functional or ABI change, as the field location and size, and the value placed in that field, are the same as the previous code, but we're bumping __NetBSD_Version__ so 3rd-party module source code can adapt, as needed. NetBSD 9.99.89 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.242 2021/09/16 22:19:11 andvar Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.242 2021/09/16 22:19:11 andvar Exp $"); d1155 6 a1160 1 d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; d1189 6 a1194 1 d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; @ 1.242 log @fix typos in word "successful". @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.241 2021/07/14 06:50:22 yamaguchi Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.241 2021/07/14 06:50:22 yamaguchi Exp $"); d1564 1 a1564 1 .f_isfd = 1, @ 1.241 log @unset IFF_PROMISC at bpf_detach() Doing "d->bd_promisc = 0" is that bpf_detach() does not call ifpromisc(ifp, 0). Currently, there is no reason for this behavior so that it is removed. In addition to the change, the workaround for it in vlan(4) is also removed. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.240 2021/06/09 15:44:15 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.240 2021/06/09 15:44:15 martin Exp $"); d2664 1 a2664 1 * not succesful. I think packet capture not working @ 1.240 log @Add a bpf_register_track_event() function (and deregister equivalent) that allows a driver to track listeners attaching/detaching from tap points. This is usefull for drivers that would have to do extra work for some taps and can not easily decide (at the driver level) if the work would be needed further up the stack. An example is providing radiotap headers for IEEE 802.11 frames. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.239 2020/12/18 01:31:49 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.239 2020/12/18 01:31:49 thorpej Exp $"); a2196 1 d->bd_promisc = 0; /* we can't touch device. */ @ 1.239 log @Use sel{record,remove}_knote(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.238 2020/08/02 07:19:39 maxv Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.238 2020/08/02 07:19:39 maxv Exp $"); d464 1 d477 5 d491 1 d532 1 d534 5 d2141 1 d2150 1 a2150 1 printf("bpf: %s attached\n", ifp->if_xname); d2213 8 d2548 57 d2609 2 @ 1.239.4.1 log @Sync w/ HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.240 2021/06/09 15:44:15 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.240 2021/06/09 15:44:15 martin Exp $"); a463 1 struct bpf_event_tracker *t; a475 5 SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { t->bet_notify(bp, bp->bif_ifp, bp->bif_dlt, BPF_TRACK_EVENT_ATTACH); } a484 1 struct bpf_event_tracker *t; a524 1 a525 5 SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { t->bet_notify(bp, bp->bif_ifp, bp->bif_dlt, BPF_TRACK_EVENT_DETACH); } a2127 1 SLIST_INIT(&bp->bif_trackers); d2136 1 a2136 1 printf("bpf: %s attached with dlt %x\n", ifp->if_xname, dlt); a2198 8 while (!SLIST_EMPTY(&bp->bif_trackers)) { struct bpf_event_tracker *t = SLIST_FIRST(&bp->bif_trackers); SLIST_REMOVE_HEAD(&bp->bif_trackers, bet_entries); kmem_free(t, sizeof(*t)); } a2525 57 static int _bpf_register_track_event(struct bpf_if **driverp, void (*_fun)(struct bpf_if *, struct ifnet *, int, int)) { struct bpf_if *bp; struct bpf_event_tracker *t; int ret = ENOENT; t = kmem_zalloc(sizeof(*t), KM_SLEEP); if (!t) return ENOMEM; t->bet_notify = _fun; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_driverp != driverp) continue; SLIST_INSERT_HEAD(&bp->bif_trackers, t, bet_entries); ret = 0; break; } mutex_exit(&bpf_mtx); return ret; } static int _bpf_deregister_track_event(struct bpf_if **driverp, void (*_fun)(struct bpf_if *, struct ifnet *, int, int)) { struct bpf_if *bp; struct bpf_event_tracker *t = NULL; int ret = ENOENT; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_driverp != driverp) continue; SLIST_FOREACH(t, &bp->bif_trackers, bet_entries) { if (t->bet_notify == _fun) { ret = 0; break; } } if (ret == 0) break; } if (ret == 0 && t && t->bet_notify == _fun) { SLIST_REMOVE(&bp->bif_trackers, t, bpf_event_tracker, bet_entries); } mutex_exit(&bpf_mtx); if (ret == 0) kmem_free(t, sizeof(*t)); return ret; } a2529 2 .bpf_register_track_event = _bpf_register_track_event, .bpf_deregister_track_event = _bpf_deregister_track_event, @ 1.239.4.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.239.4.1 2021/06/17 04:46:34 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.239.4.1 2021/06/17 04:46:34 thorpej Exp $"); d2197 1 @ 1.238 log @Use a more informative panic message. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.237 2020/06/11 13:36:20 roy Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.237 2020/06/11 13:36:20 roy Exp $"); d1531 1 a1531 1 SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext); a1560 1 struct klist *klist; a1561 1 mutex_enter(d->bd_buf_mtx); a1563 1 klist = &d->bd_sel.sel_klist; a1567 1 mutex_exit(d->bd_buf_mtx); d1573 2 a1574 1 SLIST_INSERT_HEAD(klist, kn, kn_selnext); @ 1.238.2.1 log @Sync w/ HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.239 2020/12/18 01:31:49 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.239 2020/12/18 01:31:49 thorpej Exp $"); d1531 1 a1531 1 selremove_knote(&d->bd_sel, kn); d1561 1 d1563 1 d1566 1 d1571 1 d1577 1 a1577 2 mutex_enter(d->bd_buf_mtx); selrecord_knote(&d->bd_sel, kn); @ 1.237 log @bpf(4): Add ioctls BIOCSETWF and BIOCLOCK Once BIOCLOCK is executed, the device becomes locked which prevents the execution of ioctl(2) commands which can change the underlying parameters of the bpf(4) device. An example might be the setting of bpf(4) filter programs or attaching to different network interfaces. BIOCSETWF can be used to set write filters for outgoing packets. Currently if a bpf(4) consumer is compromised, the bpf(4) descriptor can essentially be used as a raw socket, regardless of consumer's UID. Write filters give users the ability to constrain which packets can be sent through the bpf(4) descriptor. Taken from OpenBSD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.236 2020/03/16 21:20:11 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.236 2020/03/16 21:20:11 pgoyette Exp $"); d2121 1 a2121 1 panic("bpfattach"); @ 1.236 log @Use the module subsystem's ability to process SYSCTL_SETUP() entries to automate installation of sysctl nodes. Note that there are still a number of device and pseudo-device modules that create entries tied to individual device units, rather than to the module itself. These are not changed. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.235 2020/02/07 12:35:33 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.235 2020/02/07 12:35:33 thorpej Exp $"); d239 1 d248 2 a249 1 struct mbuf **, struct sockaddr *); d253 1 a253 1 static int bpf_setf(struct bpf_d *, struct bpf_program *); d327 1 a327 1 struct sockaddr *sockp) d334 1 d437 6 d584 3 a586 1 d->bd_filter = NULL; d834 1 a834 1 (struct sockaddr *) &dst); d948 22 d1028 7 a1034 2 case BIOCSETF: error = bpf_setf(d, addr); d1308 1 a1308 1 bpf_setf(struct bpf_d *d, struct bpf_program *fp) d1313 1 a1313 1 struct bpf_filter *oldf, *newf; d1344 2 a1345 1 d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ d1350 8 a1357 2 oldf = d->bd_filter; atomic_store_release(&d->bd_filter, newf); d1608 25 a1645 8 uint32_t mem[BPF_MEMWORDS]; bpf_args_t args = { .pkt = (const uint8_t *)pkt, .wirelen = pktlen, .buflen = buflen, .mem = mem, .arg = NULL }; d1650 1 a1660 3 u_int slen = 0; struct bpf_filter *filter; d1672 3 a1674 8 filter = atomic_load_consume(&d->bd_filter); if (filter != NULL) { if (filter->bf_jitcode != NULL) slen = filter->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filter->bf_insn, &args); } a1675 3 if (!slen) { continue; } d2099 7 a2105 3 if (d->bd_filter != NULL) { bpf_free_filter(d->bd_filter); d->bd_filter = NULL; d2430 1 @ 1.235 log @Use percpu_foreach_xcall() to gather volatile per-cpu counters. These must be serialized against the interrupts / soft-interrupts in which they're manipulated, as well as protected from non-atomic 64-bit memory loads on 32-bit platforms. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.234 2020/02/01 02:54:02 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.234 2020/02/01 02:54:02 riastradh Exp $"); d2430 1 a2430 3 static struct sysctllog *bpf_sysctllog; static void sysctl_net_bpf_setup(void) d2435 1 a2435 1 sysctl_createv(&bpf_sysctllog, 0, NULL, &node, d2443 1 a2443 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d2450 1 a2450 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d2456 1 a2456 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d2462 1 a2462 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, a2510 1 sysctl_net_bpf_setup(); a2534 1 /* insert sysctl teardown */ @ 1.234 log @Fix wrong memory order and switch bpf to atomic_load/store_*. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.233 2020/01/19 05:07:22 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.233 2020/01/19 05:07:22 thorpej Exp $"); d81 1 d2399 2 d2404 2 d2418 2 a2419 1 percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum); @ 1.233 log @Stop including strip.h (it's no longer generated). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.232 2019/11/29 17:29:31 ryo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.232 2019/11/29 17:29:31 ryo Exp $"); d304 7 a310 4 membar_consumer(); if (bpfjit_module_ops.bj_generate_code != NULL) { return bpfjit_module_ops.bj_generate_code(bc, code, size); a1294 1 membar_consumer(); d1311 1 a1311 2 d->bd_filter = newf; membar_producer(); d1611 1 a1611 2 filter = d->bd_filter; membar_datadep_consumer(); a2310 7 /* * Do a full sync to publish new bpf_jit value and * update bpfjit_module_ops.bj_generate_code variable. */ membar_sync(); @ 1.232 log @bpf can send a packet greater than MCLBYTES (JumboFrame) using multiple mbuf. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.231 2019/09/13 06:39:29 maxv Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.231 2019/09/13 06:39:29 maxv Exp $"); a46 1 #include "strip.h" @ 1.232.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.233 2020/01/19 05:07:22 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.233 2020/01/19 05:07:22 thorpej Exp $"); d47 1 @ 1.232.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.235 2020/02/07 12:35:33 thorpej Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.235 2020/02/07 12:35:33 thorpej Exp $"); a80 1 #include d304 4 a307 7 struct bpfjit_ops *ops = &bpfjit_module_ops; bpfjit_func_t (*generate_code)(const bpf_ctx_t *, const struct bpf_insn *, size_t); generate_code = atomic_load_acquire(&ops->bj_generate_code); if (generate_code != NULL) { return generate_code(bc, code, size); d1292 1 d1309 2 a1310 1 atomic_store_release(&d->bd_filter, newf); d1610 2 a1611 1 filter = atomic_load_consume(&d->bd_filter); d2311 7 a2404 2 int s = splnet(); a2407 2 splx(s); d2420 1 a2420 2 percpu_foreach_xcall(bpf_gstats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET), bpf_stats, &sum); @ 1.231 log @As I suspected, the KASSERT I added yesterday can fire if we try to process zero-sized packets. Skip them to prevent a type confusion that can trigger random page faults later. Reported-by: syzbot+3e447ebdcb2bcfa402ac@@syzkaller.appspotmail.com @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.230 2019/09/12 07:38:19 maxv Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.230 2019/09/12 07:38:19 maxv Exp $"); d324 1 a324 1 struct mbuf *m; d398 1 a398 9 /* * XXX Avoid complicated buffer chaining --- * bail if it won't fit in a single mbuf. * (Take into account possible alignment bytes) */ if (len + align > MCLBYTES) return (EIO); m = m_gethdr(M_WAIT, MT_DATA); d410 1 a410 1 if (align > 0) { d412 17 a428 1 m->m_len -= (int)align; a430 3 error = uiomove(mtod(m, void *), len, uio); if (error) goto bad; d432 4 a435 3 memcpy(sockp->sa_data, mtod(m, void *), hlen); m->m_data += hlen; /* XXX */ len -= hlen; d437 2 a438 2 m->m_len = (int)len; *mp = m; d442 1 a442 1 m_freem(m); @ 1.230 log @Add KASSERT to catch bugs. Something tells me it could easily fire. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.229 2019/07/10 17:55:33 maxv Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.229 2019/07/10 17:55:33 maxv Exp $"); d1679 5 @ 1.229 log @Fix info leak: use kmem_zalloc, because we align the buffers, and the otherwise uninitialized padding bytes get copied to userland in bpf_read(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $"); d1683 1 @ 1.229.2.1 log @Pull up following revision(s) (requested by maxv in ticket #335): sys/net/bpf.c: revision 1.230 sys/net/bpf.c: revision 1.231 Add KASSERT to catch bugs. Something tells me it could easily fire. - As I suspected, the KASSERT I added yesterday can fire if we try to process zero-sized packets. Skip them to prevent a type confusion that can trigger random page faults later. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.229 2019/07/10 17:55:33 maxv Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.229 2019/07/10 17:55:33 maxv Exp $"); a1678 5 /* Skip zero-sized packets. */ if (__predict_false(pktlen == 0)) { return; } a1682 1 KASSERT(buflen != 0); @ 1.229.2.2 log @Pull up following revision(s) (requested by riastradh in ticket #1605): sys/net/bpf.c: revision 1.247 (manually merged) bpf(4): Reject bogus timeout values before arithmetic overflows. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.229.2.1 2019/10/16 09:46:55 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.229.2.1 2019/10/16 09:46:55 martin Exp $"); d1095 1 a1095 10 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } d1124 1 a1124 10 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } @ 1.229.2.3 log @Apply patch, requested by ozaki-r in ticket #1708: sys/net/bpf.c (apply patch) bpf: allow to read with no filter (regressed at revision 1.213, fixed differently in -current) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.229.2.2 2023/02/22 19:50:33 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.229.2.2 2023/02/22 19:50:33 martin Exp $"); a1630 2 } else { slen = (u_int)-1; /* No filter means accept all */ @ 1.228 log @Rename min/max -> uimin/uimax for better honesty. These functions are defined on unsigned int. The generic name min/max should not silently truncate to 32 bits on 64-bit systems. This is purely a name change -- no functional change intended. HOWEVER! Some subsystems have #define min(a, b) ((a) < (b) ? (a) : (b)) #define max(a, b) ((a) > (b) ? (a) : (b)) even though our standard name for that is MIN/MAX. Although these may invite multiple evaluation bugs, these do _not_ cause integer truncation. To avoid `fixing' these cases, I first changed the name in libkern, and then compile-tested every file where min/max occurred in order to confirm that it failed -- and thus confirm that nothing shadowed min/max -- before changing it. I have left a handful of bootloaders that are too annoying to compile-test, and some dead code: cobalt ews4800mips hp300 hppa ia64 luna68k vax acorn32/if_ie.c (not included in any kernels) macppc/if_gm.c (superseded by gem(4)) It should be easy to fix the fallout once identified -- this way of doing things fails safe, and the goal here, after all, is to _avoid_ silent integer truncations, not introduce them. Maybe one day we can reintroduce min/max as type-generic things that never silently truncate. But we should avoid doing that for a while, so that existing code has a chance to be detected by the compiler for conversion to uimin/uimax without changing the semantics until we can properly audit it all. (Who knows, maybe in some cases integer truncation is actually intended!) @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d1990 1 a1990 1 d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1993 1 a1993 1 d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); @ 1.227 log @ Initialize some members in a mbuf which is on stack. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.226 2018/06/26 06:48:02 msaitoh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.226 2018/06/26 06:48:02 msaitoh Exp $"); d1547 1 a1547 1 count = min(m->m_len, len); d1892 1 a1892 1 totlen = hdrlen + min(snaplen, pktlen); @ 1.226 log @ Implement the BPF direction filter (BIOC[GS]DIRECTION). It provides backward compatibility with BIOC[GS]SEESENT ioctl. The userland interface is the same as FreeBSD. This change also fixes a bug that the direction is misunderstand on some environment by passing the direction to bpf_mtap*() instead of checking m->m_pkthdr.rcvif. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.225 2018/06/25 03:22:14 msaitoh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.225 2018/06/25 03:22:14 msaitoh Exp $"); d1653 1 d1704 1 d1707 2 d1750 1 d1753 2 @ 1.226.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $"); d1547 1 a1547 1 count = uimin(m->m_len, len); a1652 1 mb.m_type = MT_DATA; a1702 1 m0.m_type = MT_DATA; a1704 2 m0.m_nextpkt = NULL; m0.m_owner = NULL; a1745 1 m0.m_type = MT_DATA; a1747 2 m0.m_nextpkt = NULL; m0.m_owner = NULL; d1885 1 a1885 1 totlen = hdrlen + uimin(snaplen, pktlen); @ 1.226.2.2 log @Merge changes from current as of 20200406 @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d47 1 a81 1 #include d305 4 a308 7 struct bpfjit_ops *ops = &bpfjit_module_ops; bpfjit_func_t (*generate_code)(const bpf_ctx_t *, const struct bpf_insn *, size_t); generate_code = atomic_load_acquire(&ops->bj_generate_code); if (generate_code != NULL) { return generate_code(bc, code, size); d324 1 a324 1 struct mbuf *m, *m0, *n; d398 9 a406 1 m0 = m = m_gethdr(M_WAIT, MT_DATA); d418 1 a418 1 if (align > 0) d420 1 a420 17 for (;;) { len = M_TRAILINGSPACE(m); if (len > uio->uio_resid) len = uio->uio_resid; error = uiomove(mtod(m, void *), len, uio); if (error) goto bad; m->m_len = len; if (uio->uio_resid == 0) break; n = m_get(M_WAIT, MT_DATA); m_clget(n, M_WAIT); /* if fails, there is no problem */ m->m_next = n; m = n; d423 3 d427 3 a429 4 /* move link level header in the top of mbuf to sa_data */ memcpy(sockp->sa_data, mtod(m0, void *), hlen); m0->m_data += hlen; m0->m_len -= hlen; d431 2 a432 2 *mp = m0; d436 1 a436 1 m_freem(m0); d1287 1 d1304 2 a1305 1 atomic_store_release(&d->bd_filter, newf); d1605 2 a1606 1 filter = atomic_load_consume(&d->bd_filter); d2300 7 a2393 2 int s = splnet(); a2396 2 splx(s); d2409 1 a2409 2 percpu_foreach_xcall(bpf_gstats_percpu, XC_HIGHPRI_IPL(IPL_SOFTNET), bpf_stats, &sum); d2420 3 a2422 1 SYSCTL_SETUP(sysctl_net_bpf_setup, "bpf sysctls") d2427 1 a2427 1 sysctl_createv(clog, 0, NULL, &node, d2435 1 a2435 1 sysctl_createv(clog, 0, NULL, NULL, d2442 1 a2442 1 sysctl_createv(clog, 0, NULL, NULL, d2448 1 a2448 1 sysctl_createv(clog, 0, NULL, NULL, d2454 1 a2454 1 sysctl_createv(clog, 0, NULL, NULL, d2503 1 d2528 1 @ 1.226.2.3 log @Mostly merge changes from HEAD upto 20200411 @ text @a1684 5 /* Skip zero-sized packets. */ if (__predict_false(pktlen == 0)) { return; } a1688 1 KASSERT(buflen != 0); d1996 1 a1996 1 d->bd_fbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); d1999 1 a1999 1 d->bd_sbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); @ 1.225 log @ Removal of bpf_tap(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.224 2018/05/14 02:55:03 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.224 2018/05/14 02:55:03 ozaki-r Exp $"); d241 1 a241 1 void *, u_int, u_int, const bool); d554 1 a554 1 d->bd_seesent = 1; d901 2 a902 2 * BIOCGSEESENT Get "see sent packets" mode. * BIOCSSEESENT Set "see sent packets" mode. d1189 1 a1189 1 * Get "see sent packets" flag d1191 2 a1192 2 case BIOCGSEESENT: *(u_int *)addr = d->bd_seesent; d1196 1 a1196 1 * Set "see sent" packets flag d1198 15 a1212 2 case BIOCSSEESENT: d->bd_seesent = *(u_int *)addr; d1559 5 a1563 5 * pkt pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if pkt is a data buffer * cpfn a function that can copy pkt into the listener's buffer * pktlen length of the packet * rcv true if packet came in d1567 1 a1567 1 void *pkt, u_int pktlen, u_int buflen, const bool rcv) d1594 6 a1599 2 if (!d->bd_seesent && !rcv) { continue; d1601 1 d1633 2 a1634 1 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) d1657 1 a1657 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif_index != 0); d1664 1 a1664 1 _bpf_mtap(struct bpf_if *bp, struct mbuf *m) d1688 1 a1688 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif_index != 0); d1699 1 a1699 1 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1708 1 a1708 1 _bpf_mtap(bp, &m0); d1730 1 a1730 1 _bpf_mtap(bp, *m); d1756 1 a1756 1 _bpf_mtap(bp, &m0); d1819 1 a1819 1 bpf_ops->bpf_mtap(bp, m); d2349 1 a2349 1 BPF_EXT(seesent); @ 1.224 log @Protect packet input routines with KERNEL_LOCK and splsoftnet if_input, i.e, ether_input and friends, now runs in softint without any protections. It's ok for ether_input itself because it's already MP-safe, however, subsequent routines called from it such as carp_input and agr_input aren't safe because they're not MP-safe. Protect if_input with KERNEL_LOCK. if_input can be called from a normal LWP context. In that case we need to prevent interrupts (softint) from running by splsoftnet to protect non-MP-safe codes (e.g., carp_input and agr_input). Pointed out by mlelstv@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.223 2018/01/25 02:45:02 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.223 2018/01/25 02:45:02 ozaki-r Exp $"); a1610 13 * Incoming linkage from device drivers. Process the packet pkt, of length * pktlen, which is stored in a contiguous buffer. The packet is parsed * by each process' filter, and if accepted, stashed into the corresponding * buffer. */ static void _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) { bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true); } /* a2442 1 .bpf_tap = _bpf_tap, @ 1.223 log @Abandon unnecessary softint The softint was introduced to defer fownsignal that was called in bpf_wakeup to softint at v1.139, but now bpf_wakeup always runs in softint so we don't need the softint anymore. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.222 2017/12/15 07:29:11 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.222 2017/12/15 07:29:11 ozaki-r Exp $"); d840 3 a842 1 if (error == 0) d844 3 a846 1 else @ 1.223.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.224 2018/05/14 02:55:03 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.224 2018/05/14 02:55:03 ozaki-r Exp $"); d840 1 a840 3 if (error == 0) { int s = splsoftnet(); KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp); d842 1 a842 3 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp); splx(s); } else @ 1.223.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.225 2018/06/25 03:22:14 msaitoh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.225 2018/06/25 03:22:14 msaitoh Exp $"); d1611 13 d2456 1 @ 1.223.2.3 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.227 2018/07/25 07:55:45 msaitoh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.227 2018/07/25 07:55:45 msaitoh Exp $"); d241 1 a241 1 void *, u_int, u_int, const u_int); d554 1 a554 1 d->bd_direction = BPF_D_INOUT; d901 2 a902 2 * BIOCGDIRECTION Get packet direction flag * BIOCSDIRECTION Set packet direction flag d1189 1 a1189 1 * Get packet direction flag d1191 2 a1192 2 case BIOCGDIRECTION: *(u_int *)addr = d->bd_direction; d1196 1 a1196 1 * Set packet direction flag d1198 2 a1199 15 case BIOCSDIRECTION: { u_int direction; direction = *(u_int *)addr; switch (direction) { case BPF_D_IN: case BPF_D_INOUT: case BPF_D_OUT: d->bd_direction = direction; break; default: error = EINVAL; } } d1546 5 a1550 5 * pkt pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if pkt is a data buffer * cpfn a function that can copy pkt into the listener's buffer * pktlen length of the packet * direction BPF_D_IN or BPF_D_OUT d1554 1 a1554 1 void *pkt, u_int pktlen, u_int buflen, const u_int direction) d1581 2 a1582 6 if (direction == BPF_D_IN) { if (d->bd_direction == BPF_D_OUT) continue; } else { /* BPF_D_OUT */ if (d->bd_direction == BPF_D_IN) continue; a1583 1 d1615 1 a1615 2 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m, u_int direction) a1633 1 mb.m_type = MT_DATA; d1638 1 a1638 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, direction); d1645 1 a1645 1 _bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction) d1669 1 a1669 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, direction); d1680 1 a1680 1 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m, u_int direction) a1683 1 m0.m_type = MT_DATA; a1685 2 m0.m_nextpkt = NULL; m0.m_owner = NULL; d1689 1 a1689 1 _bpf_mtap(bp, &m0, direction); d1711 1 a1711 1 _bpf_mtap(bp, *m, BPF_D_IN); a1726 1 m0.m_type = MT_DATA; a1728 2 m0.m_nextpkt = NULL; m0.m_owner = NULL; d1737 1 a1737 1 _bpf_mtap(bp, &m0, BPF_D_OUT); d1800 1 a1800 1 bpf_ops->bpf_mtap(bp, m, BPF_D_IN); d2330 1 a2330 1 BPF_EXT(direction); @ 1.223.2.4 log @Sync with HEAD Resolve a couple of conflicts (result of the uimin/uimax changes) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.228 2018/09/03 16:29:35 riastradh Exp $"); d1547 1 a1547 1 count = uimin(m->m_len, len); d1892 1 a1892 1 totlen = hdrlen + uimin(snaplen, pktlen); @ 1.222 log @Make softint and callout MP-safe @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.221 2017/12/12 06:26:57 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.221 2017/12/12 06:26:57 ozaki-r Exp $"); a270 1 static void bpf_softintr(void *); a564 2 d->bd_sih = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, bpf_softintr, d); a620 1 softint_disestablish(d->bd_sih); d755 1 a755 1 softint_schedule(d->bd_sih); a759 12 bpf_softintr(void *cookie) { struct bpf_d *d; d = cookie; mutex_enter(d->bd_mtx); if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); mutex_exit(d->bd_mtx); } static void @ 1.221 log @Fix panic in callout_halt (fix typo) Reported by wiz@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.220 2017/11/30 20:25:55 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.220 2017/11/30 20:25:55 christos Exp $"); d564 1 a564 1 callout_init(&d->bd_callout, 0); d566 2 a567 1 d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d); d769 1 d772 1 d1237 1 d1239 1 @ 1.220 log @add fo_name so we can identify the fileops in a simple way. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.219 2017/11/17 07:37:12 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.219 2017/11/17 07:37:12 ozaki-r Exp $"); d665 1 a665 1 callout_halt(&d->bd_callout, d->bd_buf_mtx); @ 1.219 log @Provide macros for softnet_lock and KERNEL_LOCK hiding NET_MPSAFE switch It reduces C&P codes such as "#ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); ..." scattered all over the source code and makes it easy to identify remaining KERNEL_LOCK and/or softnet_lock that are held even if NET_MPSAFE. No functional change @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.218 2017/10/25 08:12:40 maya Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.218 2017/10/25 08:12:40 maya Exp $"); d274 1 @ 1.218 log @Use C99 initializer for filterops Mostly done with spatch with touchups for indentation @@@@ expression a; identifier b,c,d; identifier p; @@@@ const struct filterops p = - { a, b, c, d + { + .f_isfd = a, + .f_attach = b, + .f_detach = c, + .f_event = d, }; @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.217 2017/10/19 01:57:15 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.217 2017/10/19 01:57:15 ozaki-r Exp $"); d487 1 a487 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif d489 1 a489 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d1021 1 a1021 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif d1023 1 a1023 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d2244 1 a2244 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif d2246 1 a2246 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif @ 1.217 log @Turn on D_MPSAFE flag of bpf_cdevsw that is already MP-safe Pointed out by k-goda@@IIJ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $"); d1499 6 a1504 2 static const struct filterops bpfread_filtops = { 1, NULL, filt_bpfrdetach, filt_bpfread }; @ 1.216 log @Reinit a pslist entry before inserting it to a pslist again Fix PR kern/51984 Tested by nonaka@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.215 2017/02/19 13:58:42 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.215 2017/02/19 13:58:42 christos Exp $"); d299 1 a299 1 .d_flag = D_OTHER @ 1.216.6.1 log @Pull up following revision(s) (requested by ozaki-r in ticket #329): sys/net/bpf.c: revision 1.217 Turn on D_MPSAFE flag of bpf_cdevsw that is already MP-safe Pointed out by k-goda@@IIJ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $"); d299 1 a299 1 .d_flag = D_OTHER | D_MPSAFE @ 1.216.6.2 log @Pull up following revision(s) (requested by ozaki-r in ticket #446): sys/net/bpf.c: revision 1.221 Fix panic in callout_halt (fix typo) Reported by wiz@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.1 2017/10/25 07:14:09 snj Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.1 2017/10/25 07:14:09 snj Exp $"); d668 1 a668 1 callout_halt(&d->bd_callout, d->bd_mtx); @ 1.216.6.3 log @Pull up following revision(s) (requested by ozaki-r in ticket #454): sys/net/bpf.c: revision 1.222 Make softint and callout MP-safe @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.2 2017/12/21 21:38:23 snj Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.2 2017/12/21 21:38:23 snj Exp $"); d567 1 a567 1 callout_init(&d->bd_callout, CALLOUT_MPSAFE); d569 1 a569 2 d->bd_sih = softint_establish(SOFTINT_CLOCK|SOFTINT_MPSAFE, bpf_softintr, d); a770 1 mutex_enter(d->bd_mtx); a772 1 mutex_exit(d->bd_mtx); a1240 1 mutex_enter(d->bd_mtx); a1241 1 mutex_exit(d->bd_mtx); @ 1.216.6.4 log @Pull up following revision(s) (requested by ozaki-r in ticket #456): sys/arch/arm/sunxi/sunxi_emac.c: 1.9 sys/dev/ic/dwc_gmac.c: 1.43-1.44 sys/dev/pci/if_iwm.c: 1.75 sys/dev/pci/if_wm.c: 1.543 sys/dev/pci/ixgbe/ixgbe.c: 1.112 sys/dev/pci/ixgbe/ixv.c: 1.74 sys/kern/sys_socket.c: 1.75 sys/net/agr/if_agr.c: 1.43 sys/net/bpf.c: 1.219 sys/net/if.c: 1.397, 1.399, 1.401-1.403, 1.406-1.410, 1.412-1.416 sys/net/if.h: 1.242-1.247, 1.250, 1.252-1.257 sys/net/if_bridge.c: 1.140 via patch, 1.142-1.146 sys/net/if_etherip.c: 1.40 sys/net/if_ethersubr.c: 1.243, 1.246 sys/net/if_faith.c: 1.57 sys/net/if_gif.c: 1.132 sys/net/if_l2tp.c: 1.15, 1.17 sys/net/if_loop.c: 1.98-1.101 sys/net/if_media.c: 1.35 sys/net/if_pppoe.c: 1.131-1.132 sys/net/if_spppsubr.c: 1.176-1.177 sys/net/if_tun.c: 1.142 sys/net/if_vlan.c: 1.107, 1.109, 1.114-1.121 sys/net/npf/npf_ifaddr.c: 1.3 sys/net/npf/npf_os.c: 1.8-1.9 sys/net/rtsock.c: 1.230 sys/netcan/if_canloop.c: 1.3-1.5 sys/netinet/if_arp.c: 1.255 sys/netinet/igmp.c: 1.65 sys/netinet/in.c: 1.210-1.211 sys/netinet/in_pcb.c: 1.180 sys/netinet/ip_carp.c: 1.92, 1.94 sys/netinet/ip_flow.c: 1.81 sys/netinet/ip_input.c: 1.362 sys/netinet/ip_mroute.c: 1.147 sys/netinet/ip_output.c: 1.283, 1.285, 1.287 sys/netinet6/frag6.c: 1.61 sys/netinet6/in6.c: 1.251, 1.255 sys/netinet6/in6_pcb.c: 1.162 sys/netinet6/ip6_flow.c: 1.35 sys/netinet6/ip6_input.c: 1.183 sys/netinet6/ip6_output.c: 1.196 sys/netinet6/mld6.c: 1.90 sys/netinet6/nd6.c: 1.239-1.240 sys/netinet6/nd6_nbr.c: 1.139 sys/netinet6/nd6_rtr.c: 1.136 sys/netipsec/ipsec_output.c: 1.65 sys/rump/net/lib/libnetinet/netinet_component.c: 1.9-1.10 kmem_intr_free kmem_intr_[z]alloced memory the underlying pools are the same but api-wise those should match Unify IFEF_*_MPSAFE into IFEF_MPSAFE There are already two flags for if_output and if_start, however, it seems such MPSAFE flags are eventually needed for all if_XXX operations. Having discrete flags for each operation is wasteful of if_extflags bits. So let's unify the flags into one: IFEF_MPSAFE. Fortunately IFEF_*_MPSAFE flags have never been included in any releases, so we can change them without breaking backward compatibility of the releases (though the kernel version of -current should be bumped). Note that if an interface have both MP-safe and non-MP-safe operations at a time, we have to set the IFEF_MPSAFE flag and let callees of non-MP-safe opeartions take the kernel lock. Proposed on tech-kern@@ and tech-net@@ Provide macros for softnet_lock and KERNEL_LOCK hiding NET_MPSAFE switch It reduces C&P codes such as "#ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); ..." scattered all over the source code and makes it easy to identify remaining KERNEL_LOCK and/or softnet_lock that are held even if NET_MPSAFE. No functional change Hold KERNEL_LOCK on if_ioctl selectively based on IFEF_MPSAFE If IFEF_MPSAFE is set, hold the lock and otherwise don't hold. This change requires additions of KERNEL_LOCK to subsequence functions from if_ioctl such as ifmedia_ioctl and ifioctl_common to protect non-MP-safe components. Proposed on tech-kern@@ and tech-net@@ Ensure to hold if_ioctl_lock when calling if_flags_set Fix locking against myself on ifpromisc vlan_unconfig_locked could be called with holding if_ioctl_lock. Ensure to not turn on IFF_RUNNING of an interface until its initialization completes And ensure to turn off it before destruction as per IFF_RUNNING's description "resource allocated". (The description is a bit doubtful though, I believe the change is still proper.) Ensure to hold if_ioctl_lock on if_up and if_down One exception for if_down is if_detach; in the case the lock isn't needed because it's guaranteed that no other one can access ifp at that point. Make if_link_queue MP-safe if IFEF_MPSAFE if_link_queue is a queue to store events of link state changes, which is used to pass events from (typically) an interrupt handler to if_link_state_change softint. The queue was protected by KERNEL_LOCK so far, but if IFEF_MPSAFE is enabled, it becomes unsafe because (perhaps) an interrupt handler of an interface with IFEF_MPSAFE doesn't take KERNEL_LOCK. Protect it by a spin mutex. Additionally with this change KERNEL_LOCK of if_link_state_change softint is omitted if NET_MPSAFE is enabled. Note that the spin mutex is now ifp->if_snd.ifq_lock as well as the case of if_timer (see the comment). Use IFADDR_WRITER_FOREACH instead of IFADDR_READER_FOREACH At that point no other one modifies the list so IFADDR_READER_FOREACH is unnecessary. Use of IFADDR_READER_FOREACH is harmless in general though, if we try to detect contract violations of pserialize, using it violates the contract. So avoid using it makes life easy. Ensure to call if_addr_init with holding if_ioctl_lock Get rid of outdated comments Fix build of kernels without ether By throwing out if_enable_vlan_mtu and if_disable_vlan_mtu that created a unnecessary dependency from if.c to if_ethersubr.c. PR kern/52790 Rename IFNET_LOCK to IFNET_GLOBAL_LOCK IFNET_LOCK will be used in another lock, if_ioctl_lock (might be renamed then). Wrap if_ioctl_lock with IFNET_* macros (NFC) Also if_ioctl_lock perhaps needs to be renamed to something because it's now not just for ioctl... Reorder some destruction routines in if_detach - Destroy if_ioctl_lock at the end of the if_detach because it's used in various destruction routines - Move psref_target_destroy after pr_purgeif because we want to use psref in pr_purgeif (otherwise destruction procedures can be tricky) Ensure to call if_mcast_op with holding IFNET_LOCK Note that CARP doesn't deal with IFNET_LOCK yet. Remove IFNET_GLOBAL_LOCK where it's unnecessary because IFNET_LOCK is held Describe which lock is used to protect each member variable of struct ifnet Requested by skrll@@ Write a guideline for converting an interface to IFEF_MPSAFE Requested by skrll@@ Note that IFNET_LOCK must not be held in softint Don't set IFEF_MPSAFE unless NET_MPSAFE at this point Because recent investigations show that interfaces with IFEF_MPSAFE need to follow additional restrictions to work with the flag safely. We should enable it on an interface by default only if the interface surely satisfies the restrictions, which are described in if.h. Note that enabling IFEF_MPSAFE solely gains a few benefit on performance because the network stack is still serialized by the big kernel locks by default. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.3 2017/12/21 21:51:37 snj Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.3 2017/12/21 21:51:37 snj Exp $"); d487 3 a489 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); d491 3 a493 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); d1028 3 a1030 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); d1032 3 a1034 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); d2253 3 a2255 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); d2257 3 a2259 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); @ 1.216.6.5 log @Pull up following revision(s) (requested by ozaki-r in ticket #526): sys/net/bpfdesc.h: revision 1.45 sys/net/bpf.c: revision 1.223 Abandon unnecessary softint The softint was introduced to defer fownsignal that was called in bpf_wakeup to softint at v1.139, but now bpf_wakeup always runs in softint so we don't need the softint anymore. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.4 2018/01/02 10:20:33 snj Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.4 2018/01/02 10:20:33 snj Exp $"); d271 1 d565 2 d623 1 d758 12 d771 1 a771 1 selnotify(&d->bd_sel, 0, 0); @ 1.216.6.6 log @Pull up following revision(s) (requested by ozaki-r in ticket #826): sys/net/if_bridge.c: revision 1.155 sys/net/if.c: revision 1.421 sys/net/bpf.c: revision 1.224 sys/net/if.c: revision 1.422 sys/net/if.c: revision 1.423 Use if_is_mpsafe (NFC) Protect packet input routines with KERNEL_LOCK and splsoftnet if_input, i.e, ether_input and friends, now runs in softint without any protections. It's ok for ether_input itself because it's already MP-safe, however, subsequent routines called from it such as carp_input and agr_input aren't safe because they're not MP-safe. Protect if_input with KERNEL_LOCK. if_input can be called from a normal LWP context. In that case we need to prevent interrupts (softint) from running by splsoftnet to protect non-MP-safe codes (e.g., carp_input and agr_input). Pointed out by mlelstv@@ Protect if_deferred_start_softint with KERNEL_LOCK if the interface isn't MP-safe @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.5 2018/02/05 14:18:00 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.5 2018/02/05 14:18:00 martin Exp $"); d839 1 a839 3 if (error == 0) { int s = splsoftnet(); KERNEL_LOCK_UNLESS_IFP_MPSAFE(ifp); d841 1 a841 3 KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(ifp); splx(s); } else @ 1.216.6.7 log @Pull up following revision(s) (requested by maxv in ticket #1323): sys/net/bpf.c: revision 1.229 Fix info leak: use kmem_zalloc, because we align the buffers, and the otherwise uninitialized padding bytes get copied to userland in bpf_read(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.6 2018/05/15 13:48:37 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.6 2018/05/15 13:48:37 martin Exp $"); d1972 1 a1972 1 d->bd_fbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); d1975 1 a1975 1 d->bd_sbuf = kmem_zalloc(d->bd_bufsize, KM_NOSLEEP); @ 1.216.6.8 log @Pull up following revision(s) (requested by riastradh in ticket #1802): sys/net/bpf.c: revision 1.247 (manually merged) bpf(4): Reject bogus timeout values before arithmetic overflows. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.7 2019/08/04 11:19:03 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.7 2019/08/04 11:19:03 martin Exp $"); d1094 1 a1094 10 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } d1123 1 a1123 10 if (tv->tv_sec < 0 || tv->tv_usec < 0 || tv->tv_usec >= 1000000) { error = EINVAL; break; } else if (tv->tv_sec > INT_MAX/hz - 1) { d->bd_rtout = INT_MAX; } else { d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; } @ 1.216.6.9 log @Apply patch, requested by ozaki-r in ticket #1885: sys/net/bpf.c (apply patch) bpf: allow to read with no filter (regressed at revision 1.213, fixed differently in -current) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.6.8 2023/02/22 19:51:47 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.6.8 2023/02/22 19:51:47 martin Exp $"); a1607 2 } else { slen = (u_int)-1; /* No filter means accept all */ @ 1.216.4.1 log @Restore all work from the former pgoyette-localcount branch (which is now abandoned doe to cvs merge botch). The branch now builds, and installs via anita. There are still some problems (cgd is non-functional and all atf tests time-out) but they will get resolved soon. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $"); a80 1 #include a287 1 DEVSW_MODULE_INIT @ 1.216.4.2 log @Remove more unnecessary #include for sys/localcount.h @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216.4.1 2017/04/27 05:36:38 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216.4.1 2017/04/27 05:36:38 pgoyette Exp $"); d81 1 @ 1.215 log @typo @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.214 2017/02/13 03:44:45 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.214 2017/02/13 03:44:45 ozaki-r Exp $"); d1383 1 a1383 1 if (d->bd_bif) d1388 2 d2244 1 @ 1.214 log @Update comments to reflect bpf MP-ification @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.213 2017/02/09 09:30:26 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.213 2017/02/09 09:30:26 ozaki-r Exp $"); d187 1 a187 1 #define BPF_DLIST_WRITER_INSEART_HEAD(__d) \ d579 1 a579 1 BPF_DLIST_WRITER_INSEART_HEAD(d); @ 1.213 log @Make bpf MP-safe By the change, bpf_mtap can run without any locks as long as its bpf filter doesn't match a target packet. Pushing data to a bpf buffer still needs a lock. Removing the lock requires big changes and it's a future work. Another known issue is that we need to remain some obsolete variables to avoid breaking kvm(3) users such as netstat and fstat. One problem for MP-ification is that in order to keep statistic counters of bpf_d we need to use atomic operations for them. Once we retire the kvm(3) users, we should make the counters per-CPU and remove the atomic operations. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.212 2017/02/01 08:18:33 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.212 2017/02/01 08:18:33 ozaki-r Exp $"); a441 1 * Must be called at splnet. d873 1 a873 1 * receive and drop counts. Should be called at splnet. @ 1.212 log @Reduce return points @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.211 2017/02/01 08:16:42 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.211 2017/02/01 08:16:42 ozaki-r Exp $"); d80 2 d137 20 d162 17 d243 1 d447 1 d449 1 d470 1 d488 3 d492 3 d504 1 a504 3 /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ BPFIF_DLIST_ENTRY_DESTROY(d); a505 1 /* XXX NOMPSAFE? */ d520 2 d572 1 d575 2 a576 1 d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); a594 1 int s; a595 1 KERNEL_LOCK(1, NULL); a599 1 KERNEL_UNLOCK_ONE(NULL); d608 1 a608 1 s = splnet(); d610 1 a610 1 callout_stop(&d->bd_callout); d614 2 a615 2 splx(s); bpf_freed(d); a616 1 fp->f_bpf = NULL; d618 1 a619 1 KERNEL_UNLOCK_ONE(NULL); d621 1 a621 2 /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ d623 2 a624 1 d629 1 a657 1 int s; d667 1 a667 2 KERNEL_LOCK(1, NULL); s = splnet(); d669 1 a669 1 callout_stop(&d->bd_callout); d672 1 d678 1 d699 1 a699 3 mutex_enter(d->bd_mtx); error = cv_timedwait_sig(&d->bd_cv, d->bd_mtx, d->bd_rtout); mutex_exit(d->bd_mtx); d731 1 a731 1 splx(s); d740 1 a740 1 s = splnet(); d745 1 a745 2 splx(s); KERNEL_UNLOCK_ONE(NULL); d757 1 a757 1 mutex_enter(d->bd_mtx); d759 1 a759 1 mutex_exit(d->bd_mtx); a779 1 int s; d781 1 a781 1 s = splnet(); d787 1 a787 1 splx(s); d796 1 d799 1 a799 1 int error, s; d801 2 d806 10 a815 1 KERNEL_LOCK(1, NULL); a816 4 if (d->bd_bif == NULL) { KERNEL_UNLOCK_ONE(NULL); return (ENXIO); } d819 5 a823 1 ifp = d->bd_bif->bif_ifp; d826 2 a827 2 KERNEL_UNLOCK_ONE(NULL); return (0); d830 1 a830 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, d832 2 a833 4 if (error) { KERNEL_UNLOCK_ONE(NULL); return (error); } a835 1 KERNEL_UNLOCK_ONE(NULL); d837 2 a838 1 return (EMSGSIZE); a853 1 s = splsoftnet(); a861 2 splx(s); KERNEL_UNLOCK_ONE(NULL); d865 5 a869 1 return (error); d879 4 d893 1 d922 1 a922 1 int s, error = 0; a926 1 KERNEL_LOCK(1, NULL); d935 1 a935 1 s = splnet(); d937 1 a937 1 callout_stop(&d->bd_callout); d939 1 a939 1 splx(s); d954 1 a954 1 s = splnet(); d958 1 a958 1 splx(s); d979 2 d992 2 d1007 1 a1007 1 s = splnet(); d1009 1 a1009 1 splx(s); d1016 1 d1018 1 a1024 1 s = splnet(); d1026 3 d1030 3 d1036 1 a1036 1 splx(s); d1043 1 d1048 1 d1055 1 d1060 1 d1068 1 d1073 1 d1084 1 d1089 1 a1254 1 KERNEL_UNLOCK_ONE(NULL); d1265 4 a1268 4 struct bpf_insn *fcode, *old; bpfjit_func_t jcode, oldj; size_t flen, size = 0, old_size; int s; d1296 5 a1300 1 old_size = d->bd_filter_size; d1302 6 a1307 6 s = splnet(); old = d->bd_filter; d->bd_filter = fcode; d->bd_filter_size = size; oldj = d->bd_jitcode; d->bd_jitcode = jcode; d1309 3 a1311 1 splx(s); d1313 2 a1314 6 if (old) { kmem_free(old, old_size); } if (oldj) { bpf_jit_freecode(oldj); } d1329 1 a1329 1 int unit_seen, i, s, error; d1373 4 d1382 1 a1382 1 s = splnet(); d1393 1 a1393 1 splx(s); d1415 1 a1415 1 KERNEL_LOCK(1, NULL); d1423 1 a1423 1 KERNEL_UNLOCK_ONE(NULL); a1438 1 int s = splnet(); d1444 1 a1444 1 KERNEL_LOCK(1, NULL); d1452 1 d1466 1 d1469 1 a1469 2 KERNEL_UNLOCK_ONE(NULL); splx(s); a1476 1 int s; d1478 1 a1478 2 KERNEL_LOCK(1, NULL); s = splnet(); d1480 1 a1480 2 splx(s); KERNEL_UNLOCK_ONE(NULL); d1489 1 a1489 1 KERNEL_LOCK(1, NULL); d1494 1 a1494 1 KERNEL_UNLOCK_ONE(NULL); a1505 3 int s; KERNEL_LOCK(1, NULL); d1507 1 d1515 1 a1515 1 KERNEL_UNLOCK_ONE(NULL); a1520 1 s = splnet(); d1522 1 a1522 2 splx(s); KERNEL_UNLOCK_ONE(NULL); d1576 3 d1585 1 d1587 2 a1588 1 u_int slen; d1593 1 a1593 1 d->bd_rcount++; d1596 9 a1604 4 if (d->bd_jitcode) slen = d->bd_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, d->bd_filter, &args); d1613 1 d1616 1 a1722 1 int s; a1732 1 s = splnet(); a1733 1 splx(s); a1747 1 int s; a1758 1 s = splnet(); a1759 1 splx(s); d1792 1 a1821 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a1822 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d1880 1 a1880 1 ++d->bd_ccount; d1899 1 d1916 1 d1921 1 a1921 1 ++d->bd_dcount; d1969 1 d1999 13 d2031 3 a2033 5 if (d->bd_filter) kmem_free(d->bd_filter, d->bd_filter_size); if (d->bd_jitcode != NULL) { bpf_jit_freecode(d->bd_jitcode); d2035 1 d2058 1 d2108 1 a2113 1 s = splnet(); d2116 1 a2116 1 splx(s); d2119 1 d2126 4 a2129 2 /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ d2132 1 d2157 2 a2158 1 BPF_IFLIST_READER_FOREACH(bp) { d2168 1 d2180 3 d2187 3 d2194 4 a2197 1 if (n >= bfl->bfl_len) d2199 5 d2206 3 d2212 3 d2225 1 a2225 1 int s, error, opromisc; d2230 1 a2240 1 s = splnet(); d2246 3 d2250 3 a2258 1 splx(s); d2361 1 d2368 1 @ 1.211 log @Kill tsleep/wakeup and use cv @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.210 2017/02/01 08:15:15 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.210 2017/02/01 08:15:15 ozaki-r Exp $"); d635 2 a636 3 splx(s); KERNEL_UNLOCK_ONE(NULL); return (EWOULDBLOCK); d656 3 a658 5 if (error == EINTR || error == ERESTART) { splx(s); KERNEL_UNLOCK_ONE(NULL); return (error); } d674 2 a675 3 splx(s); KERNEL_UNLOCK_ONE(NULL); return (0); d681 1 a681 1 goto done; d699 1 a699 1 done: @ 1.210 log @Make bpf_gstats percpu @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.209 2017/02/01 08:13:45 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.209 2017/02/01 08:13:45 ozaki-r Exp $"); d526 2 d582 3 d652 5 a656 2 error = tsleep(d, PRINET|PCATCH, "bpf", d->bd_rtout); d716 5 a720 1 wakeup(d); @ 1.209 log @Use pslist(9) instead of queue(9) for psz/psref As usual some member variables of struct bpf_d and bpf_if remain to avoid breaking kvm(3) users (netstat and fstat). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.208 2017/02/01 08:07:27 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.208 2017/02/01 08:07:27 ozaki-r Exp $"); d79 1 d124 9 a132 1 static struct bpf_stat bpf_gstats; d478 1 a478 3 bpf_gstats.bs_recv = 0; bpf_gstats.bs_drop = 0; bpf_gstats.bs_capt = 0; d1506 1 a1506 1 bpf_gstats.bs_recv++; d1797 1 a1797 1 ++bpf_gstats.bs_capt; d1836 1 a1836 1 ++bpf_gstats.bs_drop; d2254 32 d2318 1 a2318 1 NULL, 0, &bpf_gstats, sizeof(bpf_gstats), @ 1.208 log @Use kmem(9) instead of malloc/free @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.207 2017/02/01 08:06:01 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.207 2017/02/01 08:06:01 ozaki-r Exp $"); d135 54 a188 2 static struct bpf_if *bpf_iflist; static LIST_HEAD(, bpf_d) bpf_list; d405 1 a405 2 d->bd_next = bp->bif_dlist; bp->bif_dlist = d; a415 1 struct bpf_d **p; d442 1 d444 8 a451 8 p = &bp->bif_dlist; while (*p != d) { p = &(*p)->bd_next; if (*p == NULL) panic("%s: descriptor not in list", __func__); } *p = (*p)->bd_next; if (bp->bif_dlist == NULL) d456 1 d466 2 a467 1 LIST_INIT(&bpf_list); d517 2 d521 1 a521 1 LIST_INSERT_HEAD(&bpf_list, d, bd_list); d560 1 a560 1 LIST_REMOVE(d, bd_list); d566 4 d1265 1 a1265 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d1485 1 d1492 1 a1492 1 for (struct bpf_d *d = bp->bif_dlist; d != NULL; d = d->bd_next) { d1745 1 a1745 1 if (bp->bif_dlist == NULL) a1945 1 bp->bif_dlist = NULL; d1950 2 d1953 1 a1953 2 bp->bif_next = bpf_iflist; bpf_iflist = bp; d1970 1 a1970 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d1993 1 a1993 1 struct bpf_if *bp, **pbp; d1999 2 a2000 1 LIST_FOREACH(d, &bpf_list, bd_list) { d2010 1 d2015 1 a2015 2 for (bp = bpf_iflist, pbp = &bpf_iflist; bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) { d2017 4 a2020 1 *pbp = bp->bif_next; d2046 1 a2046 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d2071 1 a2071 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d2101 1 a2101 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d2209 1 a2209 1 LIST_FOREACH(dp, &bpf_list, bd_list) { @ 1.207 log @Make global variables static @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.206 2017/01/25 01:04:23 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.206 2017/01/25 01:04:23 ozaki-r Exp $"); d449 1 a449 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); d514 1 a514 1 free(d, M_DEVBUF); d843 5 a847 1 if (d->bd_bif != NULL) d1117 1 a1117 1 size_t flen, size; d1133 1 a1133 1 fcode = malloc(size, M_DEVBUF, M_WAITOK); d1136 1 a1136 1 free(fcode, M_DEVBUF); d1146 2 d1151 1 d1158 1 a1158 1 free(old, M_DEVBUF); d1832 1 a1832 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1835 1 a1835 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1837 1 a1837 1 free(d->bd_fbuf, M_DEVBUF); d1858 1 a1858 1 free(d->bd_sbuf, M_DEVBUF); d1860 1 a1860 1 free(d->bd_hbuf, M_DEVBUF); d1862 1 a1862 1 free(d->bd_fbuf, M_DEVBUF); d1865 1 a1865 1 free(d->bd_filter, M_DEVBUF); d1881 1 a1881 1 bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); d1967 1 a1967 1 free(bp, M_DEVBUF); @ 1.206 log @Use bpf_ops for bpf_mtap_softint By doing so we don't need to care whether a kernel enables bpfilter or not. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.205 2017/01/24 09:05:28 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.205 2017/01/24 09:05:28 ozaki-r Exp $"); d111 3 a113 3 int bpf_bufsize = BPF_BUFSIZE; int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ bool bpf_jit = false; d123 1 a123 1 struct bpf_stat bpf_gstats; d135 2 a136 2 struct bpf_if *bpf_iflist; LIST_HEAD(, bpf_d) bpf_list; @ 1.205 log @Defer bpf_mtap in Rx interrupt context to softint bpf_mtap of some drivers is still called in hardware interrupt context. We want to run them in softint as well as bpf_mtap of most drivers (see if_percpuq_softint and if_input). To this end, bpf_mtap_softint mechanism is implemented; it defers bpf_mtap processing to a dedicated softint for a target driver. By using the machanism, we can move bpf_mtap processing to softint without changing target drivers much while it adds some overhead on CPU and memory. Once target drivers are changed to softint-based, we should return to normal bpf_mtap. Proposed on tech-kern and tech-net @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.204 2017/01/23 10:17:36 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.204 2017/01/23 10:17:36 ozaki-r Exp $"); d1669 2 a1670 2 void bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) d1677 2 a1678 1 if (bp == NULL || bp->bif_dlist == NULL) d1897 2 a1898 2 void bpf_mtap_softint_init(struct ifnet *ifp) d2231 3 @ 1.204 log @Make bpf_setf static @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.203 2016/07/19 02:47:45 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.203 2016/07/19 02:47:45 pgoyette Exp $"); d48 1 d64 1 d78 1 d1600 86 d1882 1 d1896 23 d1949 10 @ 1.203 log @Fix regression introduced in tests/net/bpf and tests/net/bpfilter The rump code needs to call devsw_attach() in order to assign a dev_major for bpf; it then uses this to create rumps /dev/bpf node. Unfortunately, this leaves the devsw attached, so when the bpf module tries to initialize itself, it gets an EEXIST error and fails. So, once rump has figured what the dev_major should be, call devsw_detach() to remove the devsw. Then, when the module initialization code calls devsw_attach() it will succeed. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.202 2016/07/17 02:49:52 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.202 2016/07/17 02:49:52 pgoyette Exp $"); d147 1 d1105 1 a1105 1 int @ 1.203.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $"); a47 1 #include "opt_net_mpsafe.h" a62 1 #include a75 4 #include #include #include #include d108 3 a110 3 static int bpf_bufsize = BPF_BUFSIZE; static int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ static bool bpf_jit = false; d120 1 a120 1 static struct percpu *bpf_gstats_percpu; /* struct bpf_stat */ a121 28 #define BPF_STATINC(id) \ { \ struct bpf_stat *__stats = \ percpu_getref(bpf_gstats_percpu); \ __stats->bs_##id++; \ percpu_putref(bpf_gstats_percpu); \ } /* * Locking notes: * - bpf_mtx (adaptive mutex) protects: * - Gobal lists: bpf_iflist and bpf_dlist * - struct bpf_if * - bpf_close * - bpf_psz (pserialize) * - struct bpf_d has two mutexes: * - bd_buf_mtx (spin mutex) protects the buffers that can be accessed * on packet tapping * - bd_mtx (adaptive mutex) protects member variables other than the buffers * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is * never freed because struct bpf_d is only freed in bpf_close and * bpf_close never be called while executing bpf_read and bpf_write * - A filter that is assigned to bpf_d can be replaced with another filter * while tapping packets, so it needs to be done atomically * - struct bpf_d is iterated on bpf_dlist with psz * - struct bpf_if is iterated on bpf_iflist with psz or psref */ a127 17 static struct psref_class *bpf_psref_class __read_mostly; static pserialize_t bpf_psz; static inline void bpf_if_acquire(struct bpf_if *bp, struct psref *psref) { psref_acquire(psref, &bp->bif_psref, bpf_psref_class); } static inline void bpf_if_release(struct bpf_if *bp, struct psref *psref) { psref_release(psref, &bp->bif_psref, bpf_psref_class); } d132 2 a133 54 static struct pslist_head bpf_iflist; static struct pslist_head bpf_dlist; /* Macros for bpf_d on bpf_dlist */ #define BPF_DLIST_WRITER_INSERT_HEAD(__d) \ PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry) #define BPF_DLIST_READER_FOREACH(__d) \ PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_FOREACH(__d) \ PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry) /* Macros for bpf_if on bpf_iflist */ #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp) \ PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry) #define BPF_IFLIST_READER_FOREACH(__bp) \ PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_FOREACH(__bp) \ PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_REMOVE(__bp) \ PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_INIT(__bp) \ PSLIST_ENTRY_INIT((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_DESTROY(__bp) \ PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry) /* Macros for bpf_d on bpf_if#bif_dlist_pslist */ #define BPFIF_DLIST_READER_FOREACH(__d, __bp) \ PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d) \ PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d), \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_READER_EMPTY(__bp) \ (PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_WRITER_EMPTY(__bp) \ (PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry) a139 1 static void bpf_free_filter(struct bpf_filter *); a146 1 static int bpf_setf(struct bpf_d *, struct bpf_program *); d337 1 a341 1 a342 1 KASSERT(mutex_owned(d->bd_mtx)); d349 2 a350 1 BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d); d361 1 a364 1 KASSERT(mutex_owned(d->bd_mtx)); a381 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a382 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif a387 1 d389 8 a396 5 BPFIF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); if (BPFIF_DLIST_WRITER_EMPTY(bp)) { a400 1 } a408 2 bpf_psz = pserialize_create(); bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET); d410 1 a410 2 PSLIST_INIT(&bpf_iflist); PSLIST_INIT(&bpf_dlist); d412 3 a414 1 bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat)); d445 1 a445 1 d = kmem_zalloc(sizeof(*d), KM_SLEEP); a459 6 d->bd_filter = NULL; BPF_DLIST_ENTRY_INIT(d); BPFIF_DLIST_ENTRY_INIT(d); d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); cv_init(&d->bd_cv, "bpf"); d462 1 a462 1 BPF_DLIST_WRITER_INSERT_HEAD(d); d477 1 d479 1 d484 1 d493 1 a493 1 mutex_enter(d->bd_mtx); d495 1 a495 1 callout_halt(&d->bd_callout, d->bd_mtx); d499 4 a502 3 mutex_exit(d->bd_mtx); BPF_DLIST_WRITER_REMOVE(d); a503 1 pserialize_perform(bpf_psz); d505 1 a506 4 BPFIF_DLIST_ENTRY_DESTROY(d); BPF_DLIST_ENTRY_DESTROY(d); fp->f_bpf = NULL; bpf_freed(d); d510 1 a510 5 mutex_obj_free(d->bd_mtx); mutex_obj_free(d->bd_buf_mtx); cv_destroy(&d->bd_cv); kmem_free(d, sizeof(*d)); d536 1 d546 2 a547 1 mutex_enter(d->bd_mtx); d549 1 a549 1 callout_halt(&d->bd_callout, d->bd_buf_mtx); a551 1 mutex_exit(d->bd_mtx); a556 1 mutex_enter(d->bd_buf_mtx); d560 3 a562 2 error = EWOULDBLOCK; goto out; d577 7 a583 6 error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout); if (error == EINTR || error == ERESTART) goto out; d599 3 a601 2 error = 0; goto out; d607 1 a607 1 goto out; d612 1 a612 1 mutex_exit(d->bd_buf_mtx); d621 1 a621 1 mutex_enter(d->bd_buf_mtx); d625 3 a627 2 out: mutex_exit(d->bd_buf_mtx); d638 1 a638 5 mutex_enter(d->bd_buf_mtx); cv_broadcast(&d->bd_cv); mutex_exit(d->bd_buf_mtx); d658 1 d660 1 a660 1 mutex_enter(d->bd_mtx); d666 1 a666 1 mutex_exit(d->bd_mtx); a674 1 struct bpf_if *bp; d677 1 a677 1 int error; a678 2 struct psref psref; int bound; d682 5 a686 7 bound = curlwp_bind(); mutex_enter(d->bd_mtx); bp = d->bd_bif; if (bp == NULL) { mutex_exit(d->bd_mtx); error = ENXIO; goto out_bindx; a687 3 bpf_if_acquire(bp, &psref); mutex_exit(d->bd_mtx); d690 1 a690 5 ifp = bp->bif_ifp; if (if_is_deactivated(ifp)) { error = ENXIO; goto out; } d693 2 a694 2 error = 0; goto out; d697 1 a697 1 error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m, d699 4 a702 2 if (error) goto out; d705 1 d707 1 a707 2 error = EMSGSIZE; goto out; d723 1 d732 2 d737 1 a737 5 out: bpf_if_release(bp, &psref); out_bindx: curlwp_bindx(bound); return error; d742 1 a742 1 * receive and drop counts. a746 4 KASSERT(mutex_owned(d->bd_mtx)); mutex_enter(d->bd_buf_mtx); a756 1 mutex_exit(d->bd_buf_mtx); d785 1 a785 1 int error = 0; d790 1 d799 1 a799 1 mutex_enter(d->bd_mtx); d801 1 a801 1 callout_halt(&d->bd_callout, d->bd_mtx); d803 1 a803 1 mutex_exit(d->bd_mtx); d818 1 a818 1 mutex_enter(d->bd_buf_mtx); d822 1 a822 1 mutex_exit(d->bd_buf_mtx); d839 1 a839 7 /* * Forbid to change the buffer length if buffers are already * allocated. */ mutex_enter(d->bd_mtx); mutex_enter(d->bd_buf_mtx); if (d->bd_bif != NULL || d->bd_sbuf != NULL) a849 2 mutex_exit(d->bd_buf_mtx); mutex_exit(d->bd_mtx); d863 1 a863 1 mutex_enter(d->bd_mtx); d865 1 a865 1 mutex_exit(d->bd_mtx); a871 1 mutex_enter(d->bd_mtx); a872 1 mutex_exit(d->bd_mtx); d879 1 a880 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a881 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d885 1 a885 1 mutex_exit(d->bd_mtx); a891 1 mutex_enter(d->bd_mtx); a895 1 mutex_exit(d->bd_mtx); a901 1 mutex_enter(d->bd_mtx); a905 1 mutex_exit(d->bd_mtx); a912 1 mutex_enter(d->bd_mtx); a916 1 mutex_exit(d->bd_mtx); a926 1 mutex_enter(d->bd_mtx); a930 1 mutex_exit(d->bd_mtx); d1096 1 d1104 1 a1104 1 static int d1107 4 a1110 4 struct bpf_insn *fcode; bpfjit_func_t jcode; size_t flen, size = 0; struct bpf_filter *oldf, *newf; d1125 1 a1125 1 fcode = kmem_alloc(size, KM_SLEEP); d1128 1 a1128 1 kmem_free(fcode, size); d1138 5 a1142 12 newf = kmem_alloc(sizeof(*newf), KM_SLEEP); newf->bf_insn = fcode; newf->bf_size = size; newf->bf_jitcode = jcode; d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ /* Need to hold bpf_mtx for pserialize_perform */ mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); oldf = d->bd_filter; d->bd_filter = newf; membar_producer(); d1144 1 a1144 3 pserialize_perform(bpf_psz); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); d1146 6 a1151 2 if (oldf != NULL) bpf_free_filter(oldf); d1166 1 a1166 1 int unit_seen, i, error; d1195 1 a1195 1 BPF_IFLIST_WRITER_FOREACH(bp) { a1209 4 /* * bpf_allocbufs is called only here. bpf_mtx ensures that * no race condition happen on d->bd_sbuf. */ d1215 1 a1215 1 mutex_enter(d->bd_mtx); d1217 1 a1217 1 if (d->bd_bif) { a1221 2 BPFIF_DLIST_ENTRY_INIT(d); } d1226 1 a1226 1 mutex_exit(d->bd_mtx); d1248 1 a1248 1 mutex_enter(d->bd_mtx); d1256 1 a1256 1 mutex_exit(d->bd_mtx); d1272 1 d1278 1 a1278 1 mutex_enter(&bpf_mtx); a1285 1 mutex_enter(d->bd_mtx); a1298 1 mutex_exit(d->bd_mtx); d1301 2 a1302 1 mutex_exit(&bpf_mtx); d1310 1 d1312 2 a1313 1 mutex_enter(d->bd_buf_mtx); d1315 2 a1316 1 mutex_exit(d->bd_buf_mtx); d1325 1 a1325 1 mutex_enter(d->bd_buf_mtx); d1330 1 a1330 1 mutex_exit(d->bd_buf_mtx); d1342 3 a1345 1 mutex_enter(d->bd_buf_mtx); d1353 1 a1353 1 mutex_exit(d->bd_buf_mtx); d1359 1 d1361 2 a1362 1 mutex_exit(d->bd_buf_mtx); a1414 4 struct bpf_d *d; int s; KASSERT(!cpu_intr_p()); d1421 2 a1422 4 s = pserialize_read_enter(); BPFIF_DLIST_READER_FOREACH(d, bp) { u_int slen = 0; struct bpf_filter *filter; d1427 2 a1428 2 atomic_inc_ulong(&d->bd_rcount); BPF_STATINC(recv); d1430 4 a1433 9 filter = d->bd_filter; membar_datadep_consumer(); if (filter != NULL) { if (filter->bf_jitcode != NULL) slen = filter->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filter->bf_insn, &args); } a1441 1 /* Assume catchpacket doesn't sleep */ a1443 1 pserialize_read_exit(s); d1550 1 d1561 1 d1563 1 d1578 1 d1590 1 d1592 1 a1595 82 static struct mbuf * bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m) { struct mbuf *dup; dup = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (dup == NULL) return NULL; if (bp->bif_mbuf_tail != NULL) { bp->bif_mbuf_tail->m_nextpkt = dup; } else { bp->bif_mbuf_head = dup; } bp->bif_mbuf_tail = dup; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n", __func__, dup, bp->bif_ifp->if_xname); #endif return dup; } static struct mbuf * bpf_mbuf_dequeue(struct bpf_if *bp) { struct mbuf *m; int s; /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); m = bp->bif_mbuf_head; if (m != NULL) { bp->bif_mbuf_head = m->m_nextpkt; m->m_nextpkt = NULL; if (bp->bif_mbuf_head == NULL) bp->bif_mbuf_tail = NULL; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n", __func__, m, bp->bif_ifp->if_xname); #endif } splx(s); return m; } static void bpf_mtap_si(void *arg) { struct bpf_if *bp = arg; struct mbuf *m; while ((m = bpf_mbuf_dequeue(bp)) != NULL) { #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n", __func__, m, bp->bif_ifp->if_xname); #endif bpf_ops->bpf_mtap(bp, m); m_freem(m); } } static void _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) { struct bpf_if *bp = ifp->if_bpf; struct mbuf *dup; KASSERT(cpu_intr_p()); /* To avoid extra invocations of the softint */ if (BPFIF_DLIST_READER_EMPTY(bp)) return; KASSERT(bp->bif_si != NULL); dup = bpf_mbuf_enqueue(bp, m); if (dup != NULL) softint_schedule(bp->bif_si); } d1631 2 a1632 2 atomic_inc_ulong(&d->bd_ccount); BPF_STATINC(capt); a1649 1 mutex_enter(d->bd_buf_mtx); a1665 1 mutex_exit(d->bd_buf_mtx); d1670 2 a1671 2 atomic_inc_ulong(&d->bd_dcount); BPF_STATINC(drop); a1717 1 mutex_exit(d->bd_buf_mtx); d1734 1 a1734 1 d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1737 1 a1737 1 d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1739 1 a1739 1 kmem_free(d->bd_fbuf, d->bd_bufsize); a1746 13 static void bpf_free_filter(struct bpf_filter *filter) { KASSERT(filter != NULL); KASSERT(filter->bf_insn != NULL); kmem_free(filter->bf_insn, filter->bf_size); if (filter->bf_jitcode != NULL) bpf_jit_freecode(filter->bf_jitcode); kmem_free(filter, sizeof(*filter)); } d1760 1 a1760 1 kmem_free(d->bd_sbuf, d->bd_bufsize); d1762 1 a1762 1 kmem_free(d->bd_hbuf, d->bd_bufsize); d1764 1 a1764 1 kmem_free(d->bd_fbuf, d->bd_bufsize); d1766 5 a1770 3 if (d->bd_filter != NULL) { bpf_free_filter(d->bd_filter); d->bd_filter = NULL; a1771 1 d->bd_jitcode = NULL; d1783 1 a1783 1 bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP); d1788 1 a1791 4 bp->bif_si = NULL; BPF_IFLIST_ENTRY_INIT(bp); PSLIST_INIT(&bp->bif_dlist_head); psref_target_init(&bp->bif_psref, bpf_psref_class); d1793 2 a1794 1 BPF_IFLIST_WRITER_INSERT_HEAD(bp); a1804 23 static void _bpf_mtap_softint_init(struct ifnet *ifp) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; bp->bif_mbuf_head = NULL; bp->bif_mbuf_tail = NULL; bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp); if (bp->bif_si == NULL) panic("%s: softint_establish() failed", __func__); break; } mutex_exit(&bpf_mtx); if (bp == NULL) panic("%s: no bpf_if found for %s", __func__, ifp->if_xname); } d1811 1 a1811 1 struct bpf_if *bp; d1817 1 a1817 3 again_d: BPF_DLIST_WRITER_FOREACH(d) { mutex_enter(d->bd_mtx); d1823 1 d1826 1 a1826 2 mutex_exit(d->bd_mtx); goto again_d; a1827 1 mutex_exit(d->bd_mtx); d1831 2 a1832 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1834 2 a1835 18 BPF_IFLIST_WRITER_REMOVE(bp); pserialize_perform(bpf_psz); psref_target_destroy(&bp->bif_psref, bpf_psref_class); BPF_IFLIST_ENTRY_DESTROY(bp); if (bp->bif_si != NULL) { /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); while (bp->bif_mbuf_head != NULL) { struct mbuf *m = bp->bif_mbuf_head; bp->bif_mbuf_head = m->m_nextpkt; m_freem(m); } splx(s); softint_disestablish(bp->bif_si); } kmem_free(bp, sizeof(*bp)); d1850 1 a1850 2 mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { a1859 1 mutex_exit(&bpf_mtx); a1870 3 int s, bound; KASSERT(mutex_owned(d->bd_mtx)); d1875 1 a1875 4 bound = curlwp_bind(); s = pserialize_read_enter(); BPF_IFLIST_READER_FOREACH(bp) { d1879 1 a1879 4 struct psref psref; if (n >= bfl->bfl_len) { pserialize_read_exit(s); a1880 5 } bpf_if_acquire(bp, &psref); pserialize_read_exit(s); a1882 3 s = pserialize_read_enter(); bpf_if_release(bp, &psref); a1885 3 pserialize_read_exit(s); curlwp_bindx(bound); d1896 1 a1896 1 int error, opromisc; a1900 1 KASSERT(mutex_owned(d->bd_mtx)); d1905 1 a1905 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1911 1 a1913 1 BPFIF_DLIST_ENTRY_INIT(d); a1916 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a1917 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d1924 1 d2013 1 a2013 1 BPF_DLIST_WRITER_FOREACH(dp) { a2026 1 mutex_enter(dp->bd_mtx); a2032 1 mutex_exit(dp->bd_mtx); a2050 32 static void bpf_stats(void *p, void *arg, struct cpu_info *ci __unused) { struct bpf_stat *const stats = p; struct bpf_stat *sum = arg; sum->bs_recv += stats->bs_recv; sum->bs_drop += stats->bs_drop; sum->bs_capt += stats->bs_capt; } static int bpf_sysctl_gstats_handler(SYSCTLFN_ARGS) { struct sysctlnode node; int error; struct bpf_stat sum; memset(&sum, 0, sizeof(sum)); node = *rnode; percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum); node.sysctl_data = ∑ node.sysctl_size = sizeof(sum); error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; return 0; } d2083 1 a2083 1 bpf_sysctl_gstats_handler, 0, NULL, 0, a2105 3 .bpf_mtap_softint = _bpf_mtap_softint, .bpf_mtap_softint_init = _bpf_mtap_softint_init, @ 1.202 log @Now that we're only calling devsw_attach() in the modular driver, it is not ok for the driver/module to already exist. So don't ignore EEXIST. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.201 2016/07/17 01:16:30 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.201 2016/07/17 01:16:30 pgoyette Exp $"); a61 1 #include d404 2 a405 2 static int doinit(void) d416 1 a416 1 return 0; d420 2 a421 1 * bpfilterattach() is called at boot time. a426 1 static ONCE_DECL(control); a427 1 RUN_ONCE(&control, doinit); a2117 1 d2120 1 a2120 1 bpfilterattach(0); a2124 1 #endif d2127 1 @ 1.201 log @Don't initialize variables that no longer exist in built-in module. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.200 2016/07/17 01:03:46 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.200 2016/07/17 01:03:46 pgoyette Exp $"); d2118 1 a2118 1 int error; a2127 2 if (error == EEXIST) error = 0; /* maybe built-in ... improve eventually */ @ 1.200 log @Don't try to call devsw_attach() for built-in driver code. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.199 2016/06/20 06:46:37 knakahara Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.199 2016/06/20 06:46:37 knakahara Exp $"); a2119 1 bmajor = cmajor = NODEVMAJOR; d2125 1 @ 1.199 log @apply if_output_lock() to L3 callers which call ifp->if_output() of L2(or L3 tunneling). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.198 2016/06/10 13:31:44 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.198 2016/06/10 13:31:44 ozaki-r Exp $"); d2115 1 d2117 1 d2125 1 d2130 1 @ 1.199.2.1 log @Adapt some modular drivers to the localcount(9) world. We're still not actually using the localcount stuff, but we need to differentiate between built-in vs loaded drivers and allocate a "struct localcount" only for loaded drivers. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.199 2016/06/20 06:46:37 knakahara Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.199 2016/06/20 06:46:37 knakahara Exp $"); a63 1 #include a182 4 #ifdef _MODULE struct localcount bpf_localcount; #endif a194 3 #ifdef _MODULE .d_localcount = &bpf_localcount, #endif a2114 1 #ifdef _MODULE d2116 3 a2118 2 #endif int error = 0; a2122 2 #ifdef _MODULE bmajor = cmajor = NODEVMAJOR; d2125 2 a2126 1 #endif @ 1.199.2.2 log @Instead of repeatedly typing the conditional initialization of the .d_localcount members in the various {b,c}devsw, define an initializer macro and use it. This also removes the need for defining new symbols for each 'struct localcount'. As suggested by riastradh@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.199.2.1 2016/07/17 05:05:10 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.199.2.1 2016/07/17 05:05:10 pgoyette Exp $"); d62 1 d184 4 a188 1 LOCALCOUNT_INITIALIZER d200 3 d413 2 a414 2 static void bpf_init(void) d425 1 a425 1 return; d429 1 a429 2 * bpfilterattach() is called at boot time. We don't need to do anything * here, since any initialization will happen as part of module init code. d435 1 d437 1 d2130 1 a2130 1 bpf_init(); d2135 1 a2137 1 #endif @ 1.199.2.3 log @Rename LOCALCOUNT_INITIALIZER to DEVSW_MODULE_INIT. This better describes what we're doing, and why. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.199.2.2 2016/07/19 06:27:00 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.199.2.2 2016/07/19 06:27:00 pgoyette Exp $"); d184 1 a184 1 DEVSW_MODULE_INIT @ 1.199.2.4 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.216 2017/02/20 03:08:38 ozaki-r Exp $"); a47 1 #include "opt_net_mpsafe.h" d63 1 a63 1 #include a76 4 #include #include #include #include d109 3 a111 3 static int bpf_bufsize = BPF_BUFSIZE; static int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ static bool bpf_jit = false; d121 1 a121 1 static struct percpu *bpf_gstats_percpu; /* struct bpf_stat */ a122 28 #define BPF_STATINC(id) \ { \ struct bpf_stat *__stats = \ percpu_getref(bpf_gstats_percpu); \ __stats->bs_##id++; \ percpu_putref(bpf_gstats_percpu); \ } /* * Locking notes: * - bpf_mtx (adaptive mutex) protects: * - Gobal lists: bpf_iflist and bpf_dlist * - struct bpf_if * - bpf_close * - bpf_psz (pserialize) * - struct bpf_d has two mutexes: * - bd_buf_mtx (spin mutex) protects the buffers that can be accessed * on packet tapping * - bd_mtx (adaptive mutex) protects member variables other than the buffers * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is * never freed because struct bpf_d is only freed in bpf_close and * bpf_close never be called while executing bpf_read and bpf_write * - A filter that is assigned to bpf_d can be replaced with another filter * while tapping packets, so it needs to be done atomically * - struct bpf_d is iterated on bpf_dlist with psz * - struct bpf_if is iterated on bpf_iflist with psz or psref */ a128 17 static struct psref_class *bpf_psref_class __read_mostly; static pserialize_t bpf_psz; static inline void bpf_if_acquire(struct bpf_if *bp, struct psref *psref) { psref_acquire(psref, &bp->bif_psref, bpf_psref_class); } static inline void bpf_if_release(struct bpf_if *bp, struct psref *psref) { psref_release(psref, &bp->bif_psref, bpf_psref_class); } d133 2 a134 54 static struct pslist_head bpf_iflist; static struct pslist_head bpf_dlist; /* Macros for bpf_d on bpf_dlist */ #define BPF_DLIST_WRITER_INSERT_HEAD(__d) \ PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry) #define BPF_DLIST_READER_FOREACH(__d) \ PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_FOREACH(__d) \ PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry) /* Macros for bpf_if on bpf_iflist */ #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp) \ PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry) #define BPF_IFLIST_READER_FOREACH(__bp) \ PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_FOREACH(__bp) \ PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_REMOVE(__bp) \ PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_INIT(__bp) \ PSLIST_ENTRY_INIT((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_DESTROY(__bp) \ PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry) /* Macros for bpf_d on bpf_if#bif_dlist_pslist */ #define BPFIF_DLIST_READER_FOREACH(__d, __bp) \ PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d) \ PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d), \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_READER_EMPTY(__bp) \ (PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_WRITER_EMPTY(__bp) \ (PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry) a140 1 static void bpf_free_filter(struct bpf_filter *); a147 1 static int bpf_setf(struct bpf_d *, struct bpf_program *); d339 1 a343 1 a344 1 KASSERT(mutex_owned(d->bd_mtx)); d351 2 a352 1 BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d); d363 1 a366 1 KASSERT(mutex_owned(d->bd_mtx)); a383 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a384 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif a389 1 d391 8 a398 5 BPFIF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); if (BPFIF_DLIST_WRITER_EMPTY(bp)) { a402 1 } a410 2 bpf_psz = pserialize_create(); bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET); d412 1 a412 2 PSLIST_INIT(&bpf_iflist); PSLIST_INIT(&bpf_dlist); d414 3 a416 1 bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat)); d447 1 a447 1 d = kmem_zalloc(sizeof(*d), KM_SLEEP); a461 6 d->bd_filter = NULL; BPF_DLIST_ENTRY_INIT(d); BPFIF_DLIST_ENTRY_INIT(d); d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); cv_init(&d->bd_cv, "bpf"); d464 1 a464 1 BPF_DLIST_WRITER_INSERT_HEAD(d); d479 1 d481 1 d486 1 d495 1 a495 1 mutex_enter(d->bd_mtx); d497 1 a497 1 callout_halt(&d->bd_callout, d->bd_mtx); d501 4 a504 3 mutex_exit(d->bd_mtx); BPF_DLIST_WRITER_REMOVE(d); a505 1 pserialize_perform(bpf_psz); d507 1 a508 4 BPFIF_DLIST_ENTRY_DESTROY(d); BPF_DLIST_ENTRY_DESTROY(d); fp->f_bpf = NULL; bpf_freed(d); d512 1 a512 5 mutex_obj_free(d->bd_mtx); mutex_obj_free(d->bd_buf_mtx); cv_destroy(&d->bd_cv); kmem_free(d, sizeof(*d)); d538 1 d548 2 a549 1 mutex_enter(d->bd_mtx); d551 1 a551 1 callout_halt(&d->bd_callout, d->bd_buf_mtx); a553 1 mutex_exit(d->bd_mtx); a558 1 mutex_enter(d->bd_buf_mtx); d562 3 a564 2 error = EWOULDBLOCK; goto out; d579 7 a585 6 error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout); if (error == EINTR || error == ERESTART) goto out; d601 3 a603 2 error = 0; goto out; d609 1 a609 1 goto out; d614 1 a614 1 mutex_exit(d->bd_buf_mtx); d623 1 a623 1 mutex_enter(d->bd_buf_mtx); d627 3 a629 2 out: mutex_exit(d->bd_buf_mtx); d640 1 a640 5 mutex_enter(d->bd_buf_mtx); cv_broadcast(&d->bd_cv); mutex_exit(d->bd_buf_mtx); d660 1 d662 1 a662 1 mutex_enter(d->bd_mtx); d668 1 a668 1 mutex_exit(d->bd_mtx); a676 1 struct bpf_if *bp; d679 1 a679 1 int error; a680 2 struct psref psref; int bound; d684 5 a688 7 bound = curlwp_bind(); mutex_enter(d->bd_mtx); bp = d->bd_bif; if (bp == NULL) { mutex_exit(d->bd_mtx); error = ENXIO; goto out_bindx; a689 3 bpf_if_acquire(bp, &psref); mutex_exit(d->bd_mtx); d692 1 a692 5 ifp = bp->bif_ifp; if (if_is_deactivated(ifp)) { error = ENXIO; goto out; } d695 2 a696 2 error = 0; goto out; d699 1 a699 1 error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m, d701 4 a704 2 if (error) goto out; d707 1 d709 1 a709 2 error = EMSGSIZE; goto out; d725 1 d734 2 d739 1 a739 5 out: bpf_if_release(bp, &psref); out_bindx: curlwp_bindx(bound); return error; d744 1 a744 1 * receive and drop counts. a748 4 KASSERT(mutex_owned(d->bd_mtx)); mutex_enter(d->bd_buf_mtx); a758 1 mutex_exit(d->bd_buf_mtx); d787 1 a787 1 int error = 0; d792 1 d801 1 a801 1 mutex_enter(d->bd_mtx); d803 1 a803 1 callout_halt(&d->bd_callout, d->bd_mtx); d805 1 a805 1 mutex_exit(d->bd_mtx); d820 1 a820 1 mutex_enter(d->bd_buf_mtx); d824 1 a824 1 mutex_exit(d->bd_buf_mtx); d841 1 a841 7 /* * Forbid to change the buffer length if buffers are already * allocated. */ mutex_enter(d->bd_mtx); mutex_enter(d->bd_buf_mtx); if (d->bd_bif != NULL || d->bd_sbuf != NULL) a851 2 mutex_exit(d->bd_buf_mtx); mutex_exit(d->bd_mtx); d865 1 a865 1 mutex_enter(d->bd_mtx); d867 1 a867 1 mutex_exit(d->bd_mtx); a873 1 mutex_enter(d->bd_mtx); a874 1 mutex_exit(d->bd_mtx); d881 1 a882 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a883 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d887 1 a887 1 mutex_exit(d->bd_mtx); a893 1 mutex_enter(d->bd_mtx); a897 1 mutex_exit(d->bd_mtx); a903 1 mutex_enter(d->bd_mtx); a907 1 mutex_exit(d->bd_mtx); a914 1 mutex_enter(d->bd_mtx); a918 1 mutex_exit(d->bd_mtx); a928 1 mutex_enter(d->bd_mtx); a932 1 mutex_exit(d->bd_mtx); d1098 1 d1106 1 a1106 1 static int d1109 4 a1112 4 struct bpf_insn *fcode; bpfjit_func_t jcode; size_t flen, size = 0; struct bpf_filter *oldf, *newf; d1127 1 a1127 1 fcode = kmem_alloc(size, KM_SLEEP); d1130 1 a1130 1 kmem_free(fcode, size); d1140 5 a1144 12 newf = kmem_alloc(sizeof(*newf), KM_SLEEP); newf->bf_insn = fcode; newf->bf_size = size; newf->bf_jitcode = jcode; d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ /* Need to hold bpf_mtx for pserialize_perform */ mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); oldf = d->bd_filter; d->bd_filter = newf; membar_producer(); d1146 1 a1146 3 pserialize_perform(bpf_psz); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); d1148 6 a1153 2 if (oldf != NULL) bpf_free_filter(oldf); d1168 1 a1168 1 int unit_seen, i, error; d1197 1 a1197 1 BPF_IFLIST_WRITER_FOREACH(bp) { a1211 4 /* * bpf_allocbufs is called only here. bpf_mtx ensures that * no race condition happen on d->bd_sbuf. */ d1217 1 a1217 1 mutex_enter(d->bd_mtx); d1219 1 a1219 1 if (d->bd_bif) { a1223 2 BPFIF_DLIST_ENTRY_INIT(d); } d1228 1 a1228 1 mutex_exit(d->bd_mtx); d1250 1 a1250 1 mutex_enter(d->bd_mtx); d1258 1 a1258 1 mutex_exit(d->bd_mtx); d1274 1 d1280 1 a1280 1 mutex_enter(&bpf_mtx); a1287 1 mutex_enter(d->bd_mtx); a1300 1 mutex_exit(d->bd_mtx); d1303 2 a1304 1 mutex_exit(&bpf_mtx); d1312 1 d1314 2 a1315 1 mutex_enter(d->bd_buf_mtx); d1317 2 a1318 1 mutex_exit(d->bd_buf_mtx); d1327 1 a1327 1 mutex_enter(d->bd_buf_mtx); d1332 1 a1332 1 mutex_exit(d->bd_buf_mtx); d1344 3 a1347 1 mutex_enter(d->bd_buf_mtx); d1355 1 a1355 1 mutex_exit(d->bd_buf_mtx); d1361 1 d1363 2 a1364 1 mutex_exit(d->bd_buf_mtx); a1416 4 struct bpf_d *d; int s; KASSERT(!cpu_intr_p()); d1423 2 a1424 4 s = pserialize_read_enter(); BPFIF_DLIST_READER_FOREACH(d, bp) { u_int slen = 0; struct bpf_filter *filter; d1429 2 a1430 2 atomic_inc_ulong(&d->bd_rcount); BPF_STATINC(recv); d1432 4 a1435 9 filter = d->bd_filter; membar_datadep_consumer(); if (filter != NULL) { if (filter->bf_jitcode != NULL) slen = filter->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filter->bf_insn, &args); } a1443 1 /* Assume catchpacket doesn't sleep */ a1445 1 pserialize_read_exit(s); d1552 1 d1563 1 d1565 1 d1580 1 d1592 1 d1594 1 a1597 82 static struct mbuf * bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m) { struct mbuf *dup; dup = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (dup == NULL) return NULL; if (bp->bif_mbuf_tail != NULL) { bp->bif_mbuf_tail->m_nextpkt = dup; } else { bp->bif_mbuf_head = dup; } bp->bif_mbuf_tail = dup; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n", __func__, dup, bp->bif_ifp->if_xname); #endif return dup; } static struct mbuf * bpf_mbuf_dequeue(struct bpf_if *bp) { struct mbuf *m; int s; /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); m = bp->bif_mbuf_head; if (m != NULL) { bp->bif_mbuf_head = m->m_nextpkt; m->m_nextpkt = NULL; if (bp->bif_mbuf_head == NULL) bp->bif_mbuf_tail = NULL; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n", __func__, m, bp->bif_ifp->if_xname); #endif } splx(s); return m; } static void bpf_mtap_si(void *arg) { struct bpf_if *bp = arg; struct mbuf *m; while ((m = bpf_mbuf_dequeue(bp)) != NULL) { #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n", __func__, m, bp->bif_ifp->if_xname); #endif bpf_ops->bpf_mtap(bp, m); m_freem(m); } } static void _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) { struct bpf_if *bp = ifp->if_bpf; struct mbuf *dup; KASSERT(cpu_intr_p()); /* To avoid extra invocations of the softint */ if (BPFIF_DLIST_READER_EMPTY(bp)) return; KASSERT(bp->bif_si != NULL); dup = bpf_mbuf_enqueue(bp, m); if (dup != NULL) softint_schedule(bp->bif_si); } d1633 2 a1634 2 atomic_inc_ulong(&d->bd_ccount); BPF_STATINC(capt); a1651 1 mutex_enter(d->bd_buf_mtx); a1667 1 mutex_exit(d->bd_buf_mtx); d1672 2 a1673 2 atomic_inc_ulong(&d->bd_dcount); BPF_STATINC(drop); a1719 1 mutex_exit(d->bd_buf_mtx); d1736 1 a1736 1 d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1739 1 a1739 1 d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1741 1 a1741 1 kmem_free(d->bd_fbuf, d->bd_bufsize); a1748 13 static void bpf_free_filter(struct bpf_filter *filter) { KASSERT(filter != NULL); KASSERT(filter->bf_insn != NULL); kmem_free(filter->bf_insn, filter->bf_size); if (filter->bf_jitcode != NULL) bpf_jit_freecode(filter->bf_jitcode); kmem_free(filter, sizeof(*filter)); } d1762 1 a1762 1 kmem_free(d->bd_sbuf, d->bd_bufsize); d1764 1 a1764 1 kmem_free(d->bd_hbuf, d->bd_bufsize); d1766 1 a1766 1 kmem_free(d->bd_fbuf, d->bd_bufsize); d1768 5 a1772 3 if (d->bd_filter != NULL) { bpf_free_filter(d->bd_filter); d->bd_filter = NULL; a1773 1 d->bd_jitcode = NULL; d1785 1 a1785 1 bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP); d1790 1 a1793 4 bp->bif_si = NULL; BPF_IFLIST_ENTRY_INIT(bp); PSLIST_INIT(&bp->bif_dlist_head); psref_target_init(&bp->bif_psref, bpf_psref_class); d1795 2 a1796 1 BPF_IFLIST_WRITER_INSERT_HEAD(bp); a1806 23 static void _bpf_mtap_softint_init(struct ifnet *ifp) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; bp->bif_mbuf_head = NULL; bp->bif_mbuf_tail = NULL; bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp); if (bp->bif_si == NULL) panic("%s: softint_establish() failed", __func__); break; } mutex_exit(&bpf_mtx); if (bp == NULL) panic("%s: no bpf_if found for %s", __func__, ifp->if_xname); } d1813 1 a1813 1 struct bpf_if *bp; d1819 1 a1819 3 again_d: BPF_DLIST_WRITER_FOREACH(d) { mutex_enter(d->bd_mtx); d1825 1 d1828 1 a1828 2 mutex_exit(d->bd_mtx); goto again_d; a1829 1 mutex_exit(d->bd_mtx); d1833 2 a1834 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1836 2 a1837 18 BPF_IFLIST_WRITER_REMOVE(bp); pserialize_perform(bpf_psz); psref_target_destroy(&bp->bif_psref, bpf_psref_class); BPF_IFLIST_ENTRY_DESTROY(bp); if (bp->bif_si != NULL) { /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); while (bp->bif_mbuf_head != NULL) { struct mbuf *m = bp->bif_mbuf_head; bp->bif_mbuf_head = m->m_nextpkt; m_freem(m); } splx(s); softint_disestablish(bp->bif_si); } kmem_free(bp, sizeof(*bp)); d1852 1 a1852 2 mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { a1861 1 mutex_exit(&bpf_mtx); a1872 3 int s, bound; KASSERT(mutex_owned(d->bd_mtx)); d1877 1 a1877 4 bound = curlwp_bind(); s = pserialize_read_enter(); BPF_IFLIST_READER_FOREACH(bp) { d1881 1 a1881 4 struct psref psref; if (n >= bfl->bfl_len) { pserialize_read_exit(s); a1882 5 } bpf_if_acquire(bp, &psref); pserialize_read_exit(s); a1884 3 s = pserialize_read_enter(); bpf_if_release(bp, &psref); a1887 3 pserialize_read_exit(s); curlwp_bindx(bound); d1898 1 a1898 1 int error, opromisc; a1902 1 KASSERT(mutex_owned(d->bd_mtx)); d1907 1 a1907 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1913 1 a1915 1 BPFIF_DLIST_ENTRY_INIT(d); a1918 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a1919 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d1926 1 d2015 1 a2015 1 BPF_DLIST_WRITER_FOREACH(dp) { a2028 1 mutex_enter(dp->bd_mtx); a2034 1 mutex_exit(dp->bd_mtx); a2052 32 static void bpf_stats(void *p, void *arg, struct cpu_info *ci __unused) { struct bpf_stat *const stats = p; struct bpf_stat *sum = arg; sum->bs_recv += stats->bs_recv; sum->bs_drop += stats->bs_drop; sum->bs_capt += stats->bs_capt; } static int bpf_sysctl_gstats_handler(SYSCTLFN_ARGS) { struct sysctlnode node; int error; struct bpf_stat sum; memset(&sum, 0, sizeof(sum)); node = *rnode; percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum); node.sysctl_data = ∑ node.sysctl_size = sizeof(sum); error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; return 0; } d2085 1 a2085 1 bpf_sysctl_gstats_handler, 0, NULL, 0, a2107 3 .bpf_mtap_softint = _bpf_mtap_softint, .bpf_mtap_softint_init = _bpf_mtap_softint_init, @ 1.198 log @Avoid storing a pointer of an interface in a mbuf Having a pointer of an interface in a mbuf isn't safe if we remove big kernel locks; an interface object (ifnet) can be destroyed anytime in any packet processing and accessing such object via a pointer is racy. Instead we have to get an object from the interface collection (ifindex2ifnet) via an interface index (if_index) that is stored to a mbuf instead of an pointer. The change provides two APIs: m_{get,put}_rcvif_psref that use psref(9) for sleep-able critical sections and m_{get,put}_rcvif that use pserialize(9) for other critical sections. The change also adds another API called m_get_rcvif_NOMPSAFE, that is NOT MP-safe and for transition moratorium, i.e., it is intended to be used for places where are not planned to be MP-ified soon. The change adds some overhead due to psref to performance sensitive paths, however the overhead is not serious, 2% down at worst. Proposed on tech-kern and tech-net. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.197 2016/06/10 13:27:15 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.197 2016/06/10 13:27:15 ozaki-r Exp $"); d726 1 a726 1 error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL); @ 1.197 log @Introduce m_set_rcvif and m_reset_rcvif The API is used to set (or reset) a received interface of a mbuf. They are counterpart of m_get_rcvif, which will come in another commit, hide internal of rcvif operation, and reduce the diff of the upcoming change. No functional change. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.196 2016/06/07 01:06:28 pgoyette Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.196 2016/06/07 01:06:28 pgoyette Exp $"); d1472 1 a1472 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { d1489 1 a1489 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif != NULL); d1503 1 a1503 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { d1520 1 a1520 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif != NULL); @ 1.196 log @Create separate modules for i2c_bitbang and bpf_filter so these files can be included in kernels which need them without also duplicating them in other modules. Removes the duplicate symbols I found which prevented loading i2c and bpf modules after having fixed PR 45125. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.195 2016/02/09 08:32:12 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.195 2016/02/09 08:32:12 ozaki-r Exp $"); d303 1 a303 1 m->m_pkthdr.rcvif = NULL; d718 1 a718 1 mc->m_pkthdr.rcvif = ifp; @ 1.195 log @Introduce softint-based if_input This change intends to run the whole network stack in softint context (or normal LWP), not hardware interrupt context. Note that the work is still incomplete by this change; to that end, we also have to softint-ify if_link_state_change (and bpf) which can still run in hardware interrupt. This change softint-ifies at ifp->if_input that is called from each device driver (and ieee80211_input) to ensure Layer 2 runs in softint (e.g., ether_input and bridge_input). To this end, we provide a framework (called percpuq) that utlizes softint(9) and percpu ifqueues. With this patch, rxintr of most drivers just queues received packets and schedules a softint, and the softint dequeues packets and does rest packet processing. To minimize changes to each driver, percpuq is allocated in struct ifnet for now and that is initialized by default (in if_attach). We probably have to move percpuq to softc of each driver, but it's future work. At this point, only wm(4) has percpuq in its softc as a reference implementation. Additional information including performance numbers can be found in the thread at tech-kern@@ and tech-net@@: http://mail-index.netbsd.org/tech-kern/2016/01/14/msg019997.html Acknowledgment: riastradh@@ greatly helped this work. Thank you very much! @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.194 2016/02/01 16:32:28 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.194 2016/02/01 16:32:28 christos Exp $"); d2110 1 a2110 1 MODULE(MODULE_CLASS_DRIVER, bpf, NULL); @ 1.194 log @Do less work under the kernel lock, otherwise dhcpcd aborting causes us to deadlock. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.193 2015/12/16 23:14:42 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.193 2015/12/16 23:14:42 christos Exp $"); d730 1 a730 1 (*ifp->if_input)(ifp, mc); @ 1.193 log @don't free mbuf twice. XXX: pullup 7. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.192 2015/10/14 19:40:09 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.192 2015/10/14 19:40:09 christos Exp $"); d478 1 a478 1 struct bpf_d *d = fp->f_bpf; d484 6 d504 5 a512 4 fp->f_bpf = NULL; mutex_exit(&bpf_mtx); KERNEL_UNLOCK_ONE(NULL); @ 1.192 log @PR/49386: Ryota Ozaki: Add a mutex for bpf creation/removal to avoid races. Add M_CANFAIL to malloc. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.191 2015/05/30 19:14:46 joerg Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.191 2015/05/30 19:14:46 joerg Exp $"); d724 2 a725 1 m_freem(mc); @ 1.191 log @Improve wording. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $"); d343 1 d365 2 d482 1 a496 1 mutex_enter(&bpf_mtx); a497 1 mutex_exit(&bpf_mtx); d504 1 d906 1 d911 1 d934 1 d936 1 d1162 1 d1728 1 a1728 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); d1731 1 a1731 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); d1781 1 d1793 1 d1809 1 d1833 1 d1894 2 @ 1.190 log @Remove unnecessary variable bc @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189 2014/09/13 17:18:45 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189 2014/09/13 17:18:45 rmind Exp $"); d440 1 a440 1 /* falloc() will use the descriptor for us. */ @ 1.189 log @PR/49190: bpf_deliver: set scratch memory store in bpf_args_t. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.188 2014/09/05 09:22:22 matt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.188 2014/09/05 09:22:22 matt Exp $"); a1390 1 const bpf_ctx_t *bc = NULL; d1417 1 a1417 1 slen = d->bd_jitcode(bc, &args); d1419 1 a1419 1 slen = bpf_filter_ext(bc, d->bd_filter, &args); @ 1.189.2.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.190 2014/12/29 13:38:13 ozaki-r Exp $"); d1391 1 d1418 1 a1418 1 slen = d->bd_jitcode(NULL, &args); d1420 1 a1420 1 slen = bpf_filter_ext(NULL, d->bd_filter, &args); @ 1.189.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.1 2015/04/06 15:18:22 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.1 2015/04/06 15:18:22 skrll Exp $"); d440 1 a440 1 /* falloc() will fill in the descriptor for us. */ d712 1 a712 1 } else d2117 1 a2117 1 * unload could at least in theory be done similarly to d2120 1 a2120 1 * @ 1.189.2.3 log @Sync with HEAD (as of 26th Dec) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.2 2015/06/06 14:40:25 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.2 2015/06/06 14:40:25 skrll Exp $"); a342 1 KASSERT(mutex_owned(&bpf_mtx)); a363 2 KASSERT(mutex_owned(&bpf_mtx)); a478 1 mutex_enter(&bpf_mtx); d493 1 d495 1 a501 1 mutex_exit(&bpf_mtx); d721 1 a721 2 else m_freem(mc); a902 1 mutex_enter(&bpf_mtx); a906 1 mutex_exit(&bpf_mtx); a928 1 mutex_enter(&bpf_mtx); a929 1 mutex_exit(&bpf_mtx); a1154 1 KASSERT(mutex_owned(&bpf_mtx)); d1720 1 a1720 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1723 1 a1723 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); a1772 1 mutex_enter(&bpf_mtx); a1783 1 mutex_exit(&bpf_mtx); a1798 1 mutex_enter(&bpf_mtx); a1821 1 mutex_exit(&bpf_mtx); a1881 2 KASSERT(mutex_owned(&bpf_mtx)); @ 1.189.2.4 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.3 2015/12/27 12:10:06 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.3 2015/12/27 12:10:06 skrll Exp $"); d478 1 a478 1 struct bpf_d *d; a483 6 if ((d = fp->f_bpf) == NULL) { mutex_exit(&bpf_mtx); KERNEL_UNLOCK_ONE(NULL); return 0; } d498 4 a506 5 callout_destroy(&d->bd_callout); seldestroy(&d->bd_sel); softint_disestablish(d->bd_sih); free(d, M_DEVBUF); d723 1 a723 1 ifp->_if_input(ifp, mc); @ 1.189.2.5 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.4 2016/03/19 11:30:32 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.4 2016/03/19 11:30:32 skrll Exp $"); d303 1 a303 1 m_reset_rcvif(m); d718 1 a718 1 m_set_rcvif(mc, ifp); d726 1 a726 1 error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL); d1472 1 a1472 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { d1489 1 a1489 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif_index != 0); d1503 1 a1503 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { d1520 1 a1520 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif_index != 0); d2110 1 a2110 1 MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter"); @ 1.189.2.6 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.5 2016/07/09 20:25:21 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.5 2016/07/09 20:25:21 skrll Exp $"); d62 1 d405 2 a406 2 static void bpf_init(void) d417 1 a417 1 return; d421 1 a421 2 * bpfilterattach() is called at boot time. We don't need to do anything * here, since any initialization will happen as part of module init code. d427 1 d429 1 a2114 1 #ifdef _MODULE d2116 3 a2118 2 #endif int error = 0; d2122 1 a2122 3 bpf_init(); #ifdef _MODULE bmajor = cmajor = NODEVMAJOR; d2125 2 a2128 1 #endif @ 1.189.2.7 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.6 2016/10/05 20:56:08 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.6 2016/10/05 20:56:08 skrll Exp $"); a47 1 #include "opt_net_mpsafe.h" a62 1 #include a75 2 #include #include d108 3 a110 3 static int bpf_bufsize = BPF_BUFSIZE; static int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ static bool bpf_jit = false; d120 1 a120 9 static struct percpu *bpf_gstats_percpu; /* struct bpf_stat */ #define BPF_STATINC(id) \ { \ struct bpf_stat *__stats = \ percpu_getref(bpf_gstats_percpu); \ __stats->bs_##id++; \ percpu_putref(bpf_gstats_percpu); \ } d132 2 a133 54 static struct pslist_head bpf_iflist; static struct pslist_head bpf_dlist; /* Macros for bpf_d on bpf_dlist */ #define BPF_DLIST_WRITER_INSEART_HEAD(__d) \ PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry) #define BPF_DLIST_READER_FOREACH(__d) \ PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_FOREACH(__d) \ PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry) /* Macros for bpf_if on bpf_iflist */ #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp) \ PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry) #define BPF_IFLIST_READER_FOREACH(__bp) \ PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_FOREACH(__bp) \ PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_REMOVE(__bp) \ PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_INIT(__bp) \ PSLIST_ENTRY_INIT((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_DESTROY(__bp) \ PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry) /* Macros for bpf_d on bpf_if#bif_dlist_pslist */ #define BPFIF_DLIST_READER_FOREACH(__d, __bp) \ PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d) \ PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d), \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_READER_EMPTY(__bp) \ (PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_WRITER_EMPTY(__bp) \ (PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry) a146 1 static int bpf_setf(struct bpf_d *, struct bpf_program *); d349 2 a350 1 BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d); d361 1 a387 1 d389 8 a396 8 BPFIF_DLIST_WRITER_REMOVE(d); /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ BPFIF_DLIST_ENTRY_DESTROY(d); /* XXX NOMPSAFE? */ if (BPFIF_DLIST_WRITER_EMPTY(bp)) { a400 1 } d410 1 a410 2 PSLIST_INIT(&bpf_iflist); PSLIST_INIT(&bpf_dlist); d412 3 a414 1 bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat)); d445 1 a445 1 d = kmem_zalloc(sizeof(*d), KM_SLEEP); a459 4 BPF_DLIST_ENTRY_INIT(d); BPFIF_DLIST_ENTRY_INIT(d); d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); cv_init(&d->bd_cv, "bpf"); d462 1 a462 1 BPF_DLIST_WRITER_INSEART_HEAD(d); d501 1 a501 1 BPF_DLIST_WRITER_REMOVE(d); a506 4 /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ BPF_DLIST_ENTRY_DESTROY(d); d510 1 a510 4 mutex_obj_free(d->bd_mtx); cv_destroy(&d->bd_cv); kmem_free(d, sizeof(*d)); d560 3 a562 2 error = EWOULDBLOCK; goto out; d577 7 a583 8 mutex_enter(d->bd_mtx); error = cv_timedwait_sig(&d->bd_cv, d->bd_mtx, d->bd_rtout); mutex_exit(d->bd_mtx); if (error == EINTR || error == ERESTART) goto out; d599 3 a601 2 error = 0; goto out; d607 1 a607 1 goto out; d625 1 a625 1 out: d638 1 a638 5 mutex_enter(d->bd_mtx); cv_broadcast(&d->bd_cv); mutex_exit(d->bd_mtx); d839 1 a839 5 /* * Forbid to change the buffer length if buffers are already * allocated. */ if (d->bd_bif != NULL || d->bd_sbuf != NULL) d1104 1 a1104 1 static int d1109 1 a1109 1 size_t flen, size = 0, old_size; d1125 1 a1125 1 fcode = kmem_alloc(size, KM_SLEEP); d1128 1 a1128 1 kmem_free(fcode, size); a1137 2 old_size = d->bd_filter_size; a1140 1 d->bd_filter_size = size; d1147 1 a1147 1 kmem_free(old, old_size); d1195 1 a1195 1 BPF_IFLIST_WRITER_FOREACH(bp) { a1414 1 struct bpf_d *d; d1421 1 a1421 1 BPFIF_DLIST_READER_FOREACH(d, bp) { d1428 1 a1428 1 BPF_STATINC(recv); a1595 87 static struct mbuf * bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m) { struct mbuf *dup; dup = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (dup == NULL) return NULL; if (bp->bif_mbuf_tail != NULL) { bp->bif_mbuf_tail->m_nextpkt = dup; } else { bp->bif_mbuf_head = dup; } bp->bif_mbuf_tail = dup; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n", __func__, dup, bp->bif_ifp->if_xname); #endif return dup; } static struct mbuf * bpf_mbuf_dequeue(struct bpf_if *bp) { struct mbuf *m; int s; s = splnet(); m = bp->bif_mbuf_head; if (m != NULL) { bp->bif_mbuf_head = m->m_nextpkt; m->m_nextpkt = NULL; if (bp->bif_mbuf_head == NULL) bp->bif_mbuf_tail = NULL; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n", __func__, m, bp->bif_ifp->if_xname); #endif } splx(s); return m; } static void bpf_mtap_si(void *arg) { struct bpf_if *bp = arg; struct mbuf *m; while ((m = bpf_mbuf_dequeue(bp)) != NULL) { #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n", __func__, m, bp->bif_ifp->if_xname); #endif #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif bpf_ops->bpf_mtap(bp, m); #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif m_freem(m); } } static void _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) { struct bpf_if *bp = ifp->if_bpf; struct mbuf *dup; KASSERT(cpu_intr_p()); /* To avoid extra invocations of the softint */ if (BPFIF_DLIST_READER_EMPTY(bp)) return; KASSERT(bp->bif_si != NULL); dup = bpf_mbuf_enqueue(bp, m); if (dup != NULL) softint_schedule(bp->bif_si); } d1632 1 a1632 1 BPF_STATINC(capt); d1671 1 a1671 1 BPF_STATINC(drop); d1734 1 a1734 1 d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1737 1 a1737 1 d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1739 1 a1739 1 kmem_free(d->bd_fbuf, d->bd_bufsize); d1760 1 a1760 1 kmem_free(d->bd_sbuf, d->bd_bufsize); d1762 1 a1762 1 kmem_free(d->bd_hbuf, d->bd_bufsize); d1764 1 a1764 1 kmem_free(d->bd_fbuf, d->bd_bufsize); d1767 1 a1767 1 kmem_free(d->bd_filter, d->bd_filter_size); d1783 1 a1783 1 bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP); d1788 1 a1791 3 bp->bif_si = NULL; BPF_IFLIST_ENTRY_INIT(bp); PSLIST_INIT(&bp->bif_dlist_head); d1793 2 a1794 1 BPF_IFLIST_WRITER_INSERT_HEAD(bp); a1804 23 static void _bpf_mtap_softint_init(struct ifnet *ifp) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; bp->bif_mbuf_head = NULL; bp->bif_mbuf_tail = NULL; bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp); if (bp->bif_si == NULL) panic("%s: softint_establish() failed", __func__); break; } mutex_exit(&bpf_mtx); if (bp == NULL) panic("%s: no bpf_if found for %s", __func__, ifp->if_xname); } d1811 1 a1811 1 struct bpf_if *bp; d1817 1 a1817 2 again_d: BPF_DLIST_WRITER_FOREACH(d) { a1826 1 goto again_d; d1831 2 a1832 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1834 2 a1835 15 BPF_IFLIST_WRITER_REMOVE(bp); /* TODO pserialize_perform(); */ /* TODO psref_target_destroy(); */ BPF_IFLIST_ENTRY_DESTROY(bp); if (bp->bif_si != NULL) { s = splnet(); while (bp->bif_mbuf_head != NULL) { struct mbuf *m = bp->bif_mbuf_head; bp->bif_mbuf_head = m->m_nextpkt; m_freem(m); } splx(s); softint_disestablish(bp->bif_si); } kmem_free(bp, sizeof(*bp)); d1850 1 a1850 1 BPF_IFLIST_READER_FOREACH(bp) { d1875 1 a1875 1 BPF_IFLIST_READER_FOREACH(bp) { d1905 1 a1905 1 BPF_IFLIST_WRITER_FOREACH(bp) { d2013 1 a2013 1 BPF_DLIST_WRITER_FOREACH(dp) { a2050 32 static void bpf_stats(void *p, void *arg, struct cpu_info *ci __unused) { struct bpf_stat *const stats = p; struct bpf_stat *sum = arg; sum->bs_recv += stats->bs_recv; sum->bs_drop += stats->bs_drop; sum->bs_capt += stats->bs_capt; } static int bpf_sysctl_gstats_handler(SYSCTLFN_ARGS) { struct sysctlnode node; int error; struct bpf_stat sum; memset(&sum, 0, sizeof(sum)); node = *rnode; percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum); node.sysctl_data = ∑ node.sysctl_size = sizeof(sum); error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; return 0; } d2083 1 a2083 1 bpf_sysctl_gstats_handler, 0, NULL, 0, a2105 3 .bpf_mtap_softint = _bpf_mtap_softint, .bpf_mtap_softint_init = _bpf_mtap_softint_init, @ 1.189.2.8 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.189.2.7 2017/02/05 13:40:57 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.189.2.7 2017/02/05 13:40:57 skrll Exp $"); a79 2 #include #include a134 20 * Locking notes: * - bpf_mtx (adaptive mutex) protects: * - Gobal lists: bpf_iflist and bpf_dlist * - struct bpf_if * - bpf_close * - bpf_psz (pserialize) * - struct bpf_d has two mutexes: * - bd_buf_mtx (spin mutex) protects the buffers that can be accessed * on packet tapping * - bd_mtx (adaptive mutex) protects member variables other than the buffers * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is * never freed because struct bpf_d is only freed in bpf_close and * bpf_close never be called while executing bpf_read and bpf_write * - A filter that is assigned to bpf_d can be replaced with another filter * while tapping packets, so it needs to be done atomically * - struct bpf_d is iterated on bpf_dlist with psz * - struct bpf_if is iterated on bpf_iflist with psz or psref */ /* a139 17 static struct psref_class *bpf_psref_class __read_mostly; static pserialize_t bpf_psz; static inline void bpf_if_acquire(struct bpf_if *bp, struct psref *psref) { psref_acquire(psref, &bp->bif_psref, bpf_psref_class); } static inline void bpf_if_release(struct bpf_if *bp, struct psref *psref) { psref_release(psref, &bp->bif_psref, bpf_psref_class); } d148 1 a148 1 #define BPF_DLIST_WRITER_INSERT_HEAD(__d) \ a203 1 static void bpf_free_filter(struct bpf_filter *); d402 1 a406 1 a407 1 KASSERT(mutex_owned(d->bd_mtx)); a427 1 KASSERT(mutex_owned(d->bd_mtx)); a444 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a445 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d455 3 a457 1 pserialize_perform(bpf_psz); d459 1 a473 2 bpf_psz = pserialize_create(); bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET); a523 1 d->bd_filter = NULL; d526 1 a526 2 d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); d530 1 a530 1 BPF_DLIST_WRITER_INSERT_HEAD(d); d545 1 d547 1 d552 1 d561 1 a561 1 mutex_enter(d->bd_mtx); d563 1 a563 1 callout_halt(&d->bd_callout, d->bd_mtx); d567 2 a568 2 mutex_exit(d->bd_mtx); d570 1 a571 1 pserialize_perform(bpf_psz); d573 1 d575 2 a576 1 BPFIF_DLIST_ENTRY_DESTROY(d); d578 1 a578 2 fp->f_bpf = NULL; bpf_freed(d); a582 1 mutex_obj_free(d->bd_buf_mtx); d611 1 d621 2 a622 1 mutex_enter(d->bd_mtx); d624 1 a624 1 callout_halt(&d->bd_callout, d->bd_buf_mtx); a626 1 mutex_exit(d->bd_mtx); a631 1 mutex_enter(d->bd_buf_mtx); d652 3 a654 1 error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout); d686 1 a686 1 mutex_exit(d->bd_buf_mtx); d695 1 a695 1 mutex_enter(d->bd_buf_mtx); d700 2 a701 1 mutex_exit(d->bd_buf_mtx); d713 1 a713 1 mutex_enter(d->bd_buf_mtx); d715 1 a715 1 mutex_exit(d->bd_buf_mtx); d736 1 d738 1 a738 1 mutex_enter(d->bd_mtx); d744 1 a744 1 mutex_exit(d->bd_mtx); a752 1 struct bpf_if *bp; d755 1 a755 1 int error; a756 2 struct psref psref; int bound; d760 5 a764 7 bound = curlwp_bind(); mutex_enter(d->bd_mtx); bp = d->bd_bif; if (bp == NULL) { mutex_exit(d->bd_mtx); error = ENXIO; goto out_bindx; a765 3 bpf_if_acquire(bp, &psref); mutex_exit(d->bd_mtx); d768 1 a768 5 ifp = bp->bif_ifp; if (if_is_deactivated(ifp)) { error = ENXIO; goto out; } d771 2 a772 2 error = 0; goto out; d775 1 a775 1 error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m, d777 4 a780 2 if (error) goto out; d783 1 d785 1 a785 2 error = EMSGSIZE; goto out; d801 1 d810 2 d815 1 a815 5 out: bpf_if_release(bp, &psref); out_bindx: curlwp_bindx(bound); return error; d820 1 a820 1 * receive and drop counts. a824 4 KASSERT(mutex_owned(d->bd_mtx)); mutex_enter(d->bd_buf_mtx); a834 1 mutex_exit(d->bd_buf_mtx); d863 1 a863 1 int error = 0; d868 1 d877 1 a877 1 mutex_enter(d->bd_mtx); d879 1 a879 1 callout_halt(&d->bd_callout, d->bd_mtx); d881 1 a881 1 mutex_exit(d->bd_mtx); d896 1 a896 1 mutex_enter(d->bd_buf_mtx); d900 1 a900 1 mutex_exit(d->bd_buf_mtx); a920 2 mutex_enter(d->bd_mtx); mutex_enter(d->bd_buf_mtx); a931 2 mutex_exit(d->bd_buf_mtx); mutex_exit(d->bd_mtx); d945 1 a945 1 mutex_enter(d->bd_mtx); d947 1 a947 1 mutex_exit(d->bd_mtx); a953 1 mutex_enter(d->bd_mtx); a954 1 mutex_exit(d->bd_mtx); d961 1 a962 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a963 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d967 1 a967 1 mutex_exit(d->bd_mtx); a973 1 mutex_enter(d->bd_mtx); a977 1 mutex_exit(d->bd_mtx); a983 1 mutex_enter(d->bd_mtx); a987 1 mutex_exit(d->bd_mtx); a994 1 mutex_enter(d->bd_mtx); a998 1 mutex_exit(d->bd_mtx); a1008 1 mutex_enter(d->bd_mtx); a1012 1 mutex_exit(d->bd_mtx); d1178 1 d1189 4 a1192 4 struct bpf_insn *fcode; bpfjit_func_t jcode; size_t flen, size = 0; struct bpf_filter *oldf, *newf; d1220 1 a1220 5 newf = kmem_alloc(sizeof(*newf), KM_SLEEP); newf->bf_insn = fcode; newf->bf_size = size; newf->bf_jitcode = jcode; d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ d1222 6 a1227 6 /* Need to hold bpf_mtx for pserialize_perform */ mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); oldf = d->bd_filter; d->bd_filter = newf; membar_producer(); d1229 1 a1229 3 pserialize_perform(bpf_psz); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); d1231 6 a1236 2 if (oldf != NULL) bpf_free_filter(oldf); d1251 1 a1251 1 int unit_seen, i, error; a1294 4 /* * bpf_allocbufs is called only here. bpf_mtx ensures that * no race condition happen on d->bd_sbuf. */ d1300 1 a1300 1 mutex_enter(d->bd_mtx); d1302 1 a1302 1 if (d->bd_bif) { a1306 2 BPFIF_DLIST_ENTRY_INIT(d); } d1311 1 a1311 1 mutex_exit(d->bd_mtx); d1333 1 a1333 1 mutex_enter(d->bd_mtx); d1341 1 a1341 1 mutex_exit(d->bd_mtx); d1357 1 d1363 1 a1363 1 mutex_enter(&bpf_mtx); a1370 1 mutex_enter(d->bd_mtx); a1383 1 mutex_exit(d->bd_mtx); d1386 2 a1387 1 mutex_exit(&bpf_mtx); d1395 1 d1397 2 a1398 1 mutex_enter(d->bd_buf_mtx); d1400 2 a1401 1 mutex_exit(d->bd_buf_mtx); d1410 1 a1410 1 mutex_enter(d->bd_buf_mtx); d1415 1 a1415 1 mutex_exit(d->bd_buf_mtx); d1427 3 a1430 1 mutex_enter(d->bd_buf_mtx); d1438 1 a1438 1 mutex_exit(d->bd_buf_mtx); d1444 1 d1446 2 a1447 1 mutex_exit(d->bd_buf_mtx); a1500 3 int s; KASSERT(!cpu_intr_p()); a1506 1 s = pserialize_read_enter(); d1508 1 a1508 2 u_int slen = 0; struct bpf_filter *filter; d1513 1 a1513 1 atomic_inc_ulong(&d->bd_rcount); d1516 4 a1519 9 filter = d->bd_filter; membar_datadep_consumer(); if (filter != NULL) { if (filter->bf_jitcode != NULL) slen = filter->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filter->bf_insn, &args); } a1527 1 /* Assume catchpacket doesn't sleep */ a1529 1 pserialize_read_exit(s); d1636 1 d1647 1 d1649 1 d1664 1 d1676 1 d1678 1 a1710 1 /* XXX NOMPSAFE: assumed running on one CPU */ d1740 3 d1744 3 d1804 1 a1804 1 atomic_inc_ulong(&d->bd_ccount); a1822 1 mutex_enter(d->bd_buf_mtx); a1838 1 mutex_exit(d->bd_buf_mtx); d1843 1 a1843 1 atomic_inc_ulong(&d->bd_dcount); a1890 1 mutex_exit(d->bd_buf_mtx); a1919 13 static void bpf_free_filter(struct bpf_filter *filter) { KASSERT(filter != NULL); KASSERT(filter->bf_insn != NULL); kmem_free(filter->bf_insn, filter->bf_size); if (filter->bf_jitcode != NULL) bpf_jit_freecode(filter->bf_jitcode); kmem_free(filter, sizeof(*filter)); } d1939 5 a1943 3 if (d->bd_filter != NULL) { bpf_free_filter(d->bd_filter); d->bd_filter = NULL; a1944 1 d->bd_jitcode = NULL; a1966 1 psref_target_init(&bp->bif_psref, bpf_psref_class); a2015 1 mutex_enter(d->bd_mtx); d2021 1 d2024 1 a2024 1 mutex_exit(d->bd_mtx); a2026 1 mutex_exit(d->bd_mtx); d2033 2 a2034 4 pserialize_perform(bpf_psz); psref_target_destroy(&bp->bif_psref, bpf_psref_class); a2036 1 /* XXX NOMPSAFE: assumed running on one CPU */ d2061 1 a2061 2 mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { a2070 1 mutex_exit(&bpf_mtx); a2081 3 int s, bound; KASSERT(mutex_owned(d->bd_mtx)); a2085 3 bound = curlwp_bind(); s = pserialize_read_enter(); d2090 1 a2090 4 struct psref psref; if (n >= bfl->bfl_len) { pserialize_read_exit(s); a2091 5 } bpf_if_acquire(bp, &psref); pserialize_read_exit(s); a2093 3 s = pserialize_read_enter(); bpf_if_release(bp, &psref); a2096 3 pserialize_read_exit(s); curlwp_bindx(bound); d2107 1 a2107 1 int error, opromisc; a2111 1 KASSERT(mutex_owned(d->bd_mtx)); d2122 1 a2124 1 BPFIF_DLIST_ENTRY_INIT(d); a2127 3 #ifndef NET_MPSAFE KERNEL_LOCK(1, NULL); #endif a2128 3 #ifndef NET_MPSAFE KERNEL_UNLOCK_ONE(NULL); #endif d2135 1 a2237 1 mutex_enter(dp->bd_mtx); a2243 1 mutex_exit(dp->bd_mtx); @ 1.188 log @Try not to use f_data, use f_{vnode,socket,pipe,mqueue,kqueue,ksem} to get a correctly typed pointer. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $"); d1391 2 a1392 1 struct timespec ts; d1397 1 a1397 1 .mem = NULL, a1399 3 struct bpf_d *d; const bpf_ctx_t *bc = NULL; d1401 1 d1408 1 a1408 1 for (d = bp->bif_dlist; d != NULL; d = d->bd_next) { @ 1.187 log @Use NULL instead of 0 for pointers @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.186 2014/07/28 07:32:46 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.186 2014/07/28 07:32:46 alnsn Exp $"); d475 1 a475 1 struct bpf_d *d = fp->f_data; d500 1 a500 1 fp->f_data = NULL; d525 1 a525 1 struct bpf_d *d = fp->f_data; d666 1 a666 1 struct bpf_d *d = fp->f_data; d775 1 a775 1 struct bpf_d *d = fp->f_data; d1231 1 a1231 1 struct bpf_d *d = fp->f_data; d1257 1 a1257 1 struct bpf_d *d = fp->f_data; d1326 1 a1326 1 struct bpf_d *d = fp->f_data; @ 1.187.2.1 log @Pull up following revision(s) (requested by rmind in ticket #106): sys/net/bpf.c: revision 1.189 PR/49190: bpf_deliver: set scratch memory store in bpf_args_t. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $"); d1391 1 a1391 2 const bpf_ctx_t *bc = NULL; uint32_t mem[BPF_MEMWORDS]; d1396 1 a1396 1 .mem = mem, d1399 3 a1402 1 struct timespec ts; d1409 1 a1409 1 for (struct bpf_d *d = bp->bif_dlist; d != NULL; d = d->bd_next) { @ 1.186 log @Enable net.bpf.jit only if MODULAR and BPFJIT. Tweak a warning about postponed jit activation. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.185 2014/07/25 08:10:40 dholland Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.185 2014/07/25 08:10:40 dholland Exp $"); d303 1 a303 1 m->m_pkthdr.rcvif = 0; d390 1 a390 1 if (*p == 0) d394 1 a394 1 if (bp->bif_dlist == 0) d398 2 a399 2 *d->bd_bif->bif_driverp = 0; d->bd_bif = 0; d517 1 a517 1 (d)->bd_fbuf = 0; d549 1 a549 1 while (d->bd_hbuf == 0) { d615 1 a615 1 d->bd_hbuf = 0; d676 1 a676 1 if (d->bd_bif == 0) { d741 1 a741 1 d->bd_hbuf = 0; d830 1 a830 1 if (d->bd_bif != 0) d863 1 a863 1 if (d->bd_bif == 0) { d883 1 a883 1 if (d->bd_bif == 0) d893 1 a893 1 if (d->bd_bif == 0) d903 1 a903 1 if (d->bd_bif == 0) d916 1 a916 1 if (d->bd_bif == 0) d1181 1 a1181 1 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { d1184 1 a1184 1 if (ifp == 0 || d1196 1 a1196 1 if (d->bd_sbuf == 0) { d1653 1 a1653 1 if (d->bd_fbuf == 0) { d1772 1 a1772 1 if (bp == 0) d1775 1 a1775 1 bp->bif_dlist = 0; d1783 1 a1783 1 *bp->bif_driverp = 0; @ 1.185 log @Add d_discard to all struct cdevsw instances I could find. All have been set to "nodiscard"; some should get a real implementation. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.184 2014/07/10 15:32:09 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.184 2014/07/10 15:32:09 christos Exp $"); d1931 1 d1955 1 a1955 1 printf("WARNING jit activation is postponed " d1961 1 d2047 1 d2054 1 @ 1.184 log @initialize args the same way we do in filter. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.183 2014/06/24 10:53:30 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.183 2014/06/24 10:53:30 alnsn Exp $"); d194 1 @ 1.183 log @Implement copfuncs and external memory in bpfjit. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $"); d1391 7 a1397 1 bpf_args_t args; a1402 4 args.pkt = (const uint8_t *)pkt; args.wirelen = pktlen; args.buflen = buflen; @ 1.182 log @Change (mostly mechanically) every cdevsw/bdevsw I can find to use designated initializers. I have not built every extant kernel so I have probably broken at least one build; however I've also found and fixed some wrong cdevsw/bdevsw entries so even if so I think we come out ahead. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.181 2014/02/25 18:30:12 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.181 2014/02/25 18:30:12 pooka Exp $"); d200 1 d1118 2 a1119 4 if (bpf_jit) { bpf_ctx_t *bc = bpf_default_ctx(); jcode = bpf_jit_generate(bc, fcode, flen); } d1390 2 a1391 7 bpf_ctx_t *bc = bpf_default_ctx(); bpf_args_t args = { .pkt = pkt, .wirelen = pktlen, .buflen = buflen, .arg = NULL }; d1393 2 a1394 1 struct timespec ts; d1397 4 d1416 1 a1416 1 slen = d->bd_jitcode(pkt, pktlen, buflen); @ 1.182.2.1 log @Rebase. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.187 2014/08/07 03:40:21 ozaki-r Exp $"); a193 1 .d_discard = nodiscard, a199 1 d301 1 a301 1 m->m_pkthdr.rcvif = NULL; d388 1 a388 1 if (*p == NULL) d392 1 a392 1 if (bp->bif_dlist == NULL) d396 2 a397 2 *d->bd_bif->bif_driverp = NULL; d->bd_bif = NULL; d515 1 a515 1 (d)->bd_fbuf = NULL; d547 1 a547 1 while (d->bd_hbuf == NULL) { d613 1 a613 1 d->bd_hbuf = NULL; d674 1 a674 1 if (d->bd_bif == NULL) { d739 1 a739 1 d->bd_hbuf = NULL; d828 1 a828 1 if (d->bd_bif != NULL) d861 1 a861 1 if (d->bd_bif == NULL) { d881 1 a881 1 if (d->bd_bif == NULL) d891 1 a891 1 if (d->bd_bif == NULL) d901 1 a901 1 if (d->bd_bif == NULL) d914 1 a914 1 if (d->bd_bif == NULL) d1117 4 a1120 2 if (bpf_jit) jcode = bpf_jit_generate(NULL, fcode, flen); d1181 1 a1181 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d1184 1 a1184 1 if (ifp == NULL || d1196 1 a1196 1 if (d->bd_sbuf == NULL) { d1391 1 a1391 1 struct timespec ts; d1393 1 a1393 1 .pkt = (const uint8_t *)pkt, a1395 1 .mem = NULL, d1399 1 a1399 2 const bpf_ctx_t *bc = NULL; d1417 1 a1417 1 slen = d->bd_jitcode(bc, &args); d1651 1 a1651 1 if (d->bd_fbuf == NULL) { d1770 1 a1770 1 if (bp == NULL) d1773 1 a1773 1 bp->bif_dlist = NULL; d1781 1 a1781 1 *bp->bif_driverp = NULL; a1928 1 #if defined(MODULAR) || defined(BPFJIT) d1952 1 a1952 1 printf("JIT compilation is postponed " a1957 1 #endif a2042 1 #if defined(MODULAR) || defined(BPFJIT) a2048 1 #endif @ 1.181 log @Ensure that the top level sysctl nodes (kern, vfs, net, ...) exist before the sysctl link sets are processed, and remove redundancy. Shaves >13kB off of an amd64 GENERIC, not to mention >1k duplicate lines of code. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.180 2013/12/05 15:55:35 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.180 2013/12/05 15:55:35 christos Exp $"); d184 11 a194 2 bpfopen, noclose, noread, nowrite, noioctl, nostop, notty, nopoll, nommap, nokqfilter, D_OTHER @ 1.180 log @It is silly to kill the system when an interface failed to clear promiscuous mode. Some return EINVAL when they are dying, but others like USB return EIO. Downgrade to a DIAGNOSTIC printf. Same should be done for the malloc/NOWAIT, but this is rarely hit. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.179 2013/11/16 01:13:52 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.179 2013/11/16 01:13:52 rmind Exp $"); a2025 6 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL); @ 1.179 log @bpf_deliver: convert to bpf_filter_ext(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.178 2013/11/15 00:12:44 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.178 2013/11/15 00:12:44 rmind Exp $"); d359 1 a359 1 int error; d370 4 a373 2 if (error && error != EINVAL) panic("%s: ifpromisc failed: %d", __func__, error); @ 1.178 log @- Add bpf_args_t and convert bpf_filter_ext() to use it. This allows the caller to initialise (and re-use) the memory store. - Add bpf_jit_generate() and bpf_jit_freecode() wrappers. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.177 2013/09/18 23:34:55 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.177 2013/09/18 23:34:55 rmind Exp $"); d1380 7 d1405 1 a1405 1 if (d->bd_jitcode != NULL) d1408 1 a1408 1 slen = bpf_filter(d->bd_filter, pkt, pktlen, buflen); @ 1.177 log @Add bpf_filter_ext() to use with BPF COP, restore bpf_filter() as it was originally to preserve compatibility. Similarly, add bpf_validate_ext() which takes bpf_ctx_t. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.176 2013/09/09 20:53:51 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.176 2013/09/09 20:53:51 christos Exp $"); d188 17 d1082 1 a1082 1 bpfjit_function_t jcode, oldj; d1106 3 a1108 2 if (bpf_jit && bpfjit_module_ops.bj_generate_code != NULL) { jcode = bpfjit_module_ops.bj_generate_code(fcode, flen); d1125 2 a1126 4 if (oldj != NULL) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(oldj); d1738 1 a1738 2 KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(d->bd_jitcode); @ 1.176 log @PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. XXX: Pullup -6 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.175 2013/08/30 15:00:08 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.175 2013/08/30 15:00:08 rmind Exp $"); d1385 1 a1385 2 slen = bpf_filter(bpf_def_ctx, NULL, d->bd_filter, pkt, pktlen, buflen); @ 1.175 log @bpf_filter: add a custom argument which can be passed to coprocessor routine. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.174 2013/08/29 14:25:41 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.174 2013/08/29 14:25:41 rmind Exp $"); d1579 2 a1580 5 struct bpf_hdr *hp; #ifdef _LP64 struct bpf_hdr32 *hp32; #endif int totlen, curlen; d1595 7 d1642 1 d1645 3 a1647 1 hp32 = (struct bpf_hdr32 *)((char *)d->bd_sbuf + curlen); d1652 1 a1652 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)((u_char *)hp32 + hdrlen, pkt, (hp32->bh_caplen = totlen - hdrlen)); d1656 3 a1658 1 hp = (struct bpf_hdr *)((char *)d->bd_sbuf + curlen); d1663 1 a1663 6 /* * Copy the packet data into the store buffer and update * its length. */ (*cpfn)((u_char *)hp + hdrlen, pkt, (hp->bh_caplen = totlen - hdrlen)); d1665 5 @ 1.174 log @Implement BPF_COP/BPF_COPX instructions in the misc category (BPF_MISC) which add a capability to call external functions in a predetermined way. It can be thought as a BPF "coprocessor" -- a generic mechanism to offload more complex packet inspection operations. There is no default coprocessor and this functionality is not targeted to the /dev/bpf. This is primarily targeted to the kernel subsystems, therefore there is no way to set a custom coprocessor at the userlevel. Discussed on: tech-net@@ OK: core@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $"); d1385 1 a1385 1 slen = bpf_filter(bpf_def_ctx, d->bd_filter, @ 1.173 log @Add bpfjit and enable it for amd64. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.172 2012/09/27 18:28:56 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.172 2012/09/27 18:28:56 alnsn Exp $"); d1385 2 a1386 1 slen = bpf_filter(d->bd_filter, pkt, pktlen, buflen); @ 1.173.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $"); d184 2 a185 11 .d_open = bpfopen, .d_close = noclose, .d_read = noread, .d_write = nowrite, .d_ioctl = noioctl, .d_stop = nostop, .d_tty = notty, .d_poll = nopoll, .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_flag = D_OTHER a187 17 bpfjit_func_t bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size) { membar_consumer(); if (bpfjit_module_ops.bj_generate_code != NULL) { return bpfjit_module_ops.bj_generate_code(bc, code, size); } return NULL; } void bpf_jit_freecode(bpfjit_func_t jcode) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(jcode); } d342 1 a342 1 int error __diagused; d353 2 a354 4 #ifdef DIAGNOSTIC if (error) printf("%s: ifpromisc failed: %d", __func__, error); #endif d1065 1 a1065 1 bpfjit_func_t jcode, oldj; d1089 2 a1090 3 if (bpf_jit) { bpf_ctx_t *bc = bpf_default_ctx(); jcode = bpf_jit_generate(bc, fcode, flen); d1107 4 a1110 2 if (oldj) { bpf_jit_freecode(oldj); a1363 7 bpf_ctx_t *bc = bpf_default_ctx(); bpf_args_t args = { .pkt = pkt, .wirelen = pktlen, .buflen = buflen, .arg = NULL }; d1382 1 a1382 1 if (d->bd_jitcode) d1385 1 a1385 1 slen = bpf_filter_ext(bc, d->bd_filter, &args); d1578 5 a1582 2 char *h; int totlen, curlen, caplen; a1596 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1636 1 h = (char *)d->bd_sbuf + curlen; d1639 1 a1639 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1644 5 a1648 1 hp32->bh_caplen = caplen; d1652 1 a1652 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1657 6 a1662 1 hp->bh_caplen = caplen; a1663 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); d1717 2 a1718 1 bpf_jit_freecode(d->bd_jitcode); d1997 6 @ 1.172 log @Remove bpf_jit which was ported from FreeBSD recently. It will soon be replaced with the new bpfjit kernel module. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.171 2012/08/15 20:59:51 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.171 2012/08/15 20:59:51 alnsn Exp $"); d83 1 d111 6 d428 1 d1065 1 d1069 1 d1088 4 d1099 2 d1108 5 d1382 5 a1386 1 slen = bpf_filter(d->bd_filter, pkt, pktlen, buflen); d1715 5 d1892 30 d2013 6 @ 1.171 log @Fix two bugs introduced by recent commit. - When handling contiguous buffer in _bpf_tap(), pass its real size rather than 0 to avoid reading packet data as mbuf struct on out-of-bounds loads. - Correctly pass pktlen and buflen arguments from bpf_deliver() to bpf_filter() to avoid reading mbuf struct as packet data. JIT case is still broken. Also, test pointers againts NULL. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.170 2012/08/02 00:40:51 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.170 2012/08/02 00:40:51 rmind Exp $"); a81 1 #include a121 8 /* BPF JIT compilation. */ static bool bpf_jit_enable __read_mostly = false; #ifndef BPF_JIT #define bpf_jit(x, y) NULL #define bpf_destroy_jit_filter(x) (void)x #endif a1056 1 bpf_jit_filter *jfunc, *ofunc; a1077 3 /* Perform JIT compilation. */ jfunc = bpf_jit(fcode, flen); a1079 1 jfunc = NULL; a1083 1 ofunc = d->bd_bfilter; a1084 1 d->bd_bfilter = jfunc; d1091 1 a1091 3 if (ofunc) { bpf_destroy_jit_filter(ofunc); } a1352 1 bpf_jit_filter *bf; d1361 1 a1361 11 bf = bpf_jit_enable ? d->bd_bfilter : NULL; if (bf) { /* * XXX THIS is totally broken when pkt * points to mbuf. FreeBSD does a runtime * check, we don't. */ slen = (*(bf->func))(pkt, pktlen, pktlen); } else { slen = bpf_filter(d->bd_filter, pkt, pktlen, buflen); } a1689 2 if (d->bd_bfilter) bpf_destroy_jit_filter(d->bd_bfilter); @ 1.171.2.1 log @Resync to 2012-11-19 00:00:00 UTC @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.173 2012/10/27 22:36:14 alnsn Exp $"); d82 1 a83 1 #include a110 6 bool bpf_jit = false; struct bpfjit_ops bpfjit_module_ops = { .bj_generate_code = NULL, .bj_free_code = NULL }; d123 8 a429 1 d->bd_jitcode = NULL; d1066 1 a1066 1 bpfjit_function_t jcode, oldj; a1069 1 jcode = NULL; d1088 3 a1090 4 membar_consumer(); if (bpf_jit && bpfjit_module_ops.bj_generate_code != NULL) { jcode = bpfjit_module_ops.bj_generate_code(fcode, flen); } d1093 1 d1098 1 d1100 1 a1100 2 oldj = d->bd_jitcode; d->bd_jitcode = jcode; d1107 2 a1108 4 if (oldj != NULL) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(oldj); a1109 1 d1371 1 d1380 9 a1388 3 if (d->bd_jitcode != NULL) slen = d->bd_jitcode(pkt, pktlen, buflen); else d1390 1 a1390 1 d1719 2 a1720 5 if (d->bd_jitcode != NULL) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(d->bd_jitcode); } a1892 30 sysctl_net_bpf_jit(SYSCTLFN_ARGS) { bool newval; int error; struct sysctlnode node; node = *rnode; node.sysctl_data = &newval; newval = bpf_jit; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; bpf_jit = newval; /* * Do a full sync to publish new bpf_jit value and * update bpfjit_module_ops.bj_generate_code variable. */ membar_sync(); if (newval && bpfjit_module_ops.bj_generate_code == NULL) { printf("WARNING jit activation is postponed " "until after bpfjit module is loaded\n"); } return 0; } static int a1983 6 CTLTYPE_BOOL, "jit", SYSCTL_DESCR("Toggle Just-In-Time compilation"), sysctl_net_bpf_jit, 0, &bpf_jit, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, @ 1.171.2.2 log @Rebase to HEAD as of a few days ago. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d184 2 a185 12 .d_open = bpfopen, .d_close = noclose, .d_read = noread, .d_write = nowrite, .d_ioctl = noioctl, .d_stop = nostop, .d_tty = notty, .d_poll = nopoll, .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_discard = nodiscard, .d_flag = D_OTHER a187 18 bpfjit_func_t bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size) { membar_consumer(); if (bpfjit_module_ops.bj_generate_code != NULL) { return bpfjit_module_ops.bj_generate_code(bc, code, size); } return NULL; } void bpf_jit_freecode(bpfjit_func_t jcode) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(jcode); } d275 1 a275 1 m->m_pkthdr.rcvif = NULL; d342 1 a342 1 int error __diagused; d353 2 a354 4 #ifdef DIAGNOSTIC if (error) printf("%s: ifpromisc failed: %d", __func__, error); #endif d360 1 a360 1 if (*p == NULL) d364 1 a364 1 if (bp->bif_dlist == NULL) d368 2 a369 2 *d->bd_bif->bif_driverp = NULL; d->bd_bif = NULL; d487 1 a487 1 (d)->bd_fbuf = NULL; d519 1 a519 1 while (d->bd_hbuf == NULL) { d585 1 a585 1 d->bd_hbuf = NULL; d646 1 a646 1 if (d->bd_bif == NULL) { d711 1 a711 1 d->bd_hbuf = NULL; d800 1 a800 1 if (d->bd_bif != NULL) d833 1 a833 1 if (d->bd_bif == NULL) { d853 1 a853 1 if (d->bd_bif == NULL) d863 1 a863 1 if (d->bd_bif == NULL) d873 1 a873 1 if (d->bd_bif == NULL) d886 1 a886 1 if (d->bd_bif == NULL) d1065 1 a1065 1 bpfjit_func_t jcode, oldj; d1089 3 a1091 2 if (bpf_jit) jcode = bpf_jit_generate(NULL, fcode, flen); d1107 4 a1110 2 if (oldj) { bpf_jit_freecode(oldj); d1154 1 a1154 1 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { d1157 1 a1157 1 if (ifp == NULL || d1169 1 a1169 1 if (d->bd_sbuf == NULL) { d1364 1 a1365 10 bpf_args_t args = { .pkt = (const uint8_t *)pkt, .wirelen = pktlen, .buflen = buflen, .mem = NULL, .arg = NULL }; struct bpf_d *d; const bpf_ctx_t *bc = NULL; d1382 2 a1383 2 if (d->bd_jitcode) slen = d->bd_jitcode(bc, &args); d1385 1 a1385 1 slen = bpf_filter_ext(bc, d->bd_filter, &args); d1578 5 a1582 2 char *h; int totlen, curlen, caplen; a1596 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; d1613 1 a1613 1 if (d->bd_fbuf == NULL) { a1636 1 h = (char *)d->bd_sbuf + curlen; d1639 1 a1639 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1644 5 a1648 1 hp32->bh_caplen = caplen; d1652 1 a1652 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1657 6 a1662 1 hp->bh_caplen = caplen; a1663 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); d1717 2 a1718 1 bpf_jit_freecode(d->bd_jitcode); d1732 1 a1732 1 if (bp == NULL) d1735 1 a1735 1 bp->bif_dlist = NULL; d1743 1 a1743 1 *bp->bif_driverp = NULL; a1890 1 #if defined(MODULAR) || defined(BPFJIT) d1914 1 a1914 1 printf("JIT compilation is postponed " a1919 1 #endif d1997 6 a2010 1 #if defined(MODULAR) || defined(BPFJIT) a2016 1 #endif @ 1.171.2.3 log @update from HEAD @ text @a47 1 #include "opt_net_mpsafe.h" d62 1 a63 1 #include a76 4 #include #include #include #include d109 3 a111 3 static int bpf_bufsize = BPF_BUFSIZE; static int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ static bool bpf_jit = false; d121 1 a121 1 static struct percpu *bpf_gstats_percpu; /* struct bpf_stat */ a122 28 #define BPF_STATINC(id) \ { \ struct bpf_stat *__stats = \ percpu_getref(bpf_gstats_percpu); \ __stats->bs_##id++; \ percpu_putref(bpf_gstats_percpu); \ } /* * Locking notes: * - bpf_mtx (adaptive mutex) protects: * - Gobal lists: bpf_iflist and bpf_dlist * - struct bpf_if * - bpf_close * - bpf_psz (pserialize) * - struct bpf_d has two mutexes: * - bd_buf_mtx (spin mutex) protects the buffers that can be accessed * on packet tapping * - bd_mtx (adaptive mutex) protects member variables other than the buffers * - Locking order: bpf_mtx => bpf_d#bd_mtx => bpf_d#bd_buf_mtx * - struct bpf_d obtained via fp->f_bpf in bpf_read and bpf_write is * never freed because struct bpf_d is only freed in bpf_close and * bpf_close never be called while executing bpf_read and bpf_write * - A filter that is assigned to bpf_d can be replaced with another filter * while tapping packets, so it needs to be done atomically * - struct bpf_d is iterated on bpf_dlist with psz * - struct bpf_if is iterated on bpf_iflist with psz or psref */ a128 17 static struct psref_class *bpf_psref_class __read_mostly; static pserialize_t bpf_psz; static inline void bpf_if_acquire(struct bpf_if *bp, struct psref *psref) { psref_acquire(psref, &bp->bif_psref, bpf_psref_class); } static inline void bpf_if_release(struct bpf_if *bp, struct psref *psref) { psref_release(psref, &bp->bif_psref, bpf_psref_class); } d133 2 a134 54 static struct pslist_head bpf_iflist; static struct pslist_head bpf_dlist; /* Macros for bpf_d on bpf_dlist */ #define BPF_DLIST_WRITER_INSERT_HEAD(__d) \ PSLIST_WRITER_INSERT_HEAD(&bpf_dlist, (__d), bd_bpf_dlist_entry) #define BPF_DLIST_READER_FOREACH(__d) \ PSLIST_READER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_FOREACH(__d) \ PSLIST_WRITER_FOREACH((__d), &bpf_dlist, struct bpf_d, \ bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bpf_dlist_entry) #define BPF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bpf_dlist_entry) #define BPF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bpf_dlist_entry) /* Macros for bpf_if on bpf_iflist */ #define BPF_IFLIST_WRITER_INSERT_HEAD(__bp) \ PSLIST_WRITER_INSERT_HEAD(&bpf_iflist, (__bp), bif_iflist_entry) #define BPF_IFLIST_READER_FOREACH(__bp) \ PSLIST_READER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_FOREACH(__bp) \ PSLIST_WRITER_FOREACH((__bp), &bpf_iflist, struct bpf_if, \ bif_iflist_entry) #define BPF_IFLIST_WRITER_REMOVE(__bp) \ PSLIST_WRITER_REMOVE((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_INIT(__bp) \ PSLIST_ENTRY_INIT((__bp), bif_iflist_entry) #define BPF_IFLIST_ENTRY_DESTROY(__bp) \ PSLIST_ENTRY_DESTROY((__bp), bif_iflist_entry) /* Macros for bpf_d on bpf_if#bif_dlist_pslist */ #define BPFIF_DLIST_READER_FOREACH(__d, __bp) \ PSLIST_READER_FOREACH((__d), &(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_INSERT_HEAD(__bp, __d) \ PSLIST_WRITER_INSERT_HEAD(&(__bp)->bif_dlist_head, (__d), \ bd_bif_dlist_entry) #define BPFIF_DLIST_WRITER_REMOVE(__d) \ PSLIST_WRITER_REMOVE((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_ENTRY_INIT(__d) \ PSLIST_ENTRY_INIT((__d), bd_bif_dlist_entry) #define BPFIF_DLIST_READER_EMPTY(__bp) \ (PSLIST_READER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_WRITER_EMPTY(__bp) \ (PSLIST_WRITER_FIRST(&(__bp)->bif_dlist_head, struct bpf_d, \ bd_bif_dlist_entry) == NULL) #define BPFIF_DLIST_ENTRY_DESTROY(__d) \ PSLIST_ENTRY_DESTROY((__d), bd_bif_dlist_entry) a140 1 static void bpf_free_filter(struct bpf_filter *); a147 1 static int bpf_setf(struct bpf_d *, struct bpf_program *); a169 1 .fo_name = "bpf", d195 1 a195 1 .d_flag = D_OTHER | D_MPSAFE d303 1 a303 1 m_reset_rcvif(m); d338 1 a342 3 KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); d349 2 a350 1 BPFIF_DLIST_WRITER_INSERT_HEAD(bp, d); d361 1 a363 3 KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); a379 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); a380 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); a385 1 d387 8 a394 5 BPFIF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); if (BPFIF_DLIST_WRITER_EMPTY(bp)) { a398 1 } d402 2 a403 2 static void bpf_init(void) a406 2 bpf_psz = pserialize_create(); bpf_psref_class = psref_class_create("bpf", IPL_SOFTNET); d408 1 a408 2 PSLIST_INIT(&bpf_iflist); PSLIST_INIT(&bpf_dlist); d410 3 a412 1 bpf_gstats_percpu = percpu_alloc(sizeof(struct bpf_stat)); d414 1 a414 1 return; d418 1 a418 2 * bpfilterattach() is called at boot time. We don't need to do anything * here, since any initialization will happen as part of module init code. d424 1 d426 1 d440 1 a440 1 /* falloc() will fill in the descriptor for us. */ d444 1 a444 1 d = kmem_zalloc(sizeof(*d), KM_SLEEP); a458 6 d->bd_filter = NULL; BPF_DLIST_ENTRY_INIT(d); BPFIF_DLIST_ENTRY_INIT(d); d->bd_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); d->bd_buf_mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); cv_init(&d->bd_cv, "bpf"); d461 1 a461 1 BPF_DLIST_WRITER_INSERT_HEAD(d); d475 2 a476 1 struct bpf_d *d; d478 1 a478 6 mutex_enter(&bpf_mtx); if ((d = fp->f_bpf) == NULL) { mutex_exit(&bpf_mtx); return 0; } d485 1 a485 1 mutex_enter(d->bd_mtx); d487 1 a487 1 callout_halt(&d->bd_callout, d->bd_mtx); d491 4 a494 5 mutex_exit(d->bd_mtx); BPF_DLIST_WRITER_REMOVE(d); pserialize_perform(bpf_psz); a495 5 BPFIF_DLIST_ENTRY_DESTROY(d); BPF_DLIST_ENTRY_DESTROY(d); fp->f_bpf = NULL; bpf_freed(d); d499 2 a500 3 mutex_obj_free(d->bd_mtx); mutex_obj_free(d->bd_buf_mtx); cv_destroy(&d->bd_cv); d502 1 a502 1 kmem_free(d, sizeof(*d)); d525 1 a525 1 struct bpf_d *d = fp->f_bpf; d528 1 d538 2 a539 1 mutex_enter(d->bd_mtx); d541 1 a541 1 callout_halt(&d->bd_callout, d->bd_buf_mtx); a543 1 mutex_exit(d->bd_mtx); a548 1 mutex_enter(d->bd_buf_mtx); d552 3 a554 2 error = EWOULDBLOCK; goto out; d569 7 a575 6 error = cv_timedwait_sig(&d->bd_cv, d->bd_buf_mtx, d->bd_rtout); if (error == EINTR || error == ERESTART) goto out; d591 3 a593 2 error = 0; goto out; d599 1 a599 1 goto out; d604 1 a604 1 mutex_exit(d->bd_buf_mtx); d613 1 a613 1 mutex_enter(d->bd_buf_mtx); d617 3 a619 2 out: mutex_exit(d->bd_buf_mtx); d630 1 a630 5 mutex_enter(d->bd_buf_mtx); cv_broadcast(&d->bd_cv); mutex_exit(d->bd_buf_mtx); d650 1 d652 1 a652 1 mutex_enter(d->bd_mtx); d658 1 a658 1 mutex_exit(d->bd_mtx); d666 1 a666 2 struct bpf_d *d = fp->f_bpf; struct bpf_if *bp; d669 1 a669 1 int error; a670 2 struct psref psref; int bound; d674 5 a678 7 bound = curlwp_bind(); mutex_enter(d->bd_mtx); bp = d->bd_bif; if (bp == NULL) { mutex_exit(d->bd_mtx); error = ENXIO; goto out_bindx; a679 3 bpf_if_acquire(bp, &psref); mutex_exit(d->bd_mtx); d682 1 a682 5 ifp = bp->bif_ifp; if (if_is_deactivated(ifp)) { error = ENXIO; goto out; } d685 2 a686 2 error = 0; goto out; d689 1 a689 1 error = bpf_movein(uio, (int)bp->bif_dlt, ifp->if_mtu, &m, d691 4 a694 2 if (error) goto out; d697 1 d699 1 a699 2 error = EMSGSIZE; goto out; d708 1 a708 1 m_set_rcvif(mc, ifp); d715 2 a716 1 error = if_output_lock(ifp, ifp, m, (struct sockaddr *) &dst, NULL); d720 2 a721 3 ifp->_if_input(ifp, mc); else m_freem(mc); d723 2 d728 1 a728 5 out: bpf_if_release(bp, &psref); out_bindx: curlwp_bindx(bound); return error; d733 1 a733 1 * receive and drop counts. a737 4 KASSERT(mutex_owned(d->bd_mtx)); mutex_enter(d->bd_buf_mtx); a747 1 mutex_exit(d->bd_buf_mtx); d775 2 a776 2 struct bpf_d *d = fp->f_bpf; int error = 0; d781 1 d790 1 a790 1 mutex_enter(d->bd_mtx); d792 1 a792 1 callout_halt(&d->bd_callout, d->bd_mtx); d794 1 a794 1 mutex_exit(d->bd_mtx); d809 1 a809 1 mutex_enter(d->bd_buf_mtx); d813 1 a813 1 mutex_exit(d->bd_buf_mtx); d830 1 a830 7 /* * Forbid to change the buffer length if buffers are already * allocated. */ mutex_enter(d->bd_mtx); mutex_enter(d->bd_buf_mtx); if (d->bd_bif != NULL || d->bd_sbuf != NULL) a840 2 mutex_exit(d->bd_buf_mtx); mutex_exit(d->bd_mtx); d854 1 a854 1 mutex_enter(d->bd_mtx); d856 1 a856 1 mutex_exit(d->bd_mtx); a862 1 mutex_enter(d->bd_mtx); a863 1 mutex_exit(d->bd_mtx); d870 1 a871 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); a872 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); d876 1 a876 1 mutex_exit(d->bd_mtx); a882 1 mutex_enter(d->bd_mtx); a886 1 mutex_exit(d->bd_mtx); a892 1 mutex_enter(d->bd_mtx); a896 1 mutex_exit(d->bd_mtx); a902 2 mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); a906 2 mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); a915 1 mutex_enter(d->bd_mtx); a919 1 mutex_exit(d->bd_mtx); a928 1 mutex_enter(&bpf_mtx); a929 1 mutex_exit(&bpf_mtx); d1083 1 d1091 1 a1091 1 static int d1094 4 a1097 4 struct bpf_insn *fcode; bpfjit_func_t jcode; size_t flen, size = 0; struct bpf_filter *oldf, *newf; d1112 1 a1112 1 fcode = kmem_alloc(size, KM_SLEEP); d1115 1 a1115 1 kmem_free(fcode, size); d1125 5 a1129 12 newf = kmem_alloc(sizeof(*newf), KM_SLEEP); newf->bf_insn = fcode; newf->bf_size = size; newf->bf_jitcode = jcode; d->bd_jitcode = jcode; /* XXX just for kvm(3) users */ /* Need to hold bpf_mtx for pserialize_perform */ mutex_enter(&bpf_mtx); mutex_enter(d->bd_mtx); oldf = d->bd_filter; d->bd_filter = newf; membar_producer(); d1131 1 a1131 3 pserialize_perform(bpf_psz); mutex_exit(d->bd_mtx); mutex_exit(&bpf_mtx); d1133 6 a1138 2 if (oldf != NULL) bpf_free_filter(oldf); d1153 1 a1153 1 int unit_seen, i, error; a1154 1 KASSERT(mutex_owned(&bpf_mtx)); d1181 1 a1181 1 BPF_IFLIST_WRITER_FOREACH(bp) { a1195 4 /* * bpf_allocbufs is called only here. bpf_mtx ensures that * no race condition happen on d->bd_sbuf. */ d1201 1 a1201 1 mutex_enter(d->bd_mtx); d1203 1 a1203 1 if (d->bd_bif) { a1207 2 BPFIF_DLIST_ENTRY_INIT(d); } d1212 1 a1212 1 mutex_exit(d->bd_mtx); d1231 1 a1231 1 struct bpf_d *d = fp->f_bpf; d1234 1 a1234 1 mutex_enter(d->bd_mtx); d1242 1 a1242 1 mutex_exit(d->bd_mtx); d1257 2 a1258 1 struct bpf_d *d = fp->f_bpf; d1264 1 a1264 1 mutex_enter(&bpf_mtx); a1271 1 mutex_enter(d->bd_mtx); a1284 1 mutex_exit(d->bd_mtx); d1287 2 a1288 1 mutex_exit(&bpf_mtx); d1296 1 d1298 2 a1299 1 mutex_enter(d->bd_buf_mtx); d1301 2 a1302 1 mutex_exit(d->bd_buf_mtx); d1311 1 a1311 1 mutex_enter(d->bd_buf_mtx); d1316 1 a1316 1 mutex_exit(d->bd_buf_mtx); d1320 2 a1321 6 static const struct filterops bpfread_filtops = { .f_isfd = 1, .f_attach = NULL, .f_detach = filt_bpfrdetach, .f_event = filt_bpfread, }; d1326 1 a1326 1 struct bpf_d *d = fp->f_bpf; d1328 3 a1331 1 mutex_enter(d->bd_buf_mtx); d1339 1 a1339 1 mutex_exit(d->bd_buf_mtx); d1345 1 d1347 2 a1348 1 mutex_exit(d->bd_buf_mtx); d1391 1 a1391 1 uint32_t mem[BPF_MEMWORDS]; d1396 1 a1396 1 .mem = mem, a1398 2 bool gottime = false; struct timespec ts; a1399 1 int s; d1401 2 a1402 1 KASSERT(!cpu_intr_p()); d1409 2 a1410 4 s = pserialize_read_enter(); BPFIF_DLIST_READER_FOREACH(d, bp) { u_int slen = 0; struct bpf_filter *filter; d1415 2 a1416 2 atomic_inc_ulong(&d->bd_rcount); BPF_STATINC(recv); d1418 4 a1421 9 filter = d->bd_filter; membar_datadep_consumer(); if (filter != NULL) { if (filter->bf_jitcode != NULL) slen = filter->bf_jitcode(NULL, &args); else slen = bpf_filter_ext(NULL, filter->bf_insn, &args); } a1429 1 /* Assume catchpacket doesn't sleep */ a1431 1 pserialize_read_exit(s); d1458 1 a1458 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { d1475 1 a1475 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif_index != 0); d1489 1 a1489 1 if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif_index == 0) { d1506 1 a1506 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif_index != 0); d1538 1 d1549 1 d1551 1 d1566 1 d1578 1 d1580 1 a1583 82 static struct mbuf * bpf_mbuf_enqueue(struct bpf_if *bp, struct mbuf *m) { struct mbuf *dup; dup = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (dup == NULL) return NULL; if (bp->bif_mbuf_tail != NULL) { bp->bif_mbuf_tail->m_nextpkt = dup; } else { bp->bif_mbuf_head = dup; } bp->bif_mbuf_tail = dup; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: enqueued mbuf=%p to %s\n", __func__, dup, bp->bif_ifp->if_xname); #endif return dup; } static struct mbuf * bpf_mbuf_dequeue(struct bpf_if *bp) { struct mbuf *m; int s; /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); m = bp->bif_mbuf_head; if (m != NULL) { bp->bif_mbuf_head = m->m_nextpkt; m->m_nextpkt = NULL; if (bp->bif_mbuf_head == NULL) bp->bif_mbuf_tail = NULL; #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: dequeued mbuf=%p from %s\n", __func__, m, bp->bif_ifp->if_xname); #endif } splx(s); return m; } static void bpf_mtap_si(void *arg) { struct bpf_if *bp = arg; struct mbuf *m; while ((m = bpf_mbuf_dequeue(bp)) != NULL) { #ifdef BPF_MTAP_SOFTINT_DEBUG log(LOG_DEBUG, "%s: tapping mbuf=%p on %s\n", __func__, m, bp->bif_ifp->if_xname); #endif bpf_ops->bpf_mtap(bp, m); m_freem(m); } } static void _bpf_mtap_softint(struct ifnet *ifp, struct mbuf *m) { struct bpf_if *bp = ifp->if_bpf; struct mbuf *dup; KASSERT(cpu_intr_p()); /* To avoid extra invocations of the softint */ if (BPFIF_DLIST_READER_EMPTY(bp)) return; KASSERT(bp->bif_si != NULL); dup = bpf_mbuf_enqueue(bp, m); if (dup != NULL) softint_schedule(bp->bif_si); } d1619 2 a1620 2 atomic_inc_ulong(&d->bd_ccount); BPF_STATINC(capt); a1637 1 mutex_enter(d->bd_buf_mtx); a1653 1 mutex_exit(d->bd_buf_mtx); d1658 2 a1659 2 atomic_inc_ulong(&d->bd_dcount); BPF_STATINC(drop); a1705 1 mutex_exit(d->bd_buf_mtx); d1722 1 a1722 1 d->bd_fbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1725 1 a1725 1 d->bd_sbuf = kmem_alloc(d->bd_bufsize, KM_NOSLEEP); d1727 1 a1727 1 kmem_free(d->bd_fbuf, d->bd_bufsize); a1734 13 static void bpf_free_filter(struct bpf_filter *filter) { KASSERT(filter != NULL); KASSERT(filter->bf_insn != NULL); kmem_free(filter->bf_insn, filter->bf_size); if (filter->bf_jitcode != NULL) bpf_jit_freecode(filter->bf_jitcode); kmem_free(filter, sizeof(*filter)); } d1748 1 a1748 1 kmem_free(d->bd_sbuf, d->bd_bufsize); d1750 1 a1750 1 kmem_free(d->bd_hbuf, d->bd_bufsize); d1752 1 a1752 1 kmem_free(d->bd_fbuf, d->bd_bufsize); d1754 5 a1758 3 if (d->bd_filter != NULL) { bpf_free_filter(d->bd_filter); d->bd_filter = NULL; a1759 1 d->bd_jitcode = NULL; d1771 1 a1771 1 bp = kmem_alloc(sizeof(*bp), KM_NOSLEEP); d1775 1 a1775 1 mutex_enter(&bpf_mtx); a1778 4 bp->bif_si = NULL; BPF_IFLIST_ENTRY_INIT(bp); PSLIST_INIT(&bp->bif_dlist_head); psref_target_init(&bp->bif_psref, bpf_psref_class); d1780 2 a1781 1 BPF_IFLIST_WRITER_INSERT_HEAD(bp); a1785 1 mutex_exit(&bpf_mtx); a1790 23 static void _bpf_mtap_softint_init(struct ifnet *ifp) { struct bpf_if *bp; mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { if (bp->bif_ifp != ifp) continue; bp->bif_mbuf_head = NULL; bp->bif_mbuf_tail = NULL; bp->bif_si = softint_establish(SOFTINT_NET, bpf_mtap_si, bp); if (bp->bif_si == NULL) panic("%s: softint_establish() failed", __func__); break; } mutex_exit(&bpf_mtx); if (bp == NULL) panic("%s: no bpf_if found for %s", __func__, ifp->if_xname); } d1797 1 a1797 1 struct bpf_if *bp; a1800 1 mutex_enter(&bpf_mtx); d1802 1 a1802 3 again_d: BPF_DLIST_WRITER_FOREACH(d) { mutex_enter(d->bd_mtx); d1808 1 d1811 1 a1811 2 mutex_exit(d->bd_mtx); goto again_d; a1812 1 mutex_exit(d->bd_mtx); d1816 2 a1817 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1819 2 a1820 18 BPF_IFLIST_WRITER_REMOVE(bp); pserialize_perform(bpf_psz); psref_target_destroy(&bp->bif_psref, bpf_psref_class); BPF_IFLIST_ENTRY_DESTROY(bp); if (bp->bif_si != NULL) { /* XXX NOMPSAFE: assumed running on one CPU */ s = splnet(); while (bp->bif_mbuf_head != NULL) { struct mbuf *m = bp->bif_mbuf_head; bp->bif_mbuf_head = m->m_nextpkt; m_freem(m); } splx(s); softint_disestablish(bp->bif_si); } kmem_free(bp, sizeof(*bp)); a1823 1 mutex_exit(&bpf_mtx); d1834 1 a1834 2 mutex_enter(&bpf_mtx); BPF_IFLIST_WRITER_FOREACH(bp) { a1843 1 mutex_exit(&bpf_mtx); a1854 3 int s, bound; KASSERT(mutex_owned(d->bd_mtx)); d1859 1 a1859 4 bound = curlwp_bind(); s = pserialize_read_enter(); BPF_IFLIST_READER_FOREACH(bp) { d1863 1 a1863 4 struct psref psref; if (n >= bfl->bfl_len) { pserialize_read_exit(s); a1864 5 } bpf_if_acquire(bp, &psref); pserialize_read_exit(s); a1866 3 s = pserialize_read_enter(); bpf_if_release(bp, &psref); a1869 3 pserialize_read_exit(s); curlwp_bindx(bound); d1880 1 a1880 1 int error, opromisc; a1883 3 KASSERT(mutex_owned(&bpf_mtx)); KASSERT(mutex_owned(d->bd_mtx)); d1887 1 a1887 1 BPF_IFLIST_WRITER_FOREACH(bp) { d1893 1 a1895 1 BPFIF_DLIST_ENTRY_INIT(d); a1898 1 KERNEL_LOCK_UNLESS_NET_MPSAFE(); a1899 1 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); d1906 1 d1995 1 a1995 1 BPF_DLIST_WRITER_FOREACH(dp) { a2008 1 mutex_enter(dp->bd_mtx); a2014 1 mutex_exit(dp->bd_mtx); a2032 32 static void bpf_stats(void *p, void *arg, struct cpu_info *ci __unused) { struct bpf_stat *const stats = p; struct bpf_stat *sum = arg; sum->bs_recv += stats->bs_recv; sum->bs_drop += stats->bs_drop; sum->bs_capt += stats->bs_capt; } static int bpf_sysctl_gstats_handler(SYSCTLFN_ARGS) { struct sysctlnode node; int error; struct bpf_stat sum; memset(&sum, 0, sizeof(sum)); node = *rnode; percpu_foreach(bpf_gstats_percpu, bpf_stats, &sum); node.sysctl_data = ∑ node.sysctl_size = sizeof(sum); error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; return 0; } d2065 1 a2065 1 bpf_sysctl_gstats_handler, 0, NULL, 0, a2087 3 .bpf_mtap_softint = _bpf_mtap_softint, .bpf_mtap_softint_init = _bpf_mtap_softint_init, d2090 1 a2090 1 MODULE(MODULE_CLASS_DRIVER, bpf, "bpf_filter"); a2094 1 #ifdef _MODULE d2096 3 a2098 2 #endif int error = 0; d2102 1 a2102 3 bpf_init(); #ifdef _MODULE bmajor = cmajor = NODEVMAJOR; d2105 2 a2108 1 #endif @ 1.170 log @Build fix for some ports. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.169 2012/08/01 23:24:29 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.169 2012/08/01 23:24:29 rmind Exp $"); d1382 5 d1389 1 a1389 1 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); d1412 1 a1412 1 bpf_deliver(bp, memcpy, pkt, pktlen, 0, true); d1710 1 a1710 1 if (d->bd_sbuf) { d1712 1 a1712 1 if (d->bd_hbuf) d1714 1 a1714 1 if (d->bd_fbuf) @ 1.169 log @Add BPF JIT compiler, currently supporting amd64 and i386. Code obtained from FreeBSD. Also, make few BPF fixes and simplifications while here. Note that bpf_jit_enable is false for now. OK dyoung@@, some feedback from matt@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $"); d127 2 a128 2 #define bpf_jit(x, y) NULL #define bpf_destroy_jit_filter(x) @ 1.168 log @make comment reflect reality @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.167 2011/12/15 22:20:26 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.167 2011/12/15 22:20:26 christos Exp $"); d82 1 a111 1 d123 8 d141 1 a141 1 void *, u_int, u_int, struct ifnet *); d1066 2 a1067 1 u_int flen, size; d1070 24 a1093 11 old = d->bd_filter; if (fp->bf_insns == 0) { if (fp->bf_len != 0) return (EINVAL); s = splnet(); d->bd_filter = 0; reset_d(d); splx(s); if (old != 0) free(old, M_DEVBUF); return (0); a1094 3 flen = fp->bf_len; if (flen > BPF_MAXINSNS) return (EINVAL); d1096 7 a1102 10 size = flen * sizeof(*fp->bf_insns); fcode = malloc(size, M_DEVBUF, M_WAITOK); if (copyin(fp->bf_insns, fcode, size) == 0 && bpf_validate(fcode, (int)flen)) { s = splnet(); d->bd_filter = fcode; reset_d(d); splx(s); if (old != 0) free(old, M_DEVBUF); d1104 5 a1108 1 return (0); d1110 1 a1110 2 free(fcode, M_DEVBUF); return (EINVAL); a1323 33 * Incoming linkage from device drivers. Process the packet pkt, of length * pktlen, which is stored in a contiguous buffer. The packet is parsed * by each process' filter, and if accepted, stashed into the corresponding * buffer. */ static void _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) { struct bpf_d *d; u_int slen; struct timespec ts; int gottime=0; /* * Note that the ipl does not have to be raised at this point. * The only problem that could arise here is that if two different * interfaces shared any data. This is not the case. */ for (d = bp->bif_dlist; d != 0; d = d->bd_next) { ++d->bd_rcount; ++bpf_gstats.bs_recv; slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); if (slen != 0) { if (!gottime) { nanotime(&ts); gottime = 1; } catchpacket(d, pkt, pktlen, slen, memcpy, &ts); } } } /* d1351 3 a1353 3 * marg pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if marg is a data buffer * cpfn a function that can copy marg into the listener's buffer d1355 1 a1355 1 * rcvif either NULL or the interface the packet came in on. d1359 1 a1359 1 void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif) a1360 1 u_int slen; d1363 10 a1372 1 int gottime = 0; d1374 1 a1374 2 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { if (!d->bd_seesent && (rcvif == NULL)) a1375 9 ++d->bd_rcount; ++bpf_gstats.bs_recv; slen = bpf_filter(d->bd_filter, marg, pktlen, buflen); if (slen != 0) { if(!gottime) { nanotime(&ts); gottime = 1; } catchpacket(d, marg, pktlen, slen, cpfn, &ts); d1377 17 d1398 13 d1438 1 a1438 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif); d1469 1 a1469 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif); d1705 1 a1705 1 if (d->bd_sbuf != 0) { d1707 1 a1707 1 if (d->bd_hbuf != 0) d1709 1 a1709 1 if (d->bd_fbuf != 0) d1714 2 @ 1.168.6.1 log @Pull up following revision(s) (requested by spz in ticket #941): sys/net/bpf.c: revision 1.176 PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $"); d1563 5 a1567 2 char *h; int totlen, curlen, caplen; a1581 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1621 1 h = (char *)d->bd_sbuf + curlen; d1624 1 a1624 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1629 5 a1633 1 hp32->bh_caplen = caplen; d1637 1 a1637 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1642 6 a1647 1 hp->bh_caplen = caplen; a1648 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); @ 1.168.8.1 log @Pull up following revision(s) (requested by spz in ticket #941): sys/net/bpf.c: revision 1.176 PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $"); d1563 5 a1567 2 char *h; int totlen, curlen, caplen; a1581 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1621 1 h = (char *)d->bd_sbuf + curlen; d1624 1 a1624 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1629 5 a1633 1 hp32->bh_caplen = caplen; d1637 1 a1637 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1642 6 a1647 1 hp->bh_caplen = caplen; a1648 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); @ 1.168.2.1 log @Pull up following revision(s) (requested by spz in ticket #941): sys/net/bpf.c: revision 1.176 PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $"); d1563 5 a1567 2 char *h; int totlen, curlen, caplen; a1581 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1621 1 h = (char *)d->bd_sbuf + curlen; d1624 1 a1624 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1629 5 a1633 1 hp32->bh_caplen = caplen; d1637 1 a1637 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1642 6 a1647 1 hp->bh_caplen = caplen; a1648 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); @ 1.167 log @don't leak mbufs. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.166 2011/08/30 14:22:22 bouyer Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.166 2011/08/30 14:22:22 bouyer Exp $"); d1550 1 d1553 4 a1556 4 * store buffer. Return 1 if it's time to wakeup a listener (buffer full), * otherwise 0. "copy" is the routine called to do the actual data * transfer. memcpy is passed in to copy contiguous chunks, while * bpf_mcpy is passed in to copy mbuf chains. In the latter case, @ 1.166 log @Provide netbsd32 compat for bpf. Beside the ioctls, the structure returned to userland by read(2) also needs to be converted. For this, the bpf descriptor is flagged as compat32 (or not) in the open and ioctl functions (where the user process's pid is also updated in the descriptor). When the bpf buffer is filled in, the 32bits or native header is used depending on the information stored in the descriptor. This won't work if a 64bit binary does the open and ioctls, and then exec a 32bit program which will do the read. But this is very unlikely to happen in real life ... Tested on i386 and loongson; with these changes my loongson can run dhclient and tcpdump with a n32 userland. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.165 2011/06/10 00:10:35 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.165 2011/06/10 00:10:35 christos Exp $"); a683 1 } else d685 1 @ 1.166.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.166 2011/08/30 14:22:22 bouyer Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.166 2011/08/30 14:22:22 bouyer Exp $"); d684 1 a685 1 } a1549 1 d1552 4 a1555 4 * store buffer. Call the wakeup functions if it's time to wakeup * a listener (buffer full), "cpfn" is the routine called to do the * actual data transfer. memcpy is passed in to copy contiguous chunks, * while bpf_mcpy is passed in to copy mbuf chains. In the latter case, @ 1.166.2.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.166.2.1 2012/04/17 00:08:37 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.166.2.1 2012/04/17 00:08:37 yamt Exp $"); a82 1 #include a109 1 bool bpf_jit = false; a110 4 struct bpfjit_ops bpfjit_module_ops = { .bj_generate_code = NULL, .bj_free_code = NULL }; d133 1 a133 1 void *, u_int, u_int, const bool); a421 1 d->bd_jitcode = NULL; d1058 1 a1058 2 bpfjit_function_t jcode, oldj; size_t flen, size; d1061 12 a1072 1 jcode = NULL; d1074 2 d1077 10 a1086 3 if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) { return EINVAL; } d1088 1 a1088 30 if (flen) { /* * Allocate the buffer, copy the byte-code from * userspace and validate it. */ size = flen * sizeof(*fp->bf_insns); fcode = malloc(size, M_DEVBUF, M_WAITOK); if (copyin(fp->bf_insns, fcode, size) != 0 || !bpf_validate(fcode, (int)flen)) { free(fcode, M_DEVBUF); return EINVAL; } membar_consumer(); if (bpf_jit && bpfjit_module_ops.bj_generate_code != NULL) { jcode = bpfjit_module_ops.bj_generate_code(fcode, flen); } } else { fcode = NULL; } s = splnet(); old = d->bd_filter; d->bd_filter = fcode; oldj = d->bd_jitcode; d->bd_jitcode = jcode; reset_d(d); splx(s); if (old) { free(old, M_DEVBUF); d1090 2 a1091 7 if (oldj != NULL) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(oldj); } return 0; d1305 33 d1365 3 a1367 3 * pkt pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if pkt is a data buffer * cpfn a function that can copy pkt into the listener's buffer d1369 1 a1369 1 * rcv true if packet came in d1373 1 a1373 1 void *pkt, u_int pktlen, u_int buflen, const bool rcv) d1375 1 d1378 1 a1378 9 bool gottime = false; /* * Note that the IPL does not have to be raised at this point. * The only problem that could arise here is that if two different * interfaces shared any data. This is not the case. */ for (d = bp->bif_dlist; d != NULL; d = d->bd_next) { u_int slen; d1380 2 a1381 12 if (!d->bd_seesent && !rcv) { continue; } d->bd_rcount++; bpf_gstats.bs_recv++; if (d->bd_jitcode != NULL) slen = d->bd_jitcode(pkt, pktlen, buflen); else slen = bpf_filter(d->bd_filter, pkt, pktlen, buflen); if (!slen) { d1383 9 a1392 5 if (!gottime) { gottime = true; nanotime(&ts); } catchpacket(d, pkt, pktlen, slen, cpfn, &ts); a1396 13 * Incoming linkage from device drivers. Process the packet pkt, of length * pktlen, which is stored in a contiguous buffer. The packet is parsed * by each process' filter, and if accepted, stashed into the corresponding * buffer. */ static void _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) { bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true); } /* d1424 1 a1424 1 bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif != NULL); d1455 1 a1455 1 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif != NULL); d1691 1 a1691 1 if (d->bd_sbuf != NULL) { d1693 1 a1693 1 if (d->bd_hbuf != NULL) d1695 1 a1695 1 if (d->bd_fbuf != NULL) a1699 5 if (d->bd_jitcode != NULL) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(d->bd_jitcode); } a1871 30 sysctl_net_bpf_jit(SYSCTLFN_ARGS) { bool newval; int error; struct sysctlnode node; node = *rnode; node.sysctl_data = &newval; newval = bpf_jit; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error != 0 || newp == NULL) return error; bpf_jit = newval; /* * Do a full sync to publish new bpf_jit value and * update bpfjit_module_ops.bj_generate_code variable. */ membar_sync(); if (newval && bpfjit_module_ops.bj_generate_code == NULL) { printf("WARNING jit activation is postponed " "until after bpfjit module is loaded\n"); } return 0; } static int a1962 6 CTLTYPE_BOOL, "jit", SYSCTL_DESCR("Toggle Just-In-Time compilation"), sysctl_net_bpf_jit, 0, &bpf_jit, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, @ 1.166.2.3 log @sync with head. for a reference, the tree before this commit was tagged as yamt-pagecache-tag8. this commit was splitted into small chunks to avoid a limitation of cvs. ("Protocol error: too many arguments") @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.166.2.2 2012/10/30 17:22:42 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.166.2.2 2012/10/30 17:22:42 yamt Exp $"); d184 2 a185 11 .d_open = bpfopen, .d_close = noclose, .d_read = noread, .d_write = nowrite, .d_ioctl = noioctl, .d_stop = nostop, .d_tty = notty, .d_poll = nopoll, .d_mmap = nommap, .d_kqfilter = nokqfilter, .d_flag = D_OTHER a187 17 bpfjit_func_t bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size) { membar_consumer(); if (bpfjit_module_ops.bj_generate_code != NULL) { return bpfjit_module_ops.bj_generate_code(bc, code, size); } return NULL; } void bpf_jit_freecode(bpfjit_func_t jcode) { KASSERT(bpfjit_module_ops.bj_free_code != NULL); bpfjit_module_ops.bj_free_code(jcode); } d342 1 a342 1 int error __diagused; d353 2 a354 4 #ifdef DIAGNOSTIC if (error) printf("%s: ifpromisc failed: %d", __func__, error); #endif d1065 1 a1065 1 bpfjit_func_t jcode, oldj; d1089 2 a1090 3 if (bpf_jit) { bpf_ctx_t *bc = bpf_default_ctx(); jcode = bpf_jit_generate(bc, fcode, flen); d1107 4 a1110 2 if (oldj) { bpf_jit_freecode(oldj); a1363 7 bpf_ctx_t *bc = bpf_default_ctx(); bpf_args_t args = { .pkt = pkt, .wirelen = pktlen, .buflen = buflen, .arg = NULL }; d1382 1 a1382 1 if (d->bd_jitcode) d1385 1 a1385 1 slen = bpf_filter_ext(bc, d->bd_filter, &args); d1578 5 a1582 2 char *h; int totlen, curlen, caplen; a1596 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1636 1 h = (char *)d->bd_sbuf + curlen; d1639 1 a1639 3 struct bpf_hdr32 *hp32; hp32 = (struct bpf_hdr32 *)h; d1644 5 a1648 1 hp32->bh_caplen = caplen; d1652 1 a1652 3 struct bpf_hdr *hp; hp = (struct bpf_hdr *)h; d1657 6 a1662 1 hp->bh_caplen = caplen; a1663 5 /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)(h + hdrlen, pkt, caplen); d1717 2 a1718 1 bpf_jit_freecode(d->bd_jitcode); d1997 6 @ 1.166.6.1 log @merge to -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.168 2011/12/16 03:05:23 christos Exp $"); d684 1 a685 1 } a1549 1 d1552 4 a1555 4 * store buffer. Call the wakeup functions if it's time to wakeup * a listener (buffer full), "cpfn" is the routine called to do the * actual data transfer. memcpy is passed in to copy contiguous chunks, * while bpf_mcpy is passed in to copy mbuf chains. In the latter case, @ 1.165 log @setting things once is enough. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.164 2011/03/30 21:34:08 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.164 2011/03/30 21:34:08 christos Exp $"); d145 1 d413 4 d746 6 d1533 17 d1563 3 d1567 1 a1567 1 int hdrlen = d->bd_bif->bif_hdrlen; d1585 6 a1590 1 curlen = BPF_WORDALIGN(d->bd_slen); d1621 27 a1647 9 hp = (struct bpf_hdr *)((char *)d->bd_sbuf + curlen); hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; hp->bh_datalen = pktlen; hp->bh_hdrlen = hdrlen; /* * Copy the packet data into the store buffer and update its length. */ (*cpfn)((u_char *)hp + hdrlen, pkt, (hp->bh_caplen = totlen - hdrlen)); d1724 1 a1724 8 /* * Compute the length of the bpf header. This is not necessarily * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for * performance reasons and to alleviate alignment restrictions). */ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; d1782 1 a1782 7 /* * Compute the length of the bpf header. This is not necessarily * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for * performance reasons and to alleviate alignment restrictions). */ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; @ 1.164 log @lib/44807: something broken in stat(2), return that we are a character device in st_mode. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.163 2011/03/30 18:04:27 bouyer Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.163 2011/03/30 18:04:27 bouyer Exp $"); a1865 1 BPF_EXT(promisc); @ 1.164.2.1 log @Catchup with rmind-uvmplock merge. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.165 2011/06/10 00:10:35 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.165 2011/06/10 00:10:35 christos Exp $"); d1866 1 @ 1.163 log @Allocate buffers with (M_WAITOK | M_CANFAIL) instead of M_NOWAIT. M_NOWAIT cause dhcpd on a low-memory server with lots of interfaces to occasionally fail to start with ENOBUFS; (M_WAITOK | M_CANFAIL) seems to fix this. Tested on 3 different dhcp servers. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.162 2011/01/22 19:12:58 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.162 2011/01/22 19:12:58 christos Exp $"); d1181 1 @ 1.162 log @undo previous. Read the diff wrong. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.161 2011/01/22 16:54:48 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.161 2011/01/22 16:54:48 christos Exp $"); d1610 1 a1610 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1613 1 a1613 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); @ 1.161 log @fix comment @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.160 2011/01/02 21:03:45 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.160 2011/01/02 21:03:45 christos Exp $"); d1405 2 a1406 1 * absolutely needed--this mbuf should never go anywhere else. */ @ 1.160 log @kern/44310: Alexander Nasonov: write to /dev/bpf truncates size_t to int @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.159 2010/12/08 17:10:13 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.159 2010/12/08 17:10:13 pooka Exp $"); d1405 1 a1405 2 * absolutely needed--this mbuf should never go anywhere else. */ @ 1.160.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.164 2011/03/30 21:34:08 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.164 2011/03/30 21:34:08 christos Exp $"); a1180 1 st->st_mode = S_IFCHR; d1610 1 a1610 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); d1613 1 a1613 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); @ 1.160.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.162 2011/01/22 19:12:58 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.162 2011/01/22 19:12:58 christos Exp $"); @ 1.159 log @linkset no more @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.158 2010/04/14 13:31:33 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.158 2010/04/14 13:31:33 pooka Exp $"); d137 1 a137 1 static int bpf_movein(struct uio *, int, int, d182 1 a182 1 bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp, d187 3 a189 3 int len; int hlen; int align; d256 1 a256 1 if (len < hlen || len - hlen > mtu) d264 1 a264 1 if ((unsigned)len > MCLBYTES - align) d269 2 a270 2 m->m_pkthdr.len = len - hlen; if (len > MHLEN - align) { d281 1 a281 1 m->m_len -= align; d292 1 a292 1 m->m_len = len; @ 1.158 log @Add a little comment on how bpf can be made unloadable, per pointer from ad. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.157 2010/04/05 07:22:22 joerg Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.157 2010/04/05 07:22:22 joerg Exp $"); d1899 3 a1901 1 SYSCTL_SETUP(sysctl_net_bpf_setup, "sysctl net.bpf subtree setup") d1905 1 a1905 1 sysctl_createv(clog, 0, NULL, NULL, d1912 1 a1912 1 sysctl_createv(clog, 0, NULL, &node, d1919 1 a1919 1 sysctl_createv(clog, 0, NULL, NULL, d1925 1 a1925 1 sysctl_createv(clog, 0, NULL, NULL, d1931 1 a1931 1 sysctl_createv(clog, 0, NULL, NULL, d1977 1 d2002 1 @ 1.157 log @Push the bpf_ops usage back into bpf.h. Push the common ifp->if_bpf check into the inline functions as well the fourth argument for bpf_attach. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.156 2010/03/13 20:38:48 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.156 2010/03/13 20:38:48 christos Exp $"); d1979 18 a1996 2 * bpf_ops is not (yet) referenced in the callers before * attach. maybe other issues too. "safety first". @ 1.156 log @add BIOC{G,S}FEEDBACK which allows one to receive injected outgoing packets via bpf. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.155 2010/01/26 01:06:23 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.155 2010/01/26 01:06:23 pooka Exp $"); d1299 1 a1299 1 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) d1389 1 a1389 1 bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) d1419 1 a1419 1 bpf_mtap(struct bpf_if *bp, struct mbuf *m) d1454 1 a1454 1 bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1463 1 a1463 19 bpf_mtap(bp, &m0); } static void bpf_mtap_et(struct bpf_if *bp, uint16_t et, struct mbuf *m) { struct mbuf m0; m0.m_flags = 0; m0.m_next = m; m0.m_len = 14; m0.m_data = m0.m_dat; ((uint32_t *)m0.m_data)[0] = 0; ((uint32_t *)m0.m_data)[1] = 0; ((uint32_t *)m0.m_data)[2] = 0; ((uint16_t *)m0.m_data)[6] = et; bpf_mtap(bp, &m0); d1473 1 a1473 1 bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) d1487 1 a1487 1 bpf_mtap(bp, *m); d1499 1 a1499 1 bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) d1516 1 a1516 1 bpf_mtap(bp, &m0); d1652 1 a1652 1 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1686 1 a1686 1 bpfdetach(struct ifnet *ifp) d1721 1 a1721 1 bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1940 10 a1949 11 .bpf_attach = bpfattach, .bpf_detach = bpfdetach, .bpf_change_type = bpf_change_type, .bpf_tap = bpf_tap, .bpf_mtap = bpf_mtap, .bpf_mtap2 = bpf_mtap2, .bpf_mtap_af = bpf_mtap_af, .bpf_mtap_et = bpf_mtap_et, .bpf_mtap_sl_in = bpf_mtap_sl_in, .bpf_mtap_sl_out = bpf_mtap_sl_out, @ 1.156.2.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.156 2010/03/13 20:38:48 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.156 2010/03/13 20:38:48 christos Exp $"); d1299 1 a1299 1 _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) d1389 1 a1389 1 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) d1419 1 a1419 1 _bpf_mtap(struct bpf_if *bp, struct mbuf *m) d1454 1 a1454 1 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1463 19 a1481 1 _bpf_mtap(bp, &m0); d1491 1 a1491 1 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) d1505 1 a1505 1 _bpf_mtap(bp, *m); d1517 1 a1517 1 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) d1534 1 a1534 1 _bpf_mtap(bp, &m0); d1670 1 a1670 1 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1704 1 a1704 1 _bpfdetach(struct ifnet *ifp) d1739 1 a1739 1 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1958 11 a1968 10 .bpf_attach = _bpfattach, .bpf_detach = _bpfdetach, .bpf_change_type = _bpf_change_type, .bpf_tap = _bpf_tap, .bpf_mtap = _bpf_mtap, .bpf_mtap2 = _bpf_mtap2, .bpf_mtap_af = _bpf_mtap_af, .bpf_mtap_sl_in = _bpf_mtap_sl_in, .bpf_mtap_sl_out = _bpf_mtap_sl_out, d1998 2 a1999 18 * While there is no reference counting for bpf callers, * unload could at least in theory be done similarly to * system call disestablishment. This should even be * a little simpler: * * 1) replace op vector with stubs * 2) post update to all cpus with xc * 3) check that nobody is in bpf anymore * (it's doubtful we'd want something like l_sysent, * but we could do something like *signed* percpu * counters. if the sum is 0, we're good). * 4) if fail, unroll changes * * NOTE: change won't be atomic to the outside. some * packets may be not captured even if unload is * not succesful. I think packet capture not working * is a perfectly logical consequence of trying to * disable packet capture. @ 1.156.2.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d137 1 a137 1 static int bpf_movein(struct uio *, int, uint64_t, d182 1 a182 1 bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp, d187 3 a189 3 size_t len; size_t hlen; size_t align; d256 1 a256 1 if (len - hlen > mtu) d264 1 a264 1 if (len + align > MCLBYTES) d269 2 a270 2 m->m_pkthdr.len = (int)(len - hlen); if (len + align > MHLEN) { d281 1 a281 1 m->m_len -= (int)align; d292 1 a292 1 m->m_len = (int)len; d1899 1 a1899 3 static struct sysctllog *bpf_sysctllog; static void sysctl_net_bpf_setup(void) d1903 1 a1903 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d1910 1 a1910 1 sysctl_createv(&bpf_sysctllog, 0, NULL, &node, d1917 1 a1917 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d1923 1 a1923 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, d1929 1 a1929 1 sysctl_createv(&bpf_sysctllog, 0, NULL, NULL, a1974 1 sysctl_net_bpf_setup(); a1998 1 /* insert sysctl teardown */ @ 1.156.2.3 log @sync with head @ text @a1180 1 st->st_mode = S_IFCHR; d1610 1 a1610 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); d1613 1 a1613 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); @ 1.156.2.4 log @sync with head @ text @d1866 1 @ 1.155 log @Include sys/atomic.h now that it's used but gets stealth-included only on some archs. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.154 2010/01/25 22:18:17 pooka Exp $"); d410 1 d626 1 a626 1 struct mbuf *m; d663 10 d675 6 d724 4 d999 14 d1394 6 d1425 6 a1437 1 /*###1299 [cc] warning: assignment from incompatible pointer type%%%*/ @ 1.155.2.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); a409 1 d->bd_feedback = 0; d625 1 a625 1 struct mbuf *m, *mc; a661 10 if (d->bd_feedback) { mc = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (mc != NULL) mc->m_pkthdr.rcvif = ifp; /* Set M_PROMISC for outgoing packets to be discarded. */ if (1 /*d->bd_direction == BPF_D_INOUT*/) m->m_flags |= M_PROMISC; } else mc = NULL; a663 6 if (mc != NULL) { if (error == 0) (*ifp->if_input)(ifp, mc); } else m_freem(mc); a706 4 * BIOCSFEEDBACK Set packet feedback mode. * BIOCGFEEDBACK Get packet feedback mode. * BIOCGSEESENT Get "see sent packets" mode. * BIOCSSEESENT Set "see sent packets" mode. a977 14 /* * Set "feed packets from bpf back to input" mode */ case BIOCSFEEDBACK: d->bd_feedback = *(u_int *)addr; break; /* * Get "feed packets from bpf back to input" mode */ case BIOCGFEEDBACK: *(u_int *)addr = d->bd_feedback; break; d1264 1 a1264 1 _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) d1354 1 a1354 1 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) a1358 6 /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { m->m_flags &= ~M_PROMISC; return; } d1378 1 a1378 1 _bpf_mtap(struct bpf_if *bp, struct mbuf *m) a1383 6 /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { m->m_flags &= ~M_PROMISC; return; } d1391 1 d1408 1 a1408 1 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1417 19 a1435 1 _bpf_mtap(bp, &m0); d1445 1 a1445 1 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) d1459 1 a1459 1 _bpf_mtap(bp, *m); d1471 1 a1471 1 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) d1488 1 a1488 1 _bpf_mtap(bp, &m0); d1624 1 a1624 1 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1658 1 a1658 1 _bpfdetach(struct ifnet *ifp) d1693 1 a1693 1 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1912 11 a1922 10 .bpf_attach = _bpfattach, .bpf_detach = _bpfdetach, .bpf_change_type = _bpf_change_type, .bpf_tap = _bpf_tap, .bpf_mtap = _bpf_mtap, .bpf_mtap2 = _bpf_mtap2, .bpf_mtap_af = _bpf_mtap_af, .bpf_mtap_sl_in = _bpf_mtap_sl_in, .bpf_mtap_sl_out = _bpf_mtap_sl_out, d1952 2 a1953 18 * While there is no reference counting for bpf callers, * unload could at least in theory be done similarly to * system call disestablishment. This should even be * a little simpler: * * 1) replace op vector with stubs * 2) post update to all cpus with xc * 3) check that nobody is in bpf anymore * (it's doubtful we'd want something like l_sysent, * but we could do something like *signed* percpu * counters. if the sum is 0, we're good). * 4) if fail, unroll changes * * NOTE: change won't be atomic to the outside. some * packets may be not captured even if unload is * not succesful. I think packet capture not working * is a perfectly logical consequence of trying to * disable packet capture. @ 1.154 log @Make bpf dynamically loadable. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.153 2010/01/19 22:08:00 pooka Exp $"); d63 1 @ 1.153 log @Redefine bpf linkage through an always present op vector, i.e. #if NBPFILTER is no longer required in the client. This change doesn't yet add support for loading bpf as a module, since drivers can register before bpf is attached. However, callers of bpf can now be modularized. Dynamically loadable bpf could probably be done fairly easily with coordination from the stub driver and the real driver by registering attachments in the stub before the real driver is loaded and doing a handoff. ... and I'm not going to ponder the depths of unload here. Tested with i386/MONOLITHIC, modified MONOLITHIC without bpf and rump. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.152 2010/01/17 19:45:06 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.152 2010/01/17 19:45:06 pooka Exp $"); d61 2 d364 14 d386 1 d388 1 a388 7 mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE); LIST_INIT(&bpf_list); bpf_gstats.bs_recv = 0; bpf_gstats.bs_drop = 0; bpf_gstats.bs_capt = 0; d1924 4 a1927 2 void bpf_setops() d1929 27 d1957 6 a1962 1 bpf_ops = &bpf_ops_kernel; @ 1.152 log @Forward declare struct bpf_if and use that as the type for bpf_if instead of "void *". Buys us oo times the type-safety for 0 times the price. (no functional change) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.151 2010/01/15 22:16:46 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.151 2010/01/15 22:16:46 pooka Exp $"); d1251 1 a1251 1 void d1341 1 a1341 1 void d1365 1 a1365 1 void d1395 1 a1395 1 void d1408 1 a1408 1 void a1425 1 #if NSL > 0 || NSTRIP > 0 d1432 1 a1432 1 void d1458 1 a1458 1 void a1479 1 #endif d1607 1 a1607 12 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the * fixed size of the link header (variable length headers not yet supported). */ void bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) { bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); } /* * Attach additional dlt for a interface to bpf. dlt is the link layer type; d1611 2 a1612 2 void bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1645 1 a1645 1 void d1680 1 a1680 1 void d1898 21 @ 1.151 log @* remove just-for-kicks locking * KNF * remove outdated comment (quite a funny one to read in 2010, though) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.150 2009/12/20 09:36:06 dsl Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.150 2009/12/20 09:36:06 dsl Exp $"); d1081 1 a1081 1 if ((void **)bp->bif_driverp != &ifp->if_bpf) d1252 1 a1252 1 bpf_tap(void *arg, u_char *pkt, u_int pktlen) a1253 1 struct bpf_if *bp; a1263 1 bp = arg; d1342 1 a1342 1 bpf_mtap2(void *arg, void *data, u_int dlen, struct mbuf *m) a1343 1 struct bpf_if *bp = arg; d1366 1 a1366 1 bpf_mtap(void *arg, struct mbuf *m) a1368 1 struct bpf_if *bp = arg; d1396 1 a1396 1 bpf_mtap_af(void *arg, uint32_t af, struct mbuf *m) d1405 1 a1405 1 bpf_mtap(arg, &m0); d1409 1 a1409 1 bpf_mtap_et(void *arg, uint16_t et, struct mbuf *m) d1423 1 a1423 1 bpf_mtap(arg, &m0); d1434 1 a1434 1 bpf_mtap_sl_in(void *arg, u_char *chdr, struct mbuf **m) d1448 1 a1448 1 bpf_mtap(arg, *m); d1460 1 a1460 1 bpf_mtap_sl_out(void *arg, u_char *chdr, struct mbuf *m) d1477 1 a1477 1 bpf_mtap(arg, &m0); d1625 1 a1625 1 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, void *driverp) d1699 1 a1699 1 if ((void **)bp->bif_driverp == &ifp->if_bpf) @ 1.150 log @If a multithreaded app closes an fd while another thread is blocked in read/write/accept, then the expectation is that the blocked thread will exit and the close complete. Since only one fd is affected, but many fd can refer to the same file, the close code can only request the fs code unblock with ERESTART. Fixed for pipes and sockets, ERESTART will only be generated after such a close - so there should be no change for other programs. Also rename fo_abort() to fo_restart() (this used to be fo_drain()). Fixes PR/26567 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.149 2009/12/09 21:32:59 dsl Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.149 2009/12/09 21:32:59 dsl Exp $"); a363 6 * Mark a descriptor free by making it point to itself. * This is probably cheaper than marking with a constant since * the address should be in a register anyway. */ /* d370 1 a372 1 mutex_enter(&bpf_mtx); a373 1 mutex_exit(&bpf_mtx); @ 1.149 log @Rename fo_drain() to fo_abort(), 'drain' is used to mean 'wait for output do drain' in many places, whereas fo_drain() was called in order to force blocking read()/write() etc calls to return to userspace so that a close() call from a different thread can complete. In the sockets code comment out the broken code in the inner function, it was being called from compat code. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.148 2009/11/23 02:13:48 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.148 2009/11/23 02:13:48 rmind Exp $"); d168 1 a168 1 .fo_abort = fnullop_abort, @ 1.148 log @Remove some unecessary includes sys/user.h header. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.147 2009/10/05 17:58:15 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.147 2009/10/05 17:58:15 christos Exp $"); d168 1 a168 1 .fo_drain = fnullop_drain, @ 1.147 log @add the error from ifpromisc to the panic. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.146 2009/04/11 23:05:26 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.146 2009/04/11 23:05:26 christos Exp $"); a55 1 #include @ 1.146 log @Fix locking as Andy explained. Also fill in uid and gid like sys_pipe did. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.145 2009/04/11 15:47:33 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.145 2009/04/11 15:47:33 christos Exp $"); d345 1 a345 1 panic("bpf: ifpromisc failed"); d352 1 a352 1 panic("bpf_detachd: descriptor not in list"); @ 1.145 log @Fix PR/37878 and PR/37550: Provide stat(2) for all devices and don't use fbadop_stat. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.144 2009/04/04 10:12:51 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.144 2009/04/04 10:12:51 ad Exp $"); d1140 2 @ 1.144 log @Add fileops::fo_drain(), to be called from fd_close() when there is more than one active reference to a file descriptor. It should dislodge threads sleeping while holding a reference to the descriptor. Implemented only for sockets but should be extended to pipes, fifos, etc. Fixes the case of a multithreaded process doing something like the following, which would have hung until the process got a signal. thr0 accept(fd, ...) thr1 close(fd) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.143 2009/03/11 05:55:22 mrg Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.143 2009/03/11 05:55:22 mrg Exp $"); d61 1 d155 1 d166 1 a166 1 .fo_stat = fbadop_stat, d407 2 d483 1 d633 1 d1129 15 @ 1.143 log @like KERN_FILE2: *do* update "needed" when there is no count. we want userland to know what sort of size to provide.. while here, slightly normalise the previous to init_sysctl.c. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.142 2009/01/11 02:45:54 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.142 2009/01/11 02:45:54 christos Exp $"); d159 9 a167 8 bpf_read, bpf_write, bpf_ioctl, fnullop_fcntl, bpf_poll, fbadop_stat, bpf_close, bpf_kqfilter, @ 1.142 log @merge christos-time_t @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137.2.3 2008/12/28 20:53:44 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137.2.3 2008/12/28 20:53:44 christos Exp $"); d1849 3 a1851 5 if (elem_count > 0) { needed += elem_size; if (elem_count != INT_MAX) elem_count--; } @ 1.142.2.1 log @Sync with HEAD. Commit is split, to avoid a "too many arguments" protocol error. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.146 2009/04/11 23:05:26 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.146 2009/04/11 23:05:26 christos Exp $"); a60 1 #include a153 1 static int bpf_stat(struct file *, struct stat *); d159 8 a166 9 .fo_read = bpf_read, .fo_write = bpf_write, .fo_ioctl = bpf_ioctl, .fo_fcntl = fnullop_fcntl, .fo_poll = bpf_poll, .fo_stat = bpf_stat, .fo_close = bpf_close, .fo_kqfilter = bpf_kqfilter, .fo_drain = fnullop_drain, a403 2 getnanotime(&d->bd_btime); d->bd_atime = d->bd_mtime = d->bd_btime; a477 1 getnanotime(&d->bd_atime); a626 1 getnanotime(&d->bd_mtime); a1121 17 static int bpf_stat(struct file *fp, struct stat *st) { struct bpf_d *d = fp->f_data; (void)memset(st, 0, sizeof(*st)); KERNEL_LOCK(1, NULL); st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid); st->st_atimespec = d->bd_atime; st->st_mtimespec = d->bd_mtime; st->st_ctimespec = st->st_birthtimespec = d->bd_btime; st->st_uid = kauth_cred_geteuid(fp->f_cred); st->st_gid = kauth_cred_getegid(fp->f_cred); KERNEL_UNLOCK_ONE(NULL); return 0; } d1849 5 a1853 3 needed += elem_size; if (elem_count > 0 && elem_count != INT_MAX) elem_count--; @ 1.141 log @- add if_alloc (ours just mallocs), and if_initname and use them (from FreeBSD) - kill memsets where M_ZERO can be used. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.140 2008/05/21 13:48:52 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.140 2008/05/21 13:48:52 ad Exp $"); d143 1 a143 1 void *(*)(void *, const void *, size_t), struct timeval *); d866 30 a906 1 d1241 1 a1241 1 struct timeval tv; d1256 1 a1256 1 microtime(&tv); d1259 1 a1259 1 catchpacket(d, pkt, pktlen, slen, (void *)memcpy, &tv); d1304 1 a1304 1 struct timeval tv; d1315 1 a1315 1 microtime(&tv); d1318 1 a1318 1 catchpacket(d, marg, pktlen, slen, cpfn, &tv); d1481 1 a1481 1 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv) d1535 2 a1536 1 hp->bh_tstamp = *tv; @ 1.141.6.1 log @Pull up following revision(s) (requested by ad in ticket #661): sys/arch/xen/xen/xenevt.c: revision 1.32 sys/compat/svr4/svr4_net.c: revision 1.56 sys/compat/svr4_32/svr4_32_net.c: revision 1.19 sys/dev/dmover/dmover_io.c: revision 1.32 sys/dev/putter/putter.c: revision 1.21 sys/kern/kern_descrip.c: revision 1.190 sys/kern/kern_drvctl.c: revision 1.23 sys/kern/kern_event.c: revision 1.64 sys/kern/sys_mqueue.c: revision 1.14 sys/kern/sys_pipe.c: revision 1.109 sys/kern/sys_socket.c: revision 1.59 sys/kern/uipc_syscalls.c: revision 1.136 sys/kern/vfs_vnops.c: revision 1.164 sys/kern/uipc_socket.c: revision 1.188 sys/net/bpf.c: revision 1.144 sys/net/if_tap.c: revision 1.55 sys/opencrypto/cryptodev.c: revision 1.47 sys/sys/file.h: revision 1.67 sys/sys/param.h: patch sys/sys/socketvar.h: revision 1.119 Add fileops::fo_drain(), to be called from fd_close() when there is more than one active reference to a file descriptor. It should dislodge threads sleeping while holding a reference to the descriptor. Implemented only for sockets but should be extended to pipes, fifos, etc. Fixes the case of a multithreaded process doing something like the following, which would have hung until the process got a signal. thr0 accept(fd, ...) thr1 close(fd) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $"); d159 8 a166 9 .fo_read = bpf_read, .fo_write = bpf_write, .fo_ioctl = bpf_ioctl, .fo_fcntl = fnullop_fcntl, .fo_poll = bpf_poll, .fo_stat = fbadop_stat, .fo_close = bpf_close, .fo_kqfilter = bpf_kqfilter, .fo_drain = fnullop_drain, @ 1.141.6.1.6.1 log @Pull up following revision(s) (requested by spz in ticket #1874): sys/net/bpf.c: revision 1.176 via patch PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.176 2013/09/09 20:53:51 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141.6.1 2009/04/04 23:36:28 snj Exp $"); d1456 1 a1456 1 int totlen, curlen, caplen; a1470 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1509 1 hp->bh_caplen = caplen; d1513 1 a1513 1 (*cpfn)((u_char *)hp + hdrlen, pkt, caplen); @ 1.141.6.2 log @Pull up following revision(s) (requested by bouyer in ticket #1587): sys/net/bpf.c: revision 1.163 Allocate buffers with (M_WAITOK | M_CANFAIL) instead of M_NOWAIT. M_NOWAIT cause dhcpd on a low-memory server with lots of interfaces to occasionally fail to start with ENOBUFS; (M_WAITOK | M_CANFAIL) seems to fix this. Tested on 3 different dhcp servers. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d1531 1 a1531 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); d1534 1 a1534 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL); @ 1.141.6.2.2.1 log @Pull up following revision(s) (requested by spz in ticket #1874): sys/net/bpf.c: revision 1.176 via patch PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.176 2013/09/09 20:53:51 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141.6.2 2011/04/05 06:10:50 riz Exp $"); d1456 1 a1456 1 int totlen, curlen, caplen; a1470 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1509 1 hp->bh_caplen = caplen; d1513 1 a1513 1 (*cpfn)((u_char *)hp + hdrlen, pkt, caplen); @ 1.141.6.3 log @Pull up following revision(s) (requested by spz in ticket #1874): sys/net/bpf.c: revision 1.176 via patch PR/48198: Peter Bex: Avoid kernel panic caused by setting a very small bpf buffer size. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.176 2013/09/09 20:53:51 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141.6.2 2011/04/05 06:10:50 riz Exp $"); d1456 1 a1456 1 int totlen, curlen, caplen; a1470 7 /* * If we adjusted totlen to fit the bufsize, it could be that * totlen is smaller than hdrlen because of the link layer header. */ caplen = totlen - hdrlen; if (caplen < 0) caplen = 0; a1509 1 hp->bh_caplen = caplen; d1513 1 a1513 1 (*cpfn)((u_char *)hp + hdrlen, pkt, caplen); @ 1.141.4.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.142 2009/01/11 02:45:54 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.142 2009/01/11 02:45:54 christos Exp $"); d143 1 a143 1 void *(*)(void *, const void *, size_t), struct timespec *); a865 30 #ifdef BIOCGORTIMEOUT /* * Get read timeout. */ case BIOCGORTIMEOUT: { struct timeval50 *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } #endif #ifdef BIOCSORTIMEOUT /* * Set read timeout. */ case BIOCSORTIMEOUT: { struct timeval50 *tv = addr; /* Compute number of ticks. */ d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #endif d877 1 d1212 1 a1212 1 struct timespec ts; d1227 1 a1227 1 nanotime(&ts); d1230 1 a1230 1 catchpacket(d, pkt, pktlen, slen, memcpy, &ts); d1275 1 a1275 1 struct timespec ts; d1286 1 a1286 1 nanotime(&ts); d1289 1 a1289 1 catchpacket(d, marg, pktlen, slen, cpfn, &ts); d1452 1 a1452 1 void *(*cpfn)(void *, const void *, size_t), struct timespec *ts) d1506 1 a1506 2 hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; @ 1.141.4.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.141.4.1 2009/01/19 13:20:11 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141.4.1 2009/01/19 13:20:11 skrll Exp $"); a60 1 #include a153 1 static int bpf_stat(struct file *, struct stat *); d159 8 a166 9 .fo_read = bpf_read, .fo_write = bpf_write, .fo_ioctl = bpf_ioctl, .fo_fcntl = fnullop_fcntl, .fo_poll = bpf_poll, .fo_stat = bpf_stat, .fo_close = bpf_close, .fo_kqfilter = bpf_kqfilter, .fo_drain = fnullop_drain, a403 2 getnanotime(&d->bd_btime); d->bd_atime = d->bd_mtime = d->bd_btime; a477 1 getnanotime(&d->bd_atime); a626 1 getnanotime(&d->bd_mtime); a1121 17 static int bpf_stat(struct file *fp, struct stat *st) { struct bpf_d *d = fp->f_data; (void)memset(st, 0, sizeof(*st)); KERNEL_LOCK(1, NULL); st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid); st->st_atimespec = d->bd_atime; st->st_mtimespec = d->bd_mtime; st->st_ctimespec = st->st_birthtimespec = d->bd_btime; st->st_uid = kauth_cred_geteuid(fp->f_cred); st->st_gid = kauth_cred_getegid(fp->f_cred); KERNEL_UNLOCK_ONE(NULL); return 0; } d1849 5 a1853 3 needed += elem_size; if (elem_count > 0 && elem_count != INT_MAX) elem_count--; @ 1.140 log @Acquire kernel_lock in the bpf fileops. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $"); d400 1 a400 2 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK); (void)memset(d, 0, sizeof(*d)); a1316 1 /*###1278 [cc] warning: passing argument 2 of 'bpf_deliver' from incompatible pointer type%%%*/ @ 1.140.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $"); d400 2 a401 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); d1318 1 @ 1.139 log @Network protocol interrupts can now block on locks, so merge the globals proclist_mutex and proclist_lock into a single adaptive mutex (proc_lock). Implications: - Inspecting process state requires thread context, so signals can no longer be sent from a hardware interrupt handler. Signal activity must be deferred to a soft interrupt or kthread. - As the proc state locking is simplified, it's now safe to take exit() and wait() out from under kernel_lock. - The system spends less time at IPL_SCHED, and there is less lock activity. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.138 2008/04/20 15:27:10 scw Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.138 2008/04/20 15:27:10 scw Exp $"); d427 2 d451 2 d486 1 d501 1 d521 1 d540 1 d567 1 d622 4 a625 1 if (d->bd_bif == 0) d627 1 d631 2 a632 1 if (uio->uio_resid == 0) d634 1 d638 2 a639 1 if (error) d641 1 d644 1 d655 1 d708 1 d961 1 d1112 1 d1135 1 d1146 1 d1150 1 d1157 1 d1159 1 d1163 3 a1165 1 return (kn->kn_data > 0); d1178 2 d1187 1 d1196 1 @ 1.139.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $"); a60 1 #include d143 1 a143 1 void *(*)(void *, const void *, size_t), struct timespec *); a153 1 static int bpf_stat(struct file *, struct stat *); d159 8 a166 9 .fo_read = bpf_read, .fo_write = bpf_write, .fo_ioctl = bpf_ioctl, .fo_fcntl = fnullop_fcntl, .fo_poll = bpf_poll, .fo_stat = bpf_stat, .fo_close = bpf_close, .fo_kqfilter = bpf_kqfilter, .fo_drain = fnullop_drain, d400 2 a401 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); a404 2 getnanotime(&d->bd_btime); d->bd_atime = d->bd_mtime = d->bd_btime; a426 2 KERNEL_LOCK(1, NULL); a448 2 KERNEL_UNLOCK_ONE(NULL); a474 1 getnanotime(&d->bd_atime); a481 1 KERNEL_LOCK(1, NULL); a495 1 KERNEL_UNLOCK_ONE(NULL); a514 1 KERNEL_UNLOCK_ONE(NULL); a532 1 KERNEL_UNLOCK_ONE(NULL); a558 1 KERNEL_UNLOCK_ONE(NULL); d613 1 a613 4 KERNEL_LOCK(1, NULL); if (d->bd_bif == 0) { KERNEL_UNLOCK_ONE(NULL); a614 2 } getnanotime(&d->bd_mtime); d618 1 a618 2 if (uio->uio_resid == 0) { KERNEL_UNLOCK_ONE(NULL); a619 1 } d623 1 a623 2 if (error) { KERNEL_UNLOCK_ONE(NULL); a624 1 } a626 1 KERNEL_UNLOCK_ONE(NULL); a636 1 KERNEL_UNLOCK_ONE(NULL); a688 1 KERNEL_LOCK(1, NULL); a846 30 #ifdef BIOCGORTIMEOUT /* * Get read timeout. */ case BIOCGORTIMEOUT: { struct timeval50 *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } #endif #ifdef BIOCSORTIMEOUT /* * Set read timeout. */ case BIOCSORTIMEOUT: { struct timeval50 *tv = addr; /* Compute number of ticks. */ d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #endif d858 1 a940 1 KERNEL_UNLOCK_ONE(NULL); a1072 17 static int bpf_stat(struct file *fp, struct stat *st) { struct bpf_d *d = fp->f_data; (void)memset(st, 0, sizeof(*st)); KERNEL_LOCK(1, NULL); st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid); st->st_atimespec = d->bd_atime; st->st_mtimespec = d->bd_mtime; st->st_ctimespec = st->st_birthtimespec = d->bd_btime; st->st_uid = kauth_cred_geteuid(fp->f_cred); st->st_gid = kauth_cred_getegid(fp->f_cred); KERNEL_UNLOCK_ONE(NULL); return 0; } a1090 1 KERNEL_LOCK(1, NULL); a1112 1 KERNEL_UNLOCK_ONE(NULL); a1122 1 KERNEL_LOCK(1, NULL); a1125 1 KERNEL_UNLOCK_ONE(NULL); a1131 1 int rv; a1132 1 KERNEL_LOCK(1, NULL); d1136 1 a1136 3 rv = (kn->kn_data > 0); KERNEL_UNLOCK_ONE(NULL); return rv; a1148 2 KERNEL_LOCK(1, NULL); a1155 1 KERNEL_UNLOCK_ONE(NULL); a1163 1 KERNEL_UNLOCK_ONE(NULL); d1180 1 a1180 1 struct timespec ts; d1195 1 a1195 1 nanotime(&ts); d1198 1 a1198 1 catchpacket(d, pkt, pktlen, slen, memcpy, &ts); d1243 1 a1243 1 struct timespec ts; d1254 1 a1254 1 nanotime(&ts); d1257 1 a1257 1 catchpacket(d, marg, pktlen, slen, cpfn, &ts); d1285 1 d1421 1 a1421 1 void *(*cpfn)(void *, const void *, size_t), struct timespec *ts) d1475 1 a1475 2 hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; d1788 5 a1792 3 needed += elem_size; if (elem_count > 0 && elem_count != INT_MAX) elem_count--; @ 1.139.2.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.139.2.1 2009/05/04 08:14:14 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.139.2.1 2009/05/04 08:14:14 yamt Exp $"); d56 1 a61 3 #include #include #include d169 1 a169 1 .fo_restart = fnullop_restart, d345 1 a345 1 panic("%s: ifpromisc failed: %d", __func__, error); d352 1 a352 1 panic("%s: descriptor not in list", __func__); a362 3 static int doinit(void) { d364 5 a368 10 mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE); LIST_INIT(&bpf_list); bpf_gstats.bs_recv = 0; bpf_gstats.bs_drop = 0; bpf_gstats.bs_capt = 0; return 0; } d377 5 a381 1 static ONCE_DECL(control); d383 3 a385 1 RUN_ONCE(&control, doinit); d1089 1 a1089 1 if (bp->bif_driverp != &ifp->if_bpf) d1259 2 a1260 2 static void bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) d1262 1 d1273 1 d1351 2 a1352 2 static void bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) d1354 1 d1376 2 a1377 2 static void bpf_mtap(struct bpf_if *bp, struct mbuf *m) d1380 1 d1407 2 a1408 2 static void bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1417 1 a1417 1 bpf_mtap(bp, &m0); d1420 2 a1421 2 static void bpf_mtap_et(struct bpf_if *bp, uint16_t et, struct mbuf *m) d1435 1 a1435 1 bpf_mtap(bp, &m0); d1438 1 d1445 2 a1446 2 static void bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) d1460 1 a1460 1 bpf_mtap(bp, *m); d1471 2 a1472 2 static void bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) d1489 1 a1489 1 bpf_mtap(bp, &m0); d1493 1 d1621 12 a1632 1 * Attach an interface to bpf. dlt is the link layer type; d1636 2 a1637 2 static void bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1670 1 a1670 1 static void d1705 1 a1705 1 static void d1711 1 a1711 1 if (bp->bif_driverp == &ifp->if_bpf) a1922 55 struct bpf_ops bpf_ops_kernel = { .bpf_attach = bpfattach, .bpf_detach = bpfdetach, .bpf_change_type = bpf_change_type, .bpf_tap = bpf_tap, .bpf_mtap = bpf_mtap, .bpf_mtap2 = bpf_mtap2, .bpf_mtap_af = bpf_mtap_af, .bpf_mtap_et = bpf_mtap_et, .bpf_mtap_sl_in = bpf_mtap_sl_in, .bpf_mtap_sl_out = bpf_mtap_sl_out, }; MODULE(MODULE_CLASS_DRIVER, bpf, NULL); static int bpf_modcmd(modcmd_t cmd, void *arg) { devmajor_t bmajor, cmajor; int error; bmajor = cmajor = NODEVMAJOR; switch (cmd) { case MODULE_CMD_INIT: bpfilterattach(0); error = devsw_attach("bpf", NULL, &bmajor, &bpf_cdevsw, &cmajor); if (error == EEXIST) error = 0; /* maybe built-in ... improve eventually */ if (error) break; bpf_ops_handover_enter(&bpf_ops_kernel); atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel); bpf_ops_handover_exit(); break; case MODULE_CMD_FINI: /* * bpf_ops is not (yet) referenced in the callers before * attach. maybe other issues too. "safety first". */ error = EOPNOTSUPP; break; default: error = ENOTTY; break; } return error; } @ 1.139.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.139.2.2 2010/03/11 15:04:26 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.139.2.2 2010/03/11 15:04:26 yamt Exp $"); a409 1 d->bd_feedback = 0; d625 1 a625 1 struct mbuf *m, *mc; a661 10 if (d->bd_feedback) { mc = m_dup(m, 0, M_COPYALL, M_NOWAIT); if (mc != NULL) mc->m_pkthdr.rcvif = ifp; /* Set M_PROMISC for outgoing packets to be discarded. */ if (1 /*d->bd_direction == BPF_D_INOUT*/) m->m_flags |= M_PROMISC; } else mc = NULL; a663 6 if (mc != NULL) { if (error == 0) (*ifp->if_input)(ifp, mc); } else m_freem(mc); a706 4 * BIOCSFEEDBACK Set packet feedback mode. * BIOCGFEEDBACK Get packet feedback mode. * BIOCGSEESENT Get "see sent packets" mode. * BIOCSSEESENT Set "see sent packets" mode. a977 14 /* * Set "feed packets from bpf back to input" mode */ case BIOCSFEEDBACK: d->bd_feedback = *(u_int *)addr; break; /* * Get "feed packets from bpf back to input" mode */ case BIOCGFEEDBACK: *(u_int *)addr = d->bd_feedback; break; d1264 1 a1264 1 _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) d1354 1 a1354 1 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m) a1358 6 /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { m->m_flags &= ~M_PROMISC; return; } d1378 1 a1378 1 _bpf_mtap(struct bpf_if *bp, struct mbuf *m) a1383 6 /* Skip outgoing duplicate packets. */ if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) { m->m_flags &= ~M_PROMISC; return; } d1391 1 d1408 1 a1408 1 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m) d1417 19 a1435 1 _bpf_mtap(bp, &m0); d1445 1 a1445 1 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m) d1459 1 a1459 1 _bpf_mtap(bp, *m); d1471 1 a1471 1 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m) d1488 1 a1488 1 _bpf_mtap(bp, &m0); d1624 1 a1624 1 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) d1658 1 a1658 1 _bpfdetach(struct ifnet *ifp) d1693 1 a1693 1 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1912 11 a1922 10 .bpf_attach = _bpfattach, .bpf_detach = _bpfdetach, .bpf_change_type = _bpf_change_type, .bpf_tap = _bpf_tap, .bpf_mtap = _bpf_mtap, .bpf_mtap2 = _bpf_mtap2, .bpf_mtap_af = _bpf_mtap_af, .bpf_mtap_sl_in = _bpf_mtap_sl_in, .bpf_mtap_sl_out = _bpf_mtap_sl_out, d1952 2 a1953 18 * While there is no reference counting for bpf callers, * unload could at least in theory be done similarly to * system call disestablishment. This should even be * a little simpler: * * 1) replace op vector with stubs * 2) post update to all cpus with xc * 3) check that nobody is in bpf anymore * (it's doubtful we'd want something like l_sysent, * but we could do something like *signed* percpu * counters. if the sum is 0, we're good). * 4) if fail, unroll changes * * NOTE: change won't be atomic to the outside. some * packets may be not captured even if unload is * not succesful. I think packet capture not working * is a perfectly logical consequence of trying to * disable packet capture. @ 1.139.4.1 log @Sync w/ -current. 34 merge conflicts to follow. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.139 2008/04/24 15:35:30 ad Exp $"); d400 2 a401 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); a426 2 KERNEL_LOCK(1, NULL); a448 2 KERNEL_UNLOCK_ONE(NULL); a481 1 KERNEL_LOCK(1, NULL); a495 1 KERNEL_UNLOCK_ONE(NULL); a514 1 KERNEL_UNLOCK_ONE(NULL); a532 1 KERNEL_UNLOCK_ONE(NULL); a558 1 KERNEL_UNLOCK_ONE(NULL); d613 1 a613 4 KERNEL_LOCK(1, NULL); if (d->bd_bif == 0) { KERNEL_UNLOCK_ONE(NULL); a614 1 } d618 1 a618 2 if (uio->uio_resid == 0) { KERNEL_UNLOCK_ONE(NULL); a619 1 } d623 1 a623 2 if (error) { KERNEL_UNLOCK_ONE(NULL); a624 1 } a626 1 KERNEL_UNLOCK_ONE(NULL); a636 1 KERNEL_UNLOCK_ONE(NULL); a688 1 KERNEL_LOCK(1, NULL); a940 1 KERNEL_UNLOCK_ONE(NULL); a1090 1 KERNEL_LOCK(1, NULL); a1112 1 KERNEL_UNLOCK_ONE(NULL); a1122 1 KERNEL_LOCK(1, NULL); a1125 1 KERNEL_UNLOCK_ONE(NULL); a1131 1 int rv; a1132 1 KERNEL_LOCK(1, NULL); d1136 1 a1136 3 rv = (kn->kn_data > 0); KERNEL_UNLOCK_ONE(NULL); return rv; a1148 2 KERNEL_LOCK(1, NULL); a1155 1 KERNEL_UNLOCK_ONE(NULL); a1163 1 KERNEL_UNLOCK_ONE(NULL); d1285 1 @ 1.138 log @Pull in a couple of fixes from FreeBSD, the first of which addresses a failure of wpa_supplicant(8) to re-key promptly, as reported in http://mail-index.netbsd.org/tech-net/2008/04/18/msg000459.html - Make bpf's read timeout work more correctly with select/poll. - A fix for catchpacket() which delays calling bpf_wakeup() until the state has been updated. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $"); d156 1 d407 1 d445 1 d571 1 a571 2 fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); d575 9 @ 1.137 log @- put const back, no reason to modify the prototype. 1. Please don't cast function pointers to (void *), use the full function prototype cast; this is for archs where a function pointer is not a regular pointer. 2. Compare pointers to NULL not 0. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.136 2008/03/24 12:24:37 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.136 2008/03/24 12:24:37 yamt Exp $"); d1087 3 a1089 2 if ((d->bd_hlen != 0) || (d->bd_immediate && d->bd_slen != 0)) { a1090 5 } else if (d->bd_state == BPF_TIMED_OUT) { if (d->bd_slen != 0) revents |= events & (POLLIN | POLLRDNORM); else revents |= events & POLLIN; d1415 1 d1449 1 a1449 1 bpf_wakeup(d); d1451 7 d1477 1 a1477 6 if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ @ 1.137.4.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $"); a155 1 static void bpf_softintr(void *); a405 1 d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d); a442 1 softint_disestablish(d->bd_sih); d568 2 a569 1 softint_schedule(d->bd_sih); a572 9 static void bpf_softintr(void *cookie) { struct bpf_d *d; d = cookie; if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); } d1087 2 a1088 3 if (d->bd_hlen != 0 || ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { d1090 5 a1418 1 int do_wakeup = 0; d1452 1 a1452 1 do_wakeup = 1; a1453 7 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ do_wakeup = 1; d1473 6 a1478 1 if (do_wakeup) @ 1.137.4.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137.4.1 2008/05/18 12:35:26 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137.4.1 2008/05/18 12:35:26 yamt Exp $"); a426 2 KERNEL_LOCK(1, NULL); a448 2 KERNEL_UNLOCK_ONE(NULL); a481 1 KERNEL_LOCK(1, NULL); a495 1 KERNEL_UNLOCK_ONE(NULL); a514 1 KERNEL_UNLOCK_ONE(NULL); a532 1 KERNEL_UNLOCK_ONE(NULL); a558 1 KERNEL_UNLOCK_ONE(NULL); d613 1 a613 4 KERNEL_LOCK(1, NULL); if (d->bd_bif == 0) { KERNEL_UNLOCK_ONE(NULL); a614 1 } d618 1 a618 2 if (uio->uio_resid == 0) { KERNEL_UNLOCK_ONE(NULL); a619 1 } d623 1 a623 2 if (error) { KERNEL_UNLOCK_ONE(NULL); a624 1 } a626 1 KERNEL_UNLOCK_ONE(NULL); a636 1 KERNEL_UNLOCK_ONE(NULL); a688 1 KERNEL_LOCK(1, NULL); a940 1 KERNEL_UNLOCK_ONE(NULL); a1090 1 KERNEL_LOCK(1, NULL); a1112 1 KERNEL_UNLOCK_ONE(NULL); a1122 1 KERNEL_LOCK(1, NULL); a1125 1 KERNEL_UNLOCK_ONE(NULL); a1131 1 int rv; a1132 1 KERNEL_LOCK(1, NULL); d1136 1 a1136 3 rv = (kn->kn_data > 0); KERNEL_UNLOCK_ONE(NULL); return rv; a1148 2 KERNEL_LOCK(1, NULL); a1155 1 KERNEL_UNLOCK_ONE(NULL); a1163 1 KERNEL_UNLOCK_ONE(NULL); @ 1.137.4.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137.4.2 2008/06/04 02:05:47 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137.4.2 2008/06/04 02:05:47 yamt Exp $"); d400 2 a401 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); d1318 1 @ 1.137.2.1 log @Welcome to the time_t=long long dev_t=uint64_t branch. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137 2008/03/26 02:21:52 christos Exp $"); d143 1 a143 1 void *(*)(void *, const void *, size_t), struct timespec *); a835 30 #ifdef BIOCGORTIMEOUT /* * Get read timeout. */ case BIOCGORTIMEOUT: { struct timeval50 *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } #endif #ifdef BIOCSORTIMEOUT /* * Set read timeout. */ case BIOCSORTIMEOUT: { struct timeval50 *tv = addr; /* Compute number of ticks. */ d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #endif d847 1 d1173 1 a1173 1 struct timespec ts; d1188 1 a1188 1 nanotime(&ts); d1191 1 a1191 1 catchpacket(d, pkt, pktlen, slen, memcpy, &ts); d1236 1 a1236 1 struct timespec ts; d1247 1 a1247 1 nanotime(&ts); d1250 1 a1250 1 catchpacket(d, marg, pktlen, slen, cpfn, &ts); d1414 1 a1414 1 void *(*cpfn)(void *, const void *, size_t), struct timespec *ts) d1460 1 a1460 2 hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_nsec = ts->tv_nsec; @ 1.137.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.141 2008/06/15 16:37:21 christos Exp $"); a155 1 static void bpf_softintr(void *); d399 2 a400 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); a405 1 d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d); a424 2 KERNEL_LOCK(1, NULL); a442 1 softint_disestablish(d->bd_sih); a445 2 KERNEL_UNLOCK_ONE(NULL); a478 1 KERNEL_LOCK(1, NULL); a492 1 KERNEL_UNLOCK_ONE(NULL); a511 1 KERNEL_UNLOCK_ONE(NULL); a529 1 KERNEL_UNLOCK_ONE(NULL); a555 1 KERNEL_UNLOCK_ONE(NULL); d568 2 a569 1 softint_schedule(d->bd_sih); a572 9 static void bpf_softintr(void *cookie) { struct bpf_d *d; d = cookie; if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); } d602 1 a602 4 KERNEL_LOCK(1, NULL); if (d->bd_bif == 0) { KERNEL_UNLOCK_ONE(NULL); a603 1 } d607 1 a607 2 if (uio->uio_resid == 0) { KERNEL_UNLOCK_ONE(NULL); a608 1 } d612 1 a612 2 if (error) { KERNEL_UNLOCK_ONE(NULL); a613 1 } a615 1 KERNEL_UNLOCK_ONE(NULL); a625 1 KERNEL_UNLOCK_ONE(NULL); a677 1 KERNEL_LOCK(1, NULL); a958 1 KERNEL_UNLOCK_ONE(NULL); a1108 1 KERNEL_LOCK(1, NULL); d1116 2 a1117 3 if (d->bd_hlen != 0 || ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { d1119 5 a1134 1 KERNEL_UNLOCK_ONE(NULL); a1144 1 KERNEL_LOCK(1, NULL); a1147 1 KERNEL_UNLOCK_ONE(NULL); a1153 1 int rv; a1154 1 KERNEL_LOCK(1, NULL); d1158 1 a1158 3 rv = (kn->kn_data > 0); KERNEL_UNLOCK_ONE(NULL); return rv; a1170 2 KERNEL_LOCK(1, NULL); a1177 1 KERNEL_UNLOCK_ONE(NULL); a1185 1 KERNEL_UNLOCK_ONE(NULL); d1307 1 a1447 1 int do_wakeup = 0; d1481 1 a1481 1 do_wakeup = 1; a1482 7 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ do_wakeup = 1; d1503 6 a1508 1 if (do_wakeup) @ 1.137.2.3 log @back to usecs now for source compatibility @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.137.2.2 2008/11/01 21:22:28 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.137.2.2 2008/11/01 21:22:28 christos Exp $"); d1536 1 a1536 1 hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; @ 1.136 log @merge yamt-lazymbuf branch. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.135 2008/03/21 21:55:00 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.135 2008/03/21 21:55:00 ad Exp $"); d129 1 a129 1 void *(*cpfn)(void *, void *, size_t), d133 1 a133 1 static void *bpf_mcpy(void *, void *, size_t); d143 1 a143 1 void *(*)(void *, void *, size_t), struct timeval*); d1201 1 a1201 1 bpf_mcpy(void *dst_arg, void *src_arg, size_t len) d1203 1 a1203 1 struct mbuf *m; d1210 1 a1210 1 if (m == 0) d1213 1 a1213 1 memcpy(dst, mtod(m, void *), count); d1218 1 a1218 1 return (dst_arg); d1231 1 a1231 1 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, void *, size_t), d1278 1 d1288 1 a1288 1 void *(*cpfn)(void *, void *, size_t); d1300 1 d1414 1 a1414 1 void *(*cpfn)(void *, void *, size_t), struct timeval *tv) @ 1.135 log @Catch up with descriptor handling changes. See kern_descrip.c revision 1.173 for details. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.134 2008/03/01 14:16:52 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.134 2008/03/01 14:16:52 rmind Exp $"); d129 1 a129 1 void *(*cpfn)(void *, const void *, size_t), d133 1 a133 1 static void *bpf_mcpy(void *, const void *, size_t); d143 1 a143 2 void *(*)(void *, const void *, size_t), struct timeval*); d1191 1 a1191 1 catchpacket(d, pkt, pktlen, slen, memcpy, &tv); d1201 1 a1201 1 bpf_mcpy(void *dst_arg, const void *src_arg, size_t len) d1203 1 a1203 1 const struct mbuf *m; d1231 1 a1231 1 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t), d1287 1 a1287 1 void *(*cpfn)(void *, const void *, size_t); d1295 1 a1295 1 cpfn = memcpy; d1412 1 a1412 1 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv) @ 1.134 log @Welcome to 4.99.55: - Add a lot of missing selinit() and seldestroy() calls. - Merge selwakeup() and selnotify() calls into a single selnotify(). - Add an additional 'events' argument to selnotify() call. It will indicate which event (POLL_IN, POLL_OUT, etc) happen. If unknown, zero may be used. Note: please pass appropriate value of 'events' where possible. Proposed on: @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.133 2008/02/20 17:05:52 matt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.133 2008/02/20 17:05:52 matt Exp $"); d153 3 a155 3 static int bpf_ioctl(struct file *, u_long, void *, struct lwp *); static int bpf_poll(struct file *, int, struct lwp *); static int bpf_close(struct file *, struct lwp *); d397 1 a397 1 if ((error = falloc(l, &fp, &fd)) != 0) d412 1 a412 1 return fdclone(l, fp, fd, flag, &bpf_fileops, d); d421 1 a421 1 bpf_close(struct file *fp, struct lwp *l) d429 1 a429 1 d->bd_pid = l->l_proc->p_pid; d671 1 a671 1 bpf_ioctl(struct file *fp, u_long cmd, void *addr, struct lwp *l) d679 1 a679 1 d->bd_pid = l->l_proc->p_pid; d923 1 a923 1 error = fsetown(l->l_proc, &d->bd_pgid, cmd, addr); d928 1 a928 1 error = fgetown(l->l_proc, d->bd_pgid, cmd, addr); d1072 1 a1072 1 bpf_poll(struct file *fp, int events, struct lwp *l) d1081 1 a1081 1 d->bd_pid = l->l_proc->p_pid; d1097 1 a1097 1 selrecord(l, &d->bd_sel); @ 1.133 log @s/u_\(int[0-9]*_t\)/u\1/g (change u_int*_t to uint*_t) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.132 2007/12/20 18:13:26 dyoung Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.132 2007/12/20 18:13:26 dyoung Exp $"); d406 1 d443 1 d571 1 a571 1 selnotify(&d->bd_sel, 0); d1069 1 a1069 1 * Otherwise, return false but make a note that a selwakeup() must be done. @ 1.133.6.1 log @- etc/devfsd.conf: Add some rules to give nodes like /dev/tty and /dev/null better default modes, i.e. 0666. - sbin/init: Run devfsd -s before going to multiuser. - sys/arch: Provide arm32, i386, sparc with a mem_init() function to request device nodes for /dev/null, /dev/zero, etc. - sys/dev: Convert rnd, wd, agp, raid, cd, sd, wsdisplay, wskbd, wsmouse, wsmux, tty, bpf, swap to devfs New World Order. - sys/fs/devfs: Make the visibility attribute of device nodes configurable. Also provide a function to mount a devfs on boot. - sys/kern: Add a new boot flag, -n. This disables devfs support. Unless the -n flag is specified the kernel will mount a devfs file system on boot. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.133 2008/02/20 17:05:52 matt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.133 2008/02/20 17:05:52 matt Exp $"); a1693 7 void bpf_init(void) { int major = cdevsw_lookup_major(&bpf_cdevsw); device_register_name(makedev(major, 0), NULL, true, DEV_OTHER, "bpf"); } @ 1.133.6.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d143 2 a144 1 void *(*)(void *, const void *, size_t), struct timeval *); d153 3 a155 3 static int bpf_ioctl(struct file *, u_long, void *); static int bpf_poll(struct file *, int); static int bpf_close(struct file *); d397 1 a397 1 if ((error = fd_allocfile(&fp, &fd)) != 0) a405 1 selinit(&d->bd_sel); d411 1 a411 1 return fd_clone(fp, fd, flag, &bpf_fileops, d); d420 1 a420 1 bpf_close(struct file *fp) d428 1 a428 1 d->bd_pid = curproc->p_pid; a441 1 seldestroy(&d->bd_sel); d569 1 a569 1 selnotify(&d->bd_sel, 0, 0); d669 1 a669 1 bpf_ioctl(struct file *fp, u_long cmd, void *addr) d677 1 a677 1 d->bd_pid = curproc->p_pid; d921 1 a921 1 error = fsetown(&d->bd_pgid, cmd, addr); d926 1 a926 1 error = fgetown(d->bd_pgid, cmd, addr); d1067 1 a1067 1 * Otherwise, return false but make a note that a selnotify() must be done. d1070 1 a1070 1 bpf_poll(struct file *fp, int events) d1079 1 a1079 1 d->bd_pid = curproc->p_pid; d1095 1 a1095 1 selrecord(curlwp, &d->bd_sel); d1190 1 a1190 1 catchpacket(d, pkt, pktlen, slen, (void *)memcpy, &tv); d1209 1 a1209 1 if (m == NULL) d1212 1 a1212 1 memcpy(dst, mtod(m, const void *), count); d1217 1 a1217 1 return dst_arg; a1276 1 /*###1278 [cc] warning: passing argument 2 of 'bpf_deliver' from incompatible pointer type%%%*/ d1294 1 a1294 1 cpfn = (void *)memcpy; a1297 1 /*###1299 [cc] warning: assignment from incompatible pointer type%%%*/ d1411 1 a1411 1 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv) @ 1.133.6.3 log @Sync with HEAD. @ text @a155 1 static void bpf_softintr(void *); a405 1 d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d); a424 2 KERNEL_LOCK(1, NULL); a442 1 softint_disestablish(d->bd_sih); a445 2 KERNEL_UNLOCK_ONE(NULL); a478 1 KERNEL_LOCK(1, NULL); a492 1 KERNEL_UNLOCK_ONE(NULL); a511 1 KERNEL_UNLOCK_ONE(NULL); a529 1 KERNEL_UNLOCK_ONE(NULL); a555 1 KERNEL_UNLOCK_ONE(NULL); d568 2 a569 1 softint_schedule(d->bd_sih); a572 9 static void bpf_softintr(void *cookie) { struct bpf_d *d; d = cookie; if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); } d602 1 a602 4 KERNEL_LOCK(1, NULL); if (d->bd_bif == 0) { KERNEL_UNLOCK_ONE(NULL); a603 1 } d607 1 a607 2 if (uio->uio_resid == 0) { KERNEL_UNLOCK_ONE(NULL); a608 1 } d612 1 a612 2 if (error) { KERNEL_UNLOCK_ONE(NULL); a613 1 } a615 1 KERNEL_UNLOCK_ONE(NULL); a625 1 KERNEL_UNLOCK_ONE(NULL); a677 1 KERNEL_LOCK(1, NULL); a929 1 KERNEL_UNLOCK_ONE(NULL); a1079 1 KERNEL_LOCK(1, NULL); d1087 2 a1088 3 if (d->bd_hlen != 0 || ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { d1090 5 a1105 1 KERNEL_UNLOCK_ONE(NULL); a1115 1 KERNEL_LOCK(1, NULL); a1118 1 KERNEL_UNLOCK_ONE(NULL); a1124 1 int rv; a1125 1 KERNEL_LOCK(1, NULL); d1129 1 a1129 3 rv = (kn->kn_data > 0); KERNEL_UNLOCK_ONE(NULL); return rv; a1141 2 KERNEL_LOCK(1, NULL); a1148 1 KERNEL_UNLOCK_ONE(NULL); a1156 1 KERNEL_UNLOCK_ONE(NULL); a1418 1 int do_wakeup = 0; d1452 1 a1452 1 do_wakeup = 1; a1453 7 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ do_wakeup = 1; d1473 6 a1478 1 if (do_wakeup) @ 1.133.6.4 log @Sync with HEAD. @ text @d400 2 a401 1 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO); d1318 1 @ 1.133.6.5 log @Sync with HEAD. @ text @d143 1 a143 1 void *(*)(void *, const void *, size_t), struct timespec *); a865 30 #ifdef BIOCGORTIMEOUT /* * Get read timeout. */ case BIOCGORTIMEOUT: { struct timeval50 *tv = addr; tv->tv_sec = d->bd_rtout / hz; tv->tv_usec = (d->bd_rtout % hz) * tick; break; } #endif #ifdef BIOCSORTIMEOUT /* * Set read timeout. */ case BIOCSORTIMEOUT: { struct timeval50 *tv = addr; /* Compute number of ticks. */ d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick; if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; break; } #endif d877 1 d1212 1 a1212 1 struct timespec ts; d1227 1 a1227 1 nanotime(&ts); d1230 1 a1230 1 catchpacket(d, pkt, pktlen, slen, memcpy, &ts); d1275 1 a1275 1 struct timespec ts; d1286 1 a1286 1 nanotime(&ts); d1289 1 a1289 1 catchpacket(d, marg, pktlen, slen, cpfn, &ts); d1452 1 a1452 1 void *(*cpfn)(void *, const void *, size_t), struct timespec *ts) d1506 1 a1506 2 hp->bh_tstamp.tv_sec = ts->tv_sec; hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000; @ 1.133.2.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.134 2008/03/01 14:16:52 rmind Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.134 2008/03/01 14:16:52 rmind Exp $"); a405 1 selinit(&d->bd_sel); a441 1 seldestroy(&d->bd_sel); d569 1 a569 1 selnotify(&d->bd_sel, 0, 0); d1067 1 a1067 1 * Otherwise, return false but make a note that a selnotify() must be done. @ 1.132 log @Use LIST_FOREACH(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $"); d1314 1 a1314 1 bpf_mtap_af(void *arg, u_int32_t af, struct mbuf *m) d1327 1 a1327 1 bpf_mtap_et(void *arg, u_int16_t et, struct mbuf *m) d1336 4 a1339 4 ((u_int32_t *)m0.m_data)[0] = 0; ((u_int32_t *)m0.m_data)[1] = 0; ((u_int32_t *)m0.m_data)[2] = 0; ((u_int16_t *)m0.m_data)[6] = et; @ 1.131 log @Do not "return 1" from kqfilter for errors. That value is passed directly to the userland caller and results in a mysterious EPERM. Instead, return EINVAL or something else sensible depending on the case. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.130 2007/07/11 21:26:53 xtraeme Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.130 2007/07/11 21:26:53 xtraeme Exp $"); d1580 1 a1580 1 for (d = LIST_FIRST(&bpf_list); d != NULL; d = LIST_NEXT(d, bd_list)) { @ 1.131.4.1 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d1580 1 a1580 1 LIST_FOREACH(d, &bpf_list, bd_list) { @ 1.130 log @Replace a simple lock with a mutex and make it static (as it's only used on this file). Ok by ad@@. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.129 2007/07/09 21:10:59 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.129 2007/07/09 21:10:59 ad Exp $"); d1148 1 a1148 1 return (1); @ 1.130.8.1 log @sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.130 2007/07/11 21:26:53 xtraeme Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.130 2007/07/11 21:26:53 xtraeme Exp $"); d1148 1 a1148 1 return (EINVAL); d1580 1 a1580 1 LIST_FOREACH(d, &bpf_list, bd_list) { @ 1.130.8.2 log @sync with HEAD @ text @d1 1 a1 1 /* bpf.c,v 1.130.8.1 2008/01/09 01:57:07 matt Exp */ d42 1 a42 1 __KERNEL_RCSID(0, "bpf.c,v 1.130.8.1 2008/01/09 01:57:07 matt Exp"); a405 1 selinit(&d->bd_sel); a441 1 seldestroy(&d->bd_sel); d569 1 a569 1 selnotify(&d->bd_sel, 0, 0); d1067 1 a1067 1 * Otherwise, return false but make a note that a selnotify() must be done. d1314 1 a1314 1 bpf_mtap_af(void *arg, uint32_t af, struct mbuf *m) d1327 1 a1327 1 bpf_mtap_et(void *arg, uint16_t et, struct mbuf *m) d1336 4 a1339 4 ((uint32_t *)m0.m_data)[0] = 0; ((uint32_t *)m0.m_data)[1] = 0; ((uint32_t *)m0.m_data)[2] = 0; ((uint16_t *)m0.m_data)[6] = et; @ 1.130.6.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $"); d1148 1 a1148 1 return (EINVAL); @ 1.130.14.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $"); d1148 1 a1148 1 return (EINVAL); @ 1.130.14.2 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.132 2007/12/20 18:13:26 dyoung Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.132 2007/12/20 18:13:26 dyoung Exp $"); d1580 1 a1580 1 LIST_FOREACH(d, &bpf_list, bd_list) { @ 1.130.16.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.131 2007/12/05 17:20:00 pooka Exp $"); d1148 1 a1148 1 return (EINVAL); @ 1.130.16.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.130.16.1 2007/12/08 17:57:52 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.130.16.1 2007/12/08 17:57:52 ad Exp $"); d1580 1 a1580 1 LIST_FOREACH(d, &bpf_list, bd_list) { @ 1.129 log @Merge some of the less invasive changes from the vmlocking branch: - kthread, callout, devsw API changes - select()/poll() improvements - miscellaneous MT safety improvements @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.128 2007/05/30 21:02:03 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.128 2007/05/30 21:02:03 christos Exp $"); d118 1 a118 1 struct simplelock bpf_slock; d374 1 a374 1 simple_lock_init(&bpf_slock); d376 1 a376 1 simple_lock(&bpf_slock); d378 1 a378 1 simple_unlock(&bpf_slock); d407 1 a407 1 simple_lock(&bpf_slock); d409 1 a409 1 simple_unlock(&bpf_slock); d438 1 a438 1 simple_lock(&bpf_slock); d440 1 a440 1 simple_unlock(&bpf_slock); d1746 1 a1746 1 simple_lock(&bpf_slock); d1781 1 a1781 1 simple_unlock(&bpf_slock); @ 1.128 log @Move the nasty ifdefs in one place. Requested by ad and dyoung. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.127 2007/05/29 21:32:29 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.127 2007/05/29 21:32:29 christos Exp $"); d405 1 a405 1 callout_init(&d->bd_callout); d441 1 a569 2 /* XXX */ d->bd_sel.sel_pid = 0; @ 1.127 log @Add a sockaddr_storage member to "struct ifreq" maintaining backwards compatibility with the older ioctls. This avoids stack smashing and abuse of "struct sockaddr" when ioctls placed "struct sockaddr_foo's" that were longer than "struct sockaddr". XXX: Some of the emulations might be broken; I tried to add code for them but I did not test them. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126 2007/03/04 06:03:14 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126 2007/03/04 06:03:14 christos Exp $"); a45 1 #include "opt_compat_netbsd.h" a87 4 #if defined(COMPAT_09) || defined(COMPAT_10) || defined(COMPAT_11) || \ defined(COMPAT_12) || defined(COMPAT_13) || defined(COMPAT_14) || \ defined(COMPAT_15) || defined(COMPAT_16) || defined(COMPAT_20) || \ defined(COMPAT_30) || defined(COMPAT_40) a88 1 #endif @ 1.126 log @Kill caddr_t; there will be some MI fallout, but it will be fixed shortly. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $ */ d42 8 a49 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $"); d88 6 a93 4 #if defined(_KERNEL_OPT) #include "opt_bpf.h" #include "sl.h" #include "strip.h" d808 3 d821 3 @ 1.126.4.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.129 2007/07/09 21:10:59 ad Exp $ */ d42 1 a42 7 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.129 2007/07/09 21:10:59 ad Exp $"); #if defined(_KERNEL_OPT) #include "opt_bpf.h" #include "sl.h" #include "strip.h" #endif d81 5 a85 2 #include d402 1 a402 1 callout_init(&d->bd_callout, 0); a437 1 callout_destroy(&d->bd_callout); d566 2 a798 3 #ifdef OBIOCGETIF case OBIOCGETIF: #endif a808 3 #ifdef OBIOCSETIF case OBIOCSETIF: #endif @ 1.126.2.1 log @Changes to select/poll: - Make them MP safe and decouple from the proc locks. - selwakeup: don't call p_find, or traverse per-proc LWP lists (ouch). - selwakeup: don't lock the sleep queue unless we need to. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126 2007/03/04 06:03:14 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126 2007/03/04 06:03:14 christos Exp $"); d566 2 @ 1.126.2.2 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126.2.1 2007/04/10 00:22:12 ad Exp $ */ d42 1 a42 7 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126.2.1 2007/04/10 00:22:12 ad Exp $"); #if defined(_KERNEL_OPT) #include "opt_bpf.h" #include "sl.h" #include "strip.h" #endif d81 5 a85 2 #include a796 3 #ifdef OBIOCGETIF case OBIOCGETIF: #endif a806 3 #ifdef OBIOCSETIF case OBIOCSETIF: #endif @ 1.126.2.3 log @Adapt to callout API change. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126.2.2 2007/06/09 23:58:09 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126.2.2 2007/06/09 23:58:09 ad Exp $"); d405 1 a405 1 callout_init(&d->bd_callout, 0); a440 1 callout_destroy(&d->bd_callout); @ 1.126.2.4 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126.2.3 2007/07/01 21:50:41 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126.2.3 2007/07/01 21:50:41 ad Exp $"); d118 1 a118 1 static kmutex_t bpf_mtx; d374 1 a374 1 mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE); d376 1 a376 1 mutex_enter(&bpf_mtx); d378 1 a378 1 mutex_exit(&bpf_mtx); d1746 1 a1746 1 mutex_enter(&bpf_mtx); d1781 1 a1781 1 mutex_exit(&bpf_mtx); @ 1.126.2.5 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.126.2.4 2007/07/15 13:27:52 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.126.2.4 2007/07/15 13:27:52 ad Exp $"); d407 1 a407 1 mutex_enter(&bpf_mtx); d409 1 a409 1 mutex_exit(&bpf_mtx); d438 1 a438 1 mutex_enter(&bpf_mtx); d440 1 a440 1 mutex_exit(&bpf_mtx); @ 1.125 log @__unused removal on arguments; approved by core. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.124 2006/10/25 20:28:45 elad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.124 2006/10/25 20:28:45 elad Exp $"); d1013 1 a1013 1 if ((caddr_t *)bp->bif_driverp != &ifp->if_bpf) d1448 1 a1448 1 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); d1605 1 a1605 1 if ((caddr_t *)bp->bif_driverp == &ifp->if_bpf) @ 1.125.4.1 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $"); d1013 1 a1013 1 if ((void **)bp->bif_driverp != &ifp->if_bpf) d1448 1 a1448 1 hp = (struct bpf_hdr *)((char *)d->bd_sbuf + curlen); d1605 1 a1605 1 if ((void **)bp->bif_driverp == &ifp->if_bpf) @ 1.124 log @Kill some KAUTH_GENERIC_ISSUSER uses. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.123 2006/10/12 01:32:27 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.123 2006/10/12 01:32:27 christos Exp $"); d369 1 a369 1 bpfilterattach(int n __unused) d387 1 a387 1 bpfopen(dev_t dev __unused, int flag __unused, int mode __unused, struct lwp *l) d459 2 a460 2 bpf_read(struct file *fp, off_t *offp __unused, struct uio *uio, kauth_cred_t cred __unused, int flags __unused) d588 2 a589 2 bpf_write(struct file *fp, off_t *offp __unused, struct uio *uio, kauth_cred_t cred __unused, int flags __unused) d1113 1 a1113 1 filt_bpfread(struct knote *kn, long hint __unused) @ 1.123 log @- sprinkle __unused on function decls. - fix a couple of unused bugs - no more -Wno-unused for i386 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $"); d1722 5 a1726 3 if ((error = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, &l->l_acflag))) return (error); @ 1.122 log @add missing initializer @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.121 2006/08/04 23:18:53 martin Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.121 2006/08/04 23:18:53 martin Exp $"); d369 1 a369 1 bpfilterattach(int n) d387 1 a387 1 bpfopen(dev_t dev, int flag, int mode, struct lwp *l) d459 2 a460 2 bpf_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) d588 2 a589 2 bpf_write(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) d1113 1 a1113 1 filt_bpfread(struct knote *kn, long hint) @ 1.122.2.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.125 2006/11/16 01:33:40 christos Exp $"); d460 1 a460 1 kauth_cred_t cred, int flags) d589 1 a589 1 kauth_cred_t cred, int flags) d1722 3 a1724 5 /* BPF peers is privileged information. */ error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE, KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL); if (error) return (EPERM); @ 1.122.4.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $"); d369 1 a369 1 bpfilterattach(int n __unused) d387 1 a387 1 bpfopen(dev_t dev __unused, int flag __unused, int mode __unused, struct lwp *l) d459 2 a460 2 bpf_read(struct file *fp, off_t *offp __unused, struct uio *uio, kauth_cred_t cred __unused, int flags __unused) d588 2 a589 2 bpf_write(struct file *fp, off_t *offp __unused, struct uio *uio, kauth_cred_t cred __unused, int flags __unused) d1113 1 a1113 1 filt_bpfread(struct knote *kn, long hint __unused) @ 1.122.4.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.122.4.1 2006/10/22 06:07:24 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.122.4.1 2006/10/22 06:07:24 yamt Exp $"); d369 1 a369 1 bpfilterattach(int n) d387 1 a387 1 bpfopen(dev_t dev, int flag, int mode, struct lwp *l) d459 2 a460 2 bpf_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) d588 2 a589 2 bpf_write(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred, int flags) d1113 1 a1113 1 filt_bpfread(struct knote *kn, long hint) d1722 3 a1724 5 /* BPF peers is privileged information. */ error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE, KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL); if (error) return (EPERM); @ 1.121 log @Fix typo in comment @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.120 2006/07/26 13:54:13 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.120 2006/07/26 13:54:13 christos Exp $"); d170 1 a170 1 nostop, notty, nopoll, nommap, nokqfilter, @ 1.120 log @Patch from Dheeraj S, inspired by the following FreeBSD change: Rather than calling mircotime() in catchpacket(), make catchpacket() take a timeval indicating when the packet was captured. Move microtime() to the calling functions and grab the timestamp as soon as we know that we're going to call catchpacket at least once. This means that we call microtime() once per matched packet, as opposed to once per matched packet per bpf listener. It also means that we return the same timestamp to all bpf listeners, rather than slightly different ones. It would be more accurate to call microtime() even earlier for all packets, as you have to grab (1+#listener) locks before you can determine if the packet will be logged. You could always grab a timestamp before the locks, but microtime() can be costly, so this didn't seem like a good idea. (I guess most ethernet interfaces will have a bpf listener these days because of dhclient. That means that we could be doing two bpf locks on most packets going through the interface.) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.119 2006/07/23 22:06:12 ad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.119 2006/07/23 22:06:12 ad Exp $"); d469 1 a469 1 * as kernel buffers. @ 1.119 log @Use the LWP cached credentials where sane. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.118 2006/06/27 10:45:09 tron Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.118 2006/06/27 10:45:09 tron Exp $"); d140 2 a141 1 void *(*)(void *, const void *, size_t)); d372 1 a372 1 d676 1 a676 1 d1072 1 a1072 1 d1164 3 d1177 7 a1183 2 if (slen != 0) catchpacket(d, pkt, pktlen, slen, memcpy); d1227 2 d1236 7 a1242 2 if (slen != 0) catchpacket(d, marg, pktlen, slen, cpfn); d1403 1 a1403 1 void *(*cpfn)(void *, const void *, size_t)) d1449 1 a1449 1 microtime(&hp->bh_tstamp); d1735 1 a1735 1 d1758 1 a1758 1 d1774 1 a1774 1 d1815 1 a1815 1 @ 1.118 log @Make this build with GCC 4.x. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.117 2006/05/14 21:19:33 elad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.117 2006/05/14 21:19:33 elad Exp $"); d393 1 a393 1 if ((error = falloc(l->l_proc, &fp, &fd)) != 0) d1706 2 a1707 3 if ((error = kauth_authorize_generic(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) @ 1.117 log @integrate kauth. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d1012 1 a1012 1 if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf) d1589 1 a1589 1 if (bp->bif_driverp == (struct bpf_if **)&ifp->if_bpf) @ 1.117.4.1 log @Merge from HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.118 2006/06/27 10:45:09 tron Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.118 2006/06/27 10:45:09 tron Exp $"); d1012 1 a1012 1 if ((caddr_t *)bp->bif_driverp != &ifp->if_bpf) d1589 1 a1589 1 if ((caddr_t *)bp->bif_driverp == &ifp->if_bpf) @ 1.116 log @quell GCC 4.1 uninitialised variable warnings. XXX: we should audit the tree for which old ones are no longer needed after getting the older compilers out of the tree.. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $"); d67 1 d145 1 a145 1 static int bpf_read(struct file *, off_t *, struct uio *, struct ucred *, d147 1 a147 1 static int bpf_write(struct file *, off_t *, struct uio *, struct ucred *, d459 1 a459 1 struct ucred *cred, int flags) d588 1 a588 1 struct ucred *cred, int flags) d1706 3 a1708 1 if ((error = suser(l->l_proc->p_ucred, &l->l_proc->p_acflag))) @ 1.115 log @Kill BPF_KERN_FILTER. Seems like it died with the new pppd import. No replies from tech-kern@@, but who introduced this option 8 years ago (Christos) said it's ok to remove it. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.114 2005/12/24 20:45:09 perry Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.114 2005/12/24 20:45:09 perry Exp $"); d595 2 @ 1.115.4.1 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.122 2006/08/28 00:09:28 christos Exp $"); a66 1 #include d139 1 a139 2 void *(*)(void *, const void *, size_t), struct timeval*); d144 1 a144 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d146 1 a146 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d168 1 a168 1 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER d370 1 a370 1 d392 1 a392 1 if ((error = falloc(l, &fp, &fd)) != 0) d458 1 a458 1 kauth_cred_t cred, int flags) d467 1 a467 1 * the kernel buffers. d587 1 a587 1 kauth_cred_t cred, int flags) a594 2 m = NULL; /* XXX gcc */ d672 1 a672 1 d1009 1 a1009 1 if ((caddr_t *)bp->bif_driverp != &ifp->if_bpf) d1068 1 a1068 1 a1159 3 struct timeval tv; int gottime=0; d1170 2 a1171 7 if (slen != 0) { if (!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, pkt, pktlen, slen, memcpy, &tv); } a1214 2 struct timeval tv; int gottime = 0; d1222 2 a1223 7 if (slen != 0) { if(!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, marg, pktlen, slen, cpfn, &tv); } d1384 1 a1384 1 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv) d1430 1 a1430 1 hp->bh_tstamp = *tv; d1586 1 a1586 1 if ((caddr_t *)bp->bif_driverp == &ifp->if_bpf) d1703 1 a1703 2 if ((error = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, &l->l_acflag))) d1715 1 a1715 1 d1738 1 a1738 1 d1754 1 a1754 1 d1795 1 a1795 1 @ 1.115.6.1 log @Sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $"); a66 1 #include d144 1 a144 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d146 1 a146 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d458 1 a458 1 kauth_cred_t cred, int flags) d587 1 a587 1 kauth_cred_t cred, int flags) a594 2 m = NULL; /* XXX gcc */ d1703 1 a1703 3 if ((error = kauth_authorize_generic(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) @ 1.115.12.1 log @Merge 2006-05-24 NetBSD-current into the "peter-altq" branch. @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); a66 1 #include d144 1 a144 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d146 1 a146 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d458 1 a458 1 kauth_cred_t cred, int flags) d587 1 a587 1 kauth_cred_t cred, int flags) a594 2 m = NULL; /* XXX gcc */ d1703 1 a1703 3 if ((error = kauth_authorize_generic(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) @ 1.115.8.1 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $"); a66 1 #include d144 1 a144 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d146 1 a146 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d458 1 a458 1 kauth_cred_t cred, int flags) d587 1 a587 1 kauth_cred_t cred, int flags) a594 2 m = NULL; /* XXX gcc */ d1703 1 a1703 3 if ((error = kauth_authorize_generic(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) @ 1.115.8.2 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115.8.1 2006/05/24 10:58:56 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115.8.1 2006/05/24 10:58:56 yamt Exp $"); d140 1 a140 2 void *(*)(void *, const void *, size_t), struct timeval*); d371 1 a371 1 d393 1 a393 1 if ((error = falloc(l, &fp, &fd)) != 0) d468 1 a468 1 * the kernel buffers. d675 1 a675 1 d1012 1 a1012 1 if ((caddr_t *)bp->bif_driverp != &ifp->if_bpf) d1071 1 a1071 1 a1162 3 struct timeval tv; int gottime=0; d1173 2 a1174 7 if (slen != 0) { if (!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, pkt, pktlen, slen, memcpy, &tv); } a1217 2 struct timeval tv; int gottime = 0; d1225 2 a1226 7 if (slen != 0) { if(!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, marg, pktlen, slen, cpfn, &tv); } d1387 1 a1387 1 void *(*cpfn)(void *, const void *, size_t), struct timeval *tv) d1433 1 a1433 1 hp->bh_tstamp = *tv; d1589 1 a1589 1 if ((caddr_t *)bp->bif_driverp == &ifp->if_bpf) d1706 3 a1708 2 if ((error = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER, &l->l_acflag))) d1720 1 a1720 1 d1743 1 a1743 1 d1759 1 a1759 1 d1800 1 a1800 1 @ 1.115.8.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115.8.2 2006/08/11 15:46:14 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115.8.2 2006/08/11 15:46:14 yamt Exp $"); d170 1 a170 1 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER @ 1.115.10.1 log @Adapt to kernel authorization KPI. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115 2005/12/26 15:45:48 rpaulo Exp $"); d144 1 a144 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d146 1 a146 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d458 1 a458 1 kauth_cred_t cred, int flags) d587 1 a587 1 kauth_cred_t cred, int flags) d1703 1 a1703 3 if ((error = generic_authorize(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) @ 1.115.10.2 log @generic_authorize() -> kauth_authorize_generic(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115.10.1 2006/03/08 01:11:55 elad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115.10.1 2006/03/08 01:11:55 elad Exp $"); d1703 1 a1703 1 if ((error = kauth_authorize_generic(l->l_proc->p_cred, @ 1.115.10.3 log @- Move kauth_cred_t declaration to - Cleanup struct ucred; forward declarations that are unused. - Don't include in any header, but include it in the c files that need it. Approved by core. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115.10.2 2006/03/10 15:05:22 elad Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115.10.2 2006/03/10 15:05:22 elad Exp $"); a66 1 #include @ 1.115.10.4 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.115.10.3 2006/05/06 23:31:58 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.115.10.3 2006/05/06 23:31:58 christos Exp $"); a595 2 m = NULL; /* XXX gcc */ @ 1.114 log @Remove leading __ from __(const|inline|signed|volatile) -- it is obsolete. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.113 2005/12/14 22:46:52 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.113 2005/12/14 22:46:52 rpaulo Exp $"); a643 5 #ifdef BPF_KERN_FILTER extern struct bpf_insn *bpf_tcp_filter; extern struct bpf_insn *bpf_udp_filter; #endif a666 3 #ifdef BPF_KERN_FILTER struct bpf_insn **p; #endif a732 30 #ifdef BPF_KERN_FILTER /* * Set TCP or UDP reject filter. */ case BIOCSTCPF: case BIOCSUDPF: if (!suser()) { error = EPERM; break; } /* Validate and store filter */ error = bpf_setf(d, addr); /* Free possible old filter */ if (cmd == BIOCSTCPF) p = &bpf_tcp_filter; else p = &bpf_udp_filter; if (*p != NULL) free(*p, M_DEVBUF); /* Steal new filter (noop if error) */ s = splnet(); *p = d->bd_filter; d->bd_filter = NULL; splx(s); break; #endif @ 1.113 log @Correct typo in comments. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.112 2005/12/11 12:24:51 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.112 2005/12/11 12:24:51 christos Exp $"); d136 1 a136 1 static __inline void d556 1 a556 1 static __inline void d1247 1 a1247 1 static __inline void @ 1.112 log @merge ktrace-lwp. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.10 2005/11/10 14:10:32 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.10 2005/11/10 14:10:32 skrll Exp $"); d663 2 a664 2 * BIOGHDRCMPLT Get "header already complete" flag. * BIOSHDRCMPLT Set "header already complete" flag. @ 1.111 log @Use ANSI function declarations everywhere and a consistent indentation on them. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.110 2005/08/04 19:30:47 rpaulo Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.110 2005/08/04 19:30:47 rpaulo Exp $"); d148 3 a150 3 static int bpf_ioctl(struct file *, u_long, void *, struct proc *); static int bpf_poll(struct file *, int, struct proc *); static int bpf_close(struct file *, struct proc *); d385 1 a385 1 bpfopen(dev_t dev, int flag, int mode, struct proc *p) d392 1 a392 1 if ((error = falloc(p, &fp, &fd)) != 0) d399 1 a399 1 d->bd_pid = p->p_pid; d406 1 a406 1 return fdclone(p, fp, fd, flag, &bpf_fileops, d); d415 1 a415 1 bpf_close(struct file *fp, struct proc *p) d423 1 a423 1 d->bd_pid = p->p_pid; d668 1 a668 1 bpf_ioctl(struct file *fp, u_long cmd, void *addr, struct proc *p) d679 1 a679 1 d->bd_pid = p->p_pid; d947 1 a947 1 error = fsetown(p, &d->bd_pgid, cmd, addr); d952 1 a952 1 error = fgetown(p, d->bd_pgid, cmd, addr); d1096 1 a1096 1 bpf_poll(struct file *fp, int events, struct proc *p) d1105 1 a1105 1 d->bd_pid = p->p_pid; d1121 1 a1121 1 selrecord(p, &d->bd_sel); @ 1.110 log @Implemented the kernel part of BPF statistics and BPF peers, net.bpf.stats and net.bpf.peers sysctls respectively. A new structure was added to describe the external (user viewable) representation of a BPF file; a new entry was added to the bpf_d structure to store the PID of the calling process; a simple_lock was added to protect the insert/removal from the net.bpf.peers sysctl handler. This idea came from FreeBSD (Christian S.J. Peron) but while it is implemented with sysctl's it differs a bit. Reviewed by: christos@@ and atatat@@ (who gave me the tip for the net.bpf.peers sysctl helper function). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109 2005/06/22 10:36:16 peter Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109 2005/06/22 10:36:16 peter Exp $"); d172 2 a173 6 bpf_movein(uio, linktype, mtu, mp, sockp) struct uio *uio; int linktype; int mtu; struct mbuf **mp; struct sockaddr *sockp; d296 1 a296 3 bpf_attachd(d, bp) struct bpf_d *d; struct bpf_if *bp; d314 1 a314 2 bpf_detachd(d) struct bpf_d *d; d367 1 a367 2 bpfilterattach(n) int n; d385 1 a385 5 bpfopen(dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; d458 1 a458 1 struct ucred *cred, int flags) d557 1 a557 2 bpf_wakeup(d) struct bpf_d *d; d570 1 a570 2 bpf_timed_out(arg) void *arg; d587 1 a587 1 struct ucred *cred, int flags) d630 1 a630 2 reset_d(d) struct bpf_d *d; d1249 1 a1249 1 void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif) d1422 1 a1422 1 void *(*cpfn)(void *, const void *, size_t)) @ 1.109 log @Missing m_freem() in bpf_write. PR/29138. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.108 2005/06/20 02:49:19 atatat Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.108 2005/06/20 02:49:19 atatat Exp $"); d104 12 d377 3 d381 5 d411 1 d414 1 d416 1 d432 5 d445 1 d447 1 d691 5 d1117 5 d1221 1 d1273 1 d1444 1 d1471 1 d1741 71 a1811 1 SYSCTL_SETUP(sysctl_net_bfp_setup, "sysctl net.bpf subtree setup") d1828 1 a1828 1 if (node != NULL) d1835 14 @ 1.109.2.1 log @de-constify mbuf. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109 2005/06/22 10:36:16 peter Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109 2005/06/22 10:36:16 peter Exp $"); d113 1 a113 1 void *(*cpfn)(void *, void *, size_t), d117 1 a117 1 static void *bpf_mcpy(void *, void *, size_t); d127 1 a127 1 void *(*)(void *, void *, size_t)); d1183 1 a1183 1 catchpacket(d, pkt, pktlen, slen, (void *)memcpy); d1192 1 a1192 1 bpf_mcpy(void *dst_arg, void *src_arg, size_t len) d1194 1 a1194 1 struct mbuf *m; d1222 1 a1222 1 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, void *, size_t), d1270 1 a1270 1 void *(*cpfn)(void *, void *, size_t); d1278 1 a1278 1 cpfn = (void *)memcpy; d1395 1 a1395 1 void *(*cpfn)(void *, void *, size_t)) @ 1.109.2.2 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.1 2005/07/07 12:03:16 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.1 2005/07/07 12:03:16 yamt Exp $"); a66 1 #include a103 12 /* * Global BPF statistics returned by net.bpf.stats sysctl. */ struct bpf_stat bpf_gstats; /* * Use a mutex to avoid a race condition between gathering the stats/peers * and opening/closing the device. */ struct simplelock bpf_slock; d124 1 a124 1 static inline void d132 1 a132 1 static int bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t, d134 1 a134 1 static int bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t, d136 3 a138 3 static int bpf_ioctl(struct file *, u_long, void *, struct lwp *); static int bpf_poll(struct file *, int, struct lwp *); static int bpf_close(struct file *, struct lwp *); d160 6 a165 2 bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp, struct sockaddr *sockp) d288 3 a290 1 bpf_attachd(struct bpf_d *d, struct bpf_if *bp) d308 2 a309 1 bpf_detachd(struct bpf_d *d) d362 2 a363 1 bpfilterattach(int n) a364 3 simple_lock_init(&bpf_slock); simple_lock(&bpf_slock); a365 5 simple_unlock(&bpf_slock); bpf_gstats.bs_recv = 0; bpf_gstats.bs_drop = 0; bpf_gstats.bs_capt = 0; d373 5 a377 1 bpfopen(dev_t dev, int flag, int mode, struct lwp *l) d384 1 a384 1 if ((error = falloc(l->l_proc, &fp, &fd)) != 0) a390 1 d->bd_pid = l->l_proc->p_pid; a392 1 simple_lock(&bpf_slock); a393 1 simple_unlock(&bpf_slock); d395 1 a395 1 return fdclone(l, fp, fd, flag, &bpf_fileops, d); d404 1 a404 1 bpf_close(struct file *fp, struct lwp *l) a408 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; a416 1 simple_lock(&bpf_slock); a417 1 simple_unlock(&bpf_slock); d440 1 a440 1 kauth_cred_t cred, int flags) d538 3 a540 2 static inline void bpf_wakeup(struct bpf_d *d) d553 2 a554 1 bpf_timed_out(void *arg) d571 1 a571 1 kauth_cred_t cred, int flags) a578 2 m = NULL; /* XXX gcc */ d614 2 a615 1 reset_d(struct bpf_d *d) d629 5 d648 2 a649 2 * BIOCGHDRCMPLT Get "header already complete" flag. * BIOCSHDRCMPLT Set "header already complete" flag. d653 1 a653 1 bpf_ioctl(struct file *fp, u_long cmd, void *addr, struct lwp *l) d657 3 a660 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; d721 30 d927 1 a927 1 error = fsetown(l->l_proc, &d->bd_pgid, cmd, addr); d932 1 a932 1 error = fgetown(l->l_proc, d->bd_pgid, cmd, addr); d1076 1 a1076 1 bpf_poll(struct file *fp, int events, struct lwp *l) a1081 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; d1096 1 a1096 1 selrecord(l, &d->bd_sel); a1180 1 ++bpf_gstats.bs_recv; d1221 1 a1221 1 static inline void d1223 1 a1223 1 void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif) a1231 1 ++bpf_gstats.bs_recv; d1395 1 a1395 1 void *(*cpfn)(void *, void *, size_t)) a1401 1 ++bpf_gstats.bs_capt; a1427 1 ++bpf_gstats.bs_drop; d1697 1 a1697 73 static int sysctl_net_bpf_peers(SYSCTLFN_ARGS) { int error, elem_count; struct bpf_d *dp; struct bpf_d_ext dpe; size_t len, needed, elem_size, out_size; char *sp; if (namelen == 1 && name[0] == CTL_QUERY) return (sysctl_query(SYSCTLFN_CALL(rnode))); if (namelen != 2) return (EINVAL); if ((error = kauth_authorize_generic(l->l_proc->p_cred, KAUTH_GENERIC_ISSUSER, &l->l_proc->p_acflag))) return (error); len = (oldp != NULL) ? *oldlenp : 0; sp = oldp; elem_size = name[0]; elem_count = name[1]; out_size = MIN(sizeof(dpe), elem_size); needed = 0; if (elem_size < 1 || elem_count < 0) return (EINVAL); simple_lock(&bpf_slock); LIST_FOREACH(dp, &bpf_list, bd_list) { if (len >= elem_size && elem_count > 0) { #define BPF_EXT(field) dpe.bde_ ## field = dp->bd_ ## field BPF_EXT(bufsize); BPF_EXT(promisc); BPF_EXT(promisc); BPF_EXT(state); BPF_EXT(immediate); BPF_EXT(hdrcmplt); BPF_EXT(seesent); BPF_EXT(pid); BPF_EXT(rcount); BPF_EXT(dcount); BPF_EXT(ccount); #undef BPF_EXT if (dp->bd_bif) (void)strlcpy(dpe.bde_ifname, dp->bd_bif->bif_ifp->if_xname, IFNAMSIZ - 1); else dpe.bde_ifname[0] = '\0'; error = copyout(&dpe, sp, out_size); if (error) break; sp += elem_size; len -= elem_size; } if (elem_count > 0) { needed += elem_size; if (elem_count != INT_MAX) elem_count--; } } simple_unlock(&bpf_slock); *oldlenp = needed; return (error); } SYSCTL_SETUP(sysctl_net_bpf_setup, "sysctl net.bpf subtree setup") d1714 1 a1714 1 if (node != NULL) { a1720 14 sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "stats", SYSCTL_DESCR("BPF stats"), NULL, 0, &bpf_gstats, sizeof(bpf_gstats), CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "peers", SYSCTL_DESCR("BPF peers"), sysctl_net_bpf_peers, 0, NULL, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); } @ 1.109.2.3 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.2 2006/06/21 15:10:26 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.2 2006/06/21 15:10:26 yamt Exp $"); d140 1 a140 1 void *(*)(void *, void *, size_t), struct timeval*); d169 1 a169 1 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER d371 1 a371 1 d393 1 a393 1 if ((error = falloc(l, &fp, &fd)) != 0) d459 1 a459 1 kauth_cred_t cred, int flags) d468 1 a468 1 * the kernel buffers. d588 1 a588 1 kauth_cred_t cred, int flags) d675 1 a675 1 d1012 1 a1012 1 if ((caddr_t *)bp->bif_driverp != &ifp->if_bpf) d1071 1 a1071 1 a1162 3 struct timeval tv; int gottime=0; d1173 2 a1174 7 if (slen != 0) { if (!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, pkt, pktlen, slen, (void *)memcpy, &tv); } a1217 2 struct timeval tv; int gottime = 0; d1225 2 a1226 7 if (slen != 0) { if(!gottime) { microtime(&tv); gottime = 1; } catchpacket(d, marg, pktlen, slen, cpfn, &tv); } d1387 1 a1387 1 void *(*cpfn)(void *, void *, size_t), struct timeval *tv) d1433 1 a1433 1 hp->bh_tstamp = *tv; d1589 1 a1589 1 if ((caddr_t *)bp->bif_driverp == &ifp->if_bpf) d1706 4 a1709 5 /* BPF peers is privileged information. */ error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE, KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL); if (error) return (EPERM); d1720 1 a1720 1 d1743 1 a1743 1 d1759 1 a1759 1 d1800 1 a1800 1 @ 1.109.2.4 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.3 2006/12/30 20:50:20 yamt Exp $ */ d42 1 a42 7 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.3 2006/12/30 20:50:20 yamt Exp $"); #if defined(_KERNEL_OPT) #include "opt_bpf.h" #include "sl.h" #include "strip.h" #endif d81 5 a85 2 #include d115 1 a115 1 static kmutex_t bpf_mtx; d370 1 a370 1 mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE); d372 1 a372 1 mutex_enter(&bpf_mtx); d374 1 a374 1 mutex_exit(&bpf_mtx); d401 1 a401 1 callout_init(&d->bd_callout, 0); d403 1 a403 1 mutex_enter(&bpf_mtx); d405 1 a405 1 mutex_exit(&bpf_mtx); d434 1 a434 1 mutex_enter(&bpf_mtx); d436 1 a436 2 mutex_exit(&bpf_mtx); callout_destroy(&d->bd_callout); d565 2 a797 3 #ifdef OBIOCGETIF case OBIOCGETIF: #endif a807 3 #ifdef OBIOCSETIF case OBIOCSETIF: #endif d1012 1 a1012 1 if ((void **)bp->bif_driverp != &ifp->if_bpf) d1447 1 a1447 1 hp = (struct bpf_hdr *)((char *)d->bd_sbuf + curlen); d1604 1 a1604 1 if ((void **)bp->bif_driverp == &ifp->if_bpf) d1737 1 a1737 1 mutex_enter(&bpf_mtx); d1772 1 a1772 1 mutex_exit(&bpf_mtx); @ 1.109.2.5 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.4 2007/09/03 14:42:00 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.4 2007/09/03 14:42:00 yamt Exp $"); d1147 1 a1147 1 return (EINVAL); @ 1.109.2.6 log @sync with head @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.5 2007/12/07 17:34:14 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.5 2007/12/07 17:34:14 yamt Exp $"); d1579 1 a1579 1 LIST_FOREACH(d, &bpf_list, bd_list) { @ 1.109.2.7 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.6 2008/01/21 09:46:59 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.6 2008/01/21 09:46:59 yamt Exp $"); d1313 1 a1313 1 bpf_mtap_af(void *arg, uint32_t af, struct mbuf *m) d1326 1 a1326 1 bpf_mtap_et(void *arg, uint16_t et, struct mbuf *m) d1335 4 a1338 4 ((uint32_t *)m0.m_data)[0] = 0; ((uint32_t *)m0.m_data)[1] = 0; ((uint32_t *)m0.m_data)[2] = 0; ((uint16_t *)m0.m_data)[6] = et; @ 1.109.2.8 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.7 2008/02/27 08:37:00 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.7 2008/02/27 08:37:00 yamt Exp $"); a404 1 selinit(&d->bd_sel); a440 1 seldestroy(&d->bd_sel); d568 1 a568 1 selnotify(&d->bd_sel, 0, 0); d1066 1 a1066 1 * Otherwise, return false but make a note that a selnotify() must be done. @ 1.109.2.9 log @sync with head. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.109.2.8 2008/03/17 09:15:41 yamt Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.109.2.8 2008/03/17 09:15:41 yamt Exp $"); d152 3 a154 3 static int bpf_ioctl(struct file *, u_long, void *); static int bpf_poll(struct file *, int); static int bpf_close(struct file *); d396 1 a396 1 if ((error = fd_allocfile(&fp, &fd)) != 0) d411 1 a411 1 return fd_clone(fp, fd, flag, &bpf_fileops, d); d420 1 a420 1 bpf_close(struct file *fp) d428 1 a428 1 d->bd_pid = curproc->p_pid; d670 1 a670 1 bpf_ioctl(struct file *fp, u_long cmd, void *addr) d678 1 a678 1 d->bd_pid = curproc->p_pid; d922 1 a922 1 error = fsetown(&d->bd_pgid, cmd, addr); d927 1 a927 1 error = fgetown(d->bd_pgid, cmd, addr); d1071 1 a1071 1 bpf_poll(struct file *fp, int events) d1080 1 a1080 1 d->bd_pid = curproc->p_pid; d1096 1 a1096 1 selrecord(curlwp, &d->bd_sel); @ 1.108 log @Change the rest of the sysctl subsystem to use const consistently. The __UNCONST macro is now used only where necessary and the RW macros are gone. Most of the changes here are consumers of the sysctl_createv(9) interface that now takes a pair of const pointers which used not to be. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $"); d592 2 a593 1 if (m->m_pkthdr.len > ifp->if_mtu) d595 1 @ 1.107 log @nuke trailing whitespace @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.106 2005/02/12 23:14:03 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.106 2005/02/12 23:14:03 christos Exp $"); d1697 1 a1697 1 struct sysctlnode *node; @ 1.106 log @pass the flag to fdclone. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.105 2004/11/30 04:28:43 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.105 2004/11/30 04:28:43 christos Exp $"); d1299 1 a1299 1 @ 1.105 log @Clonify bpf. I am not changing /dev/bpfX -> /dev/bpf until all userland programs have been fixed. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.104 2004/08/19 20:58:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.104 2004/08/19 20:58:23 christos Exp $"); d395 1 a395 1 return fdclone(p, fp, fd, &bpf_fileops, d); @ 1.105.4.1 log @sync with -current @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.107 2005/02/26 22:45:09 perry Exp $"); d395 1 a395 1 return fdclone(p, fp, fd, flag, &bpf_fileops, d); d1299 1 a1299 1 @ 1.105.6.1 log @sync with head. xen and whitespace. xen part is not finished. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.105 2004/11/30 04:28:43 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.105 2004/11/30 04:28:43 christos Exp $"); d395 1 a395 1 return fdclone(p, fp, fd, flag, &bpf_fileops, d); d1299 1 a1299 1 @ 1.104 log @Factor out the hand-crafting of mbufs from the interface files. Reviewed by gimpy. XXX: I could have used bpf_mtap2 on some of the new functions, but I chose not to, because I just wanted to do what amounts to a code move. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.103 2004/08/19 18:33:24 christos Exp $ */ d42 1 a42 3 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.103 2004/08/19 18:33:24 christos Exp $"); #include "bpfilter.h" d54 1 d57 1 d109 1 a109 1 struct bpf_d bpf_dtab[NBPFILTER]; d132 20 a152 6 dev_type_close(bpfclose); dev_type_read(bpfread); dev_type_write(bpfwrite); dev_type_ioctl(bpfioctl); dev_type_poll(bpfpoll); dev_type_kqfilter(bpfkqfilter); d155 2 a156 2 bpfopen, bpfclose, bpfread, bpfwrite, bpfioctl, nostop, notty, bpfpoll, nommap, bpfkqfilter, a355 3 #define D_ISFREE(d) ((d) == (d)->bd_next) #define D_MARKFREE(d) ((d)->bd_next = (d)) #define D_MARKUSED(d) ((d)->bd_next = 0) d365 1 a365 7 int i; /* * Mark all the descriptors free. */ for (i = 0; i < NBPFILTER; ++i) D_MARKFREE(&bpf_dtab[i]); d369 1 a369 2 * Open ethernet device. Returns ENXIO for illegal minor device number, * EBUSY if file is open by another process. d380 2 d383 3 a385 9 if (minor(dev) >= NBPFILTER) return (ENXIO); /* * Each minor can be opened by only one process. If the requested * minor is in use, return EBUSY. */ d = &bpf_dtab[minor(dev)]; if (!D_ISFREE(d)) return (EBUSY); d387 2 a388 2 /* Mark "free" and do most initialization. */ memset((char *)d, 0, sizeof(*d)); d393 3 a395 1 return (0); d403 2 a404 6 int bpfclose(dev, flag, mode, p) dev_t dev; int flag; int mode; struct proc *p; d406 1 a406 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d417 3 d438 3 a440 5 int bpfread(dev, uio, ioflag) dev_t dev; struct uio *uio; int ioflag; d442 1 a442 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d465 1 a465 1 if (ioflag & IO_NDELAY) { d569 3 a571 5 int bpfwrite(dev, uio, ioflag) dev_t dev; struct uio *uio; int ioflag; d573 1 a573 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d650 2 a651 2 int bpfioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p) d653 1 a653 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; a657 1 void *addr = arg; d1073 2 a1074 2 int bpfpoll(dev_t dev, int events, struct proc *p) d1076 1 a1076 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d1133 2 a1134 2 int bpfkqfilter(dev_t dev, struct knote *kn) d1136 1 a1136 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; a1499 2 D_MARKFREE(d); d1557 1 a1557 4 int i, s, cmaj; /* locate the major number */ cmaj = cdevsw_lookup_major(&bpf_cdevsw); d1560 2 a1561 4 for (i = 0; i < NBPFILTER; ++i) { d = &bpf_dtab[i]; if (!D_ISFREE(d) && d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) { a1569 1 vdevgone(cmaj, i, i, VCHR); @ 1.103 log @- ansify - remove unnecessary casts - change caddr_t to void * - no functional change. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.102 2004/08/05 03:58:58 enami Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.102 2004/08/05 03:58:58 enami Exp $"); d69 1 d82 2 d1293 95 @ 1.102 log @Don't refuse to attach an interface even if it is down so that one can capture the very first packet when an interface is up. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.101 2004/06/06 04:35:53 dyoung Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.101 2004/06/06 04:35:53 dyoung Exp $"); d108 1 a108 1 static int bpf_allocbufs __P((struct bpf_d *)); d112 9 a120 9 static void bpf_freed __P((struct bpf_d *)); static void bpf_ifname __P((struct ifnet *, struct ifreq *)); static void *bpf_mcpy __P((void *, const void *, size_t)); static int bpf_movein __P((struct uio *, int, int, struct mbuf **, struct sockaddr *)); static void bpf_attachd __P((struct bpf_d *, struct bpf_if *)); static void bpf_detachd __P((struct bpf_d *)); static int bpf_setif __P((struct bpf_d *, struct ifreq *)); static void bpf_timed_out __P((void *)); d122 6 a127 6 bpf_wakeup __P((struct bpf_d *)); static void catchpacket __P((struct bpf_d *, u_char *, u_int, u_int, void *(*)(void *, const void *, size_t))); static void reset_d __P((struct bpf_d *)); static int bpf_getdltlist __P((struct bpf_d *, struct bpf_dltlist *)); static int bpf_setdlt __P((struct bpf_d *, u_int)); d249 1 a249 1 error = uiomove(mtod(m, caddr_t), len, uio); d253 1 a253 1 memcpy(sockp->sa_data, mtod(m, caddr_t), hlen); d481 1 a481 1 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d540 1 a540 1 wakeup((caddr_t)d); d554 1 a554 1 struct bpf_d *d = (struct bpf_d *)arg; d651 1 a651 6 bpfioctl(dev, cmd, addr, flag, p) dev_t dev; u_long cmd; caddr_t addr; int flag; struct proc *p; d658 1 d717 1 a717 1 error = bpf_setf(d, (struct bpf_program *)addr); d732 1 a732 1 error = bpf_setf(d, (struct bpf_program *)addr); d740 1 a740 1 free((caddr_t)*p, M_DEVBUF); d796 1 a796 1 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); d816 1 a816 1 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); d823 1 a823 1 error = bpf_setif(d, (struct ifreq *)addr); d831 1 a831 1 struct timeval *tv = (struct timeval *)addr; d845 1 a845 1 struct timeval *tv = (struct timeval *)addr; d857 1 a857 1 struct bpf_stat *bs = (struct bpf_stat *)addr; d867 1 a867 1 struct bpf_stat_old *bs = (struct bpf_stat_old *)addr; d883 1 a883 1 struct bpf_version *bv = (struct bpf_version *)addr; d942 1 a942 3 bpf_setf(d, fp) struct bpf_d *d; struct bpf_program *fp; d957 1 a957 1 free((caddr_t)old, M_DEVBUF); d965 2 a966 2 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK); if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && d973 1 a973 1 free((caddr_t)old, M_DEVBUF); d977 1 a977 1 free((caddr_t)fcode, M_DEVBUF); d987 1 a987 3 bpf_setif(d, ifr) struct bpf_d *d; struct ifreq *ifr; d1061 1 a1061 3 bpf_ifname(ifp, ifr) struct ifnet *ifp; struct ifreq *ifr; a1062 1 d1075 1 a1075 4 bpfpoll(dev, events, p) dev_t dev; int events; struct proc *p; d1135 1 a1135 3 bpfkqfilter(dev, kn) dev_t dev; struct knote *kn; d1167 1 a1167 4 bpf_tap(arg, pkt, pktlen) caddr_t arg; u_char *pkt; u_int pktlen; d1177 1 a1177 1 bp = (struct bpf_if *)arg; d1191 1 a1191 4 bpf_mcpy(dst_arg, src_arg, len) void *dst_arg; const void *src_arg; size_t len; d1203 1 a1203 1 memcpy((caddr_t)dst, mtod(m, caddr_t), count); d1242 1 a1242 5 bpf_mtap2(arg, data, dlen, m) caddr_t arg; void *data; u_int dlen; struct mbuf *m; d1244 1 a1244 1 struct bpf_if *bp = (struct bpf_if *)arg; d1267 1 a1267 3 bpf_mtap(arg, m) caddr_t arg; struct mbuf *m; d1269 2 a1270 2 void *(*cpfn) __P((void *, const void *, size_t)); struct bpf_if *bp = (struct bpf_if *)arg; d1298 2 a1299 5 catchpacket(d, pkt, pktlen, snaplen, cpfn) struct bpf_d *d; u_char *pkt; u_int pktlen, snaplen; void *(*cpfn) __P((void *, const void *, size_t)); d1369 1 a1369 2 bpf_allocbufs(d) struct bpf_d *d; d1372 1 a1372 1 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1375 1 a1375 1 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1390 1 a1390 2 bpf_freed(d) struct bpf_d *d; d1405 1 a1405 1 free((caddr_t)d->bd_filter, M_DEVBUF); d1415 1 a1415 3 bpfattach(ifp, dlt, hdrlen) struct ifnet *ifp; u_int dlt, hdrlen; d1427 1 a1427 4 bpfattach2(ifp, dlt, hdrlen, driverp) struct ifnet *ifp; u_int dlt, hdrlen; caddr_t *driverp; d1430 1 a1430 1 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); d1435 1 a1435 1 bp->bif_driverp = (struct bpf_if **)driverp; d1461 1 a1461 2 bpfdetach(ifp) struct ifnet *ifp; d1502 1 a1502 3 bpf_change_type(ifp, dlt, hdrlen) struct ifnet *ifp; u_int dlt, hdrlen; d1528 1 a1528 3 bpf_getdltlist(d, bfl) struct bpf_d *d; struct bpf_dltlist *bfl; d1556 1 a1556 3 bpf_setdlt(d, dlt) struct bpf_d *d; u_int dlt; @ 1.101 log @Per Matt Thomas' and Darren Reed's suggestions: Add bpf_deliver prototype. Rename bpf_measure to m_length and move it to sys/sys/mbuf.h. I make m_length an inline function in the header file to preserve its performance characteristics, for better or for worse. Optimize m_length: use the length in m_pkthdr.len, if M_PKTHDR. In bpf_deliver, zero the on-stack mbuf before we do anything else with it. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.100 2004/05/29 14:18:33 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.100 2004/05/29 14:18:33 darrenr Exp $"); a1037 1 * If it's not up, return an error. a1041 3 if ((ifp->if_flags & IFF_UP) == 0) return (ENETDOWN); @ 1.100 log @back out previous change - these diffs aren't what I'd tested. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.98 2004/05/25 04:33:59 atatat Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.98 2004/05/25 04:33:59 atatat Exp $"); d109 3 a1262 12 static __inline u_int bpf_measure(struct mbuf *m) { struct mbuf *m0; u_int pktlen; pktlen = 0; for (m0 = m; m0 != 0; m0 = m0->m_next) pktlen += m0->m_len; return pktlen; } d1278 1 a1278 1 pktlen = bpf_measure(m) + dlen; d1285 1 d1306 1 a1306 1 pktlen = bpf_measure(m); @ 1.99 log @add mmap(2) interface to bpf(4) devices, along with BIOCMMAPINFO ioctl call for applications to interact with the bpf device for the purpose of using mmap to examinen captured data. @ text @a124 2 static int bpf_mmapinfo __P((struct bpf_d *, struct bpf_mmapinfo *)); static int bpf_waitfordata __P((struct bpf_d *)); a132 1 dev_type_mmap(bpfmmap); d136 1 a136 1 nostop, notty, bpfpoll, bpfmmap, bpfkqfilter, d423 5 a427 8 do { \ (d)->bd_hbuf = (d)->bd_sbuf; \ (d)->bd_hlen = (d)->bd_slen; \ (d)->bd_sbuf = (d)->bd_fbuf; \ (d)->bd_slen = 0; \ (d)->bd_fbuf = 0; \ } while (0) a453 37 error = bpf_waitfordata(d); if (error != 0) goto done; /* * At this point, we know we have something in the hold slot. */ splx(s); /* * Move data from hold buffer into user space. * We know the entire buffer is transferred since * we checked above that the read buffer is bpf_bufsize bytes. */ error = uiomove(d->bd_hbuf, d->bd_hlen, uio); s = splnet(); d->bd_fbuf = d->bd_hbuf; d->bd_hbuf = 0; d->bd_hlen = 0; done: splx(s); if (error == -1) error = 0; return (error); } /* * NOTE: splnet() is assumed to be held when calling this function. * It is left to the caller to drop the spl. */ static int bpf_waitfordata(d) struct bpf_d *d; { int error; d462 1 d498 3 a500 6 if (d->bd_slen == 0) return -1; if (d->bd_mapbuf == -1) { ROTATE_BUFFERS(d); break; d502 2 d506 1 a506 1 return error; d508 11 d520 7 a526 1 return 0; a912 4 case BIOCMMAPINFO: error = bpf_mmapinfo(d, (struct bpf_mmapinfo *)addr); break; d1369 1 a1369 1 if (d->bd_fbuf == 0 || d->bd_mapbuf != -1) { d1416 2 a1417 2 d->bd_bufs[1] = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); if (d->bd_bufs[1] == NULL) d1419 3 a1421 6 d->bd_fbuf = d->bd_bufs[1]; d->bd_bufs[0] = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); if (d->bd_bufs[0] == NULL) { free(d->bd_bufs[1], M_DEVBUF); d->bd_bufs[1] = NULL; d->bd_fbuf = NULL; a1423 2 d->bd_sbuf = d->bd_bufs[0]; d->bd_fbuf = d->bd_bufs[1]; a1425 1 d->bd_mapbuf = -1; d1442 7 a1448 4 if (d->bd_bufs[0] != 0) free(d->bd_bufs[0], M_DEVBUF); if (d->bd_bufs[1] != 0) free(d->bd_bufs[1], M_DEVBUF); a1627 88 /* * Provide a mmap(2) interface to the BPF buffers. * Read-only mapping (PROT_READ) is enforced by this driver - an application * using this should never write to this buffer and especially not with copy * on write as the real buffer contents will then disappear from the process * view. * * An application should create two maps: one for each buffer that bpf has * internally and use the information returned from BIOCMMAPINFO to determine * which one has valid data in it and how much data is valid. */ paddr_t bpfmmap(dev_t dev, off_t off, int prot) { struct bpf_d *d; u_int uoff; if (prot != VM_PROT_READ) return -1; if (off & PAGE_MASK) panic("bpfmmap"); d = &bpf_dtab[minor(dev)]; uoff = (u_int)off; if (uoff >= 0 && uoff < d->bd_bufsize) return (atop(d->bd_bufs[0] + uoff)); if (uoff >= d->bd_bufsize && uoff < (d->bd_bufsize * 2)) return (atop(d->bd_bufs[1] + (uoff - d->bd_bufsize))); /* Page not found. */ return (-1); } static int bpf_mmapinfo(d, info) struct bpf_d *d; struct bpf_mmapinfo *info; { int which, s, error; s = splnet(); if (info->bpm_op == BPM_RELEASE) { /* only want to unlock */ d->bd_mapbuf = -1; splx(s); return 0; } /* * Currently only two operations are supported, release and acquire. * If it's not one of these then return an error. */ if (info->bpm_op != BPM_ACQUIRE) { splx(s); return EINVAL; } /* * An incoming call must give up the current buffer locked for use * with mmap, if it has one, so that bpf has somewhere to write new * data when this call returns. */ if (d->bd_mapbuf != -1) { d->bd_fbuf = d->bd_hbuf; d->bd_hbuf = NULL; d->bd_hlen = 0; d->bd_mapbuf = -1; } error = bpf_waitfordata(d); if (error == 0) { if (d->bd_hbuf == d->bd_bufs[0]) which = 0; else if (d->bd_hbuf == d->bd_bufs[1]) which = 1; else which = -1; d->bd_mapbuf = which; info->bpm_len = d->bd_hlen; info->bpm_which = which; } splx(s); return 0; } a1690 2 @ 1.98 log @Sysctl descriptions under net subtree (net.key not done) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.97 2004/05/19 13:09:11 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.97 2004/05/19 13:09:11 darrenr Exp $"); d125 2 d135 1 d139 1 a139 1 nostop, notty, bpfpoll, nommap, bpfkqfilter, d426 8 a433 5 (d)->bd_hbuf = (d)->bd_sbuf; \ (d)->bd_hlen = (d)->bd_slen; \ (d)->bd_sbuf = (d)->bd_fbuf; \ (d)->bd_slen = 0; \ (d)->bd_fbuf = 0; d460 37 a504 1 splx(s); d540 6 a545 3 if (d->bd_slen == 0) { splx(s); return (0); a546 2 ROTATE_BUFFERS(d); break; d549 1 a549 1 goto done; a550 4 /* * At this point, we know we have something in the hold slot. */ splx(s); d552 1 a552 14 /* * Move data from hold buffer into user space. * We know the entire buffer is transferred since * we checked above that the read buffer is bpf_bufsize bytes. */ error = uiomove(d->bd_hbuf, d->bd_hlen, uio); s = splnet(); d->bd_fbuf = d->bd_hbuf; d->bd_hbuf = 0; d->bd_hlen = 0; done: splx(s); return (error); d939 4 d1399 1 a1399 1 if (d->bd_fbuf == 0) { d1446 2 a1447 2 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); if (!d->bd_fbuf) d1449 6 a1454 3 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); if (!d->bd_sbuf) { free(d->bd_fbuf, M_DEVBUF); d1457 2 d1461 1 d1478 4 a1481 7 if (d->bd_sbuf != 0) { free(d->bd_sbuf, M_DEVBUF); if (d->bd_hbuf != 0) free(d->bd_hbuf, M_DEVBUF); if (d->bd_fbuf != 0) free(d->bd_fbuf, M_DEVBUF); } d1661 88 d1812 2 @ 1.97 log @reapply a change that got undone with more recent changes to bpf to wakeup any sleepers _after_ the device info has been updated, not before. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.96 2004/04/30 22:07:21 dyoung Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.96 2004/04/30 22:07:21 dyoung Exp $"); d1679 2 a1680 1 CTLTYPE_NODE, "bpf", NULL, d1686 2 a1687 1 CTLTYPE_INT, "maxbufsize", NULL, @ 1.96 log @Add bpf_mtap2, which taps a packet whose head is in a void *buffer and whose tail is in an mbuf chain. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.95 2004/04/20 10:51:09 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.95 2004/04/20 10:51:09 darrenr Exp $"); d1380 1 a1380 7 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ bpf_wakeup(d); d1394 12 @ 1.95 log @If we timeout waiting for data on the bpf device, allow data in the current storage buffer (bd_sbuf) to indicate that there is data present. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.94 2004/04/15 14:56:57 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.94 2004/04/15 14:56:57 darrenr Exp $"); d1235 67 d1311 1 a1311 3 struct bpf_d *d; u_int pktlen, slen, buflen; struct mbuf *m0; d1314 1 a1314 3 pktlen = 0; for (m0 = m; m0 != 0; m0 = m0->m_next) pktlen += m0->m_len; d1326 1 a1326 8 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) continue; ++d->bd_rcount; slen = bpf_filter(d->bd_filter, marg, pktlen, buflen); if (slen != 0) catchpacket(d, marg, pktlen, slen, cpfn); } @ 1.94 log @Add a count of the number of packets that match the bpf filter applied to a particule device. In doing this, make a new the bpf_stat structure with members that are u_long rather than u_int, matching the counters in the bpf_d. the original bpf_stat is now bpf_stat_old and so to the original ioctl is preserved as BIOCGSTATSOLD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.93 2004/04/14 21:34:26 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.93 2004/04/14 21:34:26 darrenr Exp $"); d1105 4 a1108 1 revents |= events & POLLIN; @ 1.93 log @* from bpf 1.2a1, use the IO_NDELAY flag in bpfread() to indicate whether or not a read operation should be allowed to sleep. This allows the use of bd_rtout with a value of "-1" to be eliminated (signed comparison and assignment to an unsigned long.) * in 1.91, a change was introduced that had bpfpoll() returning POLLRDNORM set when the timeout expired. This impacted poorly on performance as well as causing select to return an fd available for reading when it wasn't. Change the behaviour here to only allow the possibility of POLLIN being returned as active in the event of a timeout. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.92 2004/04/11 01:41:01 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.92 2004/04/11 01:41:01 darrenr Exp $"); d621 1 d862 10 d1288 2 @ 1.92 log @from freebsd's kern/36219, the if expression in deciding whether or not to return something check the value of bd_state in the wrong place. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.91 2004/04/10 23:31:51 darrenr Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.91 2004/04/10 23:31:51 darrenr Exp $"); d460 9 d478 2 a479 7 if (d->bd_rtout != -1) error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); else { /* User requested non-blocking I/O */ error = EWOULDBLOCK; } d903 5 a907 4 if (*(int *)addr) d->bd_rtout = -1; else d->bd_rtout = 0; d1090 2 a1091 3 if (d->bd_hlen != 0 || ((d->bd_immediate && d->bd_slen != 0) || d->bd_state == BPF_TIMED_OUT)) { d1093 2 @ 1.91 log @Fix bpf so that select will return for a timeout (from FreeBSD.) Fix the behaviour of BIOCIMMEDIATE (fix from LBL BPF code via FreeBSD.) In bpf_mtap(), optimise the calling of bpf_filter() and catchpacket() based on whether or not the entire packet is in one mbuf (based on similar change FreeBSD but fixes BIOC*SEESENT issue with that.) Copy the implementation of BIOCSSEESENT, BIOCGSEESENT by FreeBSD. Review Assistance: Guy Harris PRs: kern/8674, kern/12170 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.90 2004/03/24 15:34:54 atatat Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.90 2004/03/24 15:34:54 atatat Exp $"); d1086 2 a1087 2 ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) d1089 1 a1089 1 else { @ 1.90 log @Tango on sysctl_createv() and flags. The flags have all been renamed, and sysctl_createv() now uses more arguments. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.89 2004/01/22 00:32:41 jonathan Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.89 2004/01/22 00:32:41 jonathan Exp $"); d117 1 d384 2 d406 3 d438 1 d450 4 d460 1 a460 5 if (d->bd_immediate) { if (d->bd_slen == 0) { splx(s); return (EWOULDBLOCK); } d473 2 a474 5 if (d->bd_rtout == -1) { /* User requested non-blocking I/O */ error = EWOULDBLOCK; } else error = 0; d542 18 d656 6 d884 14 d1085 3 a1087 1 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) d1089 1 a1089 1 else d1091 7 d1222 1 d1225 1 a1225 1 u_int pktlen, slen; d1227 1 d1233 10 d1244 2 d1247 1 a1247 1 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); d1249 1 a1249 1 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcpy); d1302 7 a1308 1 } a1321 8 if (d->bd_immediate) { /* * Immediate mode is set. A packet arrived so any * reads should be woken up. */ bpf_wakeup(d); } @ 1.90.2.1 log @Pullup rev 1.91-1.95 (requested by darrenr in ticket #167) Reduce bpf buffer to 32k from 1M to reduce kernel memory usage from userland binaries. Fix bpf so that select will return for a timeout. Fix the behaviour of BIOCIMMEDIATE. In bpf_mtap(), optimise the calling of bpf_filter() and catchpacket() based on whether or not the entire packet is in one mbuf. Various other bpf fixes, including PR#8674, PR#12170 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.90 2004/03/24 15:34:54 atatat Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.90 2004/03/24 15:34:54 atatat Exp $"); a116 1 static void bpf_timed_out __P((void *)); a382 2 d->bd_seesent = 1; callout_init(&d->bd_callout); a402 3 if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); d->bd_state = BPF_IDLE; a431 1 int timed_out; a442 4 if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); timed_out = (d->bd_state == BPF_TIMED_OUT); d->bd_state = BPF_IDLE; d449 1 a449 1 if (ioflag & IO_NDELAY) { a453 5 ROTATE_BUFFERS(d); break; } if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { d462 10 a471 2 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); a537 18 static void bpf_timed_out(arg) void *arg; { struct bpf_d *d = (struct bpf_d *)arg; int s; s = splnet(); if (d->bd_state == BPF_WAITING) { d->bd_state = BPF_TIMED_OUT; if (d->bd_slen != 0) bpf_wakeup(d); } splx(s); } a594 1 d->bd_ccount = 0; a633 6 s = splnet(); if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); d->bd_state = BPF_IDLE; splx(s); a828 10 bs->bs_capt = d->bd_ccount; break; } case BIOCGSTATSOLD: { struct bpf_stat_old *bs = (struct bpf_stat_old *)addr; bs->bs_recv = d->bd_rcount; bs->bs_drop = d->bd_dcount; a855 14 /* * Get "see sent packets" flag */ case BIOCGSEESENT: *(u_int *)addr = d->bd_seesent; break; /* * Set "see sent" packets flag */ case BIOCSSEESENT: d->bd_seesent = *(u_int *)addr; break; d857 4 a860 5 /* * No need to do anything special as we use IO_NDELAY in * bpfread() as an indication of whether or not to block * the read. */ d1043 1 a1043 2 if ((d->bd_hlen != 0) || (d->bd_immediate && d->bd_slen != 0)) { d1045 1 a1045 6 } else if (d->bd_state == BPF_TIMED_OUT) { if (d->bd_slen != 0) revents |= events & (POLLIN | POLLRDNORM); else revents |= events & POLLIN; } else { a1046 7 /* Start the read timeout if necessary */ if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { callout_reset(&d->bd_callout, d->bd_rtout, bpf_timed_out, d); d->bd_state = BPF_WAITING; } } a1170 1 void *(*cpfn) __P((void *, const void *, size_t)); d1173 1 a1173 1 u_int pktlen, slen, buflen; a1174 1 void *marg; a1179 10 if (pktlen == m->m_len) { cpfn = memcpy; marg = mtod(m, void *); buflen = pktlen; } else { cpfn = bpf_mcpy; marg = m; buflen = 0; } a1180 2 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL)) continue; d1182 1 a1182 1 slen = bpf_filter(d->bd_filter, marg, pktlen, buflen); d1184 1 a1184 1 catchpacket(d, marg, pktlen, slen, cpfn); a1205 2 ++d->bd_ccount; d1237 1 a1237 7 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ bpf_wakeup(d); d1251 8 @ 1.90.2.2 log @Pull up revision 1.98 (requested by atatat in ticket #391): Sysctl descriptions under net subtree (net.key not done) @ text @d1 1 a1 1 /* $NetBSD$ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD$"); d1617 1 a1617 2 CTLTYPE_NODE, "bpf", SYSCTL_DESCR("BPF options"), d1623 1 a1623 2 CTLTYPE_INT, "maxbufsize", SYSCTL_DESCR("Maximum size for data capture buffer"), @ 1.89 log @Make bpf_maxbufsize writable via sysctl, as written by Andrew Brown. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.88 2004/01/21 23:59:12 jonathan Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.88 2004/01/21 23:59:12 jonathan Exp $"); d1523 2 a1524 1 sysctl_createv(SYSCTL_PERMANENT, d1530 3 a1532 2 sysctl_createv(SYSCTL_PERMANENT, CTLTYPE_NODE, "bpf", &node, d1536 2 a1537 1 sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE, @ 1.88 log @Fix an Emacs finger-glitch (missing semicolon#). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.87 2004/01/21 22:15:16 jonathan Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.87 2004/01/21 22:15:16 jonathan Exp $"); d66 1 d94 3 a96 3 * The default read buffer size, and limit for BIOCSBLEN, is patchable. * XXX both should be made sysctl'able, and the defaults computed * dynamically based on available memory size and available mbuf clusters. d99 1 a99 1 int bpf_maxbufsize = (1024 * 1024); /* XXX set dynamically, see above */ d1497 42 @ 1.87 log @Update bpf buffer parameters, as per recent discussion on tech-net. Increase the default bpf buffer size used by naive apps that don't do BIOCSBLEN, from 8k to 32k. The former value of 8192 is too small to hold a normal jumbo Ethernet frame (circa 9k), 16k is a little small for Large-jumbo (~16k) frames supported by newer gigabit Ethernet/10Gbe, so (somewhat arbitrarily) increase the default to 32k. Increase the upper limit to which BIOSBLEN can raise bpf buffer-size drastically, to 1 Mbyte. State-of-the-art for packet capture circa 1999 was around 256k; savvy NetBSD developers now use 1 Mbyte. Note that libpcap has been updated to do binary-search on BIOCSBLEN values up to 1 Mbyte. Work is in progress to make both values sysctl'able. Source comments note that consensus on tech-net is that we should find some heuristic to set the boot-time default values dynamically, based on system memory. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.86 2003/09/22 13:00:01 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.86 2003/09/22 13:00:01 christos Exp $"); d98 1 a98 1 int bpf_maxbufsize = (1024 * 1024) /* XXX set dynamically, see above */ @ 1.86 log @- pass signo to fownsignal [ok by jd] - make urg signal handling use fownsignal - remove out of band detection in sowakeup @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.85 2003/09/21 19:17:13 jdolecek Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.85 2003/09/21 19:17:13 jdolecek Exp $"); d83 5 a87 1 # define BPF_BUFSIZE 8192 /* 4096 too small for FDDI frames */ d93 3 a95 1 * The default read buffer size is patchable. d98 1 d672 2 a673 2 if (size > BPF_MAXBUFSIZE) *(u_int *)addr = size = BPF_MAXBUFSIZE; @ 1.85 log @cleanup & uniform descriptor owner handling: * introduce fsetown(), fgetown(), fownsignal() - this sets/retrieves/signals the owner of descriptor, according to appropriate sematics of TIOCSPGRP/FIOSETOWN/SIOCSPGRP/TIOCGPGRP/FIOGETOWN/SIOCGPGRP ioctl; use these routines instead of custom code where appropriate * make every place handling TIOCSPGRP/TIOCGPGRP handle also FIOSETOWN/FIOGETOWN properly, and remove the translation of FIO[SG]OWN to TIOC[SG]PGRP in sys_ioctl() & sys_fcntl() * also remove the socket-specific hack in sys_ioctl()/sys_fcntl() and pass the ioctls down to soo_ioctl() as any other ioctl change discussed on tech-kern@@ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.84 2003/08/13 19:44:12 wrstuden Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.84 2003/08/13 19:44:12 wrstuden Exp $"); d523 1 a523 1 fownsignal(d->bd_pgid, 0, 0, NULL); @ 1.84 log @Include correct file for defopt. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.83 2003/08/07 16:32:47 agc Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.83 2003/08/07 16:32:47 agc Exp $"); a520 2 struct proc *p; d522 2 a523 6 if (d->bd_async) { if (d->bd_pgid > 0) gsignal (d->bd_pgid, SIGIO); else if (d->bd_pgid && (p = pfind (-d->bd_pgid)) != NULL) psignal (p, SIGIO); } a621 1 pid_t pgid; a858 9 /* * N.B. ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing * the equivalent of a TIOCSPGRP and hence end up here. *However* * TIOCSPGRP's arg is a process group if it's positive and a process * id if it's negative. This is exactly the opposite of what the * other two functions want! * There is code in ioctl and fcntl to make the SETOWN calls do * a TIOCSPGRP with the pgid of the process if a pid is given. */ d860 2 a861 5 pgid = *(int *)addr; if (pgid != 0) error = pgid_in_session(p, pgid); if (error == 0) d->bd_pgid = pgid; d865 2 a866 1 *(int *)addr = d->bd_pgid; @ 1.83 log @Move UCB-licensed code from 4-clause to 3-clause licence. Patches provided by Joel Baker in PR 22364, verified by myself. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82 2003/06/29 22:31:49 fvdl Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82 2003/06/29 22:31:49 fvdl Exp $"); d79 1 a79 1 #include "bpf.h" @ 1.82 log @Back out the lwp/ktrace changes. They contained a lot of colateral damage, and need to be examined and discussed more. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.79 2003/06/19 06:25:41 itojun Exp $ */ d20 1 a20 5 * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.79 2003/06/19 06:25:41 itojun Exp $"); @ 1.82.2.1 log @Apply the aborted ktrace-lwp changes to a specific branch. This is just for others to review, I'm concerned that patch fuziness may have resulted in some errant code being generated but I'll look at that later by comparing the diff from the base to the branch with the file I attempt to apply to it. This will, at the very least, put the changes in a better context for others to review them and attempt to tinker with removing passing of 'struct lwp' through the kernel. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82 2003/06/29 22:31:49 fvdl Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82 2003/06/29 22:31:49 fvdl Exp $"); d358 1 a358 1 bpfopen(dev, flag, mode, l) d362 1 a362 1 struct lwp *l; d389 1 a389 1 bpfclose(dev, flag, mode, l) d393 1 a393 1 struct lwp *l; d623 1 a623 1 bpfioctl(dev, cmd, addr, flag, l) d628 1 a628 1 struct lwp *l; d882 1 a882 1 error = pgid_in_session(l->l_proc, pgid); d1043 1 a1043 1 bpfpoll(dev, events, l) d1046 1 a1046 1 struct lwp *l; d1060 1 a1060 1 selrecord(l, &d->bd_sel); @ 1.82.2.2 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.1 2003/07/02 15:26:55 darrenr Exp $ */ d20 5 a24 1 * 3. Neither the name of the University nor the names of its contributors d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.1 2003/07/02 15:26:55 darrenr Exp $"); a69 1 #include d83 1 a83 1 #include "opt_bpf.h" d87 1 a87 5 /* * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k). */ # define BPF_BUFSIZE 32768 d93 1 a93 3 * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able. * XXX the default values should be computed dynamically based * on available memory size and available mbuf clusters. a95 1 int bpf_maxbufsize = BPF_DFLTBUFSIZE; /* XXX set dynamically, see above */ a104 3 static void bpf_deliver(struct bpf_if *, void *(*cpfn)(void *, const void *, size_t), void *, u_int, u_int, struct ifnet *); a112 1 static void bpf_timed_out __P((void *)); a378 2 d->bd_seesent = 1; callout_init(&d->bd_callout); a398 3 if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); d->bd_state = BPF_IDLE; a427 1 int timed_out; a438 4 if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); timed_out = (d->bd_state == BPF_TIMED_OUT); d->bd_state = BPF_IDLE; d445 1 a445 1 if (ioflag & IO_NDELAY) { a449 5 ROTATE_BUFFERS(d); break; } if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { d458 10 a467 2 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); d525 2 d528 6 a533 2 if (d->bd_async) fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL); a539 18 static void bpf_timed_out(arg) void *arg; { struct bpf_d *d = (struct bpf_d *)arg; int s; s = splnet(); if (d->bd_state == BPF_WAITING) { d->bd_state = BPF_TIMED_OUT; if (d->bd_slen != 0) bpf_wakeup(d); } splx(s); } a596 1 d->bd_ccount = 0; d632 1 a636 6 s = splnet(); if (d->bd_state == BPF_WAITING) callout_stop(&d->bd_callout); d->bd_state = BPF_IDLE; splx(s); d676 2 a677 2 if (size > bpf_maxbufsize) *(u_int *)addr = size = bpf_maxbufsize; a831 10 bs->bs_capt = d->bd_ccount; break; } case BIOCGSTATSOLD: { struct bpf_stat_old *bs = (struct bpf_stat_old *)addr; bs->bs_recv = d->bd_rcount; bs->bs_drop = d->bd_dcount; a858 14 /* * Get "see sent packets" flag */ case BIOCGSEESENT: *(u_int *)addr = d->bd_seesent; break; /* * Set "see sent" packets flag */ case BIOCSSEESENT: d->bd_seesent = *(u_int *)addr; break; d860 4 a863 5 /* * No need to do anything special as we use IO_NDELAY in * bpfread() as an indication of whether or not to block * the read. */ d870 9 d880 5 a884 2 case FIOSETOWN: error = fsetown(l->l_proc, &d->bd_pgid, cmd, addr); d888 1 a888 2 case FIOGETOWN: error = fgetown(l->l_proc, d->bd_pgid, cmd, addr); d1057 1 a1057 2 if ((d->bd_hlen != 0) || (d->bd_immediate && d->bd_slen != 0)) { d1059 1 a1059 6 } else if (d->bd_state == BPF_TIMED_OUT) { if (d->bd_slen != 0) revents |= events & (POLLIN | POLLRDNORM); else revents |= events & POLLIN; } else { a1060 7 /* Start the read timeout if necessary */ if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) { callout_reset(&d->bd_callout, d->bd_rtout, bpf_timed_out, d); d->bd_state = BPF_WAITING; } } a1177 56 * Dispatch a packet to all the listeners on interface bp. * * marg pointer to the packet, either a data buffer or an mbuf chain * buflen buffer length, if marg is a data buffer * cpfn a function that can copy marg into the listener's buffer * pktlen length of the packet * rcvif either NULL or the interface the packet came in on. */ static __inline void bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t), void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif) { u_int slen; struct bpf_d *d; for (d = bp->bif_dlist; d != 0; d = d->bd_next) { if (!d->bd_seesent && (rcvif == NULL)) continue; ++d->bd_rcount; slen = bpf_filter(d->bd_filter, marg, pktlen, buflen); if (slen != 0) catchpacket(d, marg, pktlen, slen, cpfn); } } /* * Incoming linkage from device drivers, when the head of the packet is in * a buffer, and the tail is in an mbuf chain. */ void bpf_mtap2(arg, data, dlen, m) caddr_t arg; void *data; u_int dlen; struct mbuf *m; { struct bpf_if *bp = (struct bpf_if *)arg; u_int pktlen; struct mbuf mb; pktlen = m_length(m) + dlen; /* * Craft on-stack mbuf suitable for passing to bpf_filter. * Note that we cut corners here; we only setup what's * absolutely needed--this mbuf should never go anywhere else. */ (void)memset(&mb, 0, sizeof(mb)); mb.m_next = m; mb.m_data = data; mb.m_len = dlen; bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif); } /* a1184 1 void *(*cpfn) __P((void *, const void *, size_t)); d1186 3 a1188 2 u_int pktlen, buflen; void *marg; d1190 3 a1192 1 pktlen = m_length(m); d1194 5 a1198 8 if (pktlen == m->m_len) { cpfn = memcpy; marg = mtod(m, void *); buflen = pktlen; } else { cpfn = bpf_mcpy; marg = m; buflen = 0; a1199 2 bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif); a1219 2 ++d->bd_ccount; d1266 1 a1266 5 /* * Call bpf_wakeup after bd_slen has been updated so that kevent(2) * will cause filt_bpfread() to be called with it adjusted. */ if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) d1268 2 a1269 3 * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. d1272 1 a1510 47 static int sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS) { int newsize, error; struct sysctlnode node; node = *rnode; node.sysctl_data = &newsize; newsize = bpf_maxbufsize; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error || newp == NULL) return (error); if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE) return (EINVAL); bpf_maxbufsize = newsize; return (0); } SYSCTL_SETUP(sysctl_net_bfp_setup, "sysctl net.bpf subtree setup") { struct sysctlnode *node; sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_NODE, "net", NULL, NULL, 0, NULL, 0, CTL_NET, CTL_EOL); node = NULL; sysctl_createv(clog, 0, NULL, &node, CTLFLAG_PERMANENT, CTLTYPE_NODE, "bpf", SYSCTL_DESCR("BPF options"), NULL, 0, NULL, 0, CTL_NET, CTL_CREATE, CTL_EOL); if (node != NULL) sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "maxbufsize", SYSCTL_DESCR("Maximum size for data capture buffer"), sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); } @ 1.82.2.3 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.2 2004/08/03 10:54:11 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.2 2004/08/03 10:54:11 skrll Exp $"); d1038 1 d1043 3 @ 1.82.2.4 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.3 2004/08/12 11:42:20 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.3 2004/08/12 11:42:20 skrll Exp $"); a68 1 #include a80 2 #include "sl.h" #include "strip.h" d108 1 a108 1 static int bpf_allocbufs(struct bpf_d *); d112 9 a120 9 static void bpf_freed(struct bpf_d *); static void bpf_ifname(struct ifnet *, struct ifreq *); static void *bpf_mcpy(void *, const void *, size_t); static int bpf_movein(struct uio *, int, int, struct mbuf **, struct sockaddr *); static void bpf_attachd(struct bpf_d *, struct bpf_if *); static void bpf_detachd(struct bpf_d *); static int bpf_setif(struct bpf_d *, struct ifreq *); static void bpf_timed_out(void *); d122 6 a127 6 bpf_wakeup(struct bpf_d *); static void catchpacket(struct bpf_d *, u_char *, u_int, u_int, void *(*)(void *, const void *, size_t)); static void reset_d(struct bpf_d *); static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); static int bpf_setdlt(struct bpf_d *, u_int); d249 1 a249 1 error = uiomove(mtod(m, void *), len, uio); d253 1 a253 1 memcpy(sockp->sa_data, mtod(m, void *), hlen); d481 1 a481 1 error = tsleep(d, PRINET|PCATCH, "bpf", d540 1 a540 1 wakeup(d); d554 1 a554 1 struct bpf_d *d = arg; d651 6 a656 1 bpfioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct lwp *l) a662 1 void *addr = arg; d721 1 a721 1 error = bpf_setf(d, addr); d736 1 a736 1 error = bpf_setf(d, addr); d744 1 a744 1 free(*p, M_DEVBUF); d800 1 a800 1 error = bpf_getdltlist(d, addr); d820 1 a820 1 bpf_ifname(d->bd_bif->bif_ifp, addr); d827 1 a827 1 error = bpf_setif(d, addr); d835 1 a835 1 struct timeval *tv = addr; d849 1 a849 1 struct timeval *tv = addr; d861 1 a861 1 struct bpf_stat *bs = addr; d871 1 a871 1 struct bpf_stat_old *bs = addr; d887 1 a887 1 struct bpf_version *bv = addr; d946 3 a948 1 bpf_setf(struct bpf_d *d, struct bpf_program *fp) d963 1 a963 1 free(old, M_DEVBUF); d971 2 a972 2 fcode = malloc(size, M_DEVBUF, M_WAITOK); if (copyin(fp->bf_insns, fcode, size) == 0 && d979 1 a979 1 free(old, M_DEVBUF); d983 1 a983 1 free(fcode, M_DEVBUF); d993 3 a995 1 bpf_setif(struct bpf_d *d, struct ifreq *ifr) d1069 3 a1071 1 bpf_ifname(struct ifnet *ifp, struct ifreq *ifr) d1073 1 d1086 4 a1089 1 bpfpoll(dev_t dev, int events, struct lwp *l) d1149 3 a1151 1 bpfkqfilter(dev_t dev, struct knote *kn) d1183 4 a1186 1 bpf_tap(void *arg, u_char *pkt, u_int pktlen) d1196 1 a1196 1 bp = arg; d1210 4 a1213 1 bpf_mcpy(void *dst_arg, const void *src_arg, size_t len) d1225 1 a1225 1 memcpy(dst, mtod(m, void *), count); d1264 5 a1268 1 bpf_mtap2(void *arg, void *data, u_int dlen, struct mbuf *m) d1270 1 a1270 1 struct bpf_if *bp = arg; d1293 3 a1295 1 bpf_mtap(void *arg, struct mbuf *m) d1297 2 a1298 2 void *(*cpfn)(void *, const void *, size_t); struct bpf_if *bp = arg; a1317 95 * We need to prepend the address family as * a four byte field. Cons up a dummy header * to pacify bpf. This is safe because bpf * will only read from the mbuf (i.e., it won't * try to free it or keep a pointer a to it). */ void bpf_mtap_af(void *arg, u_int32_t af, struct mbuf *m) { struct mbuf m0; m0.m_flags = 0; m0.m_next = m; m0.m_len = 4; m0.m_data = (char *)⁡ bpf_mtap(arg, &m0); } void bpf_mtap_et(void *arg, u_int16_t et, struct mbuf *m) { struct mbuf m0; m0.m_flags = 0; m0.m_next = m; m0.m_len = 14; m0.m_data = m0.m_dat; ((u_int32_t *)m0.m_data)[0] = 0; ((u_int32_t *)m0.m_data)[1] = 0; ((u_int32_t *)m0.m_data)[2] = 0; ((u_int16_t *)m0.m_data)[6] = et; bpf_mtap(arg, &m0); } #if NSL > 0 || NSTRIP > 0 /* * Put the SLIP pseudo-"link header" in place. * Note this M_PREPEND() should never fail, * swince we know we always have enough space * in the input buffer. */ void bpf_mtap_sl_in(void *arg, u_char *chdr, struct mbuf **m) { int s; u_char *hp; M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT); if (*m == NULL) return; hp = mtod(*m, u_char *); hp[SLX_DIR] = SLIPDIR_IN; (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN); s = splnet(); bpf_mtap(arg, *m); splx(s); m_adj(*m, SLIP_HDRLEN); } /* * Put the SLIP pseudo-"link header" in * place. The compressed header is now * at the beginning of the mbuf. */ void bpf_mtap_sl_out(void *arg, u_char *chdr, struct mbuf *m) { struct mbuf m0; u_char *hp; int s; m0.m_flags = 0; m0.m_next = m; m0.m_data = m0.m_dat; m0.m_len = SLIP_HDRLEN; hp = mtod(&m0, u_char *); hp[SLX_DIR] = SLIPDIR_OUT; (void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN); s = splnet(); bpf_mtap(arg, &m0); splx(s); m_freem(m); } #endif /* d1326 5 a1330 2 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, void *(*cpfn)(void *, const void *, size_t)) d1400 2 a1401 1 bpf_allocbufs(struct bpf_d *d) d1404 1 a1404 1 d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1407 1 a1407 1 d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT); d1422 2 a1423 1 bpf_freed(struct bpf_d *d) d1438 1 a1438 1 free(d->bd_filter, M_DEVBUF); d1448 3 a1450 1 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1462 4 a1465 1 bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, void *driverp) d1468 1 a1468 1 bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); d1473 1 a1473 1 bp->bif_driverp = driverp; d1499 2 a1500 1 bpfdetach(struct ifnet *ifp) d1541 3 a1543 1 bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen) d1569 3 a1571 1 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) d1599 3 a1601 1 bpf_setdlt(struct bpf_d *d, u_int dlt) @ 1.82.2.5 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.104 2004/08/19 20:58:23 christos Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.104 2004/08/19 20:58:23 christos Exp $"); d369 1 a369 1 bpfopen(dev, flag, mode, p) d373 1 a373 1 struct proc *p; d402 1 a402 1 bpfclose(dev, flag, mode, p) d406 1 a406 1 struct proc *p; d654 1 a654 1 bpfioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p) d929 1 a929 1 error = fsetown(p, &d->bd_pgid, cmd, addr); d934 1 a934 1 error = fgetown(p, d->bd_pgid, cmd, addr); d1078 1 a1078 1 bpfpoll(dev_t dev, int events, struct proc *p) d1098 1 a1098 1 selrecord(p, &d->bd_sel); @ 1.82.2.6 log @Fix the sync with head I botched. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.4 2004/08/25 06:58:58 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.4 2004/08/25 06:58:58 skrll Exp $"); d369 1 a369 1 bpfopen(dev, flag, mode, l) d373 1 a373 1 struct lwp *l; d402 1 a402 1 bpfclose(dev, flag, mode, l) d406 1 a406 1 struct lwp *l; d654 1 a654 1 bpfioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct lwp *l) d929 1 a929 1 error = fsetown(l->l_proc, &d->bd_pgid, cmd, addr); d934 1 a934 1 error = fgetown(l->l_proc, d->bd_pgid, cmd, addr); d1078 1 a1078 1 bpfpoll(dev_t dev, int events, struct lwp *l) d1098 1 a1098 1 selrecord(l, &d->bd_sel); @ 1.82.2.7 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.6 2004/09/21 13:36:35 skrll Exp $ */ d42 3 a44 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.6 2004/09/21 13:36:35 skrll Exp $"); a55 1 #include a57 1 #include d109 1 a109 1 LIST_HEAD(, bpf_d) bpf_list; a131 20 static int bpf_read(struct file *, off_t *, struct uio *, struct ucred *, int); static int bpf_write(struct file *, off_t *, struct uio *, struct ucred *, int); static int bpf_ioctl(struct file *, u_long, void *, struct lwp *); static int bpf_poll(struct file *, int, struct lwp *); static int bpf_close(struct file *, struct lwp *); static int bpf_kqfilter(struct file *, struct knote *); static const struct fileops bpf_fileops = { bpf_read, bpf_write, bpf_ioctl, fnullop_fcntl, bpf_poll, fbadop_stat, bpf_close, bpf_kqfilter, }; d133 6 d141 2 a142 2 bpfopen, noclose, noread, nowrite, noioctl, nostop, notty, nopoll, nommap, nokqfilter, d342 3 d354 7 a360 1 LIST_INIT(&bpf_list); d364 2 a365 1 * Open ethernet device. Clones. a375 2 struct file *fp; int error, fd; d377 9 a385 3 /* falloc() will use the descriptor for us. */ if ((error = falloc(l->l_proc, &fp, &fd)) != 0) return error; d387 2 a388 2 d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK); (void)memset(d, 0, sizeof(*d)); d393 1 a393 3 LIST_INSERT_HEAD(&bpf_list, d, bd_list); return fdclone(l, fp, fd, &bpf_fileops, d); d401 6 a406 2 static int bpf_close(struct file *fp, struct lwp *l) d408 1 a408 1 struct bpf_d *d = fp->f_data; a418 3 LIST_REMOVE(d, bd_list); free(d, M_DEVBUF); fp->f_data = NULL; d437 5 a441 3 static int bpf_read(struct file *fp, off_t *offp, struct uio *uio, struct ucred *cred, int flags) d443 1 a443 1 struct bpf_d *d = fp->f_data; d466 1 a466 1 if (fp->f_flag & FNONBLOCK) { d570 5 a574 3 static int bpf_write(struct file *fp, off_t *offp, struct uio *uio, struct ucred *cred, int flags) d576 1 a576 1 struct bpf_d *d = fp->f_data; d653 2 a654 2 static int bpf_ioctl(struct file *fp, u_long cmd, void *addr, struct lwp *l) d656 1 a656 1 struct bpf_d *d = fp->f_data; d661 1 d1077 2 a1078 2 static int bpf_poll(struct file *fp, int events, struct lwp *l) d1080 1 a1080 1 struct bpf_d *d = fp->f_data; d1137 2 a1138 2 static int bpf_kqfilter(struct file *fp, struct knote *kn) d1140 1 a1140 1 struct bpf_d *d = fp->f_data; d1504 2 d1563 4 a1566 1 int s; d1569 4 a1572 2 for (d = LIST_FIRST(&bpf_list); d != NULL; d = LIST_NEXT(d, bd_list)) { if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) { d1581 1 @ 1.82.2.8 log @Sync with HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.7 2004/12/18 09:32:50 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.7 2004/12/18 09:32:50 skrll Exp $"); d395 1 a395 1 return fdclone(l, fp, fd, flag, &bpf_fileops, d); @ 1.82.2.9 log @Sync with HEAD. Hi Perry! @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.8 2005/02/15 21:33:29 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.8 2005/02/15 21:33:29 skrll Exp $"); d1299 1 a1299 1 @ 1.82.2.10 log @Sync with HEAD. Here we go again... @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.82.2.9 2005/03/04 16:52:56 skrll Exp $ */ d42 1 a42 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.82.2.9 2005/03/04 16:52:56 skrll Exp $"); a103 12 /* * Global BPF statistics returned by net.bpf.stats sysctl. */ struct bpf_stat bpf_gstats; /* * Use a mutex to avoid a race condition between gathering the stats/peers * and opening/closing the device. */ struct simplelock bpf_slock; d160 6 a165 2 bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp, struct sockaddr *sockp) d288 3 a290 1 bpf_attachd(struct bpf_d *d, struct bpf_if *bp) d308 2 a309 1 bpf_detachd(struct bpf_d *d) d362 2 a363 1 bpfilterattach(int n) a364 3 simple_lock_init(&bpf_slock); simple_lock(&bpf_slock); a365 5 simple_unlock(&bpf_slock); bpf_gstats.bs_recv = 0; bpf_gstats.bs_drop = 0; bpf_gstats.bs_capt = 0; d373 5 a377 1 bpfopen(dev_t dev, int flag, int mode, struct lwp *l) a390 1 d->bd_pid = l->l_proc->p_pid; a392 1 simple_lock(&bpf_slock); a393 1 simple_unlock(&bpf_slock); a408 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; a416 1 simple_lock(&bpf_slock); a417 1 simple_unlock(&bpf_slock); d440 1 a440 1 struct ucred *cred, int flags) d539 2 a540 1 bpf_wakeup(struct bpf_d *d) d553 2 a554 1 bpf_timed_out(void *arg) d571 1 a571 1 struct ucred *cred, int flags) d592 1 a592 2 if (m->m_pkthdr.len > ifp->if_mtu) { m_freem(m); a593 1 } d612 2 a613 1 reset_d(struct bpf_d *d) a658 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; a1079 5 /* * Refresh the PID associated with this bpf file. */ d->bd_pid = l->l_proc->p_pid; a1178 1 ++bpf_gstats.bs_recv; d1221 1 a1221 1 void *marg, u_int pktlen, u_int buflen, struct ifnet *rcvif) a1229 1 ++bpf_gstats.bs_recv; d1393 1 a1393 1 void *(*cpfn)(void *, const void *, size_t)) a1399 1 ++bpf_gstats.bs_capt; a1425 1 ++bpf_gstats.bs_drop; d1695 1 a1695 2 static int sysctl_net_bpf_peers(SYSCTLFN_ARGS) d1697 1 a1697 70 int error, elem_count; struct bpf_d *dp; struct bpf_d_ext dpe; size_t len, needed, elem_size, out_size; char *sp; if (namelen == 1 && name[0] == CTL_QUERY) return (sysctl_query(SYSCTLFN_CALL(rnode))); if (namelen != 2) return (EINVAL); if ((error = suser(l->l_proc->p_ucred, &l->l_proc->p_acflag))) return (error); len = (oldp != NULL) ? *oldlenp : 0; sp = oldp; elem_size = name[0]; elem_count = name[1]; out_size = MIN(sizeof(dpe), elem_size); needed = 0; if (elem_size < 1 || elem_count < 0) return (EINVAL); simple_lock(&bpf_slock); LIST_FOREACH(dp, &bpf_list, bd_list) { if (len >= elem_size && elem_count > 0) { #define BPF_EXT(field) dpe.bde_ ## field = dp->bd_ ## field BPF_EXT(bufsize); BPF_EXT(promisc); BPF_EXT(promisc); BPF_EXT(state); BPF_EXT(immediate); BPF_EXT(hdrcmplt); BPF_EXT(seesent); BPF_EXT(pid); BPF_EXT(rcount); BPF_EXT(dcount); BPF_EXT(ccount); #undef BPF_EXT if (dp->bd_bif) (void)strlcpy(dpe.bde_ifname, dp->bd_bif->bif_ifp->if_xname, IFNAMSIZ - 1); else dpe.bde_ifname[0] = '\0'; error = copyout(&dpe, sp, out_size); if (error) break; sp += elem_size; len -= elem_size; } if (elem_count > 0) { needed += elem_size; if (elem_count != INT_MAX) elem_count--; } } simple_unlock(&bpf_slock); *oldlenp = needed; return (error); } SYSCTL_SETUP(sysctl_net_bpf_setup, "sysctl net.bpf subtree setup") { const struct sysctlnode *node; d1712 1 a1712 1 if (node != NULL) { a1718 14 sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "stats", SYSCTL_DESCR("BPF stats"), NULL, 0, &bpf_gstats, sizeof(bpf_gstats), CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT, CTLTYPE_STRUCT, "peers", SYSCTL_DESCR("BPF peers"), sysctl_net_bpf_peers, 0, NULL, 0, CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL); } @ 1.81 log @From OpenBSD 1.33-1.34: When using bpf(4) in immediate mode, and using kevent(2) to receive notification of packet arrival, the usermode application isn't notified until a second packet arrives. This is because KNOTE() calls filt_bpfread() before bd_slen has been updated with the newly arrived packet length, so it looks like there is no data there. Moving the bpf_wakeup() call for immediate mode to after bd_slen is set fixes it. From: wayne@@epipe.com.au in pr 3175 @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.80 2003/06/28 14:22:06 darrenr Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.80 2003/06/28 14:22:06 darrenr Exp $"); d358 1 a358 1 bpfopen(dev, flag, mode, l) d362 1 a362 1 struct lwp *l; d389 1 a389 1 bpfclose(dev, flag, mode, l) d393 1 a393 1 struct lwp *l; d623 1 a623 1 bpfioctl(dev, cmd, addr, flag, l) d628 1 a628 1 struct lwp *l; d882 1 a882 1 error = pgid_in_session(l->l_proc, pgid); d1043 1 a1043 1 bpfpoll(dev, events, l) d1046 1 a1046 1 struct lwp *l; d1060 1 a1060 1 selrecord(l, &d->bd_sel); @ 1.80 log @Pass lwp pointers throughtout the kernel, as required, so that the lwpid can be inserted into ktrace records. The general change has been to replace "struct proc *" with "struct lwp *" in various function prototypes, pass the lwp through and use l_proc to get the process pointer when needed. Bump the kernel rev up to 1.6V @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.79 2003/06/19 06:25:41 itojun Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.79 2003/06/19 06:25:41 itojun Exp $"); a1251 6 else if (d->bd_immediate) /* * Immediate mode is set. A packet arrived so any * reads should be woken up. */ bpf_wakeup(d); d1265 8 @ 1.79 log @avoid panic in malloc() under extremely low memory situation. OpenBSD problem report 2235, 2236, 2640. fix by Otto Moerbeek. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.78 2003/03/13 10:18:35 dsl Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.78 2003/03/13 10:18:35 dsl Exp $"); d358 1 a358 1 bpfopen(dev, flag, mode, p) d362 1 a362 1 struct proc *p; d389 1 a389 1 bpfclose(dev, flag, mode, p) d393 1 a393 1 struct proc *p; d623 1 a623 1 bpfioctl(dev, cmd, addr, flag, p) d628 1 a628 1 struct proc *p; d882 1 a882 1 error = pgid_in_session(p, pgid); d1043 1 a1043 1 bpfpoll(dev, events, p) d1046 1 a1046 1 struct proc *p; d1060 1 a1060 1 selrecord(p, &d->bd_sel); @ 1.78 log @Check that the process/process group id passed to TIOCSPRP is in the session of the current process. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.77 2003/02/26 06:31:12 matt Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.77 2003/02/26 06:31:12 matt Exp $"); d1281 8 a1288 2 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); @ 1.77 log @Add MBUFTRACE kernel option. Do a little mbuf rework while here. Change all uses of MGET*(*, M_WAIT, *) to m_get*(M_WAIT, *). These are not performance critical and making them call m_get saves considerable space. Add m_clget analogue of MCLGET and make corresponding change for M_WAIT uses. Modify netinet, gem, fxp, tulip, nfs to support MBUFTRACE. Begin to change netstat to use sysctl. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.76 2002/11/26 18:51:18 christos Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.76 2002/11/26 18:51:18 christos Exp $"); d632 1 d875 3 a877 2 * other two functions want! Therefore there is code in ioctl and * fcntl to negate the arg before calling here. d880 5 a884 1 d->bd_pgid = *(int *)addr; @ 1.76 log @si_ -> sel_ @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.75 2002/10/23 09:14:41 jdolecek Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.75 2002/10/23 09:14:41 jdolecek Exp $"); d224 1 a224 3 MGETHDR(m, M_WAIT, MT_DATA); if (m == 0) return (ENOBUFS); d228 1 a228 1 MCLGET(m, M_WAIT); @ 1.75 log @merge kqueue branch into -current kqueue provides a stateful and efficient event notification framework currently supported events include socket, file, directory, fifo, pipe, tty and device changes, and monitoring of processes and signals kqueue is supported by all writable filesystems in NetBSD tree (with exception of Coda) and all device drivers supporting poll(2) based on work done by Jonathan Lemon for FreeBSD initial NetBSD port done by Luke Mewburn and Jason Thorpe @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.74 2002/09/25 22:21:46 thorpej Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.74 2002/09/25 22:21:46 thorpej Exp $"); d539 1 a539 1 d->bd_sel.si_pid = 0; d1070 1 a1070 1 SLIST_REMOVE(&d->bd_sel.si_klist, kn, knote, kn_selnext); d1099 1 a1099 1 klist = &d->bd_sel.si_klist; @ 1.74 log @Don't include . @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.73 2002/09/24 03:14:43 itojun Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.73 2002/09/24 03:14:43 itojun Exp $"); d127 1 d131 1 a131 1 nostop, notty, bpfpoll, nommap, d537 1 a537 1 selwakeup(&d->bd_sel); d1061 53 @ 1.73 log @backout recent changes, for PR 18392. bpf_mtap() gets called with not-well-initialized mbuf, so we need to go through it without touching m->m_pkthdr.len and such. it's part of our bpf_mtap() API (at least today). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.72 2002/09/19 03:04:32 atatat Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.72 2002/09/19 03:04:32 atatat Exp $"); a57 1 #include @ 1.72 log @Add a missing semi-colon. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.71 2002/09/19 01:16:58 darrenr Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.71 2002/09/19 01:16:58 darrenr Exp $"); d1133 3 a1135 12 if ((m->m_flags & M_PKTHDR) != 0) { pktlen = m->m_pkthdr.len; } else { pktlen = 0; for (m0 = m; m0 != 0; m0 = m0->m_next) pktlen += m0->m_len; } if (pktlen == m->m_len) { bpf_tap(arg, mtod(m, u_char *), pktlen); return; } @ 1.71 log @For the trivial case where the packet is only in one mbuf, call bpf_tap() (idea from FreeBSD) - alternative to changing bpf_filter() to be aware of kernel calling convetion where 0 is passed as the length for mbufs. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.70 2002/09/19 00:34:00 darrenr Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.70 2002/09/19 00:34:00 darrenr Exp $"); d1134 1 a1134 1 pktlen = m->m_pkthdr.len @ 1.70 log @If M_PKTHDR is set we can use m_pkthdr.len instead of the for loop. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.69 2002/09/15 23:44:12 thorpej Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.69 2002/09/15 23:44:12 thorpej Exp $"); d1139 5 @ 1.69 log @In bpf_setdlt(), preserve the promiscuous mode setting of the descriptor. From David Young , slight change by me. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.68 2002/09/11 05:36:26 itojun Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.68 2002/09/11 05:36:26 itojun Exp $"); d1133 7 a1139 3 pktlen = 0; for (m0 = m; m0 != 0; m0 = m0->m_next) pktlen += m0->m_len; @ 1.68 log @KNF - return is not a function. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.67 2002/09/06 13:24:01 gehenna Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.67 2002/09/06 13:24:01 gehenna Exp $"); d1417 1 a1417 1 int s; d1431 1 d1435 8 @ 1.67 log @Merge the gehenna-devsw branch into the trunk. This merge changes the device switch tables from static array to dynamically generated by config(8). - All device switches is defined as a constant structure in device drivers. - The new grammer ``device-major'' is introduced to ``files''. device-major char [block ] [] - All device major numbers must be listed up in port dependent majors. by using this grammer. - Added the new naming convention. The name of the device switch must be _[bc]devsw for auto-generation of device switch tables. - The backward compatibility of loading block/character device switch by LKM framework is broken. This is necessary to convert from block/character device major to device name in runtime and vice versa. - The restriction to assign device major by LKM is completely removed. We don't need to reserve LKM entries for dynamic loading of device switch. - In compile time, device major numbers list is packed into the kernel and the LKM framework will refer it to assign device major number dynamically. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.66 2002/08/28 09:34:57 onoe Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.66 2002/08/28 09:34:57 onoe Exp $"); d1117 1 a1117 1 return(dst_arg); @ 1.66 log @Define new kernel interface bpfattach2() to register another data link type for the driver, which will be used for 802.11 drivers. Also add 2 APIs to get a list of available DLTs and use one for them. BIOCGDLTLIST (struct bpf_dltlist) BIOCSDLT (u_int) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.65 2002/06/06 23:54:47 wrstuden Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.65 2002/06/06 23:54:47 wrstuden Exp $"); a113 1 int bpfpoll __P((dev_t, int, struct proc *)); d122 12 d1321 1 a1321 3 for (cmaj = 0; cmaj <= nchrdev; cmaj++) if (cdevsw[cmaj].d_open == bpfopen) break; @ 1.65 log @defparam BPF_BUFSIZE @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.64 2002/03/23 15:55:21 darrenr Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.64 2002/03/23 15:55:21 darrenr Exp $"); d120 2 d751 20 d971 3 d1255 15 d1276 1 a1276 1 bp->bif_driverp = (struct bpf_if **)&ifp->if_bpf; d1331 1 d1337 1 a1337 1 break; d1343 1 a1343 1 * Change the data link type of a BPF instance. d1368 59 @ 1.64 log @If someone is poll'ing to write to bpf, assume that it can always be done and include POLLOUT and POLLWRNORM in the returned events flag set. Derived from FreeBSD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.63 2001/11/12 23:49:33 lukem Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.63 2001/11/12 23:49:33 lukem Exp $"); d82 4 @ 1.64.2.1 log @Add the character device switch. Replace the direct-access to devsw table with calling devsw APIs. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.64 2002/03/23 15:55:21 darrenr Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.64 2002/03/23 15:55:21 darrenr Exp $"); d110 1 a116 12 dev_type_open(bpfopen); dev_type_close(bpfclose); dev_type_read(bpfread); dev_type_write(bpfwrite); dev_type_ioctl(bpfioctl); dev_type_poll(bpfpoll); const struct cdevsw bpf_cdevsw = { bpfopen, bpfclose, bpfread, bpfwrite, bpfioctl, nostop, notty, bpfpoll, nommap, }; d1266 3 a1268 1 cmaj = cdevsw_lookup_major(&bpf_cdevsw); @ 1.64.2.2 log @catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.65 2002/06/06 23:54:47 wrstuden Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.65 2002/06/06 23:54:47 wrstuden Exp $"); a81 4 #if defined(_KERNEL_OPT) #include "bpf.h" #endif @ 1.64.2.3 log @catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.66 2002/08/28 09:34:57 onoe Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.66 2002/08/28 09:34:57 onoe Exp $"); a118 2 static int bpf_getdltlist __P((struct bpf_d *, struct bpf_dltlist *)); static int bpf_setdlt __P((struct bpf_d *, u_int)); a759 20 * Get a list of supported device parameters. */ case BIOCGDLTLIST: if (d->bd_bif == 0) error = EINVAL; else error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); break; /* * Set device parameters. */ case BIOCSDLT: if (d->bd_bif == 0) error = EINVAL; else error = bpf_setdlt(d, *(u_int *)addr); break; /* a959 3 /* skip additional entry */ if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf) continue; a1240 15 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); } /* * Attach additional dlt for a interface to bpf. dlt is the link layer type; * hdrlen is the fixed size of the link header for the specified dlt * (variable length headers not yet supported). */ void bpfattach2(ifp, dlt, hdrlen, driverp) struct ifnet *ifp; u_int dlt, hdrlen; caddr_t *driverp; { d1247 1 a1247 1 bp->bif_driverp = (struct bpf_if **)driverp; a1299 1 again: d1305 1 a1305 1 goto again; d1311 1 a1311 1 * Change the data link type of a interface. a1335 59 } /* * Get a list of available data link type of the interface. */ static int bpf_getdltlist(d, bfl) struct bpf_d *d; struct bpf_dltlist *bfl; { int n, error; struct ifnet *ifp; struct bpf_if *bp; ifp = d->bd_bif->bif_ifp; n = 0; error = 0; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp != ifp) continue; if (bfl->bfl_list != NULL) { if (n >= bfl->bfl_len) return ENOMEM; error = copyout(&bp->bif_dlt, bfl->bfl_list + n, sizeof(u_int)); } n++; } bfl->bfl_len = n; return error; } /* * Set the data link type of a BPF instance. */ static int bpf_setdlt(d, dlt) struct bpf_d *d; u_int dlt; { int s; struct ifnet *ifp; struct bpf_if *bp; if (d->bd_bif->bif_dlt == dlt) return 0; ifp = d->bd_bif->bif_ifp; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) break; } if (bp == NULL) return EINVAL; s = splnet(); bpf_detachd(d); bpf_attachd(d, bp); reset_d(d); splx(s); return 0; @ 1.63 log @add RCSIDs @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.62 2001/09/10 23:11:06 bjh21 Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD$"); d993 3 a995 1 * Return true iff the specific operation will not block indefinitely. a1004 1 int revents = 0; d1006 1 d1008 1 a1008 3 /* * An imitation of the FIONREAD ioctl code. */ d1010 3 @ 1.62 log @Add MI Econet support. This is lacking any interfaces to higher-layer protocols, and lacking any timeouts, but it basically works, doing four-way handshakes in both directions and incoming Machine Peek operations. Oh, and Econet is Acorn's ancient, proprietary 500kbit/s networking technology. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61 2001/04/13 23:30:11 thorpej Exp $ */ d44 3 @ 1.61 log @Remove the use of splimp() from the NetBSD kernel. splnet() and only splnet() is allowed for the protection of data structures used by network devices. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.60 2000/12/29 01:55:49 thorpej Exp $ */ d170 6 @ 1.61.2.1 log @Add kqueue support. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61 2001/04/13 23:30:11 thorpej Exp $ */ d511 3 a513 1 selnotify(&d->bd_sel, 0); d985 1 a985 1 * Otherwise, return false but make a note that a selnotify() must be done. a1008 53 } static void filt_bpfrdetach(struct knote *kn) { struct bpf_d *d = (void *) kn->kn_hook; int s; s = splnet(); SLIST_REMOVE(&d->bd_sel.si_klist, kn, knote, kn_selnext); splx(s); } static int filt_bpfread(struct knote *kn, long hint) { struct bpf_d *d = (void *) kn->kn_hook; kn->kn_data = d->bd_hlen; if (d->bd_immediate) kn->kn_data += d->bd_slen; return (kn->kn_data > 0); } static const struct filterops bpfread_filtops = { 1, NULL, filt_bpfrdetach, filt_bpfread }; int bpfkqfilter(dev, kn) dev_t dev; struct knote *kn; { struct bpf_d *d = &bpf_dtab[minor(dev)]; struct klist *klist; int s; switch (kn->kn_filter) { case EVFILT_READ: klist = &d->bd_sel.si_klist; kn->kn_fop = &bpfread_filtops; break; default: return (1); } kn->kn_hook = (void *) d; s = splnet(); SLIST_INSERT_HEAD(klist, kn, kn_selnext); splx(s); return (0); @ 1.61.2.2 log @Update the kqueue branch to HEAD. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.2.1 2001/09/08 03:15:37 thorpej Exp $ */ a169 6 break; case DLT_ECONET: sockp->sa_family = AF_UNSPEC; hlen = 6; align = 2; @ 1.61.2.3 log @Sync kqueue branch with -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.2.2 2001/09/13 01:16:21 thorpej Exp $ */ a43 3 #include __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.63 2001/11/12 23:49:33 lukem Exp $"); @ 1.61.2.4 log @catch up with -current on kqueue branch @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.2.3 2002/01/10 20:01:56 thorpej Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.61.2.3 2002/01/10 20:01:56 thorpej Exp $"); a82 4 #if defined(_KERNEL_OPT) #include "bpf.h" #endif d991 2 a992 4 * Return true iff the specific operation will not block indefinitely - with * the assumption that it is safe to positively acknowledge a request for the * ability to write to the BPF device. * Otherwise, return false but make a note that a selwakeup() must be done. d1001 1 a1002 1 int revents; d1004 3 a1006 1 revents = events & (POLLOUT | POLLWRNORM); a1007 3 /* * An imitation of the FIONREAD ioctl code. */ @ 1.61.2.5 log @sync kqueue branch with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.2.4 2002/06/23 17:50:20 jdolecek Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.61.2.4 2002/06/23 17:50:20 jdolecek Exp $"); a119 2 static int bpf_getdltlist __P((struct bpf_d *, struct bpf_dltlist *)); static int bpf_setdlt __P((struct bpf_d *, u_int)); a746 20 * Get a list of supported device parameters. */ case BIOCGDLTLIST: if (d->bd_bif == 0) error = EINVAL; else error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); break; /* * Set device parameters. */ case BIOCSDLT: if (d->bd_bif == 0) error = EINVAL; else error = bpf_setdlt(d, *(u_int *)addr); break; /* a946 3 /* skip additional entry */ if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf) continue; a1280 15 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); } /* * Attach additional dlt for a interface to bpf. dlt is the link layer type; * hdrlen is the fixed size of the link header for the specified dlt * (variable length headers not yet supported). */ void bpfattach2(ifp, dlt, hdrlen, driverp) struct ifnet *ifp; u_int dlt, hdrlen; caddr_t *driverp; { d1287 1 a1287 1 bp->bif_driverp = (struct bpf_if **)driverp; a1341 1 again: d1347 1 a1347 1 goto again; d1353 1 a1353 1 * Change the data link type of a interface. a1377 59 } /* * Get a list of available data link type of the interface. */ static int bpf_getdltlist(d, bfl) struct bpf_d *d; struct bpf_dltlist *bfl; { int n, error; struct ifnet *ifp; struct bpf_if *bp; ifp = d->bd_bif->bif_ifp; n = 0; error = 0; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp != ifp) continue; if (bfl->bfl_list != NULL) { if (n >= bfl->bfl_len) return ENOMEM; error = copyout(&bp->bif_dlt, bfl->bfl_list + n, sizeof(u_int)); } n++; } bfl->bfl_len = n; return error; } /* * Set the data link type of a BPF instance. */ static int bpf_setdlt(d, dlt) struct bpf_d *d; u_int dlt; { int s; struct ifnet *ifp; struct bpf_if *bp; if (d->bd_bif->bif_dlt == dlt) return 0; ifp = d->bd_bif->bif_ifp; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) break; } if (bp == NULL) return EINVAL; s = splnet(); bpf_detachd(d); bpf_attachd(d, bp); reset_d(d); splx(s); return 0; @ 1.61.2.6 log @do not need the (void *) cast for kn_hook anymore @ text @d1 1 a1 1 /* $NetBSD$ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD$"); d1053 1 a1053 1 struct bpf_d *d = kn->kn_hook; d1064 1 a1064 1 struct bpf_d *d = kn->kn_hook; d1094 1 a1094 1 kn->kn_hook = d; @ 1.61.2.7 log @sync kqueue with -current; this includes merge of gehenna-devsw branch, merge of i386 MP branch, and part of autoconf rototil work @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.2.6 2002/10/02 22:02:30 jdolecek Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.61.2.6 2002/10/02 22:02:30 jdolecek Exp $"); d58 1 d114 1 a122 13 dev_type_open(bpfopen); dev_type_close(bpfclose); dev_type_read(bpfread); dev_type_write(bpfwrite); dev_type_ioctl(bpfioctl); dev_type_poll(bpfpoll); dev_type_kqfilter(bpfkqfilter); const struct cdevsw bpf_cdevsw = { bpfopen, bpfclose, bpfread, bpfwrite, bpfioctl, nostop, notty, bpfpoll, nommap, bpfkqfilter, }; d1157 1 a1157 1 return (dst_arg); d1361 3 a1363 1 cmaj = cdevsw_lookup_major(&bpf_cdevsw); d1459 1 a1459 1 int s, error, opromisc; a1472 1 opromisc = d->bd_promisc; a1475 8 if (opromisc) { error = ifpromisc(bp->bif_ifp, 1); if (error) printf("%s: bpf_setdlt: ifpromisc failed (%d)\n", bp->bif_ifp->if_xname, error); else d->bd_promisc = 1; } @ 1.61.4.1 log @Commit my "devvp" changes to the thorpej-devvp branch. This replaces the use of dev_t in most places with a struct vnode *. This will form the basic infrastructure for real cloning device support (besides being architecurally cleaner -- it'll be good to get away from using numbers to represent objects). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61 2001/04/13 23:30:11 thorpej Exp $ */ a58 2 #include d107 1 d334 2 a335 2 bpfopen(devvp, flag, mode, p) struct vnode *devvp; d342 1 a342 1 if (minor(devvp->v_rdev) >= NBPFILTER) d348 1 a348 1 d = &bpf_dtab[minor(devvp->v_rdev)]; a355 2 devvp->v_devcookie = d; d365 2 a366 2 bpfclose(devvp, flag, mode, p) struct vnode *devvp; d371 1 a371 1 struct bpf_d *d = devvp->v_devcookie; d398 2 a399 2 bpfread(devvp, uio, ioflag) struct vnode *devvp; d403 1 a403 1 struct bpf_d *d = devvp->v_devcookie; d517 2 a518 2 bpfwrite(devvp, uio, ioflag) struct vnode *devvp; d522 1 a522 1 struct bpf_d *d = devvp->v_devcookie; d599 2 a600 2 bpfioctl(devvp, cmd, addr, flag, p) struct vnode *devvp; d606 1 a606 1 struct bpf_d *d = devvp->v_devcookie; d988 2 a989 2 bpfpoll(devvp, events, p) struct vnode *devvp; d993 1 a993 1 struct bpf_d *d = devvp->v_devcookie; @ 1.61.4.2 log @* add a VCLONED vnode flag that indicates a vnode representing a cloned device. * rename REVOKEALL to REVOKEALIAS, and add a REVOKECLONE flag, to pass to VOP_REVOKE * the revoke system call will revoke all aliases, as before, but not the clones * vdevgone is called when detaching a device, so make it use REVOKECLONE to get rid of all clones as well * clean up all uses of VOP_OPEN wrt. locking. * add a few VOPS to spec_vnops that need to do something when it's a clone vnode (access and getattr) * add a copy of the vnode vattr structure of the original 'master' vnode to the specinfo of a cloned vnode. could possibly redirect getattr to the 'master' vnode, but this has issues with revoke * add a vdev_reassignvp function that disassociates a vnode from its original device, and reassociates it with the specified dev_t. to be used by cloning devices only, in case a new minor is allocated. * change all direct references in drivers to v_devcookie and v_rdev to vdev_privdata(vp) and vdev_rdev(vp). for diagnostic purposes when debugging race conditions that still exist wrt. locking and revoking vnodes. * make the locking state of a vnode consistent when passed to d_open and d_close (unlocked). locked would be better, but has some deadlock issues @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.4.1 2001/09/07 04:45:41 thorpej Exp $ */ a341 1 dev_t rdev; d343 1 a343 3 rdev = vdev_rdev(devvp); if (minor(rdev) >= NBPFILTER) d349 1 a349 1 d = &bpf_dtab[minor(rdev)]; d357 1 a357 1 vdev_setprivdata(devvp, d); d374 1 a374 1 struct bpf_d *d; a376 2 d = vdev_privdata(devvp); d406 1 a406 1 struct bpf_d *d; a409 2 d = vdev_privdata(devvp); d525 1 a525 1 struct bpf_d *d; a530 2 d = vdev_privdata(devvp); d609 1 a609 1 struct bpf_d *d; a614 2 d = vdev_privdata(devvp); d996 3 a998 7 struct bpf_d *d; int revents; int s; revents = 0; d = vdev_privdata(devvp); s = splnet(); @ 1.61.4.3 log @Catch up with -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.61.4.2 2001/09/26 15:28:25 fvdl Exp $ */ a170 6 break; case DLT_ECONET: sockp->sa_family = AF_UNSPEC; hlen = 6; align = 2; @ 1.60 log @Fix non-blocking BPF reads, from Guy Harris, kern/11836. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.59 2000/12/12 17:55:21 thorpej Exp $ */ d236 1 a236 1 * Must be called at splimp. d374 1 a374 1 s = splimp(); d414 1 a414 1 s = splimp(); d484 1 a484 1 s = splimp(); d558 1 a558 1 * receive and drop counts. Should be called at splimp. d625 1 a625 1 s = splimp(); d689 1 a689 1 s = splimp(); d700 1 a700 1 s = splimp(); d716 1 a716 1 s = splimp(); d861 1 a861 1 s = splimp(); d877 1 a877 1 s = splimp(); d951 1 a951 1 s = splimp(); d995 1 a995 1 int s = splimp(); d1267 1 a1267 1 s = splimp(); @ 1.60.2.1 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.60 2000/12/29 01:55:49 thorpej Exp $ */ d236 1 a236 1 * Must be called at splnet. d374 1 a374 1 s = splnet(); d414 1 a414 1 s = splnet(); d484 1 a484 1 s = splnet(); d558 1 a558 1 * receive and drop counts. Should be called at splnet. d625 1 a625 1 s = splnet(); d689 1 a689 1 s = splnet(); d700 1 a700 1 s = splnet(); d716 1 a716 1 s = splnet(); d861 1 a861 1 s = splnet(); d877 1 a877 1 s = splnet(); d951 1 a951 1 s = splnet(); d995 1 a995 1 int s = splnet(); d1267 1 a1267 1 s = splnet(); @ 1.60.2.2 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.60.2.1 2001/06/21 20:07:53 nathanw Exp $ */ a169 6 break; case DLT_ECONET: sockp->sa_family = AF_UNSPEC; hlen = 6; align = 2; @ 1.60.2.3 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.60.2.2 2001/09/21 22:36:43 nathanw Exp $ */ a43 3 #include __KERNEL_RCSID(0, "$NetBSD$"); @ 1.60.2.4 log @Catch up to -current. (CVS: It's not just a program. It's an adventure!) @ text @d1 1 a1 1 /* $NetBSD$ */ d993 1 a993 3 * Return true iff the specific operation will not block indefinitely - with * the assumption that it is safe to positively acknowledge a request for the * ability to write to the BPF device. d1003 1 a1004 1 int revents; d1006 3 a1008 1 revents = events & (POLLOUT | POLLWRNORM); a1009 3 /* * An imitation of the FIONREAD ioctl code. */ @ 1.60.2.5 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.60.2.4 2002/04/01 07:48:18 nathanw Exp $ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.60.2.4 2002/04/01 07:48:18 nathanw Exp $"); a81 4 #if defined(_KERNEL_OPT) #include "bpf.h" #endif @ 1.60.2.6 log @Catch up to -current. @ text @d1 1 a1 1 /* $NetBSD$ */ d46 1 a46 1 __KERNEL_RCSID(0, "$NetBSD$"); d114 1 a119 14 static int bpf_getdltlist __P((struct bpf_d *, struct bpf_dltlist *)); static int bpf_setdlt __P((struct bpf_d *, u_int)); dev_type_open(bpfopen); dev_type_close(bpfclose); dev_type_read(bpfread); dev_type_write(bpfwrite); dev_type_ioctl(bpfioctl); dev_type_poll(bpfpoll); const struct cdevsw bpf_cdevsw = { bpfopen, bpfclose, bpfread, bpfwrite, bpfioctl, nostop, notty, bpfpoll, nommap, }; a748 20 * Get a list of supported device parameters. */ case BIOCGDLTLIST: if (d->bd_bif == 0) error = EINVAL; else error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); break; /* * Set device parameters. */ case BIOCSDLT: if (d->bd_bif == 0) error = EINVAL; else error = bpf_setdlt(d, *(u_int *)addr); break; /* a948 3 /* skip additional entry */ if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf) continue; d1081 1 a1081 1 return (dst_arg); a1229 15 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); } /* * Attach additional dlt for a interface to bpf. dlt is the link layer type; * hdrlen is the fixed size of the link header for the specified dlt * (variable length headers not yet supported). */ void bpfattach2(ifp, dlt, hdrlen, driverp) struct ifnet *ifp; u_int dlt, hdrlen; caddr_t *driverp; { d1236 1 a1236 1 bp->bif_driverp = (struct bpf_if **)driverp; d1270 3 a1272 1 cmaj = cdevsw_lookup_major(&bpf_cdevsw); a1290 1 again: d1296 1 a1296 1 goto again; d1302 1 a1302 1 * Change the data link type of a interface. a1326 68 } /* * Get a list of available data link type of the interface. */ static int bpf_getdltlist(d, bfl) struct bpf_d *d; struct bpf_dltlist *bfl; { int n, error; struct ifnet *ifp; struct bpf_if *bp; ifp = d->bd_bif->bif_ifp; n = 0; error = 0; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp != ifp) continue; if (bfl->bfl_list != NULL) { if (n >= bfl->bfl_len) return ENOMEM; error = copyout(&bp->bif_dlt, bfl->bfl_list + n, sizeof(u_int)); } n++; } bfl->bfl_len = n; return error; } /* * Set the data link type of a BPF instance. */ static int bpf_setdlt(d, dlt) struct bpf_d *d; u_int dlt; { int s, error, opromisc; struct ifnet *ifp; struct bpf_if *bp; if (d->bd_bif->bif_dlt == dlt) return 0; ifp = d->bd_bif->bif_ifp; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) break; } if (bp == NULL) return EINVAL; s = splnet(); opromisc = d->bd_promisc; bpf_detachd(d); bpf_attachd(d, bp); reset_d(d); if (opromisc) { error = ifpromisc(bp->bif_ifp, 1); if (error) printf("%s: bpf_setdlt: ifpromisc failed (%d)\n", bp->bif_ifp->if_xname, error); else d->bd_promisc = 1; } splx(s); return 0; @ 1.60.2.7 log @Catch up to -current. @ text @d58 1 @ 1.60.2.8 log @Catch up to -current @ text @a126 1 dev_type_kqfilter(bpfkqfilter); d130 1 a130 1 nostop, notty, bpfpoll, nommap, bpfkqfilter, d536 1 a536 1 selnotify(&d->bd_sel, 0); a1059 53 } static void filt_bpfrdetach(struct knote *kn) { struct bpf_d *d = kn->kn_hook; int s; s = splnet(); SLIST_REMOVE(&d->bd_sel.si_klist, kn, knote, kn_selnext); splx(s); } static int filt_bpfread(struct knote *kn, long hint) { struct bpf_d *d = kn->kn_hook; kn->kn_data = d->bd_hlen; if (d->bd_immediate) kn->kn_data += d->bd_slen; return (kn->kn_data > 0); } static const struct filterops bpfread_filtops = { 1, NULL, filt_bpfrdetach, filt_bpfread }; int bpfkqfilter(dev, kn) dev_t dev; struct knote *kn; { struct bpf_d *d = &bpf_dtab[minor(dev)]; struct klist *klist; int s; switch (kn->kn_filter) { case EVFILT_READ: klist = &d->bd_sel.si_klist; kn->kn_fop = &bpfread_filtops; break; default: return (1); } kn->kn_hook = d; s = splnet(); SLIST_INSERT_HEAD(klist, kn, kn_selnext); splx(s); return (0); @ 1.60.2.9 log @Sync with HEAD. @ text @d539 1 a539 1 d->bd_sel.sel_pid = 0; d1070 1 a1070 1 SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext); d1099 1 a1099 1 klist = &d->bd_sel.sel_klist; @ 1.59 log @Use to get the DLT_* constants. Also change bpfattach() and bpf_change_type() to take just a pointer to the ifnet, rather than a pointer to the ifnet and a pointer to a member of the ifnet (the bpf pointer). We'll let this ride on the Dec 12 1.5N version bump. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.58 2000/07/04 18:46:49 thorpej Exp $ */ d437 7 a443 2 else error = EWOULDBLOCK; /* User requested non-blocking I/O */ d1206 2 a1207 3 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) * in the driver's softc; dlt is the link layer type; hdrlen is the fixed * size of the link header (variable length headers not yet supported). @ 1.58 log @Move ifpromimsc() to if.c @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.57 2000/05/28 18:17:09 jhawk Exp $ */ d1206 1 a1206 2 bpfattach(driverp, ifp, dlt, hdrlen) caddr_t *driverp; d1216 1 a1216 1 bp->bif_driverp = (struct bpf_if **)driverp; d1285 2 a1286 2 bpf_change_type(driverp, dlt, hdrlen) caddr_t *driverp; d1292 1 a1292 1 if (bp->bif_driverp == (struct bpf_if **)driverp) @ 1.57 log @Ensure that all callers of pfind() can deal with pfind(0) returning a real procp* rather than NULL. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.56 2000/05/28 02:49:35 matt Exp $ */ a1307 52 } /* XXX This routine belongs in net/if.c. */ /* * Set/clear promiscuous mode on interface ifp based on the truth value * of pswitch. The calls are reference counted so that only the first * "on" request actually has an effect, as does the final "off" request. * Results are undefined if the "off" and "on" requests are not matched. */ int ifpromisc(ifp, pswitch) struct ifnet *ifp; int pswitch; { int pcount, ret; short flags; struct ifreq ifr; pcount = ifp->if_pcount; flags = ifp->if_flags; if (pswitch) { /* * If the device is not configured up, we cannot put it in * promiscuous mode. */ if ((ifp->if_flags & IFF_UP) == 0) return (ENETDOWN); if (ifp->if_pcount++ != 0) return (0); ifp->if_flags |= IFF_PROMISC; } else { if (--ifp->if_pcount > 0) return (0); ifp->if_flags &= ~IFF_PROMISC; /* * If the device is not configured up, we should not need to * turn off promiscuous mode (device should have turned it * off when interface went down; and will look at IFF_PROMISC * again next time interface comes up). */ if ((ifp->if_flags & IFF_UP) == 0) return (0); } memset((caddr_t)&ifr, 0, sizeof(ifr)); ifr.ifr_flags = ifp->if_flags; ret = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); /* Restore interface state if not successful */ if (ret != 0) { ifp->if_pcount = pcount; ifp->if_flags = flags; } return (ret); @ 1.57.2.1 log @Pull up revision 1.60 (requested by thorpej): Fix non-blocking BPF reads. Fixes PR kern/11836. @ text @d1 1 a1 1 /* $NetBSD$ */ d437 2 a438 7 else { if (d->bd_rtout == -1) { /* User requested non-blocking I/O */ error = EWOULDBLOCK; } else error = 0; } d1201 3 a1203 2 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the * fixed size of the link header (variable length headers not yet supported). @ 1.56 log @Fix bpf output on fddi to actually work. Make it compatible with ULTRIX and Tru64. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.55 2000/05/12 05:58:01 jonathan Exp $ */ d502 1 a502 1 else if ((p = pfind (-d->bd_pgid)) != NULL) @ 1.55 log @Make BPF_BUFSIZE overridable: 8192 is smaller than MTU of some devices. TODO: defopt, or make sysctl'able (c.f. FreeBSD). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.54 2000/04/12 04:20:47 chs Exp $ */ d166 3 a168 3 sockp->sa_family = AF_UNSPEC; /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ hlen = 24; d521 1 a521 1 static struct sockaddr dst; d531 2 a532 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, &dst); d540 1 a540 1 dst.sa_family = pseudo_AF_HDRCMPLT; d543 1 a543 1 error = (*ifp->if_output)(ifp, m, &dst, NULL); @ 1.55.2.1 log @Sync w/ netbsd-1-5-base. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.57 2000/05/28 18:17:09 jhawk Exp $ */ d166 3 a168 3 sockp->sa_family = AF_LINK; /* XXX 4(FORMAC)+6(dst)+6(src) */ hlen = 16; d502 1 a502 1 else if (d->bd_pgid && (p = pfind (-d->bd_pgid)) != NULL) d521 1 a521 1 static struct sockaddr_storage dst; d531 1 a531 2 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, (struct sockaddr *) &dst); d539 1 a539 1 dst.ss_family = pseudo_AF_HDRCMPLT; d542 1 a542 1 error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL); @ 1.54 log @remove support for sunos and ancient BSDs. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.53 2000/03/30 09:45:33 augustss Exp $ */ d80 3 a82 1 #define BPF_BUFSIZE 8192 /* 4096 too small for FDDI frames */ @ 1.53 log @Kill some more register declarations. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.52 2000/03/13 23:52:39 soren Exp $ */ a59 3 #if defined(sparc) && BSD < 199103 #include #endif a79 10 /* * Older BSDs don't have kernel malloc. */ #if BSD < 199103 extern bcopy(); static caddr_t bpf_alloc(); #include #define BPF_BUFSIZE (MCLBYTES-8) #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) #else a80 2 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) #endif a201 1 #if BSD >= 199103 a203 4 #else MCLGET(m); if (m->m_len != MCLBYTES) { #endif a210 1 #if BSD >= 199103 a211 3 #else m->m_off += align; #endif d215 1 a215 1 error = UIOMOVE(mtod(m, caddr_t), len, UIO_WRITE, uio); a219 1 #if BSD >= 199103 a220 3 #else m->m_off += hlen; #endif a381 39 * Support for SunOS, which does not have tsleep. */ #if BSD < 199103 static bpf_timeout(arg) caddr_t arg; { struct bpf_d *d = (struct bpf_d *)arg; d->bd_timedout = 1; wakeup(arg); } #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) int bpf_sleep(d) struct bpf_d *d; { int rto = d->bd_rtout; int st; if (rto != 0) { d->bd_timedout = 0; timeout(bpf_timeout, (caddr_t)d, rto); } st = sleep((caddr_t)d, PRINET|PCATCH); if (rto != 0) { if (d->bd_timedout == 0) untimeout(bpf_timeout, (caddr_t)d); else if (st == 0) return EWOULDBLOCK; } return (st != 0) ? EINTR : 0; } #else #define BPF_SLEEP tsleep #endif /* d433 1 a433 1 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", d475 1 a475 1 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); a503 1 #if BSD >= 199103 a506 7 #else if (d->bd_selproc) { selwakeup(d->bd_selproc, (int)d->bd_selcoll); d->bd_selcoll = 0; d->bd_selproc = 0; } #endif d540 1 a540 5 #if BSD >= 199103 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); #else error = (*ifp->if_output)(ifp, m, &dst); #endif a637 3 #if BSD < 199103 error = EINVAL; #else a648 1 #endif a1145 1 #if BSD >= 199103 a1146 5 #elif defined(sun) uniqtime(&hp->bh_tstamp); #else hp->bh_tstamp = time; #endif a1208 6 #if BSD < 199103 static struct bpf_if bpf_ifs[NBPFILTER]; static int bpfifno; bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0; #else a1209 1 #endif a1306 1 #if BSD >= 199103 a1357 32 #endif #if BSD < 199103 /* * Allocate some memory for bpf. This is temporary SunOS support, and * is admittedly a hack. * If resources unavailable, return 0. */ static caddr_t bpf_alloc(size, canwait) int size; int canwait; { struct mbuf *m; if ((unsigned)size > (MCLBYTES-8)) return 0; MGET(m, canwait, MT_DATA); if (m == 0) return 0; if ((unsigned)size > (MLEN-8)) { MCLGET(m); if (m->m_len != MCLBYTES) { m_freem(m); return 0; } } *mtod(m, struct mbuf **) = m; return mtod(m, caddr_t) + 8; } #endif @ 1.52 log @Fix doubled 'the's in comments. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.51 2000/02/02 09:03:41 enami Exp $ */ d129 1 a129 1 register struct uio *uio; d132 2 a133 2 register struct mbuf **mp; register struct sockaddr *sockp; d366 1 a366 1 register struct bpf_d *d; d397 2 a398 2 register struct bpf_d *d = &bpf_dtab[minor(dev)]; register int s; d426 1 a426 1 register struct bpf_d *d; d428 2 a429 2 register int rto = d->bd_rtout; register int st; d465 1 a465 1 register struct uio *uio; d468 1 a468 1 register struct bpf_d *d = &bpf_dtab[minor(dev)]; d559 1 a559 1 register struct bpf_d *d; d590 1 a590 1 register struct bpf_d *d = &bpf_dtab[minor(dev)]; d677 1 a677 1 register struct bpf_d *d = &bpf_dtab[minor(dev)]; d680 1 a680 1 register struct bpf_insn **p; d723 1 a723 1 register u_int size = *(u_int *)addr; d1064 1 a1064 1 register dev_t dev; d1068 1 a1068 1 register struct bpf_d *d = &bpf_dtab[minor(dev)]; d1070 1 a1070 1 register int s = splimp(); d1095 2 a1096 2 register u_char *pkt; register u_int pktlen; d1099 2 a1100 2 register struct bpf_d *d; register u_int slen; d1123 1 a1123 1 register size_t len; d1125 2 a1126 2 register const struct mbuf *m; register u_int count; d1178 8 a1185 8 register struct bpf_d *d; register u_char *pkt; register u_int pktlen, snaplen; register void *(*cpfn) __P((void *, const void *, size_t)); { register struct bpf_hdr *hp; register int totlen, curlen; register int hdrlen = d->bd_bif->bif_hdrlen; d1250 1 a1250 1 register struct bpf_d *d; d1266 1 a1266 1 register struct bpf_d *d; d1413 2 a1414 2 register struct ifnet *ifp; register int pswitch; d1416 2 a1417 2 register int pcount, ret; register short flags; d1465 2 a1466 2 register int size; register int canwait; d1468 1 a1468 1 register struct mbuf *m; @ 1.51 log @Revoke bpf device on detach. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.50 2000/02/02 08:36:02 enami Exp $ */ d1348 1 a1348 1 /* Nuke the the vnodes for any open instances */ @ 1.50 log @Since we are allowed to wait, no need to check the return value. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.49 2000/02/02 07:45:13 enami Exp $ */ d57 1 d1340 24 @ 1.49 log @Remove duplicated forward declarations. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.48 2000/01/31 23:06:12 thorpej Exp $ */ a948 2 if (fcode == 0) return (ENOMEM); d1251 1 a1252 3 if (d->bd_fbuf == 0) return (ENOBUFS); a1253 4 if (d->bd_sbuf == 0) { free(d->bd_fbuf, M_DEVBUF); return (ENOBUFS); } @ 1.48 log @Implement bpfdetach(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.47 1999/05/11 02:11:08 thorpej Exp $ */ a110 2 static int bpf_allocbufs __P((struct bpf_d *)); static void bpf_freed __P((struct bpf_d *)); a111 1 static void bpf_ifname __P((struct ifnet *, struct ifreq *)); @ 1.47 log @* Add the ability to change the data link type on the fly. * Define two more data link types: NetBSD PPP-over-serial and NetBSD PPP-over-Ethernet. (Different PPP encaps have different header formats!) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.46 1998/12/04 11:04:37 bouyer Exp $ */ d1340 19 @ 1.47.2.1 log @Update thorpej_scsipi to -current as of a month ago @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.58 2000/07/04 18:46:49 thorpej Exp $ */ a56 1 #include d59 3 d82 12 a93 2 #ifndef BPF_BUFSIZE # define BPF_BUFSIZE 8192 /* 4096 too small for FDDI frames */ d111 2 d115 1 d131 1 a131 1 struct uio *uio; d134 2 a135 2 struct mbuf **mp; struct sockaddr *sockp; d181 3 a183 3 sockp->sa_family = AF_LINK; /* XXX 4(FORMAC)+6(dst)+6(src) */ hlen = 16; d219 1 d222 4 d233 1 d235 3 d241 1 a241 1 error = uiomove(mtod(m, caddr_t), len, uio); d246 1 d248 3 d368 1 a368 1 struct bpf_d *d; d399 2 a400 2 struct bpf_d *d = &bpf_dtab[minor(dev)]; int s; d412 39 d467 1 a467 1 struct uio *uio; d470 1 a470 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d502 1 a502 1 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d544 1 a544 1 error = uiomove(d->bd_hbuf, d->bd_hlen, uio); d561 1 a561 1 struct bpf_d *d; d569 1 a569 1 else if (d->bd_pgid && (p = pfind (-d->bd_pgid)) != NULL) d573 1 d577 7 d592 1 a592 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d596 1 a596 1 static struct sockaddr_storage dst; d606 1 a606 2 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, (struct sockaddr *) &dst); d614 1 a614 1 dst.ss_family = pseudo_AF_HDRCMPLT; d617 5 a621 1 error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL); d679 1 a679 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d682 1 a682 1 struct bpf_insn **p; d719 3 d725 1 a725 1 u_int size = *(u_int *)addr; d733 1 d952 2 d1068 1 a1068 1 dev_t dev; d1072 1 a1072 1 struct bpf_d *d = &bpf_dtab[minor(dev)]; d1074 1 a1074 1 int s = splimp(); d1099 2 a1100 2 u_char *pkt; u_int pktlen; d1103 2 a1104 2 struct bpf_d *d; u_int slen; d1127 1 a1127 1 size_t len; d1129 2 a1130 2 const struct mbuf *m; u_int count; d1182 8 a1189 8 struct bpf_d *d; u_char *pkt; u_int pktlen, snaplen; void *(*cpfn) __P((void *, const void *, size_t)); { struct bpf_hdr *hp; int totlen, curlen; int hdrlen = d->bd_bif->bif_hdrlen; d1233 1 d1235 5 d1254 1 a1254 1 struct bpf_d *d; d1256 3 a1259 1 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); d1261 4 d1276 1 a1276 1 struct bpf_d *d; d1308 6 d1315 1 a1342 43 * Remove an interface from bpf. */ void bpfdetach(ifp) struct ifnet *ifp; { struct bpf_if *bp, **pbp; struct bpf_d *d; int i, s, cmaj; /* locate the major number */ for (cmaj = 0; cmaj <= nchrdev; cmaj++) if (cdevsw[cmaj].d_open == bpfopen) break; /* Nuke the vnodes for any open instances */ for (i = 0; i < NBPFILTER; ++i) { d = &bpf_dtab[i]; if (!D_ISFREE(d) && d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) { /* * Detach the descriptor from an interface now. * It will be free'ed later by close routine. */ s = splimp(); d->bd_promisc = 0; /* we can't touch device. */ bpf_detachd(d); splx(s); vdevgone(cmaj, i, i, VCHR); } } for (bp = bpf_iflist, pbp = &bpf_iflist; bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) { if (bp->bif_ifp == ifp) { *pbp = bp->bif_next; free(bp, M_DEVBUF); break; } } } /* d1369 85 @ 1.47.2.2 log @Sync with HEAD (for UBC fixes). @ text @d1 1 a1 1 /* $NetBSD$ */ d1206 2 a1207 1 bpfattach(ifp, dlt, hdrlen) d1217 1 a1217 1 bp->bif_driverp = (struct bpf_if **)&ifp->if_bpf; d1286 2 a1287 2 bpf_change_type(ifp, dlt, hdrlen) struct ifnet *ifp; d1293 1 a1293 1 if (bp->bif_driverp == (struct bpf_if **)&ifp->if_bpf) @ 1.47.2.3 log @Sync with HEAD @ text @d437 2 a438 7 else { if (d->bd_rtout == -1) { /* User requested non-blocking I/O */ error = EWOULDBLOCK; } else error = 0; } d1201 3 a1203 2 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the * fixed size of the link header (variable length headers not yet supported). @ 1.47.2.4 log @Sync with HEAD @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.47.2.3 2001/01/05 17:36:48 bouyer Exp $ */ d236 1 a236 1 * Must be called at splnet. d374 1 a374 1 s = splnet(); d414 1 a414 1 s = splnet(); d484 1 a484 1 s = splnet(); d558 1 a558 1 * receive and drop counts. Should be called at splnet. d625 1 a625 1 s = splnet(); d689 1 a689 1 s = splnet(); d700 1 a700 1 s = splnet(); d716 1 a716 1 s = splnet(); d861 1 a861 1 s = splnet(); d877 1 a877 1 s = splnet(); d951 1 a951 1 s = splnet(); d995 1 a995 1 int s = splnet(); d1267 1 a1267 1 s = splnet(); @ 1.46 log @Init the decriptors at boot time rather than at interface attach time. Now that we have pcmcia hot-plug, it's not the same. Fixes kern/3189. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.45 1998/11/05 22:50:15 jonathan Exp $ */ d1340 28 @ 1.46.6.1 log @Sync w/ -current. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.47 1999/05/11 02:11:08 thorpej Exp $ */ a1339 28 } /* * Change the data link type of a BPF instance. */ void bpf_change_type(driverp, dlt, hdrlen) caddr_t *driverp; u_int dlt, hdrlen; { struct bpf_if *bp; for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) { if (bp->bif_driverp == (struct bpf_if **)driverp) break; } if (bp == NULL) panic("bpf_change_type"); bp->bif_dlt = dlt; /* * Compute the length of the bpf header. This is not necessarily * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for * performance reasons and to alleviate alignment restrictions). */ bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; @ 1.46.2.1 log @The beginnings of interface detach support. Still some bugs, but mostly works for me. This work was originally by Bill Studenmund, and cleaned up by me. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.46 1998/12/04 11:04:37 bouyer Exp $ */ a1321 1 if_addref(ifp); @ 1.45 log @Increase compiled-in default bpf buffer size from 4096 to 8192. (the libpcap API provides no way to resize the inkernel buffe,r and 4096 is too small to capture maximum-sized FDDI frames.) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.44 1998/08/18 06:32:13 thorpej Exp $ */ a109 13 #if BSD >= 199207 || NetBSD0_9 >= 2 /* * bpfilterattach() is called at boot time in new systems. We do * nothing here since old systems will not call this. */ /* ARGSUSED */ void bpfilterattach(n) int n; { } #endif d340 17 a1307 1 int i; a1335 7 /* * Mark all the descriptors free if this hasn't been done. */ if (!D_ISFREE(&bpf_dtab[0])) for (i = 0; i < NBPFILTER; ++i) D_MARKFREE(&bpf_dtab[i]); @ 1.44 log @Add some braces to make egcs happy (ambiguous else warning). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.43 1998/08/06 04:37:57 perry Exp $ */ d92 1 a92 1 #define BPF_BUFSIZE 4096 @ 1.43 log @Sigh. "consts in prototypes can be quite a drag..." fix last two fixes one more time, this time dealing with ugly prototype issues, including the fact that the bcopy returns nothing, but memcpy returns a void *. Never mind that we don't use it... @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.42 1998/08/06 04:25:55 perry Exp $ */ d562 1 a562 1 if (d->bd_async) d567 1 d1075 1 a1075 1 if (events & (POLLIN | POLLRDNORM)) d1080 1 @ 1.42 log @Fix botched prototype decl in last fix. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.41 1998/08/06 04:24:25 perry Exp $ */ d129 1 a129 1 static void bpf_mcpy __P((void *, const void *, size_t)); d139 1 a139 1 void (*)(const void *, void *, size_t))); d1117 1 a1117 1 static void d1138 1 d1179 1 a1179 1 register void (*cpfn) __P((const void *, void *, size_t)); @ 1.41 log @Convert bcopy,bzero to memcpy,memset This was semi-nontrivial, since a function pointer to bcopy gets used in this file. Note #1: The catchpacket routine, which takes a function pointer to bpf_mcpy or memcpy, should probably be converted to take a flag that just says which is used, so memcpy can be inlined. Note #2: The code is heavily #ifdef'ed to run on older operating systems. We probably want to clean that cruft out, unless someone is planning a new release of the code at LBL (doubtful.) @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.40 1998/04/30 00:08:19 thorpej Exp $ */ d129 1 a129 1 static void bpf_mcpy __P((const void *, void *, size_t)); @ 1.40 log @Implement two new BPF ioctls: BPFGHDRCMPLT and BPFSHDRCMPLT, to get/set the "header already complete" flag. This allows BPF writers to spoof layer 2 source addresses (providing the layer 2 in use supports it) in applications where this is necessary. From Greg Smith . @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.39 1998/03/01 02:25:04 fvdl Exp $ */ d129 1 a129 1 static void bpf_mcopy __P((const void *, void *, size_t)); d258 1 a258 1 bcopy(mtod(m, caddr_t), sockp->sa_data, hlen); d377 1 a377 1 bzero((char *)d, sizeof(*d)); d1052 1 a1052 1 bcopy(ifp->if_xname, ifr->ifr_name, IFNAMSIZ); d1109 1 a1109 1 catchpacket(d, pkt, pktlen, slen, bcopy); d1118 2 a1119 1 bpf_mcopy(src_arg, dst_arg, len) a1120 1 void *dst_arg; d1131 1 a1131 1 panic("bpf_mcopy"); d1133 1 a1133 1 bcopy(mtod(m, caddr_t), (caddr_t)dst, count); d1161 1 a1161 1 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); d1169 2 a1170 2 * transfer. bcopy is passed in to copy contiguous chunks, while * bpf_mcopy is passed in to copy mbuf chains. In the latter case, d1238 1 a1238 1 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); d1385 1 a1385 1 bzero((caddr_t)&ifr, sizeof(ifr)); @ 1.39 log @Merge with Lite2 + local changes @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.38 1997/10/12 16:35:10 mycroft Exp $ */ d608 3 d662 2 d878 7 @ 1.38 log @Do *not* free the mbuf chain we just created. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.37 1997/10/09 18:58:08 christos Exp $ */ d40 1 a40 1 * @@(#)bpf.c 8.2 (Berkeley) 3/28/94 @ 1.37 log @GC bd_sig @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.36 1997/10/09 18:17:19 christos Exp $ */ d268 3 a270 1 bad: @ 1.36 log @Sync with bpf-1.2a1 - whitespace - add rcsid; our sccsid is newer than the one on 1.2a1. - change prototype to add mtu - change size_t to u_int for consistency. - add alignment stuff in bpf_movein - add more consistency checks bpf_movein - use one uiomove and then bcopy the data in bpf_movein - update the comment for the panic when ifpromisc fails. - separate the case when we have non blocking I/O and no data and return EWOULDBLOCK - check for other errors and return them - pass the mtu to bpf_movein - Add the BPF_KERN_FILTER junk, just so that we keep up with the code - remove BIOCSRSIG, BIOCGRSIG; SIGIO does this well. - don't add the SIOCGIFADDR stuff (it is bogus) - Check for malloc return for consistency. - comment should say poll - change formatting to match the current code. - save and restore the pcount and flags in case we fail to set the interface into promiscuous mode. - fix spelling typo. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.35 1997/03/17 06:45:20 scottr Exp $ */ a376 1 d->bd_sig = SIGIO; d560 1 a560 1 if (d->bd_async && d->bd_sig) d562 1 a562 1 gsignal (d->bd_pgid, d->bd_sig); d564 1 a564 1 psignal (p, d->bd_sig); @ 1.35 log @if_arc.h is in net, not netinet. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.34 1997/03/15 18:12:18 is Exp $ */ d9 1 a9 1 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence d41 2 d130 1 a130 1 static int bpf_movein __P((struct uio *, int, d138 1 a138 1 static void catchpacket __P((struct bpf_d *, u_char *, size_t, size_t, d143 1 a143 1 bpf_movein(uio, linktype, mp, sockp) d146 1 d154 1 d170 1 d176 1 d182 1 d184 1 d190 1 d197 1 d203 1 d211 13 a223 1 if ((unsigned)len > MCLBYTES) d227 2 d231 1 a231 2 if (len > MHLEN) { d243 14 a256 5 m->m_len = len; *mp = m; /* * Make room for link header. */ d258 1 a258 1 m->m_len -= hlen; d264 1 a264 3 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); if (error) goto bad; d266 2 a267 3 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); if (!error) return (0); d313 8 a320 1 error = ifpromisc(bp->bif_ifp, 0); a321 5 /* * Something is really wrong if we were able to put * the driver into promiscuous mode, but can't * take it out. */ d483 5 a487 1 if (d->bd_immediate && d->bd_slen != 0) { d526 2 d545 1 a546 1 d600 1 a600 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst); d639 5 d670 3 d732 30 a898 16 case BIOCSRSIG: /* Set receive signal */ { u_int sig; sig = *(u_int *)addr; if (sig >= NSIG) error = EINVAL; else d->bd_sig = sig; break; } case BIOCGRSIG: *(u_int *)addr = d->bd_sig; break; d934 2 d1043 1 a1043 1 * Support for select() system call d1085 1 a1085 1 register size_t slen; d1137 1 a1137 1 size_t pktlen, slen; d1164 1 a1164 1 register size_t pktlen, snaplen; d1340 2 a1341 2 struct ifnet *ifp; int pswitch; d1343 2 d1347 2 d1372 1 d1374 7 a1380 1 return ((*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr)); d1388 1 a1388 1 * If resources unavaiable, return 0. @ 1.35.4.1 log @Update marc-pcmcia branch from trunk. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.38 1997/10/12 16:35:10 mycroft Exp $ */ d9 1 a9 1 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence a40 2 * static char rcsid[] = * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp "; d128 1 a128 1 static int bpf_movein __P((struct uio *, int, int, d136 1 a136 1 static void catchpacket __P((struct bpf_d *, u_char *, u_int, u_int, d141 1 a141 1 bpf_movein(uio, linktype, mtu, mp, sockp) a143 1 int mtu; a150 1 int align; a165 1 align = 0; a170 1 align = 0; a175 1 /* 6(dst)+6(src)+2(type) */ a176 1 align = 2; a181 1 align = 5; a187 1 align = 0; a192 1 align = 0; d200 1 a200 13 /* * If there aren't enough bytes for a link level header or the * packet length exceeds the interface mtu, return an error. */ if (len < hlen || len - hlen > mtu) return (EMSGSIZE); /* * XXX Avoid complicated buffer chaining --- * bail if it won't fit in a single mbuf. * (Take into account possible alignment bytes) */ if ((unsigned)len > MCLBYTES - align) a203 2 if (m == 0) return (ENOBUFS); d206 2 a207 1 if (len > MHLEN - align) { d219 5 a223 14 /* Insure the data is properly aligned */ if (align > 0) { #if BSD >= 199103 m->m_data += align; #else m->m_off += align; #endif m->m_len -= align; } error = UIOMOVE(mtod(m, caddr_t), len, UIO_WRITE, uio); if (error) goto bad; d225 1 a225 1 bcopy(mtod(m, caddr_t), sockp->sa_data, hlen); d231 3 a233 1 len -= hlen; d235 4 a238 5 m->m_len = len; *mp = m; return (0); bad: d283 1 a283 8 /* * Take device out of promiscuous mode. Since we were * able to enter promiscuous mode, we should be able * to turn it off. But we can get an error if * the interface was configured down, so only panic * if we don't get an unexpected error. */ error = ifpromisc(bp->bif_ifp, 0); d285 5 d345 1 d451 1 a451 5 if (d->bd_immediate) { if (d->bd_slen == 0) { splx(s); return (EWOULDBLOCK); } a489 2 if (error != 0) goto done; a506 1 done: d508 1 d523 1 a523 1 if (d->bd_async) d525 1 a525 1 gsignal (d->bd_pgid, SIGIO); d527 1 a527 1 psignal (p, SIGIO); d562 1 a562 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m, &dst); a600 5 #ifdef BPF_KERN_FILTER extern struct bpf_insn *bpf_tcp_filter; extern struct bpf_insn *bpf_udp_filter; #endif a626 3 #ifdef BPF_KERN_FILTER register struct bpf_insn **p; #endif a685 30 #ifdef BPF_KERN_FILTER /* * Set TCP or UDP reject filter. */ case BIOCSTCPF: case BIOCSUDPF: if (!suser()) { error = EPERM; break; } /* Validate and store filter */ error = bpf_setf(d, (struct bpf_program *)addr); /* Free possible old filter */ if (cmd == BIOCSTCPF) p = &bpf_tcp_filter; else p = &bpf_udp_filter; if (*p != NULL) free((caddr_t)*p, M_DEVBUF); /* Steal new filter (noop if error) */ s = splimp(); *p = d->bd_filter; d->bd_filter = NULL; splx(s); break; #endif d823 16 a873 2 if (fcode == 0) return (ENOMEM); d981 1 a981 1 * Support for poll() system call d1023 1 a1023 1 register u_int slen; d1075 1 a1075 1 u_int pktlen, slen; d1102 1 a1102 1 register u_int pktlen, snaplen; d1278 2 a1279 2 register struct ifnet *ifp; register int pswitch; a1280 2 register int pcount, ret; register short flags; a1282 2 pcount = ifp->if_pcount; flags = ifp->if_flags; a1305 1 bzero((caddr_t)&ifr, sizeof(ifr)); d1307 1 a1307 7 ret = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); /* Restore interface state if not successful */ if (ret != 0) { ifp->if_pcount = pcount; ifp->if_flags = flags; } return (ret); d1315 1 a1315 1 * If resources unavailable, return 0. @ 1.34 log @New ARP system, supports IPv4 over any hardware link. Some of the stuff (e.g., rarpd, bootpd, dhcpd etc., libsa) still will only support Ethernet. Tcpdump itself should be ok, but libpcap needs lot of work. For the detailed change history, look at the commit log entries for the is-newarp branch. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $ */ d74 1 a77 1 #include @ 1.33 log @Don't let the read timeout get inadvertently rounded down to 0. From John Hawkinson , PR #2531. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.32 1996/10/13 02:10:56 christos Exp $ */ d74 2 d78 1 a78 1 #include @ 1.32 log @backout previous kprintf change @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.31 1996/10/10 22:59:41 christos Exp $ */ d749 2 @ 1.32.4.1 log @Snapshot of new ARP code. Our old ARP code was hardwired for 6-byte length medium addresses, while the protocol is designed for any size. This snapshot contains a first hack at getting rid of Ethernet specific data structures. The ep driver is updated (and tested on the PCI bus), the iy and fpa drivers have been updated, but not real life tested yet. If you want to test this with other drivers, you have to update them first yourself, and probably tag the relevant directories. Better contact me if you want to do this. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.32 1996/10/13 02:10:56 christos Exp $ */ a72 2 #include @ 1.32.4.2 log @netinet/if_ether.h -> netinet/if_inarp.h @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.32.4.1 1997/02/07 18:06:53 is Exp $ */ d78 1 a78 1 #include @ 1.32.4.3 log @Merge in changes from The Trunk @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.32.4.2 1997/03/09 20:58:59 is Exp $ */ a750 2 if ((d->bd_rtout == 0) && (tv->tv_usec != 0)) d->bd_rtout = 1; @ 1.31 log @- printf -> kprintf, sprintf -> ksprintf @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.30 1996/09/07 12:41:25 mycroft Exp $ */ d1260 1 a1260 1 kprintf("bpf: %s attached\n", ifp->if_xname); @ 1.30 log @Implement poll(2). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.29 1996/06/14 22:21:54 cgd Exp $ */ d1260 1 a1260 1 printf("bpf: %s attached\n", ifp->if_xname); @ 1.29 log @avoid unnecessary checks of m_get/MGET/etc.'s return values. When they're called with M_WAIT, they are defined to never return NULL. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.28 1996/05/22 13:41:54 mycroft Exp $ */ d65 4 a73 2 #include a76 1 #include d131 1 a131 3 #if BSD >= 199103 int bpfselect __P((dev_t, int, struct proc *)); #endif a976 16 * The new select interface passes down the proc pointer; the old select * stubs had to grab it out of the user struct. This glue allows either case. */ #if BSD >= 199103 #define bpf_select bpfselect #else int bpfselect(dev, rw) register dev_t dev; int rw; { return (bpf_select(dev, rw, u.u_procp)); } #endif /* d983 1 a983 1 bpf_select(dev, rw, p) d985 1 a985 1 int rw; d988 3 a990 2 register struct bpf_d *d; register int s; a991 2 if (rw != FREAD) return (0); d995 5 a999 1 d = &bpf_dtab[minor(dev)]; a1000 23 s = splimp(); if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) { /* * There is data waiting. */ splx(s); return (1); } #if BSD >= 199103 selrecord(p, &d->bd_sel); #else /* * No data ready. If there's already a select() waiting on this * minor device then this is a collision. This shouldn't happen * because minors really should not be shared, but if a process * forks while one of these is open, it is possible that both * processes could select on the same descriptor. */ if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait) d->bd_selcoll = 1; else d->bd_selproc = p; #endif d1002 1 a1002 1 return (0); @ 1.28 log @Remove duplicate definition of bpf_setif(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.27 1996/05/07 05:26:02 thorpej Exp $ */ a202 2 if (m == 0) return (ENOBUFS); @ 1.27 log @Kill a couple of unnecessary calls to strlen(). @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.26 1996/05/07 02:40:22 thorpej Exp $ */ a128 1 static int bpf_setif __P((struct bpf_d *, struct ifreq *)); @ 1.26 log @Changed struct ifnet to have a pointer to the softc of the underlying device and a printable "external name" (name + unit number), thus eliminating if_name and if_unit. Updated interface to (*if_watchdog)() and (*if_reset)() to take a struct ifnet *, rather than a unit number. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.25 1996/03/30 21:57:30 christos Exp $ */ a932 1 strlen(ifp->if_xname) != strlen(ifr->ifr_name) || @ 1.25 log @Eliminate need for and remove net_conf.h @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.24 1996/02/13 21:59:53 christos Exp $ */ d901 1 a901 1 int unit, s, error; d904 3 a906 4 * Separate string into name part and unit number. Put a null * byte at the end of the name part, and compute the number. * If the a unit number is unspecified, the default is 0, * as initialized above. XXX This should be common code. d908 1 a908 1 unit = 0; d910 13 a922 8 cp[sizeof(ifr->ifr_name) - 1] = '\0'; while (*cp++) { if (*cp >= '0' && *cp <= '9') { unit = *cp - '0'; *cp++ = '\0'; while (*cp) unit = 10 * unit + *cp++ - '0'; break; d925 1 d932 3 a934 2 if (ifp == 0 || unit != ifp->if_unit || strcmp(ifp->if_name, ifr->ifr_name) != 0) d970 1 a970 2 * Convert an interface name plus unit number of an ifp to a single * name which is returned in the ifr. a976 2 char *s = ifp->if_name; char *d = ifr->ifr_name; d978 1 a978 3 while ((*d++ = *s++) != '\0') continue; sprintf(d, "%d", ifp->if_unit); d1301 1 a1301 1 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); @ 1.24 log @Net prototypes @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.23 1995/09/27 18:30:37 thorpej Exp $ */ d54 1 a68 1 #include @ 1.23 log @Enhancements to the bpf from Stu Grossman : * grok FIONBIO, FIOASYNC, and TIOC{G,S}PGRP * add BIOC{G,S}RSIG; get/set the signal to be delivered to the process or process group upon packet reception. Defaults to SIGIO. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.22 1995/08/13 04:15:38 mycroft Exp $ */ d68 1 d126 3 a128 1 struct mbuf **, struct sockaddr *)); d131 3 d136 2 a137 2 static void catchpacket __P((struct bpf_d *, u_char *, size_t, size_t, void (*)(const void *, void *, size_t))); d326 1 a326 1 bpfopen(dev, flag) d329 2 d358 1 a358 1 bpfclose(dev, flag) d361 2 d430 1 a430 1 bpfread(dev, uio) d433 1 d528 1 a528 1 else if (p = pfind (-d->bd_pgid)) d545 1 a545 1 bpfwrite(dev, uio) d548 1 d620 1 a620 1 bpfioctl(dev, cmd, addr, flag) d625 1 d975 1 a975 1 while (*d++ = *s++) d977 1 a977 3 /* XXX Assume that unit number is less than 10. */ *d++ = ifp->if_unit + '0'; *d = '\0'; @ 1.22 log @Don't pass through SIOCGIFADDR, per Steve McCanne. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.21 1995/08/12 23:59:17 mycroft Exp $ */ d339 1 d451 5 a455 2 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); d511 2 d514 6 d782 44 @ 1.21 log @splnet --> splsoftnet @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.20 1995/07/23 16:29:47 mycroft Exp $ */ a580 1 * SIOCGIFADDR Get interface address - convenient hook to driver. a624 13 break; } case SIOCGIFADDR: { struct ifnet *ifp; if (d->bd_bif == 0) error = EINVAL; else { ifp = d->bd_bif->bif_ifp; error = (*ifp->if_ioctl)(ifp, cmd, addr); } @ 1.20 log @For outgoing packets, always allocate a header mbuf and fill it in. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.19 1995/04/22 13:26:20 cgd Exp $ */ d547 1 a547 1 s = splnet(); @ 1.19 log @copy routines should take size_t lengths for prototype consistency. don't assume that tick is >= 1000; loses badly on alpha (div. by zero) only try unaligned copies if NetBSD's UNALIGNED_ACCESS symbol is defined. various misc type size cleanups, mostly short -> int16_t. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.18 1995/03/22 16:08:32 mycroft Exp $ */ d125 1 a125 1 struct mbuf **, struct sockaddr *, int *)); d135 1 a135 1 bpf_movein(uio, linktype, mp, sockp, datlen) d137 1 a137 1 int linktype, *datlen; a193 1 *datlen = len - hlen; d197 1 a197 1 MGET(m, M_WAIT, MT_DATA); d200 4 a203 1 if (len > MLEN) { a530 1 int datlen; d540 1 a540 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); d544 1 a544 1 if (datlen > ifp->if_mtu) @ 1.18 log @Fix panic when an interface in promiscuous mode goes down and the BPF user tries to turn off promiscuous mode. From Lon Willett. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.17 1995/02/23 07:19:49 glass Exp $ */ d123 1 a123 1 static void bpf_mcopy __P((const void *, void *, u_int)); d130 2 a131 2 static void catchpacket __P((struct bpf_d *, u_char *, u_int, u_int, void (*)(const void *, void *, u_int))); a737 1 u_long msec; d739 2 a740 7 /* Compute number of milliseconds. */ msec = tv->tv_sec * 1000 + tv->tv_usec / 1000; /* Scale milliseconds to ticks. Assume hard clock has millisecond or greater resolution (i.e. tick >= 1000). For 10ms hardclock, tick/1000 = 10, so rtout<-msec/10. */ d->bd_rtout = msec / (tick / 1000); a749 1 u_long msec = d->bd_rtout; d751 2 a752 3 msec *= tick / 1000; tv->tv_sec = msec / 1000; tv->tv_usec = msec % 1000; d1005 1 a1005 1 register u_int slen; d1028 1 a1028 1 register u_int len; d1057 1 a1057 1 u_int pktlen, slen; d1084 2 a1085 2 register u_int pktlen, snaplen; register void (*cpfn) __P((const void *, void *, u_int)); @ 1.17 log @preliminary arcnet support. uses lame but RFC address resolution @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.16 1994/10/30 21:48:43 cgd Exp $ */ d274 2 d277 2 a278 1 if (ifpromisc(bp->bif_ifp, 0)) a1271 6 /* * If the device is not configured up, we cannot put it in * promiscuous mode. */ if ((ifp->if_flags & IFF_UP) == 0) return (ENETDOWN); d1274 6 d1287 8 @ 1.16 log @be more careful with types, also pull in headers where necessary. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.15 1994/07/15 22:29:32 cgd Exp $ */ d72 1 d171 5 @ 1.15 log @don't use inline, use __inline, like cdefs intends (so it can kill it if nongcc @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.14 1994/06/29 21:23:15 cgd Exp $ */ d589 1 a589 1 int cmd; @ 1.14 log @this is what cdefs.h is for @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.13 1994/06/29 06:35:52 cgd Exp $ */ d127 1 a127 1 static inline void d492 1 a492 1 static inline void @ 1.14.2.1 log @updates from trunk. basically, C language errors. @ text @d1 1 a1 1 /* $NetBSD: bpf.c,v 1.14 1994/06/29 21:23:15 cgd Exp $ */ d127 1 a127 1 static __inline void d492 1 a492 1 static __inline void @ 1.13 log @New RCS ID's, take two. they're more aesthecially pleasant, and use 'NetBSD' @ text @d1 1 a1 1 /* $NetBSD$ */ a43 6 #ifndef __GNUC__ #define inline #else #define inline __inline #endif @ 1.12 log @Update to 4.4-Lite networking code, with a few local changes. @ text @d1 2 d40 1 a40 2 * from: @@(#)bpf.c 8.2 (Berkeley) 3/28/94 * $Id: $ @ 1.11 log @new from mccanne. be afraid. @ text @d1 3 a3 3 /*- * Copyright (c) 1991-1993 The Regents of the University of California. * All rights reserved. d7 1 a7 1 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence d38 1 a38 3 * @@(#)bpf.c 7.4 (Berkeley) 6/17/91 * * from: Header: bpf.c,v 1.3 93/12/11 02:52:18 mccanne Exp d44 5 a48 1 #if (NBPFILTER > 0) d54 1 a58 1 #include d61 3 d75 3 a78 1 #include d80 13 a92 2 #include "sl.h" #include "ppp.h" d94 1 a94 3 #ifndef BPF_BUFSIZE #define BPF_BUFSIZE NBPG #endif d105 1 a105 1 struct bpf_if *bpf_iflist; d108 6 a113 6 static void bpf_ifname(); static void catchpacket(); static void bpf_freed(); static int bpf_setif(); static int bpf_allocbufs(); d119 116 d300 1 a300 1 * Mark a descriptor free by making it point to itself. d319 1 a319 1 d360 39 d400 1 a400 1 * into the hold slot, and the free buffer into the store slot. d408 1 a408 1 (d)->bd_fbuf = 0; d413 1 a413 1 bpfread(dev, uio, ioflag) a415 1 int ioflag; d422 1 a422 1 * Restrict application to use a buffer the same size as a434 8 if (ioflag & IO_NDELAY) { if (d->bd_slen == 0) { splx(s); return (EWOULDBLOCK); } ROTATE_BUFFERS(d); break; } d444 13 a456 3 error = tsleep((caddr_t)d, PWAIT | PCATCH, "bpf", d->bd_rtout); if (error != 0) { if (error == EWOULDBLOCK) { d458 3 a460 3 * On a timeout, return what's in the buffer, * which may be nothing. If there is something * in the store buffer, we can do a rotation. a461 13 if (d->bd_hbuf) /* * We filled up the buffer in between * getting the timeout and arriving * here, so we don't need to rotate. */ break; if (d->bd_slen == 0) { splx(s); return (0); } ROTATE_BUFFERS(d); d463 4 d468 2 a469 2 splx(s); return (error); d476 2 a477 2 /* d482 1 a482 1 error = uiomove(d->bd_hbuf, d->bd_hlen, uio); d489 1 a489 1 d495 1 a495 1 * If there are processes sleeping on this descriptor, wake them up. d502 1 a502 1 #if (BSD > 199103) || defined(__NetBSD__) d520 6 a525 6 register struct bpf_if *bp = bpf_dtab[minor(dev)].bd_bif; register struct ifnet *ifp; register struct mbuf *m; register u_int len, hlen; register int error, s; struct sockaddr dst; d527 1 a527 1 if (bp == 0) d530 1 a530 15 /* * Build a sockaddr based on the data link layer type. * The AF_UNSPEC kludge allows us to hand the link level * header to the driver via the sockaddr. This isn't * very clean. It would be better if AF_UNSPEC meant that * the driver shouldn't bother with encapsulation (i.e., the * link header is already in the mbuf). The code here is * structured this way, then things are kludged back before * calling if_output. * * NOTE: When adding new link layers make sure the driver supports * AF_UNSPEC and that the link header can fit in the sa_data * field of a sockaddr. */ switch (bp->bif_dlt) { d532 2 a533 6 #if NSL > 0 case DLT_SLIP: dst.sa_family = AF_INET; hlen = 0; break; #endif d535 3 a537 17 #if NPPP > 0 case DLT_PPP: dst.sa_family = AF_UNSPEC; hlen = 0; break; #endif case DLT_EN10MB: dst.sa_family = AF_UNSPEC; hlen = 14; break; case DLT_FDDI: dst.sa_family = AF_UNSPEC; /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ hlen = 24; break; d539 1 a539 22 case DLT_NULL: dst.sa_family = AF_UNSPEC; hlen = 0; break; default: return (EIO); } ifp = bp->bif_ifp; len = uio->uio_resid; /* * If we didn't get enough for the link level header, or we * exceed the interface's mtu, return an error. */ if (len < hlen || len - hlen > ifp->if_mtu) return (EMSGSIZE); /* * XXX Avoid complicated buffer chaining --- * bail if it won't fit in a single mbuf. */ if (len > MCLBYTES) a541 27 MGETHDR(m, M_WAIT, MT_DATA); if (m == 0) return (ENOBUFS); if (len > MLEN) { MCLGET(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); return (ENOBUFS); } } /* * Move the whole packet, including the data link header, * into the mbuf. Then, copy the link header back out of the * packet into the sockaddr. Finally, strip the link header * from the front of the mbuf. */ error = uiomove(mtod(m, caddr_t), len, uio); if (error) { m_freem(m); return (error); } if (hlen > 0) { bcopy(mtod(m, caddr_t), dst.sa_data, hlen); m->m_data += hlen; len -= hlen; } m->m_pkthdr.len = m->m_len = len; d543 1 d545 3 d550 1 a550 1 * The driver frees the mbuf. a606 4 case FIONBIO: case FIOASYNC: break; d613 1 a613 1 d616 1 a616 1 if (d->bd_hbuf) d632 1 a632 1 error = (*ifp->if_ioctl)(ifp, cmd, addr); d648 3 d662 1 d668 1 a668 1 case BIOCSETF: d731 1 a731 1 case BIOCSRTIMEOUT: d749 1 a749 1 case BIOCGRTIMEOUT: d786 1 a786 1 } d791 1 a791 1 /* a821 2 if (fcode == 0) return (ENOMEM); d853 1 a853 1 * byte at the end of the name part, and compute the number. d875 1 a875 1 if (ifp == 0 || unit != ifp->if_unit d896 1 a896 1 /* d931 16 a947 1 * Inspired by the code in tty.c for the same purpose. d953 1 a953 1 bpfselect(dev, rw, p) d960 1 a960 1 d967 1 a967 1 d976 3 d981 1 a981 1 * minor device then this is a collision. This shouldn't happen a985 3 #if defined(__NetBSD__) selrecord(p, &d->bd_sel); #else a989 1 d991 1 a991 1 splx(s); a1009 1 d1029 7 a1035 2 bpf_mcopy(src, dst, len) u_char *src; a1036 4 register int len; { register struct mbuf *m = (struct mbuf *)src; register unsigned count; d1038 2 d1043 1 a1043 1 count = MIN(m->m_len, len); d1089 1 a1089 1 register void (*cpfn)(); d1100 1 a1100 1 totlen = hdrlen + MIN(snaplen, pktlen); d1115 2 a1116 2 /* * We haven't completed the previous read yet, d1126 1 a1126 1 else if (d->bd_immediate) d1137 1 d1139 5 d1153 1 a1153 1 /* d1213 3 d1217 2 d1220 4 a1223 4 if (bp == 0) { printf("bpf: no buffers in attach"); return; } d1236 2 a1237 2 * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for d1254 1 d1268 1 a1268 1 /* d1287 17 d1305 14 a1318 1 #endif (NBPFILTER > 0) @ 1.10 log @Get the pkthdr.len calculation right. @ text @d2 1 a2 1 * Copyright (c) 1990-1991 The Regents of the University of California. d38 4 a41 2 * from: @@(#)bpf.c 7.5 (Berkeley) 7/15/91 * $Id: bpf.c,v 1.9 1994/01/12 00:38:50 deraadt Exp $ d46 1 a46 1 #if NBPFILTER > 0 a51 2 #include #include d55 1 d57 1 a58 3 #if defined(sparc) && BSD < 199103 #include #endif d61 1 d64 1 a64 2 #include #include a65 1 #include d69 6 a74 2 #include #include d76 2 a77 12 /* * Older BSDs don't have kernel malloc. */ #if BSD < 199103 extern bcopy(); static caddr_t bpf_alloc(); #include #define BPF_BUFSIZE (MCLBYTES-8) #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) #else #define BPF_BUFSIZE 4096 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) a79 2 #define PRINET 26 /* interruptible */ d89 1 a89 1 struct bpf_if *bpf_iflist; a95 1 static int bpf_initd(); a103 91 static int bpf_movein(uio, linktype, mp, sockp) register struct uio *uio; int linktype; register struct mbuf **mp; register struct sockaddr *sockp; { struct mbuf *m; int error; int len; int hlen; /* * Build a sockaddr based on the data link layer type. * We do this at this level because the ethernet header * is copied directly into the data field of the sockaddr. * In the case of SLIP, there is no header and the packet * is forwarded as is. * Also, we are careful to leave room at the front of the mbuf * for the link level header. */ switch (linktype) { case DLT_SLIP: sockp->sa_family = AF_INET; hlen = 0; break; case DLT_EN10MB: sockp->sa_family = AF_UNSPEC; /* XXX Would MAXLINKHDR be better? */ hlen = sizeof(struct ether_header); break; case DLT_FDDI: sockp->sa_family = AF_UNSPEC; /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ hlen = 24; break; case DLT_NULL: sockp->sa_family = AF_UNSPEC; hlen = 0; break; default: return (EIO); } len = uio->uio_resid; if ((unsigned)len > MCLBYTES) return (EIO); MGETHDR(m, M_WAIT, MT_DATA); if (m == 0) return (ENOBUFS); if (len > MLEN) { #if BSD >= 199103 MCLGET(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { #else MCLGET(m); if (m->m_len != MCLBYTES) { #endif error = ENOBUFS; goto bad; } } m->m_len = m->m_pkthdr.len = len - hlen; *mp = m; /* * Make room for link header. */ if (hlen != 0) { #if BSD >= 199103 m->m_data += hlen; /* XXX */ #else m->m_off += hlen; #endif error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); if (error) goto bad; } error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); if (!error) return (0); bad: m_freem(m); return (error); } d168 1 a168 1 * Mark a descriptor free by making it point to itself. d187 1 a187 1 a227 39 * Support for SunOS, which does not have tsleep. */ #if BSD < 199103 static bpf_timeout(arg) caddr_t arg; { struct bpf_d *d = (struct bpf_d *)arg; d->bd_timedout = 1; wakeup(arg); } #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) int bpf_sleep(d) register struct bpf_d *d; { register int rto = d->bd_rtout; register int st; if (rto != 0) { d->bd_timedout = 0; timeout(bpf_timeout, (caddr_t)d, rto); } st = sleep((caddr_t)d, PRINET|PCATCH); if (rto != 0) { if (d->bd_timedout == 0) untimeout(bpf_timeout, (caddr_t)d); else if (st == 0) return EWOULDBLOCK; } return (st != 0) ? EINTR : 0; } #else #define BPF_SLEEP tsleep #endif /* d229 1 a229 1 * into the hold slot, and the free buffer into the store slot. d237 1 a237 1 (d)->bd_fbuf = 0; d242 1 a242 1 bpfread(dev, uio) d245 1 d252 1 a252 1 * Restrict application to use a buffer the same size as d265 8 d282 3 a284 13 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); if (error == EINTR || error == ERESTART) { splx(s); return (error); } if (error == EWOULDBLOCK) { /* * On a timeout, return what's in the buffer, * which may be nothing. If there is something * in the store buffer, we can rotate the buffers. */ if (d->bd_hbuf) d286 3 a288 3 * We filled up the buffer in between * getting the timeout and arriving * here, so we don't need to rotate. d290 13 a303 4 if (d->bd_slen == 0) { splx(s); return (0); d305 2 a306 2 ROTATE_BUFFERS(d); break; d313 2 a314 2 /* d319 1 a319 1 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); d326 1 a326 1 d332 1 a332 1 * If there are processes sleeping on this descriptor, wake them up. d357 6 a362 5 register struct bpf_d *d = &bpf_dtab[minor(dev)]; struct ifnet *ifp; struct mbuf *m; int error, s; static struct sockaddr dst; d364 1 a364 1 if (d->bd_bif == 0) d367 57 a423 1 ifp = d->bd_bif->bif_ifp; d425 5 a429 3 if (uio->uio_resid == 0) return (0); if (uio->uio_resid > ifp->if_mtu) d432 19 a450 2 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst); if (error) d452 7 a458 1 a459 1 #if BSD >= 199103 a460 3 #else error = (*ifp->if_output)(ifp, m, &dst); #endif d463 1 a463 1 * The driver frees the mbuf. d520 4 d530 1 a530 1 d533 1 a533 1 if (d->bd_hbuf) d549 1 a549 1 error = (*ifp->if_ioctl)(ifp, cmd, addr); a564 3 #if BSD < 199103 error = EINVAL; #else a575 1 #endif d581 1 a581 1 case BIOCSETF: d644 1 a644 1 case BIOCSRTIMEOUT: d662 1 a662 1 case BIOCGRTIMEOUT: d699 1 a699 1 } d704 1 a704 1 /* d735 2 d768 1 a768 1 * byte at the end of the name part, and compute the number. d790 1 a790 1 if (ifp == 0 || unit != ifp->if_unit d811 1 a811 1 /* a845 16 * The new select interface passes down the proc pointer; the old select * stubs had to grab it out of the user struct. This glue allows either case. */ #if BSD >= 199103 #define bpf_select bpfselect #else int bpfselect(dev, rw) register dev_t dev; int rw; { return (bpf_select(dev, rw, u.u_procp)); } #endif /* d853 1 a853 1 bpf_select(dev, rw, p) d860 1 a860 1 d867 1 a867 1 a875 3 #if defined(__NetBSD__) selrecord(p, &d->bd_sel); #else d878 1 a878 1 * minor device then this is a collision. This shouldn't happen d883 3 d890 1 d892 1 a892 1 splx(s); d911 1 d1014 2 a1015 2 /* * We haven't completed the previous read yet, d1025 1 a1025 1 else if (d->bd_immediate) a1035 1 #if BSD >= 199103 a1036 5 #elif defined(sun) uniqtime(&hp->bh_tstamp); #else hp->bh_tstamp = time; #endif d1046 1 a1046 1 /* a1105 3 #if BSD < 199103 static struct bpf_if bpf_ifs[NBPFILTER]; static int bpfifno; a1106 2 bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0; #else d1108 4 a1111 4 #endif if (bp == 0) panic("bpfattach"); d1124 2 a1125 2 * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for d1137 1 d1139 1 a1141 1 #if BSD >= 199103 d1155 1 a1155 1 /* a1173 1 #endif d1175 1 a1175 31 #if BSD < 199103 /* * Allocate some memory for bpf. This is temporary SunOS support, and * is admittedly a hack. * If resources unavaiable, return 0. */ static caddr_t bpf_alloc(size, canwait) register int size; register int canwait; { register struct mbuf *m; if ((unsigned)size > (MCLBYTES-8)) return 0; MGET(m, canwait, MT_DATA); if (m == 0) return 0; if ((unsigned)size > (MLEN-8)) { MCLGET(m); if (m->m_len != MCLBYTES) { m_freem(m); return 0; } } *mtod(m, struct mbuf **) = m; return mtod(m, caddr_t) + 8; } #endif #endif @ 1.9 log @writing out of bpf; use a hdr mbuf and set the pkthdr.len as well. (rarpd now works with if_ep.c!) @ text @d39 1 a39 1 * $Id: bpf.c,v 1.8 1993/12/18 00:40:49 mycroft Exp $ d183 1 a183 2 m->m_len = len; m->m_pkthdr.len = len + hlen; a188 1 m->m_len -= hlen; @ 1.8 log @Canonicalize all #includes. @ text @d39 1 a39 1 * $Id: bpf.c,v 1.7 1993/11/23 04:51:25 cgd Exp $ d168 1 a168 1 MGET(m, M_WAIT, MT_DATA); d184 1 @ 1.7 log @defines change @ text @d39 1 a39 1 * $Id: bpf.c,v 1.6 1993/11/15 09:56:46 deraadt Exp $ a55 1 a61 1 d64 3 a67 1 a70 2 #include a72 1 #include @ 1.6 log @add bpfilterattach(), as in magnum @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5 1993/05/18 18:19:50 cgd Exp $ a45 4 #ifndef __386BSD__ #define __386BSD__ #endif d476 1 a476 1 #if (BSD > 199103) || defined(__386BSD__) d949 1 a949 1 #if defined(__386BSD__) @ 1.5 log @make kernel select interface be one-stop shopping & clean it all up. @ text @d39 1 a39 1 * $Id$ d115 6 @ 1.5.4.1 log @Make all files using spl*() #include cpu.h. Changes from trunk. @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5 1993/05/18 18:19:50 cgd Exp $ a79 2 #include @ 1.5.4.2 log @Add dummy bpfilterattach() to make autoconfig happy. @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5.4.1 1993/09/24 08:53:51 mycroft Exp $ a116 6 void bpfilterattach(n) int n; { } @ 1.5.4.3 log @defines change @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5.4.2 1993/10/09 09:53:23 mycroft Exp $ d46 4 d482 1 a482 1 #if (BSD > 199103) || defined(__NetBSD__) d955 1 a955 1 #if defined(__NetBSD__) @ 1.5.4.4 log @Remove remaining sleep()s. @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5.4.3 1993/11/23 04:52:03 cgd Exp $ d361 1 a361 1 st = tsleep((caddr_t)d, PRINET|PCATCH, "bpf_sleep", 0); @ 1.5.4.5 log @Path from Andrew Moore to make sure the ether type field is correct when sending raw packets. @ text @d39 1 a39 1 * $Id: bpf.c,v 1.5.4.4 1993/11/27 19:43:01 mycroft Exp $ a202 10 if (linktype == DLT_EN10MB) { /* * ether_output() routine does a htons() on the type * field, so here we make sure it's in host order. */ struct ether_header *eh; eh = (struct ether_header *)sockp->sa_data; eh->ether_type = ntohs(eh->ether_type); } @ 1.4 log @fixes stupid piece of bpf code that duplicates cdefs.h's handling of 'inline' in such a way as to cause stupid warnings. @ text @d38 2 a39 4 * @@(#)bpf.c 7.5 (Berkeley) 7/15/91 * * static char rcsid[] = * "$Header: /b/source/CVS/src/sys.386bsd/net/bpf.c,v 1.3 1993/04/05 22:04:09 deraadt Exp $"; d59 1 d474 1 a474 1 #if BSD > 199103 a478 7 #if defined(__386BSD__) if (d->bd_selpid) { selwakeup(d->bd_selpid, (int)d->bd_selcoll); d->bd_selcoll = 0; d->bd_selpid = 0; /* XXX */ } #else a484 1 #endif a930 3 #if defined(__386BSD__) struct proc *p2; #endif d947 1 a947 1 #if 0 a956 6 #if defined(__386BSD__) if (d->bd_selpid && (p2=pfind(d->bd_selpid)) && p2->p_wchan == (caddr_t)&selwait) d->bd_selcoll = 1; else d->bd_selpid = p->p_pid; #else a960 1 #endif @ 1.3 log @selwakeup() takes a "pid_t" rather than "struct proc *" now. @ text @d41 1 a41 1 * "$Header: /b/source/CVS/src/sys.386bsd/net/bpf.c,v 1.2 1993/03/25 00:27:49 cgd Exp $"; a50 6 #ifndef __GNUC__ #define inline #else #define inline __inline__ #endif @ 1.2 log @added BPF support, as provided by David Greenman (davidg@@implode.rain.com) @ text @d41 1 a41 1 * "$Header: /b/source/CVS/src/sys.386bsd/net/bpf.c,v 1.1.1.1 93/03/21 09:46:05 cgd Exp $"; d48 4 d481 1 a481 1 #if 0 d486 7 d499 1 d946 3 d975 6 d986 1 @ 1.1 log @Initial revision @ text @d2 1 a2 1 * Copyright (c) 1991 The Regents of the University of California. d7 2 a8 1 * to Berkeley by Steven McCanne of Lawrence Berkeley Laboratory. d38 1 a38 1 * @@(#)bpf.c 7.4 (Berkeley) 6/17/91 d41 1 a41 1 * "$Header: /usr/bill/working/sys/net/RCS/bpf.c,v 1.1 92/01/15 20:28:02 william Exp Locker: william $"; d46 1 a46 1 #if (NBPFILTER > 0) d48 6 d59 1 d65 1 a65 1 #ifdef sparc d84 14 d103 1 a103 1 int bpf_bufsize = MCLBYTES; a107 4 * * We really don't need NBPFILTER bpf_if entries, but this eliminates * the need to account for all possible drivers here. * This problem will go away when these structures are allocated dynamically. d109 2 a110 2 static struct bpf_if *bpf_iflist; static struct bpf_d bpf_dtab[NBPFILTER]; d114 1 d117 1 d141 1 d153 1 a153 1 case DLT_FDDI: d159 5 d176 1 d179 4 d192 1 a192 1 if (hlen) { d194 1 d196 4 a199 2 error = uiomove((caddr_t)sockp->sa_data, hlen, uio); d203 2 a204 2 error = uiomove(mtod(m, caddr_t), len - hlen, uio); if (!error) d212 1 a212 1 * Attach 'd' to the bpf interface 'bp', i.e. make 'd' listen on 'bp'. d220 5 a224 1 /* Point d at bp. */ a225 2 /* Add d to bp's list of listeners. */ a228 3 /* * Let the driver know we're here (if it doesn't already). */ d232 3 d255 1 a255 1 panic("bpf_detachd: ifpromisc failed"); d257 1 a257 1 /* Remove 'd' from the interface's descriptor list. */ d275 1 a275 1 * Mark a descriptor free by making it point to itself. d284 2 a285 4 * bpfopen - open ethernet device * * Errors: ENXIO - illegal minor device number * EBUSY - too many files open a292 1 int error, s; d294 1 a294 1 a296 1 a300 1 s = splimp(); d302 1 a302 2 if (!D_ISFREE(d)) { splx(s); d304 4 a307 4 } else /* Mark "free" and do most initialization. */ bzero((char *)d, sizeof(*d)); splx(s); a308 5 error = bpf_initd(d); if (error) { D_MARKFREE(d); return (error); } d317 1 d323 1 a323 1 int s; d329 26 d356 12 a367 10 /* Free the buffer space. */ if (d->bd_hbuf) free(d->bd_hbuf, M_DEVBUF); if (d->bd_fbuf) free(d->bd_fbuf, M_DEVBUF); free(d->bd_sbuf, M_DEVBUF); if (d->bd_filter) free((caddr_t)d->bd_filter, M_DEVBUF); D_MARKFREE(d); d369 3 d375 1 a375 1 * into the hold slot, and the free buffer into the store slot. d383 1 a383 1 (d)->bd_fbuf = 0; d397 1 a397 1 * Restrict application to use a buffer the same size as d405 3 a407 3 * If the hold buffer is empty, then set a timer and sleep * until either the timeout has occurred or enough packets have * arrived to fill the store buffer. d419 2 a420 1 error = tsleep((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); d433 1 a433 1 * We filled up the buffer in between d451 2 a452 2 /* d457 1 a457 1 error = uiomove(d->bd_hbuf, d->bd_hlen, uio); d462 1 d464 1 a464 1 d470 1 a470 1 * If there are processes sleeping on this descriptor, wake them up. d477 5 d487 1 d516 3 d520 1 d523 1 a523 1 * The driver frees the mbuf. d529 2 a530 2 * Reset a descriptor by flushing its packet bufferand clearing the receive * and drop counts. Should be called at splimp. d542 1 a549 1 * BIOCGFLEN Get max filter len. d561 1 d586 1 a586 1 d589 1 a589 1 if (d->bd_hbuf) d605 1 a605 1 error = (*ifp->if_ioctl)(ifp, cmd, addr); d611 1 a611 1 * Get max filter len. d613 2 a614 2 case BIOCGFLEN: *(u_int *)addr = BPF_MAXINSNS; d616 1 d618 1 a618 1 * Get buffer len [for read()]. d620 16 a635 2 case BIOCGBLEN: *(u_int *)addr = d->bd_bufsize; d639 1 a639 1 * Set ethernet read filter. d641 1 a641 1 case BIOCSETF: a666 1 d->bd_promisc = 1; d668 2 d704 1 a704 1 case BIOCSRTIMEOUT: d722 1 a722 1 case BIOCGRTIMEOUT: d751 9 d764 2 a765 2 /* * Set d's packet filter program to 'fp'. If 'd' already has a filter, d795 2 a796 4 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size)) return (EINVAL); if (bpf_validate(fcode, (int)flen)) { d811 3 a813 2 * Detach 'd' from its current interface (if attached at all) and attach to * the interface named 'name'. Return ioctl error code or 0. d822 1 a822 1 int unit, s; d826 1 a826 1 * byte at the end of the name part, and compute the number. d848 1 a848 1 if (ifp == 0 || unit != ifp->if_unit d852 1 a852 2 * We found the requested interface. If we're * already attached to it, just flush the buffer. d854 3 d860 6 d869 1 a869 1 /* d885 2 a886 2 * Lookup the name of the 'ifp' interface and return it in 'ifr->ifr_name'. * We augment the ifp's base name with its unit number. d897 1 a897 1 ; d904 16 d923 2 a924 3 * bpfselect - returns true iff the specific operation * will not block indefinitely. Otherwise, return * false but make a note that a selwakeup() must be done. d927 1 a927 1 bpfselect(dev, rw, p) d934 1 a934 1 d941 1 a941 1 d950 3 d955 1 a955 1 * minor device then this is a collision. This shouldn't happen d964 2 a965 2 splx(s); d970 4 a973 1 * bpf_tap - incoming linkage from device drivers d1023 1 a1023 2 * bpf_mtap - incoming linkage from device drivers, when packet * is in an mbuf chain d1036 1 a1036 1 for (m0 = m; m0 != m; m0 = m0->m_next) d1050 4 a1053 4 * otherwise 0. 'copy' is the routine called to do the actual data * transfer. 'bcopy' is passed in to copy contiguous chunks, while * 'bpf_mcopy' is passed in to copy mbuf chains. In the latter * case, 'pkt' is really an mbuf. d1086 2 a1087 2 /* * We haven't completed the previous read yet, d1097 1 a1097 1 else if (d->bd_immediate) d1108 3 a1110 1 #ifdef sun a1112 3 #ifdef hp300 microtime(&hp->bh_tstamp); #else a1114 1 #endif d1124 1 a1124 1 /* d1128 1 a1128 1 bpf_initd(d) a1130 1 d->bd_bufsize = bpf_bufsize; d1146 29 a1174 2 * Register 'ifp' with bpf. XXX * and 'driverp' is a pointer to the 'struct bpf_if *' in the driver's softc. d1184 3 d1188 2 d1191 1 d1207 2 a1208 2 * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for d1223 1 d1226 1 a1226 1 * Set/clear promiscuous mode on interface ifp based on the truth value` d1228 2 a1229 2 * on request actually has an effect, as does the final off request. * Results are undefined if the off and on requests are not matched. d1237 1 a1237 1 /* d1256 17 d1274 15 a1288 1 #endif (NBPFILTER > 0) @ 1.1.1.1 log @initial import of 386bsd-0.1 sources @ text @@ 1.1.1.2 log @Import 4.4BSD-Lite for reference @ text @d1 3 a3 3 /* * Copyright (c) 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. d7 1 a7 2 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence * Berkeley Laboratory. d37 1 a37 1 * @@(#)bpf.c 8.2 (Berkeley) 3/28/94 d40 1 a40 1 * "$Header: bpf.c,v 1.33 91/10/27 21:21:58 mccanne Exp $"; d45 1 a45 7 #if NBPFILTER > 0 #ifndef __GNUC__ #define inline #else #define inline __inline #endif d51 1 a51 1 #include a54 1 #include d57 1 a57 1 #if defined(sparc) && BSD < 199103 a75 14 /* * Older BSDs don't have kernel malloc. */ #if BSD < 199103 extern bcopy(); static caddr_t bpf_alloc(); #include #define BPF_BUFSIZE (MCLBYTES-8) #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) #else #define BPF_BUFSIZE 4096 #define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) #endif d81 1 a81 1 int bpf_bufsize = BPF_BUFSIZE; d86 12 a97 33 */ struct bpf_if *bpf_iflist; struct bpf_d bpf_dtab[NBPFILTER]; #if BSD >= 199207 /* * bpfilterattach() is called at boot time in new systems. We do * nothing here since old systems will not call this. */ /* ARGSUSED */ void bpfilterattach(n) int n; { } #endif static int bpf_allocbufs __P((struct bpf_d *)); static int bpf_allocbufs __P((struct bpf_d *)); static void bpf_freed __P((struct bpf_d *)); static void bpf_freed __P((struct bpf_d *)); static void bpf_ifname __P((struct ifnet *, struct ifreq *)); static void bpf_ifname __P((struct ifnet *, struct ifreq *)); static void bpf_mcopy __P((const void *, void *, u_int)); static int bpf_movein __P((struct uio *, int, struct mbuf **, struct sockaddr *, int *)); static int bpf_setif __P((struct bpf_d *, struct ifreq *)); static int bpf_setif __P((struct bpf_d *, struct ifreq *)); static inline void bpf_wakeup __P((struct bpf_d *)); static void catchpacket __P((struct bpf_d *, u_char *, u_int, u_int, void (*)(const void *, void *, u_int))); static void reset_d __P((struct bpf_d *)); d100 1 a100 1 bpf_movein(uio, linktype, mp, sockp, datlen) d102 1 a102 1 int linktype, *datlen; a120 1 d132 1 a132 1 case DLT_FDDI: a137 5 case DLT_NULL: sockp->sa_family = AF_UNSPEC; hlen = 0; break; a142 1 *datlen = len - hlen; a149 1 #if BSD >= 199103 a151 4 #else MCLGET(m); if (m->m_len != MCLBYTES) { #endif d161 1 a161 1 if (hlen != 0) { a162 1 #if BSD >= 199103 d164 2 a165 4 #else m->m_off += hlen; #endif error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); d169 2 a170 2 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); if (!error) d178 1 a178 1 * Attach file to the bpf interface, i.e. make d listen on bp. d186 1 a186 5 /* * Point d at bp, and add d to the interface's list of listeners. * Finally, point the driver's bpf cookie at the interface so * it will divert packets to bpf. */ d188 2 d193 3 a198 3 /* * Detach a file from its interface. */ d219 1 a219 1 panic("bpf: ifpromisc failed"); d221 1 a221 1 /* Remove d from the interface's descriptor list. */ d239 1 a239 1 * Mark a descriptor free by making it point to itself. d248 4 a251 2 * Open ethernet device. Returns ENXIO for illegal minor device number, * EBUSY if file is open by another process. d259 1 d261 1 a261 1 d264 1 d269 1 d271 2 a272 1 if (!D_ISFREE(d)) d274 4 d279 5 a283 4 /* Mark "free" and do most initialization. */ bzero((char *)d, sizeof(*d)); d->bd_bufsize = bpf_bufsize; a291 1 int d297 1 a297 1 register int s; a302 1 bpf_freed(d); d304 10 a313 1 return (0); a316 39 * Support for SunOS, which does not have tsleep. */ #if BSD < 199103 static bpf_timeout(arg) caddr_t arg; { struct bpf_d *d = (struct bpf_d *)arg; d->bd_timedout = 1; wakeup(arg); } #define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) int bpf_sleep(d) register struct bpf_d *d; { register int rto = d->bd_rtout; register int st; if (rto != 0) { d->bd_timedout = 0; timeout(bpf_timeout, (caddr_t)d, rto); } st = sleep((caddr_t)d, PRINET|PCATCH); if (rto != 0) { if (d->bd_timedout == 0) untimeout(bpf_timeout, (caddr_t)d); else if (st == 0) return EWOULDBLOCK; } return (st != 0) ? EINTR : 0; } #else #define BPF_SLEEP tsleep #endif /* d318 1 a318 1 * into the hold slot, and the free buffer into the store slot. d326 1 a326 1 (d)->bd_fbuf = 0; d340 1 a340 1 * Restrict application to use a buffer the same size as d348 3 a350 3 * If the hold buffer is empty, then do a timed sleep, which * ends when the timeout expires or when enough packets * have arrived to fill the store buffer. d362 1 a362 2 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", d->bd_rtout); d375 1 a375 1 * We filled up the buffer in between d393 2 a394 2 /* d399 1 a399 1 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); a403 1 d->bd_hlen = 0; d405 1 a405 1 d411 1 a411 1 * If there are processes sleeping on this descriptor, wake them up. a417 5 #if BSD >= 199103 selwakeup(&d->bd_sel); /* XXX */ d->bd_sel.si_pid = 0; #else a422 1 #endif a434 1 int datlen; d443 2 d446 1 a446 1 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); a449 3 if (datlen > ifp->if_mtu) return (EMSGSIZE); a450 3 #if BSD >= 199103 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); #else a451 1 #endif d454 1 a454 1 * The driver frees the mbuf. d460 2 a461 2 * Reset a descriptor by flushing its packet buffer and clearing the * receive and drop counts. Should be called at splimp. a472 1 d->bd_hlen = 0; d480 1 a491 1 * BIOCVERSION Get filter language version. d516 1 a516 1 d519 1 a519 1 if (d->bd_hbuf) d535 1 a535 1 error = (*ifp->if_ioctl)(ifp, cmd, addr); d541 6 d554 1 a554 1 * Set buffer length. d556 1 a556 22 case BIOCSBLEN: #if BSD < 199103 error = EINVAL; #else if (d->bd_bif != 0) error = EINVAL; else { register u_int size = *(u_int *)addr; if (size > BPF_MAXBUFSIZE) *(u_int *)addr = size = BPF_MAXBUFSIZE; else if (size < BPF_MINBUFSIZE) *(u_int *)addr = size = BPF_MINBUFSIZE; d->bd_bufsize = size; } #endif break; /* * Set link layer read filter. */ case BIOCSETF: d582 1 a583 2 if (error == 0) d->bd_promisc = 1; d618 1 a618 1 case BIOCSRTIMEOUT: d636 1 a636 1 case BIOCGRTIMEOUT: a664 9 case BIOCVERSION: { struct bpf_version *bv = (struct bpf_version *)addr; bv->bv_major = BPF_MAJOR_VERSION; bv->bv_minor = BPF_MINOR_VERSION; break; } d669 2 a670 2 /* * Set d's packet filter program to fp. If this file already has a filter, d700 4 a703 2 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && bpf_validate(fcode, (int)flen)) { d718 2 a719 3 * Detach a file from its current interface (if attached at all) and attach * to the interface indicated by the name stored in ifr. * Return an errno or 0. d728 1 a728 1 int unit, s, error; d732 1 a732 1 * byte at the end of the name part, and compute the number. d754 1 a754 1 if (ifp == 0 || unit != ifp->if_unit d758 2 a759 1 * We found the requested interface. a760 3 * Allocate the packet buffers if we need to. * If we're already attached to requested interface, * just flush the buffer. a763 6 if (d->bd_sbuf == 0) { error = bpf_allocbufs(d); if (error != 0) return (error); } d767 1 a767 1 /* d783 2 a784 2 * Convert an interface name plus unit number of an ifp to a single * name which is returned in the ifr. d795 1 a795 1 continue; a801 16 * The new select interface passes down the proc pointer; the old select * stubs had to grab it out of the user struct. This glue allows either case. */ #if BSD >= 199103 #define bpf_select bpfselect #else int bpfselect(dev, rw) register dev_t dev; int rw; { return (bpf_select(dev, rw, u.u_procp)); } #endif /* d803 1 d805 3 a807 2 * Return true iff the specific operation will not block indefinitely. * Otherwise, return false but make a note that a selwakeup() must be done. d810 1 a810 1 bpf_select(dev, rw, p) d817 1 a817 1 d824 1 a824 1 a832 3 #if BSD >= 199103 selrecord(p, &d->bd_sel); #else d835 1 a835 1 * minor device then this is a collision. This shouldn't happen d844 2 a845 2 #endif splx(s); d850 1 a850 4 * Incoming linkage from device drivers. Process the packet pkt, of length * pktlen, which is stored in a contiguous buffer. The packet is parsed * by each process' filter, and if accepted, stashed into the corresponding * buffer. d880 4 a883 4 bpf_mcopy(src_arg, dst_arg, len) const void *src_arg; void *dst_arg; register u_int len; d885 2 a886 3 register const struct mbuf *m; register u_int count; u_char *dst; a887 2 m = src_arg; dst = dst_arg; d891 1 a891 1 count = min(m->m_len, len); d900 2 a901 1 * Incoming linkage from device drivers, when packet is in an mbuf chain. d914 1 a914 1 for (m0 = m; m0 != 0; m0 = m0->m_next) d928 4 a931 4 * otherwise 0. "copy" is the routine called to do the actual data * transfer. bcopy is passed in to copy contiguous chunks, while * bpf_mcopy is passed in to copy mbuf chains. In the latter case, * pkt is really an mbuf. d938 1 a938 1 register void (*cpfn)(const void *, void *, u_int); d949 1 a949 1 totlen = hdrlen + min(snaplen, pktlen); d964 2 a965 2 /* * We haven't completed the previous read yet, d975 1 a975 1 else if (d->bd_immediate) d986 4 a989 1 #if BSD >= 199103 a990 2 #elif defined(sun) uniqtime(&hp->bh_tstamp); d994 1 d1004 1 a1004 1 /* d1008 1 a1008 1 bpf_allocbufs(d) d1011 1 d1027 2 a1028 29 * Free buffers currently in use by a descriptor. * Called on close. */ static void bpf_freed(d) register struct bpf_d *d; { /* * We don't need to lock out interrupts since this descriptor has * been detached from its interface and it yet hasn't been marked * free. */ if (d->bd_sbuf != 0) { free(d->bd_sbuf, M_DEVBUF); if (d->bd_hbuf != 0) free(d->bd_hbuf, M_DEVBUF); if (d->bd_fbuf != 0) free(d->bd_fbuf, M_DEVBUF); } if (d->bd_filter) free((caddr_t)d->bd_filter, M_DEVBUF); D_MARKFREE(d); } /* * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) * in the driver's softc; dlt is the link layer type; hdrlen is the fixed * size of the link header (variable length headers not yet supported). a1037 3 #if BSD < 199103 static struct bpf_if bpf_ifs[NBPFILTER]; static int bpfifno; a1038 2 bp = (bpfifno < NBPFILTER) ? &bpf_ifs[bpfifno++] : 0; #else a1039 1 #endif d1055 2 a1056 2 * equal to SIZEOF_BPF_HDR because we want to insert spacing such * that the network layer header begins on a longword boundary (for a1070 1 #if BSD >= 199103 d1073 1 a1073 1 * Set/clear promiscuous mode on interface ifp based on the truth value d1075 2 a1076 2 * "on" request actually has an effect, as does the final "off" request. * Results are undefined if the "off" and "on" requests are not matched. d1084 1 a1084 1 /* a1102 14 #endif #if BSD < 199103 /* * Allocate some memory for bpf. This is temporary SunOS support, and * is admittedly a hack. * If resources unavaiable, return 0. */ static caddr_t bpf_alloc(size, canwait) register int size; register int canwait; { register struct mbuf *m; d1104 1 a1104 18 if ((unsigned)size > (MCLBYTES-8)) return 0; MGET(m, canwait, MT_DATA); if (m == 0) return 0; if ((unsigned)size > (MLEN-8)) { MCLGET(m); if (m->m_len != MCLBYTES) { m_freem(m); return 0; } } *mtod(m, struct mbuf **) = m; return mtod(m, caddr_t) + 8; } #endif #endif @ 1.1.1.3 log @Import 4.4BSD-Lite2 @ text @d38 1 a38 1 * @@(#)bpf.c 8.4 (Berkeley) 1/9/95 d48 6 d136 1 a136 1 static __inline void d496 1 a496 1 static __inline void d593 1 a593 1 u_long cmd; @