| @@ -1,1412 +1,1412 @@ | | | @@ -1,1412 +1,1412 @@ |
1 | /* $NetBSD: if_tap.c,v 1.57 2009/04/11 23:05:26 christos Exp $ */ | | 1 | /* $NetBSD: if_tap.c,v 1.58 2009/07/23 17:53:17 plunky Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2003, 2004, 2008, 2009 The NetBSD Foundation. | | 4 | * Copyright (c) 2003, 2004, 2008, 2009 The NetBSD Foundation. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 16 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 17 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 18 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 19 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 20 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. | | 26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ | | 27 | */ |
28 | | | 28 | |
29 | /* | | 29 | /* |
30 | * tap(4) is a virtual Ethernet interface. It appears as a real Ethernet | | 30 | * tap(4) is a virtual Ethernet interface. It appears as a real Ethernet |
31 | * device to the system, but can also be accessed by userland through a | | 31 | * device to the system, but can also be accessed by userland through a |
32 | * character device interface, which allows reading and injecting frames. | | 32 | * character device interface, which allows reading and injecting frames. |
33 | */ | | 33 | */ |
34 | | | 34 | |
35 | #include <sys/cdefs.h> | | 35 | #include <sys/cdefs.h> |
36 | __KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.57 2009/04/11 23:05:26 christos Exp $"); | | 36 | __KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.58 2009/07/23 17:53:17 plunky Exp $"); |
37 | | | 37 | |
38 | #if defined(_KERNEL_OPT) | | 38 | #if defined(_KERNEL_OPT) |
39 | #include "bpfilter.h" | | 39 | #include "bpfilter.h" |
40 | #include "opt_modular.h" | | 40 | #include "opt_modular.h" |
41 | #include "opt_compat_netbsd.h" | | 41 | #include "opt_compat_netbsd.h" |
42 | #endif | | 42 | #endif |
43 | | | 43 | |
44 | #include <sys/param.h> | | 44 | #include <sys/param.h> |
45 | #include <sys/systm.h> | | 45 | #include <sys/systm.h> |
46 | #include <sys/kernel.h> | | 46 | #include <sys/kernel.h> |
47 | #include <sys/malloc.h> | | 47 | #include <sys/malloc.h> |
48 | #include <sys/conf.h> | | 48 | #include <sys/conf.h> |
49 | #include <sys/device.h> | | 49 | #include <sys/device.h> |
50 | #include <sys/file.h> | | 50 | #include <sys/file.h> |
51 | #include <sys/filedesc.h> | | 51 | #include <sys/filedesc.h> |
52 | #include <sys/ksyms.h> | | 52 | #include <sys/ksyms.h> |
53 | #include <sys/poll.h> | | 53 | #include <sys/poll.h> |
54 | #include <sys/proc.h> | | 54 | #include <sys/proc.h> |
55 | #include <sys/select.h> | | 55 | #include <sys/select.h> |
56 | #include <sys/sockio.h> | | 56 | #include <sys/sockio.h> |
57 | #if defined(COMPAT_40) || defined(MODULAR) | | 57 | #if defined(COMPAT_40) || defined(MODULAR) |
58 | #include <sys/sysctl.h> | | 58 | #include <sys/sysctl.h> |
59 | #endif | | 59 | #endif |
60 | #include <sys/kauth.h> | | 60 | #include <sys/kauth.h> |
61 | #include <sys/mutex.h> | | 61 | #include <sys/mutex.h> |
62 | #include <sys/simplelock.h> | | 62 | #include <sys/simplelock.h> |
63 | #include <sys/intr.h> | | 63 | #include <sys/intr.h> |
64 | #include <sys/stat.h> | | 64 | #include <sys/stat.h> |
65 | | | 65 | |
66 | #include <net/if.h> | | 66 | #include <net/if.h> |
67 | #include <net/if_dl.h> | | 67 | #include <net/if_dl.h> |
68 | #include <net/if_ether.h> | | 68 | #include <net/if_ether.h> |
69 | #include <net/if_media.h> | | 69 | #include <net/if_media.h> |
70 | #include <net/if_tap.h> | | 70 | #include <net/if_tap.h> |
71 | #if NBPFILTER > 0 | | 71 | #if NBPFILTER > 0 |
72 | #include <net/bpf.h> | | 72 | #include <net/bpf.h> |
73 | #endif | | 73 | #endif |
74 | | | 74 | |
75 | #include <compat/sys/sockio.h> | | 75 | #include <compat/sys/sockio.h> |
76 | | | 76 | |
77 | #if defined(COMPAT_40) || defined(MODULAR) | | 77 | #if defined(COMPAT_40) || defined(MODULAR) |
78 | /* | | 78 | /* |
79 | * sysctl node management | | 79 | * sysctl node management |
80 | * | | 80 | * |
81 | * It's not really possible to use a SYSCTL_SETUP block with | | 81 | * It's not really possible to use a SYSCTL_SETUP block with |
82 | * current module implementation, so it is easier to just define | | 82 | * current module implementation, so it is easier to just define |
83 | * our own function. | | 83 | * our own function. |
84 | * | | 84 | * |
85 | * The handler function is a "helper" in Andrew Brown's sysctl | | 85 | * The handler function is a "helper" in Andrew Brown's sysctl |
86 | * framework terminology. It is used as a gateway for sysctl | | 86 | * framework terminology. It is used as a gateway for sysctl |
87 | * requests over the nodes. | | 87 | * requests over the nodes. |
88 | * | | 88 | * |
89 | * tap_log allows the module to log creations of nodes and | | 89 | * tap_log allows the module to log creations of nodes and |
90 | * destroy them all at once using sysctl_teardown. | | 90 | * destroy them all at once using sysctl_teardown. |
91 | */ | | 91 | */ |
92 | static int tap_node; | | 92 | static int tap_node; |
93 | static int tap_sysctl_handler(SYSCTLFN_PROTO); | | 93 | static int tap_sysctl_handler(SYSCTLFN_PROTO); |
94 | SYSCTL_SETUP_PROTO(sysctl_tap_setup); | | 94 | SYSCTL_SETUP_PROTO(sysctl_tap_setup); |
95 | #endif | | 95 | #endif |
96 | | | 96 | |
97 | /* | | 97 | /* |
98 | * Since we're an Ethernet device, we need the 3 following | | 98 | * Since we're an Ethernet device, we need the 3 following |
99 | * components: a leading struct device, a struct ethercom, | | 99 | * components: a leading struct device, a struct ethercom, |
100 | * and also a struct ifmedia since we don't attach a PHY to | | 100 | * and also a struct ifmedia since we don't attach a PHY to |
101 | * ourselves. We could emulate one, but there's no real | | 101 | * ourselves. We could emulate one, but there's no real |
102 | * point. | | 102 | * point. |
103 | */ | | 103 | */ |
104 | | | 104 | |
105 | struct tap_softc { | | 105 | struct tap_softc { |
106 | device_t sc_dev; | | 106 | device_t sc_dev; |
107 | struct ifmedia sc_im; | | 107 | struct ifmedia sc_im; |
108 | struct ethercom sc_ec; | | 108 | struct ethercom sc_ec; |
109 | int sc_flags; | | 109 | int sc_flags; |
110 | #define TAP_INUSE 0x00000001 /* tap device can only be opened once */ | | 110 | #define TAP_INUSE 0x00000001 /* tap device can only be opened once */ |
111 | #define TAP_ASYNCIO 0x00000002 /* user is using async I/O (SIGIO) on the device */ | | 111 | #define TAP_ASYNCIO 0x00000002 /* user is using async I/O (SIGIO) on the device */ |
112 | #define TAP_NBIO 0x00000004 /* user wants calls to avoid blocking */ | | 112 | #define TAP_NBIO 0x00000004 /* user wants calls to avoid blocking */ |
113 | #define TAP_GOING 0x00000008 /* interface is being destroyed */ | | 113 | #define TAP_GOING 0x00000008 /* interface is being destroyed */ |
114 | struct selinfo sc_rsel; | | 114 | struct selinfo sc_rsel; |
115 | pid_t sc_pgid; /* For async. IO */ | | 115 | pid_t sc_pgid; /* For async. IO */ |
116 | kmutex_t sc_rdlock; | | 116 | kmutex_t sc_rdlock; |
117 | struct simplelock sc_kqlock; | | 117 | struct simplelock sc_kqlock; |
118 | void *sc_sih; | | 118 | void *sc_sih; |
119 | struct timespec sc_atime; | | 119 | struct timespec sc_atime; |
120 | struct timespec sc_mtime; | | 120 | struct timespec sc_mtime; |
121 | struct timespec sc_btime; | | 121 | struct timespec sc_btime; |
122 | }; | | 122 | }; |
123 | | | 123 | |
124 | /* autoconf(9) glue */ | | 124 | /* autoconf(9) glue */ |
125 | | | 125 | |
126 | void tapattach(int); | | 126 | void tapattach(int); |
127 | | | 127 | |
128 | static int tap_match(device_t, cfdata_t, void *); | | 128 | static int tap_match(device_t, cfdata_t, void *); |
129 | static void tap_attach(device_t, device_t, void *); | | 129 | static void tap_attach(device_t, device_t, void *); |
130 | static int tap_detach(device_t, int); | | 130 | static int tap_detach(device_t, int); |
131 | | | 131 | |
132 | CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc), | | 132 | CFATTACH_DECL_NEW(tap, sizeof(struct tap_softc), |
133 | tap_match, tap_attach, tap_detach, NULL); | | 133 | tap_match, tap_attach, tap_detach, NULL); |
134 | extern struct cfdriver tap_cd; | | 134 | extern struct cfdriver tap_cd; |
135 | | | 135 | |
136 | /* Real device access routines */ | | 136 | /* Real device access routines */ |
137 | static int tap_dev_close(struct tap_softc *); | | 137 | static int tap_dev_close(struct tap_softc *); |
138 | static int tap_dev_read(int, struct uio *, int); | | 138 | static int tap_dev_read(int, struct uio *, int); |
139 | static int tap_dev_write(int, struct uio *, int); | | 139 | static int tap_dev_write(int, struct uio *, int); |
140 | static int tap_dev_ioctl(int, u_long, void *, struct lwp *); | | 140 | static int tap_dev_ioctl(int, u_long, void *, struct lwp *); |
141 | static int tap_dev_poll(int, int, struct lwp *); | | 141 | static int tap_dev_poll(int, int, struct lwp *); |
142 | static int tap_dev_kqfilter(int, struct knote *); | | 142 | static int tap_dev_kqfilter(int, struct knote *); |
143 | | | 143 | |
144 | /* Fileops access routines */ | | 144 | /* Fileops access routines */ |
145 | static int tap_fops_close(file_t *); | | 145 | static int tap_fops_close(file_t *); |
146 | static int tap_fops_read(file_t *, off_t *, struct uio *, | | 146 | static int tap_fops_read(file_t *, off_t *, struct uio *, |
147 | kauth_cred_t, int); | | 147 | kauth_cred_t, int); |
148 | static int tap_fops_write(file_t *, off_t *, struct uio *, | | 148 | static int tap_fops_write(file_t *, off_t *, struct uio *, |
149 | kauth_cred_t, int); | | 149 | kauth_cred_t, int); |
150 | static int tap_fops_ioctl(file_t *, u_long, void *); | | 150 | static int tap_fops_ioctl(file_t *, u_long, void *); |
151 | static int tap_fops_poll(file_t *, int); | | 151 | static int tap_fops_poll(file_t *, int); |
152 | static int tap_fops_stat(file_t *, struct stat *); | | 152 | static int tap_fops_stat(file_t *, struct stat *); |
153 | static int tap_fops_kqfilter(file_t *, struct knote *); | | 153 | static int tap_fops_kqfilter(file_t *, struct knote *); |
154 | | | 154 | |
155 | static const struct fileops tap_fileops = { | | 155 | static const struct fileops tap_fileops = { |
156 | .fo_read = tap_fops_read, | | 156 | .fo_read = tap_fops_read, |
157 | .fo_write = tap_fops_write, | | 157 | .fo_write = tap_fops_write, |
158 | .fo_ioctl = tap_fops_ioctl, | | 158 | .fo_ioctl = tap_fops_ioctl, |
159 | .fo_fcntl = fnullop_fcntl, | | 159 | .fo_fcntl = fnullop_fcntl, |
160 | .fo_poll = tap_fops_poll, | | 160 | .fo_poll = tap_fops_poll, |
161 | .fo_stat = tap_fops_stat, | | 161 | .fo_stat = tap_fops_stat, |
162 | .fo_close = tap_fops_close, | | 162 | .fo_close = tap_fops_close, |
163 | .fo_kqfilter = tap_fops_kqfilter, | | 163 | .fo_kqfilter = tap_fops_kqfilter, |
164 | .fo_drain = fnullop_drain, | | 164 | .fo_drain = fnullop_drain, |
165 | }; | | 165 | }; |
166 | | | 166 | |
167 | /* Helper for cloning open() */ | | 167 | /* Helper for cloning open() */ |
168 | static int tap_dev_cloner(struct lwp *); | | 168 | static int tap_dev_cloner(struct lwp *); |
169 | | | 169 | |
170 | /* Character device routines */ | | 170 | /* Character device routines */ |
171 | static int tap_cdev_open(dev_t, int, int, struct lwp *); | | 171 | static int tap_cdev_open(dev_t, int, int, struct lwp *); |
172 | static int tap_cdev_close(dev_t, int, int, struct lwp *); | | 172 | static int tap_cdev_close(dev_t, int, int, struct lwp *); |
173 | static int tap_cdev_read(dev_t, struct uio *, int); | | 173 | static int tap_cdev_read(dev_t, struct uio *, int); |
174 | static int tap_cdev_write(dev_t, struct uio *, int); | | 174 | static int tap_cdev_write(dev_t, struct uio *, int); |
175 | static int tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *); | | 175 | static int tap_cdev_ioctl(dev_t, u_long, void *, int, struct lwp *); |
176 | static int tap_cdev_poll(dev_t, int, struct lwp *); | | 176 | static int tap_cdev_poll(dev_t, int, struct lwp *); |
177 | static int tap_cdev_kqfilter(dev_t, struct knote *); | | 177 | static int tap_cdev_kqfilter(dev_t, struct knote *); |
178 | | | 178 | |
179 | const struct cdevsw tap_cdevsw = { | | 179 | const struct cdevsw tap_cdevsw = { |
180 | tap_cdev_open, tap_cdev_close, | | 180 | tap_cdev_open, tap_cdev_close, |
181 | tap_cdev_read, tap_cdev_write, | | 181 | tap_cdev_read, tap_cdev_write, |
182 | tap_cdev_ioctl, nostop, notty, | | 182 | tap_cdev_ioctl, nostop, notty, |
183 | tap_cdev_poll, nommap, | | 183 | tap_cdev_poll, nommap, |
184 | tap_cdev_kqfilter, | | 184 | tap_cdev_kqfilter, |
185 | D_OTHER, | | 185 | D_OTHER, |
186 | }; | | 186 | }; |
187 | | | 187 | |
188 | #define TAP_CLONER 0xfffff /* Maximal minor value */ | | 188 | #define TAP_CLONER 0xfffff /* Maximal minor value */ |
189 | | | 189 | |
190 | /* kqueue-related routines */ | | 190 | /* kqueue-related routines */ |
191 | static void tap_kqdetach(struct knote *); | | 191 | static void tap_kqdetach(struct knote *); |
192 | static int tap_kqread(struct knote *, long); | | 192 | static int tap_kqread(struct knote *, long); |
193 | | | 193 | |
194 | /* | | 194 | /* |
195 | * Those are needed by the if_media interface. | | 195 | * Those are needed by the if_media interface. |
196 | */ | | 196 | */ |
197 | | | 197 | |
198 | static int tap_mediachange(struct ifnet *); | | 198 | static int tap_mediachange(struct ifnet *); |
199 | static void tap_mediastatus(struct ifnet *, struct ifmediareq *); | | 199 | static void tap_mediastatus(struct ifnet *, struct ifmediareq *); |
200 | | | 200 | |
201 | /* | | 201 | /* |
202 | * Those are needed by the ifnet interface, and would typically be | | 202 | * Those are needed by the ifnet interface, and would typically be |
203 | * there for any network interface driver. | | 203 | * there for any network interface driver. |
204 | * Some other routines are optional: watchdog and drain. | | 204 | * Some other routines are optional: watchdog and drain. |
205 | */ | | 205 | */ |
206 | | | 206 | |
207 | static void tap_start(struct ifnet *); | | 207 | static void tap_start(struct ifnet *); |
208 | static void tap_stop(struct ifnet *, int); | | 208 | static void tap_stop(struct ifnet *, int); |
209 | static int tap_init(struct ifnet *); | | 209 | static int tap_init(struct ifnet *); |
210 | static int tap_ioctl(struct ifnet *, u_long, void *); | | 210 | static int tap_ioctl(struct ifnet *, u_long, void *); |
211 | | | 211 | |
212 | /* Internal functions */ | | 212 | /* Internal functions */ |
213 | #if defined(COMPAT_40) || defined(MODULAR) | | 213 | #if defined(COMPAT_40) || defined(MODULAR) |
214 | static int tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *); | | 214 | static int tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *); |
215 | #endif | | 215 | #endif |
216 | static void tap_softintr(void *); | | 216 | static void tap_softintr(void *); |
217 | | | 217 | |
218 | /* | | 218 | /* |
219 | * tap is a clonable interface, although it is highly unrealistic for | | 219 | * tap is a clonable interface, although it is highly unrealistic for |
220 | * an Ethernet device. | | 220 | * an Ethernet device. |
221 | * | | 221 | * |
222 | * Here are the bits needed for a clonable interface. | | 222 | * Here are the bits needed for a clonable interface. |
223 | */ | | 223 | */ |
224 | static int tap_clone_create(struct if_clone *, int); | | 224 | static int tap_clone_create(struct if_clone *, int); |
225 | static int tap_clone_destroy(struct ifnet *); | | 225 | static int tap_clone_destroy(struct ifnet *); |
226 | | | 226 | |
227 | struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap", | | 227 | struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap", |
228 | tap_clone_create, | | 228 | tap_clone_create, |
229 | tap_clone_destroy); | | 229 | tap_clone_destroy); |
230 | | | 230 | |
 231 | /* Helper functions shared by the two cloning code paths */ | | 231 | /* Helper functions shared by the two cloning code paths */ |
232 | static struct tap_softc * tap_clone_creator(int); | | 232 | static struct tap_softc * tap_clone_creator(int); |
233 | int tap_clone_destroyer(device_t); | | 233 | int tap_clone_destroyer(device_t); |
234 | | | 234 | |
235 | void | | 235 | void |
236 | tapattach(int n) | | 236 | tapattach(int n) |
237 | { | | 237 | { |
238 | int error; | | 238 | int error; |
239 | | | 239 | |
240 | error = config_cfattach_attach(tap_cd.cd_name, &tap_ca); | | 240 | error = config_cfattach_attach(tap_cd.cd_name, &tap_ca); |
241 | if (error) { | | 241 | if (error) { |
242 | aprint_error("%s: unable to register cfattach\n", | | 242 | aprint_error("%s: unable to register cfattach\n", |
243 | tap_cd.cd_name); | | 243 | tap_cd.cd_name); |
244 | (void)config_cfdriver_detach(&tap_cd); | | 244 | (void)config_cfdriver_detach(&tap_cd); |
245 | return; | | 245 | return; |
246 | } | | 246 | } |
247 | | | 247 | |
248 | if_clone_attach(&tap_cloners); | | 248 | if_clone_attach(&tap_cloners); |
249 | } | | 249 | } |
250 | | | 250 | |
251 | /* Pretty much useless for a pseudo-device */ | | 251 | /* Pretty much useless for a pseudo-device */ |
252 | static int | | 252 | static int |
253 | tap_match(device_t parent, cfdata_t cfdata, void *arg) | | 253 | tap_match(device_t parent, cfdata_t cfdata, void *arg) |
254 | { | | 254 | { |
255 | | | 255 | |
256 | return (1); | | 256 | return (1); |
257 | } | | 257 | } |
258 | | | 258 | |
259 | void | | 259 | void |
260 | tap_attach(device_t parent, device_t self, void *aux) | | 260 | tap_attach(device_t parent, device_t self, void *aux) |
261 | { | | 261 | { |
262 | struct tap_softc *sc = device_private(self); | | 262 | struct tap_softc *sc = device_private(self); |
263 | struct ifnet *ifp; | | 263 | struct ifnet *ifp; |
264 | #if defined(COMPAT_40) || defined(MODULAR) | | 264 | #if defined(COMPAT_40) || defined(MODULAR) |
265 | const struct sysctlnode *node; | | 265 | const struct sysctlnode *node; |
266 | int error; | | 266 | int error; |
267 | #endif | | 267 | #endif |
268 | uint8_t enaddr[ETHER_ADDR_LEN] = | | 268 | uint8_t enaddr[ETHER_ADDR_LEN] = |
269 | { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff }; | | 269 | { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff }; |
270 | char enaddrstr[3 * ETHER_ADDR_LEN]; | | 270 | char enaddrstr[3 * ETHER_ADDR_LEN]; |
271 | struct timeval tv; | | 271 | struct timeval tv; |
272 | uint32_t ui; | | 272 | uint32_t ui; |
273 | | | 273 | |
274 | sc->sc_dev = self; | | 274 | sc->sc_dev = self; |
275 | sc->sc_sih = softint_establish(SOFTINT_CLOCK, tap_softintr, sc); | | 275 | sc->sc_sih = softint_establish(SOFTINT_CLOCK, tap_softintr, sc); |
276 | getnanotime(&sc->sc_btime); | | 276 | getnanotime(&sc->sc_btime); |
277 | sc->sc_atime = sc->sc_mtime = sc->sc_btime; | | 277 | sc->sc_atime = sc->sc_mtime = sc->sc_btime; |
278 | | | 278 | |
279 | if (!pmf_device_register(self, NULL, NULL)) | | 279 | if (!pmf_device_register(self, NULL, NULL)) |
280 | aprint_error_dev(self, "couldn't establish power handler\n"); | | 280 | aprint_error_dev(self, "couldn't establish power handler\n"); |
281 | | | 281 | |
282 | /* | | 282 | /* |
283 | * In order to obtain unique initial Ethernet address on a host, | | 283 | * In order to obtain unique initial Ethernet address on a host, |
284 | * do some randomisation using the current uptime. It's not meant | | 284 | * do some randomisation using the current uptime. It's not meant |
285 | * for anything but avoiding hard-coding an address. | | 285 | * for anything but avoiding hard-coding an address. |
286 | */ | | 286 | */ |
287 | getmicrouptime(&tv); | | 287 | getmicrouptime(&tv); |
288 | ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff; | | 288 | ui = (tv.tv_sec ^ tv.tv_usec) & 0xffffff; |
289 | memcpy(enaddr+3, (uint8_t *)&ui, 3); | | 289 | memcpy(enaddr+3, (uint8_t *)&ui, 3); |
290 | | | 290 | |
291 | aprint_verbose_dev(self, "Ethernet address %s\n", | | 291 | aprint_verbose_dev(self, "Ethernet address %s\n", |
292 | ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr)); | | 292 | ether_snprintf(enaddrstr, sizeof(enaddrstr), enaddr)); |
293 | | | 293 | |
294 | /* | | 294 | /* |
295 | * Why 1000baseT? Why not? You can add more. | | 295 | * Why 1000baseT? Why not? You can add more. |
296 | * | | 296 | * |
297 | * Note that there are 3 steps: init, one or several additions to | | 297 | * Note that there are 3 steps: init, one or several additions to |
298 | * list of supported media, and in the end, the selection of one | | 298 | * list of supported media, and in the end, the selection of one |
299 | * of them. | | 299 | * of them. |
300 | */ | | 300 | */ |
301 | ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus); | | 301 | ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus); |
302 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL); | | 302 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL); |
303 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); | | 303 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); |
304 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL); | | 304 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL); |
305 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); | | 305 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); |
306 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL); | | 306 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL); |
307 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); | | 307 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); |
308 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL); | | 308 | ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL); |
309 | ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO); | | 309 | ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO); |
310 | | | 310 | |
311 | /* | | 311 | /* |
312 | * One should note that an interface must do multicast in order | | 312 | * One should note that an interface must do multicast in order |
313 | * to support IPv6. | | 313 | * to support IPv6. |
314 | */ | | 314 | */ |
315 | ifp = &sc->sc_ec.ec_if; | | 315 | ifp = &sc->sc_ec.ec_if; |
316 | strcpy(ifp->if_xname, device_xname(self)); | | 316 | strcpy(ifp->if_xname, device_xname(self)); |
317 | ifp->if_softc = sc; | | 317 | ifp->if_softc = sc; |
318 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; | | 318 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
319 | ifp->if_ioctl = tap_ioctl; | | 319 | ifp->if_ioctl = tap_ioctl; |
320 | ifp->if_start = tap_start; | | 320 | ifp->if_start = tap_start; |
321 | ifp->if_stop = tap_stop; | | 321 | ifp->if_stop = tap_stop; |
322 | ifp->if_init = tap_init; | | 322 | ifp->if_init = tap_init; |
323 | IFQ_SET_READY(&ifp->if_snd); | | 323 | IFQ_SET_READY(&ifp->if_snd); |
324 | | | 324 | |
325 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; | | 325 | sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU; |
326 | | | 326 | |
 327 | /* Those steps are mandatory for an Ethernet driver, the first call | | 327 | /* Those steps are mandatory for an Ethernet driver, the first call |
328 | * being common to all network interface drivers. */ | | 328 | * being common to all network interface drivers. */ |
329 | if_attach(ifp); | | 329 | if_attach(ifp); |
330 | ether_ifattach(ifp, enaddr); | | 330 | ether_ifattach(ifp, enaddr); |
331 | | | 331 | |
332 | sc->sc_flags = 0; | | 332 | sc->sc_flags = 0; |
333 | | | 333 | |
334 | #if defined(COMPAT_40) || defined(MODULAR) | | 334 | #if defined(COMPAT_40) || defined(MODULAR) |
335 | /* | | 335 | /* |
336 | * Add a sysctl node for that interface. | | 336 | * Add a sysctl node for that interface. |
337 | * | | 337 | * |
338 | * The pointer transmitted is not a string, but instead a pointer to | | 338 | * The pointer transmitted is not a string, but instead a pointer to |
339 | * the softc structure, which we can use to build the string value on | | 339 | * the softc structure, which we can use to build the string value on |
340 | * the fly in the helper function of the node. See the comments for | | 340 | * the fly in the helper function of the node. See the comments for |
341 | * tap_sysctl_handler for details. | | 341 | * tap_sysctl_handler for details. |
342 | * | | 342 | * |
343 | * Usually sysctl_createv is called with CTL_CREATE as the before-last | | 343 | * Usually sysctl_createv is called with CTL_CREATE as the before-last |
344 | * component. However, we can allocate a number ourselves, as we are | | 344 | * component. However, we can allocate a number ourselves, as we are |
345 | * the only consumer of the net.link.<iface> node. In this case, the | | 345 | * the only consumer of the net.link.<iface> node. In this case, the |
346 | * unit number is conveniently used to number the node. CTL_CREATE | | 346 | * unit number is conveniently used to number the node. CTL_CREATE |
347 | * would just work, too. | | 347 | * would just work, too. |
348 | */ | | 348 | */ |
349 | if ((error = sysctl_createv(NULL, 0, NULL, | | 349 | if ((error = sysctl_createv(NULL, 0, NULL, |
350 | &node, CTLFLAG_READWRITE, | | 350 | &node, CTLFLAG_READWRITE, |
351 | CTLTYPE_STRING, device_xname(self), NULL, | | 351 | CTLTYPE_STRING, device_xname(self), NULL, |
352 | tap_sysctl_handler, 0, sc, 18, | | 352 | tap_sysctl_handler, 0, sc, 18, |
353 | CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev), | | 353 | CTL_NET, AF_LINK, tap_node, device_unit(sc->sc_dev), |
354 | CTL_EOL)) != 0) | | 354 | CTL_EOL)) != 0) |
355 | aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n", | | 355 | aprint_error_dev(self, "sysctl_createv returned %d, ignoring\n", |
356 | error); | | 356 | error); |
357 | #endif | | 357 | #endif |
358 | | | 358 | |
359 | /* | | 359 | /* |
360 | * Initialize the two locks for the device. | | 360 | * Initialize the two locks for the device. |
361 | * | | 361 | * |
362 | * We need a lock here because even though the tap device can be | | 362 | * We need a lock here because even though the tap device can be |
363 | * opened only once, the file descriptor might be passed to another | | 363 | * opened only once, the file descriptor might be passed to another |
364 | * process, say a fork(2)ed child. | | 364 | * process, say a fork(2)ed child. |
365 | * | | 365 | * |
366 | * The Giant saves us from most of the hassle, but since the read | | 366 | * The Giant saves us from most of the hassle, but since the read |
367 | * operation can sleep, we don't want two processes to wake up at | | 367 | * operation can sleep, we don't want two processes to wake up at |
368 | * the same moment and both try and dequeue a single packet. | | 368 | * the same moment and both try and dequeue a single packet. |
369 | * | | 369 | * |
370 | * The queue for event listeners (used by kqueue(9), see below) has | | 370 | * The queue for event listeners (used by kqueue(9), see below) has |
371 | * to be protected, too, but we don't need the same level of | | 371 | * to be protected, too, but we don't need the same level of |
372 | * complexity for that lock, so a simple spinning lock is fine. | | 372 | * complexity for that lock, so a simple spinning lock is fine. |
373 | */ | | 373 | */ |
374 | mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE); | | 374 | mutex_init(&sc->sc_rdlock, MUTEX_DEFAULT, IPL_NONE); |
375 | simple_lock_init(&sc->sc_kqlock); | | 375 | simple_lock_init(&sc->sc_kqlock); |
376 | | | 376 | |
377 | selinit(&sc->sc_rsel); | | 377 | selinit(&sc->sc_rsel); |
378 | } | | 378 | } |
379 | | | 379 | |
380 | /* | | 380 | /* |
381 | * When detaching, we do the inverse of what is done in the attach | | 381 | * When detaching, we do the inverse of what is done in the attach |
382 | * routine, in reversed order. | | 382 | * routine, in reversed order. |
383 | */ | | 383 | */ |
static int
tap_detach(device_t self, int flags)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
#if defined(COMPAT_40) || defined(MODULAR)
	int error;
#endif
	int s;

	/*
	 * Flag the device as going away first, so that concurrent
	 * closers (tap_fops_close) know not to re-enter destruction.
	 */
	sc->sc_flags |= TAP_GOING;
	s = splnet();
	tap_stop(ifp, 1);
	if_down(ifp);
	splx(s);

	softint_disestablish(sc->sc_sih);

#if defined(COMPAT_40) || defined(MODULAR)
	/*
	 * Destroying a single leaf is a very straightforward operation using
	 * sysctl_destroyv.  One should be sure to always end the path with
	 * CTL_EOL.
	 */
	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
	    device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_destroyv returned %d, ignoring\n", error);
#endif
	/* Inverse order of the attach routine. */
	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
	seldestroy(&sc->sc_rsel);
	mutex_destroy(&sc->sc_rdlock);

	pmf_device_deregister(self);

	return (0);
}
423 | | | 423 | |
424 | /* | | 424 | /* |
425 | * This function is called by the ifmedia layer to notify the driver | | 425 | * This function is called by the ifmedia layer to notify the driver |
426 | * that the user requested a media change. A real driver would | | 426 | * that the user requested a media change. A real driver would |
427 | * reconfigure the hardware. | | 427 | * reconfigure the hardware. |
428 | */ | | 428 | */ |
static int
tap_mediachange(struct ifnet *ifp)
{
	/* Virtual device: there is no hardware to reconfigure. */
	return 0;
}
434 | | | 434 | |
435 | /* | | 435 | /* |
436 | * Here the user asks for the currently used media. | | 436 | * Here the user asks for the currently used media. |
437 | */ | | 437 | */ |
438 | static void | | 438 | static void |
439 | tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr) | | 439 | tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr) |
440 | { | | 440 | { |
441 | struct tap_softc *sc = (struct tap_softc *)ifp->if_softc; | | 441 | struct tap_softc *sc = (struct tap_softc *)ifp->if_softc; |
442 | imr->ifm_active = sc->sc_im.ifm_cur->ifm_media; | | 442 | imr->ifm_active = sc->sc_im.ifm_cur->ifm_media; |
443 | } | | 443 | } |
444 | | | 444 | |
445 | /* | | 445 | /* |
446 | * This is the function where we SEND packets. | | 446 | * This is the function where we SEND packets. |
447 | * | | 447 | * |
448 | * There is no 'receive' equivalent. A typical driver will get | | 448 | * There is no 'receive' equivalent. A typical driver will get |
449 | * interrupts from the hardware, and from there will inject new packets | | 449 | * interrupts from the hardware, and from there will inject new packets |
450 | * into the network stack. | | 450 | * into the network stack. |
451 | * | | 451 | * |
452 | * Once handled, a packet must be freed. A real driver might not be able | | 452 | * Once handled, a packet must be freed. A real driver might not be able |
453 | * to fit all the pending packets into the hardware, and is allowed to | | 453 | * to fit all the pending packets into the hardware, and is allowed to |
454 | * return before having sent all the packets. It should then use the | | 454 | * return before having sent all the packets. It should then use the |
455 | * if_flags flag IFF_OACTIVE to notify the upper layer. | | 455 | * if_flags flag IFF_OACTIVE to notify the upper layer. |
456 | * | | 456 | * |
457 | * There are also other flags one should check, such as IFF_PAUSE. | | 457 | * There are also other flags one should check, such as IFF_PAUSE. |
458 | * | | 458 | * |
459 | * It is our duty to make packets available to BPF listeners. | | 459 | * It is our duty to make packets available to BPF listeners. |
460 | * | | 460 | * |
461 | * You should be aware that this function is called by the Ethernet layer | | 461 | * You should be aware that this function is called by the Ethernet layer |
462 | * at splnet(). | | 462 | * at splnet(). |
463 | * | | 463 | * |
464 | * When the device is opened, we have to pass the packet(s) to the | | 464 | * When the device is opened, we have to pass the packet(s) to the |
465 | * userland. For that we stay in OACTIVE mode while the userland gets | | 465 | * userland. For that we stay in OACTIVE mode while the userland gets |
466 | * the packets, and we send a signal to the processes waiting to read. | | 466 | * the packets, and we send a signal to the processes waiting to read. |
467 | * | | 467 | * |
468 | * wakeup(sc) is the counterpart to the tsleep call in | | 468 | * wakeup(sc) is the counterpart to the tsleep call in |
469 | * tap_dev_read, while selnotify() is used for kevent(2) and | | 469 | * tap_dev_read, while selnotify() is used for kevent(2) and |
470 | * poll(2) (which includes select(2)) listeners. | | 470 | * poll(2) (which includes select(2)) listeners. |
471 | */ | | 471 | */ |
static void
tap_start(struct ifnet *ifp)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct mbuf *m0;

	if ((sc->sc_flags & TAP_INUSE) == 0) {
		/* Simply drop packets */
		for(;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			if (m0 == NULL)
				return;

			/* Still count the drop as a send and show it to BPF. */
			ifp->if_opackets++;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m0);
#endif

			m_freem(m0);
		}
	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		/*
		 * Device is open: stay OACTIVE until userland drains the
		 * queue.  wakeup() pairs with the tsleep in tap_dev_read,
		 * selnotify() serves poll(2)/kevent(2) waiters, and the
		 * softint delivers SIGIO for async-I/O consumers.
		 */
		ifp->if_flags |= IFF_OACTIVE;
		wakeup(sc);
		selnotify(&sc->sc_rsel, 0, 1);
		if (sc->sc_flags & TAP_ASYNCIO)
			softint_schedule(sc->sc_sih);
	}
}
501 | | | 501 | |
502 | static void | | 502 | static void |
503 | tap_softintr(void *cookie) | | 503 | tap_softintr(void *cookie) |
504 | { | | 504 | { |
505 | struct tap_softc *sc; | | 505 | struct tap_softc *sc; |
506 | struct ifnet *ifp; | | 506 | struct ifnet *ifp; |
507 | int a, b; | | 507 | int a, b; |
508 | | | 508 | |
509 | sc = cookie; | | 509 | sc = cookie; |
510 | | | 510 | |
511 | if (sc->sc_flags & TAP_ASYNCIO) { | | 511 | if (sc->sc_flags & TAP_ASYNCIO) { |
512 | ifp = &sc->sc_ec.ec_if; | | 512 | ifp = &sc->sc_ec.ec_if; |
513 | if (ifp->if_flags & IFF_RUNNING) { | | 513 | if (ifp->if_flags & IFF_RUNNING) { |
514 | a = POLL_IN; | | 514 | a = POLL_IN; |
515 | b = POLLIN|POLLRDNORM; | | 515 | b = POLLIN|POLLRDNORM; |
516 | } else { | | 516 | } else { |
517 | a = POLL_HUP; | | 517 | a = POLL_HUP; |
518 | b = 0; | | 518 | b = 0; |
519 | } | | 519 | } |
520 | fownsignal(sc->sc_pgid, SIGIO, a, b, NULL); | | 520 | fownsignal(sc->sc_pgid, SIGIO, a, b, NULL); |
521 | } | | 521 | } |
522 | } | | 522 | } |
523 | | | 523 | |
524 | /* | | 524 | /* |
525 | * A typical driver will only contain the following handlers for | | 525 | * A typical driver will only contain the following handlers for |
526 | * ioctl calls, except SIOCSIFPHYADDR. | | 526 | * ioctl calls, except SIOCSIFPHYADDR. |
527 | * The latter is a hack I used to set the Ethernet address of the | | 527 | * The latter is a hack I used to set the Ethernet address of the |
528 | * faked device. | | 528 | * faked device. |
529 | * | | 529 | * |
530 | * Note that both ifmedia_ioctl() and ether_ioctl() have to be | | 530 | * Note that both ifmedia_ioctl() and ether_ioctl() have to be |
531 | * called under splnet(). | | 531 | * called under splnet(). |
532 | */ | | 532 | */ |
static int
tap_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	/* ifmedia_ioctl() and ether_ioctl() must be called at splnet(). */
	s = splnet();

	switch (cmd) {
#ifdef OSIOCSIFMEDIA
	case OSIOCSIFMEDIA:
#endif
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
		break;
#if defined(COMPAT_40) || defined(MODULAR)
	case SIOCSIFPHYADDR:
		/* Legacy hack for setting the faked Ethernet address. */
		error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
		break;
#endif
	default:
		error = ether_ioctl(ifp, cmd, data);
		/* ENETRESET means "reprogram multicast filters": nothing
		 * to do on a virtual device, so report success. */
		if (error == ENETRESET)
			error = 0;
		break;
	}

	splx(s);

	return (error);
}
566 | | | 566 | |
567 | #if defined(COMPAT_40) || defined(MODULAR) | | 567 | #if defined(COMPAT_40) || defined(MODULAR) |
568 | /* | | 568 | /* |
569 | * Helper function to set Ethernet address. This has been replaced by | | 569 | * Helper function to set Ethernet address. This has been replaced by |
570 | * the generic SIOCALIFADDR ioctl on a PF_LINK socket. | | 570 | * the generic SIOCALIFADDR ioctl on a PF_LINK socket. |
571 | */ | | 571 | */ |
572 | static int | | 572 | static int |
573 | tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra) | | 573 | tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra) |
574 | { | | 574 | { |
575 | const struct sockaddr *sa = &ifra->ifra_addr; | | 575 | const struct sockaddr *sa = &ifra->ifra_addr; |
576 | | | 576 | |
577 | if (sa->sa_family != AF_LINK) | | 577 | if (sa->sa_family != AF_LINK) |
578 | return (EINVAL); | | 578 | return (EINVAL); |
579 | | | 579 | |
580 | if_set_sadl(ifp, sa->sa_data, ETHER_ADDR_LEN, false); | | 580 | if_set_sadl(ifp, sa->sa_data, ETHER_ADDR_LEN, false); |
581 | | | 581 | |
582 | return (0); | | 582 | return (0); |
583 | } | | 583 | } |
584 | #endif | | 584 | #endif |
585 | | | 585 | |
586 | /* | | 586 | /* |
587 | * _init() would typically be called when an interface goes up, | | 587 | * _init() would typically be called when an interface goes up, |
588 | * meaning it should configure itself into the state in which it | | 588 | * meaning it should configure itself into the state in which it |
589 | * can send packets. | | 589 | * can send packets. |
590 | */ | | 590 | */ |
591 | static int | | 591 | static int |
592 | tap_init(struct ifnet *ifp) | | 592 | tap_init(struct ifnet *ifp) |
593 | { | | 593 | { |
594 | ifp->if_flags |= IFF_RUNNING; | | 594 | ifp->if_flags |= IFF_RUNNING; |
595 | | | 595 | |
596 | tap_start(ifp); | | 596 | tap_start(ifp); |
597 | | | 597 | |
598 | return (0); | | 598 | return (0); |
599 | } | | 599 | } |
600 | | | 600 | |
601 | /* | | 601 | /* |
602 | * _stop() is called when an interface goes down. It is our | | 602 | * _stop() is called when an interface goes down. It is our |
603 | * responsability to validate that state by clearing the | | 603 | * responsability to validate that state by clearing the |
604 | * IFF_RUNNING flag. | | 604 | * IFF_RUNNING flag. |
605 | * | | 605 | * |
606 | * We have to wake up all the sleeping processes to have the pending | | 606 | * We have to wake up all the sleeping processes to have the pending |
607 | * read requests cancelled. | | 607 | * read requests cancelled. |
608 | */ | | 608 | */ |
static void
tap_stop(struct ifnet *ifp, int disable)
{
	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;

	/*
	 * Clear IFF_RUNNING before waking everyone up so that pending
	 * readers, pollers and async-I/O consumers observe the interface
	 * as down and cancel their requests.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	wakeup(sc);
	selnotify(&sc->sc_rsel, 0, 1);
	if (sc->sc_flags & TAP_ASYNCIO)
		softint_schedule(sc->sc_sih);
}
620 | | | 620 | |
621 | /* | | 621 | /* |
622 | * The 'create' command of ifconfig can be used to create | | 622 | * The 'create' command of ifconfig can be used to create |
623 | * any numbered instance of a given device. Thus we have to | | 623 | * any numbered instance of a given device. Thus we have to |
624 | * make sure we have enough room in cd_devs to create the | | 624 | * make sure we have enough room in cd_devs to create the |
625 | * user-specified instance. config_attach_pseudo will do this | | 625 | * user-specified instance. config_attach_pseudo will do this |
626 | * for us. | | 626 | * for us. |
627 | */ | | 627 | */ |
628 | static int | | 628 | static int |
629 | tap_clone_create(struct if_clone *ifc, int unit) | | 629 | tap_clone_create(struct if_clone *ifc, int unit) |
630 | { | | 630 | { |
631 | if (tap_clone_creator(unit) == NULL) { | | 631 | if (tap_clone_creator(unit) == NULL) { |
632 | aprint_error("%s%d: unable to attach an instance\n", | | 632 | aprint_error("%s%d: unable to attach an instance\n", |
633 | tap_cd.cd_name, unit); | | 633 | tap_cd.cd_name, unit); |
634 | return (ENXIO); | | 634 | return (ENXIO); |
635 | } | | 635 | } |
636 | | | 636 | |
637 | return (0); | | 637 | return (0); |
638 | } | | 638 | } |
639 | | | 639 | |
640 | /* | | 640 | /* |
641 | * tap(4) can be cloned by two ways: | | 641 | * tap(4) can be cloned by two ways: |
642 | * using 'ifconfig tap0 create', which will use the network | | 642 | * using 'ifconfig tap0 create', which will use the network |
643 | * interface cloning API, and call tap_clone_create above. | | 643 | * interface cloning API, and call tap_clone_create above. |
644 | * opening the cloning device node, whose minor number is TAP_CLONER. | | 644 | * opening the cloning device node, whose minor number is TAP_CLONER. |
645 | * See below for an explanation on how this part work. | | 645 | * See below for an explanation on how this part work. |
646 | */ | | 646 | */ |
647 | static struct tap_softc * | | 647 | static struct tap_softc * |
648 | tap_clone_creator(int unit) | | 648 | tap_clone_creator(int unit) |
649 | { | | 649 | { |
650 | struct cfdata *cf; | | 650 | struct cfdata *cf; |
651 | | | 651 | |
652 | cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK); | | 652 | cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK); |
653 | cf->cf_name = tap_cd.cd_name; | | 653 | cf->cf_name = tap_cd.cd_name; |
654 | cf->cf_atname = tap_ca.ca_name; | | 654 | cf->cf_atname = tap_ca.ca_name; |
655 | if (unit == -1) { | | 655 | if (unit == -1) { |
656 | /* let autoconf find the first free one */ | | 656 | /* let autoconf find the first free one */ |
657 | cf->cf_unit = 0; | | 657 | cf->cf_unit = 0; |
658 | cf->cf_fstate = FSTATE_STAR; | | 658 | cf->cf_fstate = FSTATE_STAR; |
659 | } else { | | 659 | } else { |
660 | cf->cf_unit = unit; | | 660 | cf->cf_unit = unit; |
661 | cf->cf_fstate = FSTATE_FOUND; | | 661 | cf->cf_fstate = FSTATE_NOTFOUND; |
662 | } | | 662 | } |
663 | | | 663 | |
664 | return device_private(config_attach_pseudo(cf)); | | 664 | return device_private(config_attach_pseudo(cf)); |
665 | } | | 665 | } |
666 | | | 666 | |
667 | /* | | 667 | /* |
668 | * The clean design of if_clone and autoconf(9) makes that part | | 668 | * The clean design of if_clone and autoconf(9) makes that part |
669 | * really straightforward. The second argument of config_detach | | 669 | * really straightforward. The second argument of config_detach |
670 | * means neither QUIET nor FORCED. | | 670 | * means neither QUIET nor FORCED. |
671 | */ | | 671 | */ |
672 | static int | | 672 | static int |
673 | tap_clone_destroy(struct ifnet *ifp) | | 673 | tap_clone_destroy(struct ifnet *ifp) |
674 | { | | 674 | { |
675 | struct tap_softc *sc = ifp->if_softc; | | 675 | struct tap_softc *sc = ifp->if_softc; |
676 | | | 676 | |
677 | return tap_clone_destroyer(sc->sc_dev); | | 677 | return tap_clone_destroyer(sc->sc_dev); |
678 | } | | 678 | } |
679 | | | 679 | |
680 | int | | 680 | int |
681 | tap_clone_destroyer(device_t dev) | | 681 | tap_clone_destroyer(device_t dev) |
682 | { | | 682 | { |
683 | cfdata_t cf = device_cfdata(dev); | | 683 | cfdata_t cf = device_cfdata(dev); |
684 | int error; | | 684 | int error; |
685 | | | 685 | |
686 | if ((error = config_detach(dev, 0)) != 0) | | 686 | if ((error = config_detach(dev, 0)) != 0) |
687 | aprint_error_dev(dev, "unable to detach instance\n"); | | 687 | aprint_error_dev(dev, "unable to detach instance\n"); |
688 | free(cf, M_DEVBUF); | | 688 | free(cf, M_DEVBUF); |
689 | | | 689 | |
690 | return (error); | | 690 | return (error); |
691 | } | | 691 | } |
692 | | | 692 | |
693 | /* | | 693 | /* |
694 | * tap(4) is a bit of an hybrid device. It can be used in two different | | 694 | * tap(4) is a bit of an hybrid device. It can be used in two different |
695 | * ways: | | 695 | * ways: |
696 | * 1. ifconfig tapN create, then use /dev/tapN to read/write off it. | | 696 | * 1. ifconfig tapN create, then use /dev/tapN to read/write off it. |
697 | * 2. open /dev/tap, get a new interface created and read/write off it. | | 697 | * 2. open /dev/tap, get a new interface created and read/write off it. |
698 | * That interface is destroyed when the process that had it created exits. | | 698 | * That interface is destroyed when the process that had it created exits. |
699 | * | | 699 | * |
700 | * The first way is managed by the cdevsw structure, and you access interfaces | | 700 | * The first way is managed by the cdevsw structure, and you access interfaces |
701 | * through a (major, minor) mapping: tap4 is obtained by the minor number | | 701 | * through a (major, minor) mapping: tap4 is obtained by the minor number |
702 | * 4. The entry points for the cdevsw interface are prefixed by tap_cdev_. | | 702 | * 4. The entry points for the cdevsw interface are prefixed by tap_cdev_. |
703 | * | | 703 | * |
704 | * The second way is the so-called "cloning" device. It's a special minor | | 704 | * The second way is the so-called "cloning" device. It's a special minor |
705 | * number (chosen as the maximal number, to allow as much tap devices as | | 705 | * number (chosen as the maximal number, to allow as much tap devices as |
706 | * possible). The user first opens the cloner (e.g., /dev/tap), and that | | 706 | * possible). The user first opens the cloner (e.g., /dev/tap), and that |
707 | * call ends in tap_cdev_open. The actual place where it is handled is | | 707 | * call ends in tap_cdev_open. The actual place where it is handled is |
708 | * tap_dev_cloner. | | 708 | * tap_dev_cloner. |
709 | * | | 709 | * |
710 | * An tap device cannot be opened more than once at a time, so the cdevsw | | 710 | * An tap device cannot be opened more than once at a time, so the cdevsw |
711 | * part of open() does nothing but noting that the interface is being used and | | 711 | * part of open() does nothing but noting that the interface is being used and |
712 | * hence ready to actually handle packets. | | 712 | * hence ready to actually handle packets. |
713 | */ | | 713 | */ |
714 | | | 714 | |
715 | static int | | 715 | static int |
716 | tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l) | | 716 | tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l) |
717 | { | | 717 | { |
718 | struct tap_softc *sc; | | 718 | struct tap_softc *sc; |
719 | | | 719 | |
720 | if (minor(dev) == TAP_CLONER) | | 720 | if (minor(dev) == TAP_CLONER) |
721 | return tap_dev_cloner(l); | | 721 | return tap_dev_cloner(l); |
722 | | | 722 | |
723 | sc = device_lookup_private(&tap_cd, minor(dev)); | | 723 | sc = device_lookup_private(&tap_cd, minor(dev)); |
724 | if (sc == NULL) | | 724 | if (sc == NULL) |
725 | return (ENXIO); | | 725 | return (ENXIO); |
726 | | | 726 | |
727 | /* The device can only be opened once */ | | 727 | /* The device can only be opened once */ |
728 | if (sc->sc_flags & TAP_INUSE) | | 728 | if (sc->sc_flags & TAP_INUSE) |
729 | return (EBUSY); | | 729 | return (EBUSY); |
730 | sc->sc_flags |= TAP_INUSE; | | 730 | sc->sc_flags |= TAP_INUSE; |
731 | return (0); | | 731 | return (0); |
732 | } | | 732 | } |
733 | | | 733 | |
734 | /* | | 734 | /* |
735 | * There are several kinds of cloning devices, and the most simple is the one | | 735 | * There are several kinds of cloning devices, and the most simple is the one |
736 | * tap(4) uses. What it does is change the file descriptor with a new one, | | 736 | * tap(4) uses. What it does is change the file descriptor with a new one, |
737 | * with its own fileops structure (which maps to the various read, write, | | 737 | * with its own fileops structure (which maps to the various read, write, |
738 | * ioctl functions). It starts allocating a new file descriptor with falloc, | | 738 | * ioctl functions). It starts allocating a new file descriptor with falloc, |
739 | * then actually creates the new tap devices. | | 739 | * then actually creates the new tap devices. |
740 | * | | 740 | * |
741 | * Once those two steps are successful, we can re-wire the existing file | | 741 | * Once those two steps are successful, we can re-wire the existing file |
742 | * descriptor to its new self. This is done with fdclone(): it fills the fp | | 742 | * descriptor to its new self. This is done with fdclone(): it fills the fp |
743 | * structure as needed (notably f_data gets filled with the fifth parameter | | 743 | * structure as needed (notably f_data gets filled with the fifth parameter |
744 | * passed, the unit of the tap device which will allows us identifying the | | 744 | * passed, the unit of the tap device which will allows us identifying the |
745 | * device later), and returns EMOVEFD. | | 745 | * device later), and returns EMOVEFD. |
746 | * | | 746 | * |
747 | * That magic value is interpreted by sys_open() which then replaces the | | 747 | * That magic value is interpreted by sys_open() which then replaces the |
748 | * current file descriptor by the new one (through a magic member of struct | | 748 | * current file descriptor by the new one (through a magic member of struct |
749 | * lwp, l_dupfd). | | 749 | * lwp, l_dupfd). |
750 | * | | 750 | * |
751 | * The tap device is flagged as being busy since it otherwise could be | | 751 | * The tap device is flagged as being busy since it otherwise could be |
752 | * externally accessed through the corresponding device node with the cdevsw | | 752 | * externally accessed through the corresponding device node with the cdevsw |
753 | * interface. | | 753 | * interface. |
754 | */ | | 754 | */ |
755 | | | 755 | |
static int
tap_dev_cloner(struct lwp *l)
{
	struct tap_softc *sc;
	file_t *fp;
	int error, fd;

	/* Reserve a file descriptor first; cheap to undo on failure. */
	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return (error);

	/* unit -1: let autoconf pick the first free tap instance. */
	if ((sc = tap_clone_creator(-1)) == NULL) {
		fd_abort(curproc, fp, fd);
		return (ENXIO);
	}

	sc->sc_flags |= TAP_INUSE;

	/*
	 * fd_clone() stashes the unit number in f_data (recovered later
	 * by the tap_fops_* entry points) and returns EMOVEFD, which
	 * tells sys_open() to install the new descriptor.
	 */
	return fd_clone(fp, fd, FREAD|FWRITE, &tap_fileops,
	    (void *)(intptr_t)device_unit(sc->sc_dev));
}
776 | | | 776 | |
777 | /* | | 777 | /* |
778 | * While all other operations (read, write, ioctl, poll and kqfilter) are | | 778 | * While all other operations (read, write, ioctl, poll and kqfilter) are |
779 | * really the same whether we are in cdevsw or fileops mode, the close() | | 779 | * really the same whether we are in cdevsw or fileops mode, the close() |
780 | * function is slightly different in the two cases. | | 780 | * function is slightly different in the two cases. |
781 | * | | 781 | * |
782 | * As for the other, the core of it is shared in tap_dev_close. What | | 782 | * As for the other, the core of it is shared in tap_dev_close. What |
783 | * it does is sufficient for the cdevsw interface, but the cloning interface | | 783 | * it does is sufficient for the cdevsw interface, but the cloning interface |
784 | * needs another thing: the interface is destroyed when the processes that | | 784 | * needs another thing: the interface is destroyed when the processes that |
785 | * created it closes it. | | 785 | * created it closes it. |
786 | */ | | 786 | */ |
787 | static int | | 787 | static int |
788 | tap_cdev_close(dev_t dev, int flags, int fmt, | | 788 | tap_cdev_close(dev_t dev, int flags, int fmt, |
789 | struct lwp *l) | | 789 | struct lwp *l) |
790 | { | | 790 | { |
791 | struct tap_softc *sc = | | 791 | struct tap_softc *sc = |
792 | device_lookup_private(&tap_cd, minor(dev)); | | 792 | device_lookup_private(&tap_cd, minor(dev)); |
793 | | | 793 | |
794 | if (sc == NULL) | | 794 | if (sc == NULL) |
795 | return (ENXIO); | | 795 | return (ENXIO); |
796 | | | 796 | |
797 | return tap_dev_close(sc); | | 797 | return tap_dev_close(sc); |
798 | } | | 798 | } |
799 | | | 799 | |
800 | /* | | 800 | /* |
801 | * It might happen that the administrator used ifconfig to externally destroy | | 801 | * It might happen that the administrator used ifconfig to externally destroy |
802 | * the interface. In that case, tap_fops_close will be called while | | 802 | * the interface. In that case, tap_fops_close will be called while |
803 | * tap_detach is already happening. If we called it again from here, we | | 803 | * tap_detach is already happening. If we called it again from here, we |
804 | * would dead lock. TAP_GOING ensures that this situation doesn't happen. | | 804 | * would dead lock. TAP_GOING ensures that this situation doesn't happen. |
805 | */ | | 805 | */ |
static int
tap_fops_close(file_t *fp)
{
	/* The unit number was stashed in f_data by tap_dev_cloner. */
	int unit = (intptr_t)fp->f_data;
	struct tap_softc *sc;
	int error;

	sc = device_lookup_private(&tap_cd, unit);
	if (sc == NULL)
		return (ENXIO);

	/* tap_dev_close currently always succeeds, but it might not
	 * always be the case. */
	KERNEL_LOCK(1, NULL);
	if ((error = tap_dev_close(sc)) != 0) {
		KERNEL_UNLOCK_ONE(NULL);
		return (error);
	}

	/* Destroy the device now that it is no longer useful,
	 * unless it's already being destroyed. */
	if ((sc->sc_flags & TAP_GOING) != 0) {
		/* tap_detach is already running; destroying again here
		 * would deadlock (see the comment above this function). */
		KERNEL_UNLOCK_ONE(NULL);
		return (0);
	}

	error = tap_clone_destroyer(sc->sc_dev);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
836 | | | 836 | |
837 | static int | | 837 | static int |
838 | tap_dev_close(struct tap_softc *sc) | | 838 | tap_dev_close(struct tap_softc *sc) |
839 | { | | 839 | { |
840 | struct ifnet *ifp; | | 840 | struct ifnet *ifp; |
841 | int s; | | 841 | int s; |
842 | | | 842 | |
843 | s = splnet(); | | 843 | s = splnet(); |
844 | /* Let tap_start handle packets again */ | | 844 | /* Let tap_start handle packets again */ |
845 | ifp = &sc->sc_ec.ec_if; | | 845 | ifp = &sc->sc_ec.ec_if; |
846 | ifp->if_flags &= ~IFF_OACTIVE; | | 846 | ifp->if_flags &= ~IFF_OACTIVE; |
847 | | | 847 | |
848 | /* Purge output queue */ | | 848 | /* Purge output queue */ |
849 | if (!(IFQ_IS_EMPTY(&ifp->if_snd))) { | | 849 | if (!(IFQ_IS_EMPTY(&ifp->if_snd))) { |
850 | struct mbuf *m; | | 850 | struct mbuf *m; |
851 | | | 851 | |
852 | for (;;) { | | 852 | for (;;) { |
853 | IFQ_DEQUEUE(&ifp->if_snd, m); | | 853 | IFQ_DEQUEUE(&ifp->if_snd, m); |
854 | if (m == NULL) | | 854 | if (m == NULL) |
855 | break; | | 855 | break; |
856 | | | 856 | |
857 | ifp->if_opackets++; | | 857 | ifp->if_opackets++; |
858 | #if NBPFILTER > 0 | | 858 | #if NBPFILTER > 0 |
859 | if (ifp->if_bpf) | | 859 | if (ifp->if_bpf) |
860 | bpf_mtap(ifp->if_bpf, m); | | 860 | bpf_mtap(ifp->if_bpf, m); |
861 | #endif | | 861 | #endif |
862 | } | | 862 | } |
863 | } | | 863 | } |
864 | splx(s); | | 864 | splx(s); |
865 | | | 865 | |
866 | sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO); | | 866 | sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO); |
867 | | | 867 | |
868 | return (0); | | 868 | return (0); |
869 | } | | 869 | } |
870 | | | 870 | |
871 | static int | | 871 | static int |
872 | tap_cdev_read(dev_t dev, struct uio *uio, int flags) | | 872 | tap_cdev_read(dev_t dev, struct uio *uio, int flags) |
873 | { | | 873 | { |
874 | return tap_dev_read(minor(dev), uio, flags); | | 874 | return tap_dev_read(minor(dev), uio, flags); |
875 | } | | 875 | } |
876 | | | 876 | |
877 | static int | | 877 | static int |
878 | tap_fops_read(file_t *fp, off_t *offp, struct uio *uio, | | 878 | tap_fops_read(file_t *fp, off_t *offp, struct uio *uio, |
879 | kauth_cred_t cred, int flags) | | 879 | kauth_cred_t cred, int flags) |
880 | { | | 880 | { |
881 | int error; | | 881 | int error; |
882 | | | 882 | |
883 | KERNEL_LOCK(1, NULL); | | 883 | KERNEL_LOCK(1, NULL); |
884 | error = tap_dev_read((intptr_t)fp->f_data, uio, flags); | | 884 | error = tap_dev_read((intptr_t)fp->f_data, uio, flags); |
885 | KERNEL_UNLOCK_ONE(NULL); | | 885 | KERNEL_UNLOCK_ONE(NULL); |
886 | return error; | | 886 | return error; |
887 | } | | 887 | } |
888 | | | 888 | |
/*
 * Read one packet off the interface's send queue.
 *
 * One read consumes exactly one packet: the frame at the head of
 * if_snd is copied out to userland (silently truncated if the
 * caller's buffer is smaller than the packet) and freed.  Returns
 * ENXIO if the unit does not exist, EHOSTDOWN if the interface is
 * down, and EWOULDBLOCK in non-blocking (TAP_NBIO) mode when the
 * queue is empty or the read lock is contended.
 */
static int
tap_dev_read(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, *n;
	int error = 0, s;

	if (sc == NULL)
		return (ENXIO);

	/* Update the access time reported by tap_fops_stat(). */
	getnanotime(&sc->sc_atime);

	ifp = &sc->sc_ec.ec_if;
	if ((ifp->if_flags & IFF_UP) == 0)
		return (EHOSTDOWN);

	/*
	 * In the TAP_NBIO case, we have to make sure we won't be sleeping
	 */
	if ((sc->sc_flags & TAP_NBIO) != 0) {
		if (!mutex_tryenter(&sc->sc_rdlock))
			return (EWOULDBLOCK);
	} else {
		mutex_enter(&sc->sc_rdlock);
	}

	s = splnet();
	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/*
		 * We must release the lock before sleeping, and re-acquire it
		 * after.
		 */
		mutex_exit(&sc->sc_rdlock);
		if (sc->sc_flags & TAP_NBIO)
			error = EWOULDBLOCK;
		else
			error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
		splx(s);

		if (error != 0)
			return (error);
		/* The device might have been downed */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (EHOSTDOWN);
		if ((sc->sc_flags & TAP_NBIO)) {
			if (!mutex_tryenter(&sc->sc_rdlock))
				return (EWOULDBLOCK);
		} else {
			mutex_enter(&sc->sc_rdlock);
		}
		s = splnet();
	}

	/* Dequeue the packet the network stack queued for us. */
	IFQ_DEQUEUE(&ifp->if_snd, m);
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
	if (m == NULL) {
		error = 0;
		goto out;
	}

	ifp->if_opackets++;
#if NBPFILTER > 0
	/* Hand a copy to any BPF listeners on the interface. */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/*
	 * One read is one packet.
	 */
	do {
		error = uiomove(mtod(m, void *),
		    min(m->m_len, uio->uio_resid), uio);
		MFREE(m, n);
		m = n;
	} while (m != NULL && uio->uio_resid > 0 && error == 0);

	/* Free whatever part the caller's buffer could not hold. */
	if (m != NULL)
		m_freem(m);

out:
	mutex_exit(&sc->sc_rdlock);
	return (error);
}
976 | | | 976 | |
977 | static int | | 977 | static int |
978 | tap_fops_stat(file_t *fp, struct stat *st) | | 978 | tap_fops_stat(file_t *fp, struct stat *st) |
979 | { | | 979 | { |
980 | int error; | | 980 | int error; |
981 | struct tap_softc *sc; | | 981 | struct tap_softc *sc; |
982 | int unit = (uintptr_t)fp->f_data; | | 982 | int unit = (uintptr_t)fp->f_data; |
983 | | | 983 | |
984 | (void)memset(st, 0, sizeof(*st)); | | 984 | (void)memset(st, 0, sizeof(*st)); |
985 | | | 985 | |
986 | KERNEL_LOCK(1, NULL); | | 986 | KERNEL_LOCK(1, NULL); |
987 | sc = device_lookup_private(&tap_cd, unit); | | 987 | sc = device_lookup_private(&tap_cd, unit); |
988 | if (sc == NULL) { | | 988 | if (sc == NULL) { |
989 | error = ENXIO; | | 989 | error = ENXIO; |
990 | goto out; | | 990 | goto out; |
991 | } | | 991 | } |
992 | | | 992 | |
993 | st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit); | | 993 | st->st_dev = makedev(cdevsw_lookup_major(&tap_cdevsw), unit); |
994 | st->st_atimespec = sc->sc_atime; | | 994 | st->st_atimespec = sc->sc_atime; |
995 | st->st_mtimespec = sc->sc_mtime; | | 995 | st->st_mtimespec = sc->sc_mtime; |
996 | st->st_ctimespec = st->st_birthtimespec = sc->sc_btime; | | 996 | st->st_ctimespec = st->st_birthtimespec = sc->sc_btime; |
997 | st->st_uid = kauth_cred_geteuid(fp->f_cred); | | 997 | st->st_uid = kauth_cred_geteuid(fp->f_cred); |
998 | st->st_gid = kauth_cred_getegid(fp->f_cred); | | 998 | st->st_gid = kauth_cred_getegid(fp->f_cred); |
999 | out: | | 999 | out: |
1000 | KERNEL_UNLOCK_ONE(NULL); | | 1000 | KERNEL_UNLOCK_ONE(NULL); |
1001 | return error; | | 1001 | return error; |
1002 | } | | 1002 | } |
1003 | | | 1003 | |
1004 | static int | | 1004 | static int |
1005 | tap_cdev_write(dev_t dev, struct uio *uio, int flags) | | 1005 | tap_cdev_write(dev_t dev, struct uio *uio, int flags) |
1006 | { | | 1006 | { |
1007 | return tap_dev_write(minor(dev), uio, flags); | | 1007 | return tap_dev_write(minor(dev), uio, flags); |
1008 | } | | 1008 | } |
1009 | | | 1009 | |
1010 | static int | | 1010 | static int |
1011 | tap_fops_write(file_t *fp, off_t *offp, struct uio *uio, | | 1011 | tap_fops_write(file_t *fp, off_t *offp, struct uio *uio, |
1012 | kauth_cred_t cred, int flags) | | 1012 | kauth_cred_t cred, int flags) |
1013 | { | | 1013 | { |
1014 | int error; | | 1014 | int error; |
1015 | | | 1015 | |
1016 | KERNEL_LOCK(1, NULL); | | 1016 | KERNEL_LOCK(1, NULL); |
1017 | error = tap_dev_write((intptr_t)fp->f_data, uio, flags); | | 1017 | error = tap_dev_write((intptr_t)fp->f_data, uio, flags); |
1018 | KERNEL_UNLOCK_ONE(NULL); | | 1018 | KERNEL_UNLOCK_ONE(NULL); |
1019 | return error; | | 1019 | return error; |
1020 | } | | 1020 | } |
1021 | | | 1021 | |
/*
 * Inject one packet into the network stack.
 *
 * One write builds exactly one packet: the caller's buffer is copied
 * into an mbuf chain, offered to BPF listeners, then handed to the
 * interface's if_input routine as if it had arrived from the wire.
 * Returns ENXIO for a bad unit and ENOBUFS when mbufs are exhausted.
 */
static int
tap_dev_write(int unit, struct uio *uio, int flags)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	struct ifnet *ifp;
	struct mbuf *m, **mp;
	int error = 0;
	int s;

	if (sc == NULL)
		return (ENXIO);

	/* Update the modification time reported by tap_fops_stat(). */
	getnanotime(&sc->sc_mtime);
	ifp = &sc->sc_ec.ec_if;

	/* One write, one packet, that's the rule */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}
	m->m_pkthdr.len = uio->uio_resid;

	/*
	 * Copy the data in, growing the chain with plain mbufs as
	 * needed; the first iteration fills the header mbuf itself
	 * (*mp == m).
	 */
	mp = &m;
	while (error == 0 && uio->uio_resid > 0) {
		if (*mp != m) {
			MGET(*mp, M_DONTWAIT, MT_DATA);
			if (*mp == NULL) {
				error = ENOBUFS;
				break;
			}
		}
		(*mp)->m_len = min(MHLEN, uio->uio_resid);
		error = uiomove(mtod(*mp, void *), (*mp)->m_len, uio);
		mp = &(*mp)->m_next;
	}
	if (error) {
		ifp->if_ierrors++;
		m_freem(m);
		return (error);
	}

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
	/* Let BPF listeners see the injected frame. */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	s =splnet();
	(*ifp->if_input)(ifp, m);
	splx(s);

	return (0);
}
1078 | | | 1078 | |
1079 | static int | | 1079 | static int |
1080 | tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags, | | 1080 | tap_cdev_ioctl(dev_t dev, u_long cmd, void *data, int flags, |
1081 | struct lwp *l) | | 1081 | struct lwp *l) |
1082 | { | | 1082 | { |
1083 | return tap_dev_ioctl(minor(dev), cmd, data, l); | | 1083 | return tap_dev_ioctl(minor(dev), cmd, data, l); |
1084 | } | | 1084 | } |
1085 | | | 1085 | |
1086 | static int | | 1086 | static int |
1087 | tap_fops_ioctl(file_t *fp, u_long cmd, void *data) | | 1087 | tap_fops_ioctl(file_t *fp, u_long cmd, void *data) |
1088 | { | | 1088 | { |
1089 | return tap_dev_ioctl((intptr_t)fp->f_data, cmd, data, curlwp); | | 1089 | return tap_dev_ioctl((intptr_t)fp->f_data, cmd, data, curlwp); |
1090 | } | | 1090 | } |
1091 | | | 1091 | |
1092 | static int | | 1092 | static int |
1093 | tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l) | | 1093 | tap_dev_ioctl(int unit, u_long cmd, void *data, struct lwp *l) |
1094 | { | | 1094 | { |
1095 | struct tap_softc *sc = | | 1095 | struct tap_softc *sc = |
1096 | device_lookup_private(&tap_cd, unit); | | 1096 | device_lookup_private(&tap_cd, unit); |
1097 | int error = 0; | | 1097 | int error = 0; |
1098 | | | 1098 | |
1099 | if (sc == NULL) | | 1099 | if (sc == NULL) |
1100 | return (ENXIO); | | 1100 | return (ENXIO); |
1101 | | | 1101 | |
1102 | switch (cmd) { | | 1102 | switch (cmd) { |
1103 | case FIONREAD: | | 1103 | case FIONREAD: |
1104 | { | | 1104 | { |
1105 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 1105 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1106 | struct mbuf *m; | | 1106 | struct mbuf *m; |
1107 | int s; | | 1107 | int s; |
1108 | | | 1108 | |
1109 | s = splnet(); | | 1109 | s = splnet(); |
1110 | IFQ_POLL(&ifp->if_snd, m); | | 1110 | IFQ_POLL(&ifp->if_snd, m); |
1111 | | | 1111 | |
1112 | if (m == NULL) | | 1112 | if (m == NULL) |
1113 | *(int *)data = 0; | | 1113 | *(int *)data = 0; |
1114 | else | | 1114 | else |
1115 | *(int *)data = m->m_pkthdr.len; | | 1115 | *(int *)data = m->m_pkthdr.len; |
1116 | splx(s); | | 1116 | splx(s); |
1117 | } break; | | 1117 | } break; |
1118 | case TIOCSPGRP: | | 1118 | case TIOCSPGRP: |
1119 | case FIOSETOWN: | | 1119 | case FIOSETOWN: |
1120 | error = fsetown(&sc->sc_pgid, cmd, data); | | 1120 | error = fsetown(&sc->sc_pgid, cmd, data); |
1121 | break; | | 1121 | break; |
1122 | case TIOCGPGRP: | | 1122 | case TIOCGPGRP: |
1123 | case FIOGETOWN: | | 1123 | case FIOGETOWN: |
1124 | error = fgetown(sc->sc_pgid, cmd, data); | | 1124 | error = fgetown(sc->sc_pgid, cmd, data); |
1125 | break; | | 1125 | break; |
1126 | case FIOASYNC: | | 1126 | case FIOASYNC: |
1127 | if (*(int *)data) | | 1127 | if (*(int *)data) |
1128 | sc->sc_flags |= TAP_ASYNCIO; | | 1128 | sc->sc_flags |= TAP_ASYNCIO; |
1129 | else | | 1129 | else |
1130 | sc->sc_flags &= ~TAP_ASYNCIO; | | 1130 | sc->sc_flags &= ~TAP_ASYNCIO; |
1131 | break; | | 1131 | break; |
1132 | case FIONBIO: | | 1132 | case FIONBIO: |
1133 | if (*(int *)data) | | 1133 | if (*(int *)data) |
1134 | sc->sc_flags |= TAP_NBIO; | | 1134 | sc->sc_flags |= TAP_NBIO; |
1135 | else | | 1135 | else |
1136 | sc->sc_flags &= ~TAP_NBIO; | | 1136 | sc->sc_flags &= ~TAP_NBIO; |
1137 | break; | | 1137 | break; |
1138 | #ifdef OTAPGIFNAME | | 1138 | #ifdef OTAPGIFNAME |
1139 | case OTAPGIFNAME: | | 1139 | case OTAPGIFNAME: |
1140 | #endif | | 1140 | #endif |
1141 | case TAPGIFNAME: | | 1141 | case TAPGIFNAME: |
1142 | { | | 1142 | { |
1143 | struct ifreq *ifr = (struct ifreq *)data; | | 1143 | struct ifreq *ifr = (struct ifreq *)data; |
1144 | struct ifnet *ifp = &sc->sc_ec.ec_if; | | 1144 | struct ifnet *ifp = &sc->sc_ec.ec_if; |
1145 | | | 1145 | |
1146 | strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ); | | 1146 | strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ); |
1147 | } break; | | 1147 | } break; |
1148 | default: | | 1148 | default: |
1149 | error = ENOTTY; | | 1149 | error = ENOTTY; |
1150 | break; | | 1150 | break; |
1151 | } | | 1151 | } |
1152 | | | 1152 | |
1153 | return (0); | | 1153 | return (0); |
1154 | } | | 1154 | } |
1155 | | | 1155 | |
1156 | static int | | 1156 | static int |
1157 | tap_cdev_poll(dev_t dev, int events, struct lwp *l) | | 1157 | tap_cdev_poll(dev_t dev, int events, struct lwp *l) |
1158 | { | | 1158 | { |
1159 | return tap_dev_poll(minor(dev), events, l); | | 1159 | return tap_dev_poll(minor(dev), events, l); |
1160 | } | | 1160 | } |
1161 | | | 1161 | |
1162 | static int | | 1162 | static int |
1163 | tap_fops_poll(file_t *fp, int events) | | 1163 | tap_fops_poll(file_t *fp, int events) |
1164 | { | | 1164 | { |
1165 | return tap_dev_poll((intptr_t)fp->f_data, events, curlwp); | | 1165 | return tap_dev_poll((intptr_t)fp->f_data, events, curlwp); |
1166 | } | | 1166 | } |
1167 | | | 1167 | |
/*
 * Common poll(2) handler.
 *
 * The descriptor is readable when a packet is waiting on the
 * interface send queue; otherwise the caller is recorded for a later
 * wakeup via selrecord().  Writes can always be attempted, so
 * POLLOUT/POLLWRNORM are always reported.  Returns POLLERR if the
 * unit does not exist.
 */
static int
tap_dev_poll(int unit, int events, struct lwp *l)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);
	int revents = 0;

	if (sc == NULL)
		return POLLERR;

	if (events & (POLLIN|POLLRDNORM)) {
		struct ifnet *ifp = &sc->sc_ec.ec_if;
		struct mbuf *m;
		int s;

		s = splnet();
		IFQ_POLL(&ifp->if_snd, m);
		splx(s);

		if (m != NULL)
			revents |= events & (POLLIN|POLLRDNORM);
		else {
			/*
			 * sc_kqlock also guards the klist hanging off
			 * sc_rsel (see tap_dev_kqfilter), so take it
			 * around selrecord().
			 */
			simple_lock(&sc->sc_kqlock);
			selrecord(l, &sc->sc_rsel);
			simple_unlock(&sc->sc_kqlock);
		}
	}
	revents |= events & (POLLOUT|POLLWRNORM);

	return (revents);
}
1199 | | | 1199 | |
/*
 * kqueue filterops: EVFILT_READ reports the size of the packet at the
 * head of the send queue (tap_kqread); EVFILT_WRITE always reports
 * ready (filt_seltrue).  Both detach through tap_kqdetach().
 */
static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
	tap_kqread };
static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
	filt_seltrue };
1204 | | | 1204 | |
1205 | static int | | 1205 | static int |
1206 | tap_cdev_kqfilter(dev_t dev, struct knote *kn) | | 1206 | tap_cdev_kqfilter(dev_t dev, struct knote *kn) |
1207 | { | | 1207 | { |
1208 | return tap_dev_kqfilter(minor(dev), kn); | | 1208 | return tap_dev_kqfilter(minor(dev), kn); |
1209 | } | | 1209 | } |
1210 | | | 1210 | |
1211 | static int | | 1211 | static int |
1212 | tap_fops_kqfilter(file_t *fp, struct knote *kn) | | 1212 | tap_fops_kqfilter(file_t *fp, struct knote *kn) |
1213 | { | | 1213 | { |
1214 | return tap_dev_kqfilter((intptr_t)fp->f_data, kn); | | 1214 | return tap_dev_kqfilter((intptr_t)fp->f_data, kn); |
1215 | } | | 1215 | } |
1216 | | | 1216 | |
/*
 * Common kqueue(2) attach handler.
 *
 * EVFILT_READ knotes fire when a packet is queued (tap_kqread);
 * EVFILT_WRITE knotes are always active (filt_seltrue).  Any other
 * filter is rejected with EINVAL.  The knote is linked onto the
 * softc's klist under sc_kqlock.  Returns ENXIO for a bad unit.
 */
static int
tap_dev_kqfilter(int unit, struct knote *kn)
{
	struct tap_softc *sc =
	    device_lookup_private(&tap_cd, unit);

	if (sc == NULL)
		return (ENXIO);

	KERNEL_LOCK(1, NULL);
	switch(kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &tap_read_filterops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &tap_seltrue_filterops;
		break;
	default:
		KERNEL_UNLOCK_ONE(NULL);
		return (EINVAL);
	}

	/* Remember the softc for tap_kqdetach()/tap_kqread(). */
	kn->kn_hook = sc;
	simple_lock(&sc->sc_kqlock);
	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
	simple_unlock(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
	return (0);
}
1246 | | | 1246 | |
/*
 * kqueue detach: unlink the knote from the softc's klist, under the
 * same sc_kqlock that protects insertion in tap_dev_kqfilter().
 */
static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	simple_lock(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	simple_unlock(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
}
1258 | | | 1258 | |
/*
 * EVFILT_READ event filter: set kn_data to the length of the packet
 * at the head of the interface send queue (0 if the queue is empty)
 * and return non-zero when the event is active, i.e. when a packet
 * is waiting to be read.
 */
static int
tap_kqread(struct knote *kn, long hint)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mbuf *m;
	int s, rv;

	KERNEL_LOCK(1, NULL);
	s = splnet();
	/* Peek without dequeueing; the packet stays for tap_dev_read(). */
	IFQ_POLL(&ifp->if_snd, m);

	if (m == NULL)
		kn->kn_data = 0;
	else
		kn->kn_data = m->m_pkthdr.len;
	splx(s);
	rv = (kn->kn_data != 0 ? 1 : 0);
	KERNEL_UNLOCK_ONE(NULL);
	return rv;
}
1280 | | | 1280 | |
1281 | #if defined(COMPAT_40) || defined(MODULAR) | | 1281 | #if defined(COMPAT_40) || defined(MODULAR) |
1282 | /* | | 1282 | /* |
1283 | * sysctl management routines | | 1283 | * sysctl management routines |
1284 | * You can set the address of an interface through: | | 1284 | * You can set the address of an interface through: |
1285 | * net.link.tap.tap<number> | | 1285 | * net.link.tap.tap<number> |
1286 | * | | 1286 | * |
1287 | * Note the consistent use of tap_log in order to use | | 1287 | * Note the consistent use of tap_log in order to use |
1288 | * sysctl_teardown at unload time. | | 1288 | * sysctl_teardown at unload time. |
1289 | * | | 1289 | * |
1290 | * In the kernel you will find a lot of SYSCTL_SETUP blocks. Those | | 1290 | * In the kernel you will find a lot of SYSCTL_SETUP blocks. Those |
1291 | * blocks register a function in a special section of the kernel | | 1291 | * blocks register a function in a special section of the kernel |
1292 | * (called a link set) which is used at init_sysctl() time to cycle | | 1292 | * (called a link set) which is used at init_sysctl() time to cycle |
1293 | * through all those functions to create the kernel's sysctl tree. | | 1293 | * through all those functions to create the kernel's sysctl tree. |
1294 | * | | 1294 | * |
1295 | * It is not possible to use link sets in a module, so the | | 1295 | * It is not possible to use link sets in a module, so the |
1296 | * easiest is to simply call our own setup routine at load time. | | 1296 | * easiest is to simply call our own setup routine at load time. |
1297 | * | | 1297 | * |
1298 | * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the | | 1298 | * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the |
1299 | * CTLFLAG_PERMANENT flag, meaning they cannot be removed. Once the | | 1299 | * CTLFLAG_PERMANENT flag, meaning they cannot be removed. Once the |
1300 | * whole kernel sysctl tree is built, it is not possible to add any | | 1300 | * whole kernel sysctl tree is built, it is not possible to add any |
1301 | * permanent node. | | 1301 | * permanent node. |
1302 | * | | 1302 | * |
1303 | * It should be noted that we're not saving the sysctlnode pointer | | 1303 | * It should be noted that we're not saving the sysctlnode pointer |
1304 | * we are returned when creating the "tap" node. That structure | | 1304 | * we are returned when creating the "tap" node. That structure |
1305 | * cannot be trusted once out of the calling function, as it might | | 1305 | * cannot be trusted once out of the calling function, as it might |
1306 | * get reused. So we just save the MIB number, and always give the | | 1306 | * get reused. So we just save the MIB number, and always give the |
1307 | * full path starting from the root for later calls to sysctl_createv | | 1307 | * full path starting from the root for later calls to sysctl_createv |
1308 | * and sysctl_destroyv. | | 1308 | * and sysctl_destroyv. |
1309 | */ | | 1309 | */ |
SYSCTL_SETUP(sysctl_tap_setup, "sysctl net.link.tap subtree setup")
{
	const struct sysctlnode *node;
	int error = 0;

	/* Ensure the net.* root exists. */
	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "net", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_EOL)) != 0)
		return;

	/* Ensure the net.link node exists. */
	if ((error = sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "link", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_EOL)) != 0)
		return;

	/*
	 * The first four parameters of sysctl_createv are for management.
	 *
	 * The four that follow, here starting with a '0' for the flags,
	 * describe the node.
	 *
	 * The next series of four set its value, through various possible
	 * means.
	 *
	 * Last but not least, the path to the node is described.  That path
	 * is relative to the given root (third argument).  Here we're
	 * starting from the root.
	 */
	if ((error = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "tap", NULL,
	    NULL, 0, NULL, 0,
	    CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
		return;
	/* Remember only the MIB number; the node pointer may be reused. */
	tap_node = node->sysctl_num;
}
1350 | | | 1350 | |
1351 | /* | | 1351 | /* |
1352 | * The helper functions make Andrew Brown's interface really | | 1352 | * The helper functions make Andrew Brown's interface really |
 * shine.  It makes it possible to create values on the fly, whether
 * the sysctl value is being read or written.
1355 | * | | 1355 | * |
1356 | * As shown as an example in the man page, the first step is to | | 1356 | * As shown as an example in the man page, the first step is to |
1357 | * create a copy of the node to have sysctl_lookup work on it. | | 1357 | * create a copy of the node to have sysctl_lookup work on it. |
1358 | * | | 1358 | * |
1359 | * Here, we have more work to do than just a copy, since we have | | 1359 | * Here, we have more work to do than just a copy, since we have |
1360 | * to create the string. The first step is to collect the actual | | 1360 | * to create the string. The first step is to collect the actual |
1361 | * value of the node, which is a convenient pointer to the softc | | 1361 | * value of the node, which is a convenient pointer to the softc |
1362 | * of the interface. From there we create the string and use it | | 1362 | * of the interface. From there we create the string and use it |
1363 | * as the value, but only for the *copy* of the node. | | 1363 | * as the value, but only for the *copy* of the node. |
1364 | * | | 1364 | * |
1365 | * Then we let sysctl_lookup do the magic, which consists in | | 1365 | * Then we let sysctl_lookup do the magic, which consists in |
1366 | * setting oldp and newp as required by the operation. When the | | 1366 | * setting oldp and newp as required by the operation. When the |
1367 | * value is read, that means that the string will be copied to | | 1367 | * value is read, that means that the string will be copied to |
1368 | * the user, and when it is written, the new value will be copied | | 1368 | * the user, and when it is written, the new value will be copied |
1369 | * over in the addr array. | | 1369 | * over in the addr array. |
1370 | * | | 1370 | * |
1371 | * If newp is NULL, the user was reading the value, so we don't | | 1371 | * If newp is NULL, the user was reading the value, so we don't |
1372 | * have anything else to do. If a new value was written, we | | 1372 | * have anything else to do. If a new value was written, we |
1373 | * have to check it. | | 1373 | * have to check it. |
1374 | * | | 1374 | * |
1375 | * If it is incorrect, we can return an error and leave 'node' as | | 1375 | * If it is incorrect, we can return an error and leave 'node' as |
1376 | * it is: since it is a copy of the actual node, the change will | | 1376 | * it is: since it is a copy of the actual node, the change will |
1377 | * be forgotten. | | 1377 | * be forgotten. |
1378 | * | | 1378 | * |
1379 | * Upon a correct input, we commit the change to the ifnet | | 1379 | * Upon a correct input, we commit the change to the ifnet |
1380 | * structure of our interface. | | 1380 | * structure of our interface. |
1381 | */ | | 1381 | */ |
/*
 * sysctl handler for net.link.tap.tap<unit>.
 *
 * Reads return the interface's current link-level address formatted
 * as a string; writes parse the string and install the new address
 * via if_set_sadl().  Returns EINVAL for a string of implausible
 * length or one that does not parse as an Ethernet address.
 */
static int
tap_sysctl_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct tap_softc *sc;
	struct ifnet *ifp;
	int error;
	size_t len;
	char addr[3 * ETHER_ADDR_LEN];
	uint8_t enaddr[ETHER_ADDR_LEN];

	/* Work on a copy of the node, with the address string as value. */
	node = *rnode;
	sc = node.sysctl_data;
	ifp = &sc->sc_ec.ec_if;
	(void)ether_snprintf(addr, sizeof(addr), CLLADDR(ifp->if_sadl));
	node.sysctl_data = addr;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	/* Sanity-check the length of what the user wrote into addr. */
	len = strlen(addr);
	if (len < 11 || len > 17)
		return (EINVAL);

	/* Commit change */
	if (ether_nonstatic_aton(enaddr, addr) != 0)
		return (EINVAL);
	if_set_sadl(ifp, enaddr, ETHER_ADDR_LEN, false);
	return (error);
}
1412 | #endif | | 1412 | #endif |