Wed Jul 14 06:23:06 2021 UTC ()
Make an mbuf writable before un-tagging


(yamaguchi)
diff -r1.157 -r1.158 src/sys/net/if_vlan.c

cvs diff -r1.157 -r1.158 src/sys/net/if_vlan.c (switch to unified diff)

--- src/sys/net/if_vlan.c 2021/07/06 02:39:46 1.157
+++ src/sys/net/if_vlan.c 2021/07/14 06:23:06 1.158
@@ -1,1729 +1,1737 @@ @@ -1,1729 +1,1737 @@
1/* $NetBSD: if_vlan.c,v 1.157 2021/07/06 02:39:46 yamaguchi Exp $ */ 1/* $NetBSD: if_vlan.c,v 1.158 2021/07/14 06:23:06 yamaguchi Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc. 8 * by Andrew Doran, and by Jason R. Thorpe of Zembu Labs, Inc.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright 1998 Massachusetts Institute of Technology 33 * Copyright 1998 Massachusetts Institute of Technology
34 * 34 *
35 * Permission to use, copy, modify, and distribute this software and 35 * Permission to use, copy, modify, and distribute this software and
36 * its documentation for any purpose and without fee is hereby 36 * its documentation for any purpose and without fee is hereby
37 * granted, provided that both the above copyright notice and this 37 * granted, provided that both the above copyright notice and this
38 * permission notice appear in all copies, that both the above 38 * permission notice appear in all copies, that both the above
39 * copyright notice and this permission notice appear in all 39 * copyright notice and this permission notice appear in all
40 * supporting documentation, and that the name of M.I.T. not be used 40 * supporting documentation, and that the name of M.I.T. not be used
41 * in advertising or publicity pertaining to distribution of the 41 * in advertising or publicity pertaining to distribution of the
42 * software without specific, written prior permission. M.I.T. makes 42 * software without specific, written prior permission. M.I.T. makes
43 * no representations about the suitability of this software for any 43 * no representations about the suitability of this software for any
44 * purpose. It is provided "as is" without express or implied 44 * purpose. It is provided "as is" without express or implied
45 * warranty. 45 * warranty.
46 * 46 *
47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS 47 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, 48 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 49 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT 50 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 51 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 54 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 55 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 56 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE. 58 * SUCH DAMAGE.
59 * 59 *
60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp 60 * from FreeBSD: if_vlan.c,v 1.16 2000/03/26 15:21:40 charnier Exp
61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp 61 * via OpenBSD: if_vlan.c,v 1.4 2000/05/15 19:15:00 chris Exp
62 */ 62 */
63 63
64/* 64/*
65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be 65 * if_vlan.c - pseudo-device driver for IEEE 802.1Q virtual LANs. Might be
66 * extended some day to also handle IEEE 802.1P priority tagging. This is 66 * extended some day to also handle IEEE 802.1P priority tagging. This is
67 * sort of sneaky in the implementation, since we need to pretend to be 67 * sort of sneaky in the implementation, since we need to pretend to be
68 * enough of an Ethernet implementation to make ARP work. The way we do 68 * enough of an Ethernet implementation to make ARP work. The way we do
69 * this is by telling everyone that we are an Ethernet interface, and then 69 * this is by telling everyone that we are an Ethernet interface, and then
70 * catch the packets that ether_output() left on our output queue when it 70 * catch the packets that ether_output() left on our output queue when it
71 * calls if_start(), rewrite them for use by the real outgoing interface, 71 * calls if_start(), rewrite them for use by the real outgoing interface,
72 * and ask it to send them. 72 * and ask it to send them.
73 * 73 *
74 * TODO: 74 * TODO:
75 * 75 *
76 * - Need some way to notify vlan interfaces when the parent 76 * - Need some way to notify vlan interfaces when the parent
77 * interface changes MTU. 77 * interface changes MTU.
78 */ 78 */
79 79
80#include <sys/cdefs.h> 80#include <sys/cdefs.h>
81__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.157 2021/07/06 02:39:46 yamaguchi Exp $"); 81__KERNEL_RCSID(0, "$NetBSD: if_vlan.c,v 1.158 2021/07/14 06:23:06 yamaguchi Exp $");
82 82
83#ifdef _KERNEL_OPT 83#ifdef _KERNEL_OPT
84#include "opt_inet.h" 84#include "opt_inet.h"
85#include "opt_net_mpsafe.h" 85#include "opt_net_mpsafe.h"
86#endif 86#endif
87 87
88#include <sys/param.h> 88#include <sys/param.h>
89#include <sys/systm.h> 89#include <sys/systm.h>
90#include <sys/kernel.h> 90#include <sys/kernel.h>
91#include <sys/mbuf.h> 91#include <sys/mbuf.h>
92#include <sys/queue.h> 92#include <sys/queue.h>
93#include <sys/socket.h> 93#include <sys/socket.h>
94#include <sys/sockio.h> 94#include <sys/sockio.h>
95#include <sys/systm.h> 95#include <sys/systm.h>
96#include <sys/proc.h> 96#include <sys/proc.h>
97#include <sys/kauth.h> 97#include <sys/kauth.h>
98#include <sys/mutex.h> 98#include <sys/mutex.h>
99#include <sys/kmem.h> 99#include <sys/kmem.h>
100#include <sys/cpu.h> 100#include <sys/cpu.h>
101#include <sys/pserialize.h> 101#include <sys/pserialize.h>
102#include <sys/psref.h> 102#include <sys/psref.h>
103#include <sys/pslist.h> 103#include <sys/pslist.h>
104#include <sys/atomic.h> 104#include <sys/atomic.h>
105#include <sys/device.h> 105#include <sys/device.h>
106#include <sys/module.h> 106#include <sys/module.h>
107 107
108#include <net/bpf.h> 108#include <net/bpf.h>
109#include <net/if.h> 109#include <net/if.h>
110#include <net/if_dl.h> 110#include <net/if_dl.h>
111#include <net/if_types.h> 111#include <net/if_types.h>
112#include <net/if_ether.h> 112#include <net/if_ether.h>
113#include <net/if_vlanvar.h> 113#include <net/if_vlanvar.h>
114 114
115#ifdef INET 115#ifdef INET
116#include <netinet/in.h> 116#include <netinet/in.h>
117#include <netinet/if_inarp.h> 117#include <netinet/if_inarp.h>
118#endif 118#endif
119#ifdef INET6 119#ifdef INET6
120#include <netinet6/in6_ifattach.h> 120#include <netinet6/in6_ifattach.h>
121#include <netinet6/in6_var.h> 121#include <netinet6/in6_var.h>
122#include <netinet6/nd6.h> 122#include <netinet6/nd6.h>
123#endif 123#endif
124 124
125#include "ioconf.h" 125#include "ioconf.h"
126 126
127struct vlan_mc_entry { 127struct vlan_mc_entry {
128 LIST_ENTRY(vlan_mc_entry) mc_entries; 128 LIST_ENTRY(vlan_mc_entry) mc_entries;
129 /* 129 /*
130 * A key to identify this entry. The mc_addr below can't be 130 * A key to identify this entry. The mc_addr below can't be
131 * used since multiple sockaddr may mapped into the same 131 * used since multiple sockaddr may mapped into the same
132 * ether_multi (e.g., AF_UNSPEC). 132 * ether_multi (e.g., AF_UNSPEC).
133 */ 133 */
134 struct ether_multi *mc_enm; 134 struct ether_multi *mc_enm;
135 struct sockaddr_storage mc_addr; 135 struct sockaddr_storage mc_addr;
136}; 136};
137 137
138struct ifvlan_linkmib { 138struct ifvlan_linkmib {
139 struct ifvlan *ifvm_ifvlan; 139 struct ifvlan *ifvm_ifvlan;
140 const struct vlan_multisw *ifvm_msw; 140 const struct vlan_multisw *ifvm_msw;
141 int ifvm_encaplen; /* encapsulation length */ 141 int ifvm_encaplen; /* encapsulation length */
142 int ifvm_mtufudge; /* MTU fudged by this much */ 142 int ifvm_mtufudge; /* MTU fudged by this much */
143 int ifvm_mintu; /* min transmission unit */ 143 int ifvm_mintu; /* min transmission unit */
144 uint16_t ifvm_proto; /* encapsulation ethertype */ 144 uint16_t ifvm_proto; /* encapsulation ethertype */
145 uint16_t ifvm_tag; /* tag to apply on packets */ 145 uint16_t ifvm_tag; /* tag to apply on packets */
146 struct ifnet *ifvm_p; /* parent interface of this vlan */ 146 struct ifnet *ifvm_p; /* parent interface of this vlan */
147 147
148 struct psref_target ifvm_psref; 148 struct psref_target ifvm_psref;
149}; 149};
150 150
151struct ifvlan { 151struct ifvlan {
152 struct ethercom ifv_ec; 152 struct ethercom ifv_ec;
153 struct ifvlan_linkmib *ifv_mib; /* 153 struct ifvlan_linkmib *ifv_mib; /*
154 * reader must use vlan_getref_linkmib() 154 * reader must use vlan_getref_linkmib()
155 * instead of direct dereference 155 * instead of direct dereference
156 */ 156 */
157 kmutex_t ifv_lock; /* writer lock for ifv_mib */ 157 kmutex_t ifv_lock; /* writer lock for ifv_mib */
158 pserialize_t ifv_psz; 158 pserialize_t ifv_psz;
159 159
160 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead; 160 LIST_HEAD(__vlan_mchead, vlan_mc_entry) ifv_mc_listhead;
161 LIST_ENTRY(ifvlan) ifv_list; 161 LIST_ENTRY(ifvlan) ifv_list;
162 struct pslist_entry ifv_hash; 162 struct pslist_entry ifv_hash;
163 int ifv_flags; 163 int ifv_flags;
164}; 164};
165 165
#define	IFVF_PROMISC	0x01		/* promiscuous mode enabled */

/* Shorthand for the embedded ifnet. */
#define	ifv_if		ifv_ec.ec_if

/* Historical accessor shorthands for the link mib fields. */
#define	ifv_msw		ifv_mib.ifvm_msw
#define	ifv_encaplen	ifv_mib.ifvm_encaplen
#define	ifv_mtufudge	ifv_mib.ifvm_mtufudge
#define	ifv_mintu	ifv_mib.ifvm_mintu
#define	ifv_tag		ifv_mib.ifvm_tag
/*
 * Per-parent-type multicast operations: add, delete, and purge-all.
 * Only Ethernet parents are supported at present.
 */
struct vlan_multisw {
	int	(*vmsw_addmulti)(struct ifvlan *, struct ifreq *);
	int	(*vmsw_delmulti)(struct ifvlan *, struct ifreq *);
	void	(*vmsw_purgemulti)(struct ifvlan *);
};

static int	vlan_ether_addmulti(struct ifvlan *, struct ifreq *);
static int	vlan_ether_delmulti(struct ifvlan *, struct ifreq *);
static void	vlan_ether_purgemulti(struct ifvlan *);

const struct vlan_multisw vlan_ether_multisw = {
	.vmsw_addmulti = vlan_ether_addmulti,
	.vmsw_delmulti = vlan_ether_delmulti,
	.vmsw_purgemulti = vlan_ether_purgemulti,
};
191 191
192static int vlan_clone_create(struct if_clone *, int); 192static int vlan_clone_create(struct if_clone *, int);
193static int vlan_clone_destroy(struct ifnet *); 193static int vlan_clone_destroy(struct ifnet *);
194static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t); 194static int vlan_config(struct ifvlan *, struct ifnet *, uint16_t);
195static int vlan_ioctl(struct ifnet *, u_long, void *); 195static int vlan_ioctl(struct ifnet *, u_long, void *);
196static void vlan_start(struct ifnet *); 196static void vlan_start(struct ifnet *);
197static int vlan_transmit(struct ifnet *, struct mbuf *); 197static int vlan_transmit(struct ifnet *, struct mbuf *);
198static void vlan_unconfig(struct ifnet *); 198static void vlan_unconfig(struct ifnet *);
199static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *); 199static int vlan_unconfig_locked(struct ifvlan *, struct ifvlan_linkmib *);
200static void vlan_hash_init(void); 200static void vlan_hash_init(void);
201static int vlan_hash_fini(void); 201static int vlan_hash_fini(void);
202static int vlan_tag_hash(uint16_t, u_long); 202static int vlan_tag_hash(uint16_t, u_long);
203static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *, 203static struct ifvlan_linkmib* vlan_getref_linkmib(struct ifvlan *,
204 struct psref *); 204 struct psref *);
205static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *); 205static void vlan_putref_linkmib(struct ifvlan_linkmib *, struct psref *);
206static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *); 206static void vlan_linkmib_update(struct ifvlan *, struct ifvlan_linkmib *);
207static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *, 207static struct ifvlan_linkmib* vlan_lookup_tag_psref(struct ifnet *,
208 uint16_t, struct psref *); 208 uint16_t, struct psref *);
209 209
210static struct { 210static struct {
211 kmutex_t lock; 211 kmutex_t lock;
212 LIST_HEAD(vlan_ifvlist, ifvlan) list; 212 LIST_HEAD(vlan_ifvlist, ifvlan) list;
213} ifv_list __cacheline_aligned; 213} ifv_list __cacheline_aligned;
214 214
215 215
216#if !defined(VLAN_TAG_HASH_SIZE) 216#if !defined(VLAN_TAG_HASH_SIZE)
217#define VLAN_TAG_HASH_SIZE 32 217#define VLAN_TAG_HASH_SIZE 32
218#endif 218#endif
219static struct { 219static struct {
220 kmutex_t lock; 220 kmutex_t lock;
221 struct pslist_head *lists; 221 struct pslist_head *lists;
222 u_long mask; 222 u_long mask;
223} ifv_hash __cacheline_aligned = { 223} ifv_hash __cacheline_aligned = {
224 .lists = NULL, 224 .lists = NULL,
225 .mask = 0, 225 .mask = 0,
226}; 226};
227 227
228pserialize_t vlan_psz __read_mostly; 228pserialize_t vlan_psz __read_mostly;
229static struct psref_class *ifvm_psref_class __read_mostly; 229static struct psref_class *ifvm_psref_class __read_mostly;
230 230
231struct if_clone vlan_cloner = 231struct if_clone vlan_cloner =
232 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy); 232 IF_CLONE_INITIALIZER("vlan", vlan_clone_create, vlan_clone_destroy);
233 233
234/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */ 234/* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
235static char vlan_zero_pad_buff[ETHER_MIN_LEN]; 235static char vlan_zero_pad_buff[ETHER_MIN_LEN];
236 236
/*
 * Toggle promiscuous mode on ifp, taking the kernel lock when the
 * stack is not built MP-safe.  Returns the ifpromisc() error code.
 */
static inline int
vlan_safe_ifpromisc(struct ifnet *ifp, int pswitch)
{
	int error;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	error = ifpromisc(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return error;
}
248 248
/*
 * As vlan_safe_ifpromisc(), but for callers already holding the
 * interface lock: uses ifpromisc_locked() instead.
 */
static inline int
vlan_safe_ifpromisc_locked(struct ifnet *ifp, int pswitch)
{
	int error;

	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	error = ifpromisc_locked(ifp, pswitch);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();

	return error;
}
260 260
/*
 * Legacy pseudo-device attach entry point.  Intentionally empty:
 * all real initialization happens in vlaninit() via the module
 * framework.
 */
void
vlanattach(int n)
{

	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in vlaninit() below.
	 */
}
270 270
271static void 271static void
272vlaninit(void) 272vlaninit(void)
273{ 273{
274 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE); 274 mutex_init(&ifv_list.lock, MUTEX_DEFAULT, IPL_NONE);
275 LIST_INIT(&ifv_list.list); 275 LIST_INIT(&ifv_list.list);
276 276
277 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE); 277 mutex_init(&ifv_hash.lock, MUTEX_DEFAULT, IPL_NONE);
278 vlan_psz = pserialize_create(); 278 vlan_psz = pserialize_create();
279 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET); 279 ifvm_psref_class = psref_class_create("vlanlinkmib", IPL_SOFTNET);
280 if_clone_attach(&vlan_cloner); 280 if_clone_attach(&vlan_cloner);
281 281
282 vlan_hash_init(); 282 vlan_hash_init();
283 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input); 283 MODULE_HOOK_SET(if_vlan_vlan_input_hook, vlan_input);
284} 284}
285 285
286static int 286static int
287vlandetach(void) 287vlandetach(void)
288{ 288{
289 bool is_empty; 289 bool is_empty;
290 int error; 290 int error;
291 291
292 mutex_enter(&ifv_list.lock); 292 mutex_enter(&ifv_list.lock);
293 is_empty = LIST_EMPTY(&ifv_list.list); 293 is_empty = LIST_EMPTY(&ifv_list.list);
294 mutex_exit(&ifv_list.lock); 294 mutex_exit(&ifv_list.lock);
295 295
296 if (!is_empty) 296 if (!is_empty)
297 return EBUSY; 297 return EBUSY;
298 298
299 error = vlan_hash_fini(); 299 error = vlan_hash_fini();
300 if (error != 0) 300 if (error != 0)
301 return error; 301 return error;
302 302
303 if_clone_detach(&vlan_cloner); 303 if_clone_detach(&vlan_cloner);
304 psref_class_destroy(ifvm_psref_class); 304 psref_class_destroy(ifvm_psref_class);
305 pserialize_destroy(vlan_psz); 305 pserialize_destroy(vlan_psz);
306 mutex_destroy(&ifv_hash.lock); 306 mutex_destroy(&ifv_hash.lock);
307 mutex_destroy(&ifv_list.lock); 307 mutex_destroy(&ifv_list.lock);
308 308
309 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook); 309 MODULE_HOOK_UNSET(if_vlan_vlan_input_hook);
310 return 0; 310 return 0;
311} 311}
312 312
313static void 313static void
314vlan_reset_linkname(struct ifnet *ifp) 314vlan_reset_linkname(struct ifnet *ifp)
315{ 315{
316 316
317 /* 317 /*
318 * We start out with a "802.1Q VLAN" type and zero-length 318 * We start out with a "802.1Q VLAN" type and zero-length
319 * addresses. When we attach to a parent interface, we 319 * addresses. When we attach to a parent interface, we
320 * inherit its type, address length, address, and data link 320 * inherit its type, address length, address, and data link
321 * type. 321 * type.
322 */ 322 */
323 323
324 ifp->if_type = IFT_L2VLAN; 324 ifp->if_type = IFT_L2VLAN;
325 ifp->if_addrlen = 0; 325 ifp->if_addrlen = 0;
326 ifp->if_dlt = DLT_NULL; 326 ifp->if_dlt = DLT_NULL;
327 if_alloc_sadl(ifp); 327 if_alloc_sadl(ifp);
328} 328}
329 329
330static int 330static int
331vlan_clone_create(struct if_clone *ifc, int unit) 331vlan_clone_create(struct if_clone *ifc, int unit)
332{ 332{
333 struct ifvlan *ifv; 333 struct ifvlan *ifv;
334 struct ifnet *ifp; 334 struct ifnet *ifp;
335 struct ifvlan_linkmib *mib; 335 struct ifvlan_linkmib *mib;
336 336
337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO); 337 ifv = malloc(sizeof(struct ifvlan), M_DEVBUF, M_WAITOK | M_ZERO);
338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP); 338 mib = kmem_zalloc(sizeof(struct ifvlan_linkmib), KM_SLEEP);
339 ifp = &ifv->ifv_if; 339 ifp = &ifv->ifv_if;
340 LIST_INIT(&ifv->ifv_mc_listhead); 340 LIST_INIT(&ifv->ifv_mc_listhead);
341 341
342 mib->ifvm_ifvlan = ifv; 342 mib->ifvm_ifvlan = ifv;
343 mib->ifvm_p = NULL; 343 mib->ifvm_p = NULL;
344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class); 344 psref_target_init(&mib->ifvm_psref, ifvm_psref_class);
345 345
346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE); 346 mutex_init(&ifv->ifv_lock, MUTEX_DEFAULT, IPL_NONE);
347 ifv->ifv_psz = pserialize_create(); 347 ifv->ifv_psz = pserialize_create();
348 ifv->ifv_mib = mib; 348 ifv->ifv_mib = mib;
349 349
350 mutex_enter(&ifv_list.lock); 350 mutex_enter(&ifv_list.lock);
351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list); 351 LIST_INSERT_HEAD(&ifv_list.list, ifv, ifv_list);
352 mutex_exit(&ifv_list.lock); 352 mutex_exit(&ifv_list.lock);
353 353
354 if_initname(ifp, ifc->ifc_name, unit); 354 if_initname(ifp, ifc->ifc_name, unit);
355 ifp->if_softc = ifv; 355 ifp->if_softc = ifv;
356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 356 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
357#ifdef NET_MPSAFE 357#ifdef NET_MPSAFE
358 ifp->if_extflags = IFEF_MPSAFE; 358 ifp->if_extflags = IFEF_MPSAFE;
359#endif 359#endif
360 ifp->if_start = vlan_start; 360 ifp->if_start = vlan_start;
361 ifp->if_transmit = vlan_transmit; 361 ifp->if_transmit = vlan_transmit;
362 ifp->if_ioctl = vlan_ioctl; 362 ifp->if_ioctl = vlan_ioctl;
363 IFQ_SET_READY(&ifp->if_snd); 363 IFQ_SET_READY(&ifp->if_snd);
364 if_initialize(ifp); 364 if_initialize(ifp);
365 /* 365 /*
366 * Set the link state to down. 366 * Set the link state to down.
367 * When the parent interface attaches we will use that link state. 367 * When the parent interface attaches we will use that link state.
368 * When the parent interface link state changes, so will ours. 368 * When the parent interface link state changes, so will ours.
369 * When the parent interface detaches, set the link state to down. 369 * When the parent interface detaches, set the link state to down.
370 */ 370 */
371 ifp->if_link_state = LINK_STATE_DOWN; 371 ifp->if_link_state = LINK_STATE_DOWN;
372 372
373 vlan_reset_linkname(ifp); 373 vlan_reset_linkname(ifp);
374 if_register(ifp); 374 if_register(ifp);
375 return 0; 375 return 0;
376} 376}
377 377
378static int 378static int
379vlan_clone_destroy(struct ifnet *ifp) 379vlan_clone_destroy(struct ifnet *ifp)
380{ 380{
381 struct ifvlan *ifv = ifp->if_softc; 381 struct ifvlan *ifv = ifp->if_softc;
382 382
383 mutex_enter(&ifv_list.lock); 383 mutex_enter(&ifv_list.lock);
384 LIST_REMOVE(ifv, ifv_list); 384 LIST_REMOVE(ifv, ifv_list);
385 mutex_exit(&ifv_list.lock); 385 mutex_exit(&ifv_list.lock);
386 386
387 IFNET_LOCK(ifp); 387 IFNET_LOCK(ifp);
388 vlan_unconfig(ifp); 388 vlan_unconfig(ifp);
389 IFNET_UNLOCK(ifp); 389 IFNET_UNLOCK(ifp);
390 if_detach(ifp); 390 if_detach(ifp);
391 391
392 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class); 392 psref_target_destroy(&ifv->ifv_mib->ifvm_psref, ifvm_psref_class);
393 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib)); 393 kmem_free(ifv->ifv_mib, sizeof(struct ifvlan_linkmib));
394 pserialize_destroy(ifv->ifv_psz); 394 pserialize_destroy(ifv->ifv_psz);
395 mutex_destroy(&ifv->ifv_lock); 395 mutex_destroy(&ifv->ifv_lock);
396 free(ifv, M_DEVBUF); 396 free(ifv, M_DEVBUF);
397 397
398 return 0; 398 return 0;
399} 399}
400 400
401/* 401/*
402 * Configure a VLAN interface. 402 * Configure a VLAN interface.
403 */ 403 */
404static int 404static int
405vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag) 405vlan_config(struct ifvlan *ifv, struct ifnet *p, uint16_t tag)
406{ 406{
407 struct ifnet *ifp = &ifv->ifv_if; 407 struct ifnet *ifp = &ifv->ifv_if;
408 struct ifvlan_linkmib *nmib = NULL; 408 struct ifvlan_linkmib *nmib = NULL;
409 struct ifvlan_linkmib *omib = NULL; 409 struct ifvlan_linkmib *omib = NULL;
410 struct ifvlan_linkmib *checkmib; 410 struct ifvlan_linkmib *checkmib;
411 struct psref_target *nmib_psref = NULL; 411 struct psref_target *nmib_psref = NULL;
412 const uint16_t vid = EVL_VLANOFTAG(tag); 412 const uint16_t vid = EVL_VLANOFTAG(tag);
413 int error = 0; 413 int error = 0;
414 int idx; 414 int idx;
415 bool omib_cleanup = false; 415 bool omib_cleanup = false;
416 struct psref psref; 416 struct psref psref;
417 417
418 /* VLAN ID 0 and 4095 are reserved in the spec */ 418 /* VLAN ID 0 and 4095 are reserved in the spec */
419 if ((vid == 0) || (vid == 0xfff)) 419 if ((vid == 0) || (vid == 0xfff))
420 return EINVAL; 420 return EINVAL;
421 421
422 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP); 422 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
423 mutex_enter(&ifv->ifv_lock); 423 mutex_enter(&ifv->ifv_lock);
424 omib = ifv->ifv_mib; 424 omib = ifv->ifv_mib;
425 425
426 if (omib->ifvm_p != NULL) { 426 if (omib->ifvm_p != NULL) {
427 error = EBUSY; 427 error = EBUSY;
428 goto done; 428 goto done;
429 } 429 }
430 430
431 /* Duplicate check */ 431 /* Duplicate check */
432 checkmib = vlan_lookup_tag_psref(p, vid, &psref); 432 checkmib = vlan_lookup_tag_psref(p, vid, &psref);
433 if (checkmib != NULL) { 433 if (checkmib != NULL) {
434 vlan_putref_linkmib(checkmib, &psref); 434 vlan_putref_linkmib(checkmib, &psref);
435 error = EEXIST; 435 error = EEXIST;
436 goto done; 436 goto done;
437 } 437 }
438 438
439 *nmib = *omib; 439 *nmib = *omib;
440 nmib_psref = &nmib->ifvm_psref; 440 nmib_psref = &nmib->ifvm_psref;
441 441
442 psref_target_init(nmib_psref, ifvm_psref_class); 442 psref_target_init(nmib_psref, ifvm_psref_class);
443 443
444 switch (p->if_type) { 444 switch (p->if_type) {
445 case IFT_ETHER: 445 case IFT_ETHER:
446 { 446 {
447 struct ethercom *ec = (void *)p; 447 struct ethercom *ec = (void *)p;
448 struct vlanid_list *vidmem; 448 struct vlanid_list *vidmem;
449 449
450 nmib->ifvm_msw = &vlan_ether_multisw; 450 nmib->ifvm_msw = &vlan_ether_multisw;
451 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN; 451 nmib->ifvm_encaplen = ETHER_VLAN_ENCAP_LEN;
452 nmib->ifvm_mintu = ETHERMIN; 452 nmib->ifvm_mintu = ETHERMIN;
453 453
454 if (ec->ec_nvlans++ == 0) { 454 if (ec->ec_nvlans++ == 0) {
455 IFNET_LOCK(p); 455 IFNET_LOCK(p);
456 error = ether_enable_vlan_mtu(p); 456 error = ether_enable_vlan_mtu(p);
457 IFNET_UNLOCK(p); 457 IFNET_UNLOCK(p);
458 if (error >= 0) { 458 if (error >= 0) {
459 if (error) { 459 if (error) {
460 ec->ec_nvlans--; 460 ec->ec_nvlans--;
461 goto done; 461 goto done;
462 } 462 }
463 nmib->ifvm_mtufudge = 0; 463 nmib->ifvm_mtufudge = 0;
464 } else { 464 } else {
465 /* 465 /*
466 * Fudge the MTU by the encapsulation size. This 466 * Fudge the MTU by the encapsulation size. This
467 * makes us incompatible with strictly compliant 467 * makes us incompatible with strictly compliant
468 * 802.1Q implementations, but allows us to use 468 * 802.1Q implementations, but allows us to use
469 * the feature with other NetBSD 469 * the feature with other NetBSD
470 * implementations, which might still be useful. 470 * implementations, which might still be useful.
471 */ 471 */
472 nmib->ifvm_mtufudge = nmib->ifvm_encaplen; 472 nmib->ifvm_mtufudge = nmib->ifvm_encaplen;
473 } 473 }
474 error = 0; 474 error = 0;
475 } 475 }
476 /* Add a vid to the list */ 476 /* Add a vid to the list */
477 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP); 477 vidmem = kmem_alloc(sizeof(struct vlanid_list), KM_SLEEP);
478 vidmem->vid = vid; 478 vidmem->vid = vid;
479 ETHER_LOCK(ec); 479 ETHER_LOCK(ec);
480 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list); 480 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidmem, vid_list);
481 ETHER_UNLOCK(ec); 481 ETHER_UNLOCK(ec);
482 482
483 if (ec->ec_vlan_cb != NULL) { 483 if (ec->ec_vlan_cb != NULL) {
484 /* 484 /*
485 * Call ec_vlan_cb(). It will setup VLAN HW filter or 485 * Call ec_vlan_cb(). It will setup VLAN HW filter or
486 * HW tagging function. 486 * HW tagging function.
487 */ 487 */
488 error = (*ec->ec_vlan_cb)(ec, vid, true); 488 error = (*ec->ec_vlan_cb)(ec, vid, true);
489 if (error) { 489 if (error) {
490 ec->ec_nvlans--; 490 ec->ec_nvlans--;
491 if (ec->ec_nvlans == 0) { 491 if (ec->ec_nvlans == 0) {
492 IFNET_LOCK(p); 492 IFNET_LOCK(p);
493 (void)ether_disable_vlan_mtu(p); 493 (void)ether_disable_vlan_mtu(p);
494 IFNET_UNLOCK(p); 494 IFNET_UNLOCK(p);
495 } 495 }
496 goto done; 496 goto done;
497 } 497 }
498 } 498 }
499 /* 499 /*
500 * If the parent interface can do hardware-assisted 500 * If the parent interface can do hardware-assisted
501 * VLAN encapsulation, then propagate its hardware- 501 * VLAN encapsulation, then propagate its hardware-
502 * assisted checksumming flags and tcp segmentation 502 * assisted checksumming flags and tcp segmentation
503 * offload. 503 * offload.
504 */ 504 */
505 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) { 505 if (ec->ec_capabilities & ETHERCAP_VLAN_HWTAGGING) {
506 ifp->if_capabilities = p->if_capabilities & 506 ifp->if_capabilities = p->if_capabilities &
507 (IFCAP_TSOv4 | IFCAP_TSOv6 | 507 (IFCAP_TSOv4 | IFCAP_TSOv6 |
508 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 508 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
509 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 509 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
510 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx | 510 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
511 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx | 511 IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
512 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx); 512 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx);
513 } 513 }
514 514
515 /* 515 /*
516 * We inherit the parent's Ethernet address. 516 * We inherit the parent's Ethernet address.
517 */ 517 */
518 ether_ifattach(ifp, CLLADDR(p->if_sadl)); 518 ether_ifattach(ifp, CLLADDR(p->if_sadl));
519 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */ 519 ifp->if_hdrlen = sizeof(struct ether_vlan_header); /* XXX? */
520 break; 520 break;
521 } 521 }
522 522
523 default: 523 default:
524 error = EPROTONOSUPPORT; 524 error = EPROTONOSUPPORT;
525 goto done; 525 goto done;
526 } 526 }
527 527
528 nmib->ifvm_p = p; 528 nmib->ifvm_p = p;
529 nmib->ifvm_tag = vid; 529 nmib->ifvm_tag = vid;
530 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge; 530 ifv->ifv_if.if_mtu = p->if_mtu - nmib->ifvm_mtufudge;
531 ifv->ifv_if.if_flags = p->if_flags & 531 ifv->ifv_if.if_flags = p->if_flags &
532 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 532 (IFF_UP | IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
533 533
534 /* 534 /*
535 * Inherit the if_type from the parent. This allows us 535 * Inherit the if_type from the parent. This allows us
536 * to participate in bridges of that type. 536 * to participate in bridges of that type.
537 */ 537 */
538 ifv->ifv_if.if_type = p->if_type; 538 ifv->ifv_if.if_type = p->if_type;
539 539
540 PSLIST_ENTRY_INIT(ifv, ifv_hash); 540 PSLIST_ENTRY_INIT(ifv, ifv_hash);
541 idx = vlan_tag_hash(vid, ifv_hash.mask); 541 idx = vlan_tag_hash(vid, ifv_hash.mask);
542 542
543 mutex_enter(&ifv_hash.lock); 543 mutex_enter(&ifv_hash.lock);
544 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash); 544 PSLIST_WRITER_INSERT_HEAD(&ifv_hash.lists[idx], ifv, ifv_hash);
545 mutex_exit(&ifv_hash.lock); 545 mutex_exit(&ifv_hash.lock);
546 546
547 vlan_linkmib_update(ifv, nmib); 547 vlan_linkmib_update(ifv, nmib);
548 nmib = NULL; 548 nmib = NULL;
549 nmib_psref = NULL; 549 nmib_psref = NULL;
550 omib_cleanup = true; 550 omib_cleanup = true;
551 551
552 552
553 /* 553 /*
554 * We inherit the parents link state. 554 * We inherit the parents link state.
555 */ 555 */
556 if_link_state_change(&ifv->ifv_if, p->if_link_state); 556 if_link_state_change(&ifv->ifv_if, p->if_link_state);
557 557
558done: 558done:
559 mutex_exit(&ifv->ifv_lock); 559 mutex_exit(&ifv->ifv_lock);
560 560
561 if (nmib_psref) 561 if (nmib_psref)
562 psref_target_destroy(nmib_psref, ifvm_psref_class); 562 psref_target_destroy(nmib_psref, ifvm_psref_class);
563 if (nmib) 563 if (nmib)
564 kmem_free(nmib, sizeof(*nmib)); 564 kmem_free(nmib, sizeof(*nmib));
565 if (omib_cleanup) 565 if (omib_cleanup)
566 kmem_free(omib, sizeof(*omib)); 566 kmem_free(omib, sizeof(*omib));
567 567
568 return error; 568 return error;
569} 569}
570 570
571/* 571/*
572 * Unconfigure a VLAN interface. 572 * Unconfigure a VLAN interface.
573 */ 573 */
574static void 574static void
575vlan_unconfig(struct ifnet *ifp) 575vlan_unconfig(struct ifnet *ifp)
576{ 576{
577 struct ifvlan *ifv = ifp->if_softc; 577 struct ifvlan *ifv = ifp->if_softc;
578 struct ifvlan_linkmib *nmib = NULL; 578 struct ifvlan_linkmib *nmib = NULL;
579 int error; 579 int error;
580 580
581 KASSERT(IFNET_LOCKED(ifp)); 581 KASSERT(IFNET_LOCKED(ifp));
582 582
583 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP); 583 nmib = kmem_alloc(sizeof(*nmib), KM_SLEEP);
584 584
585 mutex_enter(&ifv->ifv_lock); 585 mutex_enter(&ifv->ifv_lock);
586 error = vlan_unconfig_locked(ifv, nmib); 586 error = vlan_unconfig_locked(ifv, nmib);
587 mutex_exit(&ifv->ifv_lock); 587 mutex_exit(&ifv->ifv_lock);
588 588
589 if (error) 589 if (error)
590 kmem_free(nmib, sizeof(*nmib)); 590 kmem_free(nmib, sizeof(*nmib));
591} 591}
/*
 * Guts of vlan_unconfig(): detach "ifv" from its parent interface.
 *
 * Called with both IFNET_LOCK(ifp) and ifv->ifv_lock held; both are
 * temporarily dropped and reacquired around ether_ifdetach() (and
 * again near the end for if_down_locked()).  On success the caller's
 * preallocated "nmib" (a copy of the old MIB with ifvm_p cleared)
 * becomes the current link MIB and the old one is freed here; on
 * failure the caller keeps ownership of "nmib" and must free it.
 * Returns 0 on success, or -1 (not an errno) when the interface has
 * no parent configured.
 */
static int
vlan_unconfig_locked(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifnet *p;
	struct ifnet *ifp = &ifv->ifv_if;
	struct psref_target *nmib_psref = NULL;
	struct ifvlan_linkmib *omib;
	int error = 0;

	KASSERT(IFNET_LOCKED(ifp));
	KASSERT(mutex_owned(&ifv->ifv_lock));

	/* Mark the interface down while we tear it apart. */
	ifp->if_flags &= ~(IFF_UP | IFF_RUNNING);

	omib = ifv->ifv_mib;
	p = omib->ifvm_p;

	/* Nothing to do if we were never configured onto a parent. */
	if (p == NULL) {
		error = -1;
		goto done;
	}

	/* Start the new MIB as a copy of the old one. */
	*nmib = *omib;
	nmib_psref = &nmib->ifvm_psref;
	psref_target_init(nmib_psref, ifvm_psref_class);

	/*
	 * Since the interface is being unconfigured, we need to empty the
	 * list of multicast groups that we may have joined while we were
	 * alive and remove them from the parent's list also.
	 */
	(*nmib->ifvm_msw->vmsw_purgemulti)(ifv);

	/* Disconnect from parent. */
	switch (p->if_type) {
	case IFT_ETHER:
	{
		struct ethercom *ec = (void *)p;
		struct vlanid_list *vlanidp;
		uint16_t vid = EVL_VLANOFTAG(nmib->ifvm_tag);

		/* Remove our VLAN ID from the parent's list of IDs in use. */
		ETHER_LOCK(ec);
		SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
			if (vlanidp->vid == vid) {
				SIMPLEQ_REMOVE(&ec->ec_vids, vlanidp,
				    vlanid_list, vid_list);
				break;
			}
		}
		ETHER_UNLOCK(ec);
		/* vlanidp is non-NULL only if the ID was found above. */
		if (vlanidp != NULL)
			kmem_free(vlanidp, sizeof(*vlanidp));

		if (ec->ec_vlan_cb != NULL) {
			/*
			 * Call ec_vlan_cb(). It will setup VLAN HW filter or
			 * HW tagging function.
			 */
			(void)(*ec->ec_vlan_cb)(ec, vid, false);
		}
		/* Last VLAN on this parent: restore the parent's MTU. */
		if (--ec->ec_nvlans == 0) {
			IFNET_LOCK(p);
			(void)ether_disable_vlan_mtu(p);
			IFNET_UNLOCK(p);
		}

		/* XXX ether_ifdetach must not be called with IFNET_LOCK */
		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
		ether_ifdetach(ifp);
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* if_free_sadl must be called with IFNET_LOCK */
		if_free_sadl(ifp, 1);

		/* Restore vlan_ioctl overwritten by ether_ifdetach */
		ifp->if_ioctl = vlan_ioctl;
		vlan_reset_linkname(ifp);
		break;
	}

	default:
		panic("%s: impossible", __func__);
	}

	/* Publish the parentless state. */
	nmib->ifvm_p = NULL;
	ifv->ifv_if.if_mtu = 0;
	ifv->ifv_flags = 0;

	/* Unhash, waiting for lockless readers to drain first. */
	mutex_enter(&ifv_hash.lock);
	PSLIST_WRITER_REMOVE(ifv, ifv_hash);
	pserialize_perform(vlan_psz);
	mutex_exit(&ifv_hash.lock);
	PSLIST_ENTRY_DESTROY(ifv, ifv_hash);

	/* Swap in the new MIB; the old one is freed below. */
	vlan_linkmib_update(ifv, nmib);
	if_link_state_change(ifp, LINK_STATE_DOWN);

	mutex_exit(&ifv->ifv_lock);

	/* Ownership of nmib has transferred to ifv; don't destroy it. */
	nmib_psref = NULL;
	kmem_free(omib, sizeof(*omib));

#ifdef INET6
	KERNEL_LOCK_UNLESS_NET_MPSAFE();
	/* To delete v6 link local addresses */
	if (in6_present)
		in6_ifdetach(ifp);
	KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
#endif

	if ((ifp->if_flags & IFF_PROMISC) != 0)
		vlan_safe_ifpromisc_locked(ifp, 0);
	if_down_locked(ifp);
	ifp->if_capabilities = 0;
	mutex_enter(&ifv->ifv_lock);
done:

	/*
	 * Defensive: no current path reaches here with nmib_psref still
	 * set (error paths bail before psref_target_init, success paths
	 * clear it), but destroy the target if that ever changes.
	 */
	if (nmib_psref)
		psref_target_destroy(nmib_psref, ifvm_psref_class);

	return error;
}
716 716
717static void 717static void
718vlan_hash_init(void) 718vlan_hash_init(void)
719{ 719{
720 720
721 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true, 721 ifv_hash.lists = hashinit(VLAN_TAG_HASH_SIZE, HASH_PSLIST, true,
722 &ifv_hash.mask); 722 &ifv_hash.mask);
723} 723}
724 724
725static int 725static int
726vlan_hash_fini(void) 726vlan_hash_fini(void)
727{ 727{
728 int i; 728 int i;
729 729
730 mutex_enter(&ifv_hash.lock); 730 mutex_enter(&ifv_hash.lock);
731 731
732 for (i = 0; i < ifv_hash.mask + 1; i++) { 732 for (i = 0; i < ifv_hash.mask + 1; i++) {
733 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan, 733 if (PSLIST_WRITER_FIRST(&ifv_hash.lists[i], struct ifvlan,
734 ifv_hash) != NULL) { 734 ifv_hash) != NULL) {
735 mutex_exit(&ifv_hash.lock); 735 mutex_exit(&ifv_hash.lock);
736 return EBUSY; 736 return EBUSY;
737 } 737 }
738 } 738 }
739 739
740 for (i = 0; i < ifv_hash.mask + 1; i++) 740 for (i = 0; i < ifv_hash.mask + 1; i++)
741 PSLIST_DESTROY(&ifv_hash.lists[i]); 741 PSLIST_DESTROY(&ifv_hash.lists[i]);
742 742
743 mutex_exit(&ifv_hash.lock); 743 mutex_exit(&ifv_hash.lock);
744 744
745 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask); 745 hashdone(ifv_hash.lists, HASH_PSLIST, ifv_hash.mask);
746 746
747 ifv_hash.lists = NULL; 747 ifv_hash.lists = NULL;
748 ifv_hash.mask = 0; 748 ifv_hash.mask = 0;
749 749
750 return 0; 750 return 0;
751} 751}
752 752
/*
 * Fold a VLAN tag into a hash bucket index: mix the high byte into
 * the low byte, stir once more, then mask down to the table size
 * (mask is 2^n - 1).
 */
static int
vlan_tag_hash(uint16_t tag, u_long mask)
{
	uint32_t h = (uint32_t)((tag >> 8) ^ tag);

	h ^= h >> 2;

	return h & mask;
}
763 763
764static struct ifvlan_linkmib * 764static struct ifvlan_linkmib *
765vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref) 765vlan_getref_linkmib(struct ifvlan *sc, struct psref *psref)
766{ 766{
767 struct ifvlan_linkmib *mib; 767 struct ifvlan_linkmib *mib;
768 int s; 768 int s;
769 769
770 s = pserialize_read_enter(); 770 s = pserialize_read_enter();
771 mib = atomic_load_consume(&sc->ifv_mib); 771 mib = atomic_load_consume(&sc->ifv_mib);
772 if (mib == NULL) { 772 if (mib == NULL) {
773 pserialize_read_exit(s); 773 pserialize_read_exit(s);
774 return NULL; 774 return NULL;
775 } 775 }
776 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class); 776 psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
777 pserialize_read_exit(s); 777 pserialize_read_exit(s);
778 778
779 return mib; 779 return mib;
780} 780}
781 781
782static void 782static void
783vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref) 783vlan_putref_linkmib(struct ifvlan_linkmib *mib, struct psref *psref)
784{ 784{
785 if (mib == NULL) 785 if (mib == NULL)
786 return; 786 return;
787 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class); 787 psref_release(psref, &mib->ifvm_psref, ifvm_psref_class);
788} 788}
789 789
/*
 * Look up the vlan attached to parent "ifp" carrying VLAN ID "tag"
 * and return its current link MIB with a psref reference held, or
 * NULL if no such vlan exists.  Runs lockless under pserialize; the
 * caller must be bound to the current CPU (curlwp_bind()) and must
 * release the reference with vlan_putref_linkmib().
 */
static struct ifvlan_linkmib *
vlan_lookup_tag_psref(struct ifnet *ifp, uint16_t tag, struct psref *psref)
{
	int idx;
	int s;
	struct ifvlan *sc;

	idx = vlan_tag_hash(tag, ifv_hash.mask);

	s = pserialize_read_enter();
	PSLIST_READER_FOREACH(sc, &ifv_hash.lists[idx], struct ifvlan,
	    ifv_hash) {
		struct ifvlan_linkmib *mib = atomic_load_consume(&sc->ifv_mib);
		if (mib == NULL)
			continue;
		if (mib->ifvm_tag != tag)
			continue;
		if (mib->ifvm_p != ifp)
			continue;

		/* Match: pin the MIB before leaving the read section. */
		psref_acquire(psref, &mib->ifvm_psref, ifvm_psref_class);
		pserialize_read_exit(s);
		return mib;
	}
	pserialize_read_exit(s);
	return NULL;
}
817 817
/*
 * Publish "nmib" as the current link MIB of "ifv" and wait until all
 * lockless readers are guaranteed to see it.  The old MIB's psref
 * target is destroyed here, but the old MIB itself is NOT freed: the
 * caller still holds a pointer to it and is responsible for freeing.
 * Called with ifv_lock held; blocks in pserialize_perform() and
 * psref_target_destroy() until readers drain.
 */
static void
vlan_linkmib_update(struct ifvlan *ifv, struct ifvlan_linkmib *nmib)
{
	struct ifvlan_linkmib *omib = ifv->ifv_mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/* Make the new MIB visible to pserialize readers. */
	atomic_store_release(&ifv->ifv_mib, nmib);

	/* Wait for readers of the old MIB, then retire its psref target. */
	pserialize_perform(ifv->ifv_psz);
	psref_target_destroy(&omib->ifvm_psref, ifvm_psref_class);
}
830 830
831/* 831/*
832 * Called when a parent interface is detaching; destroy any VLAN 832 * Called when a parent interface is detaching; destroy any VLAN
833 * configuration for the parent interface. 833 * configuration for the parent interface.
834 */ 834 */
void
vlan_ifdetach(struct ifnet *p)
{
	struct ifvlan *ifv;
	struct ifvlan_linkmib *mib, **nmibs;
	struct psref psref;
	int error;
	int bound;
	int i, cnt = 0;

	/* Bind to this CPU for the psref accesses below. */
	bound = curlwp_bind();

	/* Pass 1: count the vlans attached to this parent. */
	mutex_enter(&ifv_list.lock);
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL)
			continue;

		if (mib->ifvm_p == p)
			cnt++;

		vlan_putref_linkmib(mib, &psref);
	}
	mutex_exit(&ifv_list.lock);

	if (cnt == 0) {
		curlwp_bindx(bound);
		return;
	}

	/*
	 * The value of "cnt" does not increase while ifv_list.lock
	 * and ifv->ifv_lock are released here, because the parent
	 * interface is detaching.
	 */
	nmibs = kmem_alloc(sizeof(*nmibs) * cnt, KM_SLEEP);
	for (i = 0; i < cnt; i++) {
		nmibs[i] = kmem_alloc(sizeof(*nmibs[i]), KM_SLEEP);
	}

	/* Pass 2: unconfigure every vlan attached to "p". */
	mutex_enter(&ifv_list.lock);

	i = 0;
	LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
		struct ifnet *ifp = &ifv->ifv_if;

		/* IFNET_LOCK must be held before ifv_lock. */
		IFNET_LOCK(ifp);
		mutex_enter(&ifv->ifv_lock);

		/* XXX ifv_mib = NULL? */
		if (ifv->ifv_mib->ifvm_p == p) {
			KASSERTMSG(i < cnt,
			    "no memory for unconfig, parent=%s", p->if_xname);
			/* On success the vlan consumes nmibs[i]. */
			error = vlan_unconfig_locked(ifv, nmibs[i]);
			if (!error) {
				nmibs[i] = NULL;
				i++;
			}

		}

		mutex_exit(&ifv->ifv_lock);
		IFNET_UNLOCK(ifp);
	}

	mutex_exit(&ifv_list.lock);

	curlwp_bindx(bound);

	/* Free any preallocated MIBs that went unused. */
	for (i = 0; i < cnt; i++) {
		if (nmibs[i])
			kmem_free(nmibs[i], sizeof(*nmibs[i]));
	}

	kmem_free(nmibs, sizeof(*nmibs) * cnt);

	return;
}
914 914
915static int 915static int
916vlan_set_promisc(struct ifnet *ifp) 916vlan_set_promisc(struct ifnet *ifp)
917{ 917{
918 struct ifvlan *ifv = ifp->if_softc; 918 struct ifvlan *ifv = ifp->if_softc;
919 struct ifvlan_linkmib *mib; 919 struct ifvlan_linkmib *mib;
920 struct psref psref; 920 struct psref psref;
921 int error = 0; 921 int error = 0;
922 int bound; 922 int bound;
923 923
924 bound = curlwp_bind(); 924 bound = curlwp_bind();
925 mib = vlan_getref_linkmib(ifv, &psref); 925 mib = vlan_getref_linkmib(ifv, &psref);
926 if (mib == NULL) { 926 if (mib == NULL) {
927 curlwp_bindx(bound); 927 curlwp_bindx(bound);
928 return EBUSY; 928 return EBUSY;
929 } 929 }
930 930
931 if ((ifp->if_flags & IFF_PROMISC) != 0) { 931 if ((ifp->if_flags & IFF_PROMISC) != 0) {
932 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) { 932 if ((ifv->ifv_flags & IFVF_PROMISC) == 0) {
933 error = vlan_safe_ifpromisc(mib->ifvm_p, 1); 933 error = vlan_safe_ifpromisc(mib->ifvm_p, 1);
934 if (error == 0) 934 if (error == 0)
935 ifv->ifv_flags |= IFVF_PROMISC; 935 ifv->ifv_flags |= IFVF_PROMISC;
936 } 936 }
937 } else { 937 } else {
938 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) { 938 if ((ifv->ifv_flags & IFVF_PROMISC) != 0) {
939 error = vlan_safe_ifpromisc(mib->ifvm_p, 0); 939 error = vlan_safe_ifpromisc(mib->ifvm_p, 0);
940 if (error == 0) 940 if (error == 0)
941 ifv->ifv_flags &= ~IFVF_PROMISC; 941 ifv->ifv_flags &= ~IFVF_PROMISC;
942 } 942 }
943 } 943 }
944 vlan_putref_linkmib(mib, &psref); 944 vlan_putref_linkmib(mib, &psref);
945 curlwp_bindx(bound); 945 curlwp_bindx(bound);
946 946
947 return error; 947 return error;
948} 948}
949 949
/*
 * ioctl handler for vlan interfaces.
 *
 * Every case that needs the link MIB either takes a psref reference
 * (bound to the current CPU via curlwp_bind()) for read-only access,
 * or holds ifv_lock when the multicast switch may mutate state
 * (SIOCADDMULTI/SIOCDELMULTI).  A NULL MIB means the vlan is being
 * torn down, reported as EBUSY.
 */
static int
vlan_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct lwp *l = curlwp;
	struct ifvlan *ifv = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifnet *pr;
	struct ifcapreq *ifcr;
	struct vlanreq vlr;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error = 0;
	int bound;

	switch (cmd) {
	case SIOCSIFMTU:
		/*
		 * The MTU must fit within the parent's MTU minus the
		 * encapsulation overhead (mtufudge), and be at least
		 * the recorded minimum.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else if (
		    ifr->ifr_mtu > (mib->ifvm_p->if_mtu - mib->ifvm_mtufudge) ||
		    ifr->ifr_mtu < (mib->ifvm_mintu - mib->ifvm_mtufudge)) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
		} else {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			error = ifioctl_common(ifp, cmd, data);
			if (error == ENETRESET)
				error = 0;
		}

		break;

	case SIOCSETVLAN:
		/* Privileged: attach to or detach from a parent. */
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE,
		    KAUTH_REQ_NETWORK_INTERFACE_SETPRIV, ifp, (void *)cmd,
		    NULL)) != 0)
			break;
		if ((error = copyin(ifr->ifr_data, &vlr, sizeof(vlr))) != 0)
			break;

		/* An empty parent name means "unconfigure". */
		if (vlr.vlr_parent[0] == '\0') {
			bound = curlwp_bind();
			mib = vlan_getref_linkmib(ifv, &psref);
			if (mib == NULL) {
				curlwp_bindx(bound);
				error = EBUSY;
				break;
			}

			/* Drop our promiscuous claim on the parent first. */
			if (mib->ifvm_p != NULL &&
			    (ifp->if_flags & IFF_PROMISC) != 0)
				error = vlan_safe_ifpromisc(mib->ifvm_p, 0);

			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);

			vlan_unconfig(ifp);
			break;
		}
		if (vlr.vlr_tag != EVL_VLANOFTAG(vlr.vlr_tag)) {
			error = EINVAL;		 /* check for valid tag */
			break;
		}
		if ((pr = ifunit(vlr.vlr_parent)) == NULL) {
			error = ENOENT;
			break;
		}

		error = vlan_config(ifv, pr, vlr.vlr_tag);
		if (error != 0)
			break;

		/* Update promiscuous mode, if necessary. */
		vlan_set_promisc(ifp);

		ifp->if_flags |= IFF_RUNNING;
		break;

	case SIOCGETVLAN:
		/* Report the current parent name and tag (if any). */
		memset(&vlr, 0, sizeof(vlr));
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}
		if (mib->ifvm_p != NULL) {
			snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), "%s",
			    mib->ifvm_p->if_xname);
			vlr.vlr_tag = mib->ifvm_tag;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		error = copyout(&vlr, ifr->ifr_data, sizeof(vlr));
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/*
		 * For promiscuous mode, we enable promiscuous mode on
		 * the parent if we need promiscuous on the VLAN interface.
		 */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p != NULL)
			error = vlan_set_promisc(ifp);
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCADDMULTI:
		/* The multicast switch mutates state: hold ifv_lock. */
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}

		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_addmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCDELMULTI:
		mutex_enter(&ifv->ifv_lock);
		mib = ifv->ifv_mib;
		if (mib == NULL) {
			error = EBUSY;
			mutex_exit(&ifv->ifv_lock);
			break;
		}
		error = (mib->ifvm_p != NULL) ?
		    (*mib->ifvm_msw->vmsw_delmulti)(ifv, ifr) : EINVAL;
		mib = NULL;
		mutex_exit(&ifv->ifv_lock);
		break;

	case SIOCSIFCAP:
		ifcr = data;
		/* make sure caps are enabled on parent */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}
		/* Reject capabilities the parent does not have enabled. */
		if ((mib->ifvm_p->if_capenable & ifcr->ifcr_capenable) !=
		    ifcr->ifcr_capenable) {
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			error = EINVAL;
			break;
		}

		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCINITIFADDR:
		/* Refuse address configuration until we have a parent. */
		bound = curlwp_bind();
		mib = vlan_getref_linkmib(ifv, &psref);
		if (mib == NULL) {
			curlwp_bindx(bound);
			error = EBUSY;
			break;
		}

		if (mib->ifvm_p == NULL) {
			error = EINVAL;
			vlan_putref_linkmib(mib, &psref);
			curlwp_bindx(bound);
			break;
		}
		vlan_putref_linkmib(mib, &psref);
		curlwp_bindx(bound);

		ifp->if_flags |= IFF_UP;
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(ifp, ifa);
#endif
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}

	return error;
}
1174 1174
/*
 * Join an Ethernet multicast group on behalf of the vlan interface:
 * register the address with our own ethercom, remember it on
 * ifv_mc_listhead so it can be undone at unconfigure time, and ask the
 * parent interface to start listening for it.
 *
 * Must be called with ifv->ifv_lock held (asserted below); the lock
 * also implicitly protects ifv_ec and ifv_mib here.
 *
 * Returns 0 on success or an errno; on any failure the local
 * ether_addmulti() registration is rolled back.
 */
static int
vlan_ether_addmulti(struct ifvlan *ifv, struct ifreq *ifr)
{
	const struct sockaddr *sa = ifreq_getaddr(SIOCADDMULTI, ifr);
	struct vlan_mc_entry *mc;
	uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
	struct ifvlan_linkmib *mib;
	int error;

	KASSERT(mutex_owned(&ifv->ifv_lock));

	/* mc_addr is a sockaddr_storage; refuse anything that won't fit. */
	if (sa->sa_len > sizeof(struct sockaddr_storage))
		return EINVAL;

	/*
	 * ENETRESET means this is a genuinely new address that the
	 * hardware/parent must be told about; any other value (0 or an
	 * error) means there is nothing more for us to do.
	 */
	error = ether_addmulti(sa, &ifv->ifv_ec);
	if (error != ENETRESET)
		return error;

	/*
	 * This is a new multicast address.  We have to tell parent
	 * about it.  Also, remember this multicast address so that
	 * we can delete it on unconfigure.
	 */
	mc = malloc(sizeof(struct vlan_mc_entry), M_DEVBUF, M_NOWAIT);
	if (mc == NULL) {
		error = ENOMEM;
		goto alloc_failed;
	}

	/*
	 * Since ether_addmulti() returned ENETRESET, the following two
	 * statements shouldn't fail. Here ifv_ec is implicitly protected
	 * by the ifv_lock lock.
	 */
	error = ether_multiaddr(sa, addrlo, addrhi);
	KASSERT(error == 0);

	ETHER_LOCK(&ifv->ifv_ec);
	mc->mc_enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
	ETHER_UNLOCK(&ifv->ifv_ec);

	KASSERT(mc->mc_enm != NULL);

	memcpy(&mc->mc_addr, sa, sa->sa_len);
	LIST_INSERT_HEAD(&ifv->ifv_mc_listhead, mc, mc_entries);

	mib = ifv->ifv_mib;

	/* Non-MPSAFE parents need the big lock around their ioctl path. */
	KERNEL_LOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);
	error = if_mcast_op(mib->ifvm_p, SIOCADDMULTI, sa);
	KERNEL_UNLOCK_UNLESS_IFP_MPSAFE(mib->ifvm_p);

	if (error != 0)
		goto ioctl_failed;
	return error;

ioctl_failed:
	/* Parent rejected the address: forget our bookkeeping entry. */
	LIST_REMOVE(mc, mc_entries);
	free(mc, M_DEVBUF);

alloc_failed:
	/* Undo the ether_addmulti() performed above. */
	(void)ether_delmulti(sa, &ifv->ifv_ec);
	return error;
}
1239 1239
1240static int 1240static int
1241vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr) 1241vlan_ether_delmulti(struct ifvlan *ifv, struct ifreq *ifr)
1242{ 1242{
1243 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr); 1243 const struct sockaddr *sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1244 struct ether_multi *enm; 1244 struct ether_multi *enm;
1245 struct vlan_mc_entry *mc; 1245 struct vlan_mc_entry *mc;
1246 struct ifvlan_linkmib *mib; 1246 struct ifvlan_linkmib *mib;
1247 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN]; 1247 uint8_t addrlo[ETHER_ADDR_LEN], addrhi[ETHER_ADDR_LEN];
1248 int error; 1248 int error;
1249 1249
1250 KASSERT(mutex_owned(&ifv->ifv_lock)); 1250 KASSERT(mutex_owned(&ifv->ifv_lock));
1251 1251
1252 /* 1252 /*
1253 * Find a key to lookup vlan_mc_entry. We have to do this 1253 * Find a key to lookup vlan_mc_entry. We have to do this
1254 * before calling ether_delmulti for obvious reasons. 1254 * before calling ether_delmulti for obvious reasons.
1255 */ 1255 */
1256 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0) 1256 if ((error = ether_multiaddr(sa, addrlo, addrhi)) != 0)
1257 return error; 1257 return error;
1258 1258
1259 ETHER_LOCK(&ifv->ifv_ec); 1259 ETHER_LOCK(&ifv->ifv_ec);
1260 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec); 1260 enm = ether_lookup_multi(addrlo, addrhi, &ifv->ifv_ec);
1261 ETHER_UNLOCK(&ifv->ifv_ec); 1261 ETHER_UNLOCK(&ifv->ifv_ec);
1262 if (enm == NULL) 1262 if (enm == NULL)
1263 return EINVAL; 1263 return EINVAL;
1264 1264
1265 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) { 1265 LIST_FOREACH(mc, &ifv->ifv_mc_listhead, mc_entries) {
1266 if (mc->mc_enm == enm) 1266 if (mc->mc_enm == enm)
1267 break; 1267 break;
1268 } 1268 }
1269 1269
1270 /* We woun't delete entries we didn't add */ 1270 /* We woun't delete entries we didn't add */
1271 if (mc == NULL) 1271 if (mc == NULL)
1272 return EINVAL; 1272 return EINVAL;
1273 1273
1274 error = ether_delmulti(sa, &ifv->ifv_ec); 1274 error = ether_delmulti(sa, &ifv->ifv_ec);
1275 if (error != ENETRESET) 1275 if (error != ENETRESET)
1276 return error; 1276 return error;
1277 1277
1278 /* We no longer use this multicast address. Tell parent so. */ 1278 /* We no longer use this multicast address. Tell parent so. */
1279 mib = ifv->ifv_mib; 1279 mib = ifv->ifv_mib;
1280 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa); 1280 error = if_mcast_op(mib->ifvm_p, SIOCDELMULTI, sa);
1281 1281
1282 if (error == 0) { 1282 if (error == 0) {
1283 /* And forget about this address. */ 1283 /* And forget about this address. */
1284 LIST_REMOVE(mc, mc_entries); 1284 LIST_REMOVE(mc, mc_entries);
1285 free(mc, M_DEVBUF); 1285 free(mc, M_DEVBUF);
1286 } else { 1286 } else {
1287 (void)ether_addmulti(sa, &ifv->ifv_ec); 1287 (void)ether_addmulti(sa, &ifv->ifv_ec);
1288 } 1288 }
1289 1289
1290 return error; 1290 return error;
1291} 1291}
1292 1292
/*
 * Delete any multicast address we have asked to add from parent
 * interface.  Called when the vlan is being unconfigured.
 *
 * Must be called with ifv->ifv_lock held (asserted below).  Parent
 * errors are deliberately ignored: the list entries are freed
 * regardless, since the vlan is going away.
 */
static void
vlan_ether_purgemulti(struct ifvlan *ifv)
{
	struct vlan_mc_entry *mc;
	struct ifvlan_linkmib *mib;

	KASSERT(mutex_owned(&ifv->ifv_lock));
	mib = ifv->ifv_mib;
	/* No mib means no parent was ever notified; nothing to undo. */
	if (mib == NULL) {
		return;
	}

	while ((mc = LIST_FIRST(&ifv->ifv_mc_listhead)) != NULL) {
		/* Best-effort: tell the parent, then drop our record. */
		(void)if_mcast_op(mib->ifvm_p, SIOCDELMULTI,
		    sstocsa(&mc->mc_addr));
		LIST_REMOVE(mc, mc_entries);
		free(mc, M_DEVBUF);
	}
}
1316 1316
/*
 * if_start routine for the vlan interface: drain if_snd, encapsulate
 * each packet with the 802.1Q tag for this vlan (in software unless the
 * parent advertises ETHERCAP_VLAN_HWTAGGING) and hand it to the parent
 * via if_transmit_lock().
 *
 * The parent pointer is stabilized for the whole loop with a psref on
 * the link mib; IFF_OACTIVE is held across the drain.
 */
static void
vlan_start(struct ifnet *ifp)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct mbuf *m;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL)
		return;

	/* Not (or no longer) attached to a parent: nothing to send to. */
	if (__predict_false(mib->ifvm_p == NULL)) {
		vlan_putref_linkmib(mib, &psref);
		return;
	}

	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	ifp->if_flags |= IFF_OACTIVE;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifdef ALTQ
		/*
		 * KERNEL_LOCK is required for ALTQ even if NET_MPSAFE is
		 * defined.
		 */
		KERNEL_LOCK(1, NULL);
		/*
		 * If ALTQ is enabled on the parent interface, do
		 * classification; the queueing discipline might
		 * not require classification, but might require
		 * the address family/header pointer in the pktattr.
		 */
		if (ALTQ_IS_ENABLED(&p->if_snd)) {
			switch (p->if_type) {
			case IFT_ETHER:
				altq_etherclassify(&p->if_snd, m);
				break;
			default:
				panic("%s: impossible (altq)", __func__);
			}
		}
		KERNEL_UNLOCK_ONE(NULL);
#endif /* ALTQ */

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * If the parent can insert the tag itself, just mark
		 * the tag in the mbuf header.
		 */
		if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
			vlan_set_tag(m, mib->ifvm_tag);
		} else {
			/*
			 * insert the tag ourselves
			 */
			M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
			if (m == NULL) {
				printf("%s: unable to prepend encap header",
				    p->if_xname);
				if_statinc(ifp, if_oerrors);
				continue;
			}

			switch (p->if_type) {
			case IFT_ETHER:
			    {
				struct ether_vlan_header *evl;

				/* Header must be contiguous to rewrite it. */
				if (m->m_len < sizeof(struct ether_vlan_header))
					m = m_pullup(m,
					    sizeof(struct ether_vlan_header));
				if (m == NULL) {
					printf("%s: unable to pullup encap "
					    "header", p->if_xname);
					if_statinc(ifp, if_oerrors);
					continue;
				}

				/*
				 * Transform the Ethernet header into an
				 * Ethernet header with 802.1Q encapsulation.
				 */
				memmove(mtod(m, void *),
				    mtod(m, char *) + mib->ifvm_encaplen,
				    sizeof(struct ether_header));
				evl = mtod(m, struct ether_vlan_header *);
				evl->evl_proto = evl->evl_encap_proto;
				evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
				evl->evl_tag = htons(mib->ifvm_tag);

				/*
				 * To cater for VLAN-aware layer 2 ethernet
				 * switches which may need to strip the tag
				 * before forwarding the packet, make sure
				 * the packet+tag is at least 68 bytes long.
				 * This is necessary because our parent will
				 * only pad to 64 bytes (ETHER_MIN_LEN) and
				 * some switches will not pad by themselves
				 * after deleting a tag.
				 */
				const size_t min_data_len = ETHER_MIN_LEN -
				    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
				if (m->m_pkthdr.len < min_data_len) {
					m_copyback(m, m->m_pkthdr.len,
					    min_data_len - m->m_pkthdr.len,
					    vlan_zero_pad_buff);
				}
				break;
			    }

			default:
				panic("%s: impossible", __func__);
			}
		}

		/* Parent went down while we were encapsulating: drop. */
		if ((p->if_flags & IFF_RUNNING) == 0) {
			m_freem(m);
			continue;
		}

		error = if_transmit_lock(p, m);
		if (error) {
			/* mbuf is already freed */
			if_statinc(ifp, if_oerrors);
			continue;
		}
		if_statinc(ifp, if_opackets);
	}

	ifp->if_flags &= ~IFF_OACTIVE;

	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
}
1461 1461
/*
 * if_transmit routine for the vlan interface: run outbound pfil hooks,
 * encapsulate the packet with the 802.1Q tag for this vlan (in software
 * unless the parent advertises ETHERCAP_VLAN_HWTAGGING) and pass it to
 * the parent via if_transmit_lock().
 *
 * Returns 0 on success or an errno; the mbuf is consumed in all cases.
 * The parent pointer is stabilized with a psref on the link mib.
 */
static int
vlan_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv = ifp->if_softc;
	struct ifnet *p;
	struct ethercom *ec;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	int error;
	/* Snapshot stats inputs now; m may be reallocated or freed below. */
	size_t pktlen = m->m_pkthdr.len;
	bool mcast = (m->m_flags & M_MCAST) != 0;

	mib = vlan_getref_linkmib(ifv, &psref);
	if (mib == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	/* Not (or no longer) attached to a parent interface. */
	if (__predict_false(mib->ifvm_p == NULL)) {
		vlan_putref_linkmib(mib, &psref);
		m_freem(m);
		return ENETDOWN;
	}

	p = mib->ifvm_p;
	ec = (void *)mib->ifvm_p;

	bpf_mtap(ifp, m, BPF_D_OUT);

	/* Outbound packet filter may consume or replace the mbuf. */
	if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
		goto out;
	if (m == NULL)
		goto out;

	/*
	 * If the parent can insert the tag itself, just mark
	 * the tag in the mbuf header.
	 */
	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
		vlan_set_tag(m, mib->ifvm_tag);
	} else {
		/*
		 * insert the tag ourselves
		 */
		M_PREPEND(m, mib->ifvm_encaplen, M_DONTWAIT);
		if (m == NULL) {
			printf("%s: unable to prepend encap header",
			    p->if_xname);
			if_statinc(ifp, if_oerrors);
			error = ENOBUFS;
			goto out;
		}

		switch (p->if_type) {
		case IFT_ETHER:
		    {
			struct ether_vlan_header *evl;

			/* Header must be contiguous to rewrite it. */
			if (m->m_len < sizeof(struct ether_vlan_header))
				m = m_pullup(m,
				    sizeof(struct ether_vlan_header));
			if (m == NULL) {
				printf("%s: unable to pullup encap "
				    "header", p->if_xname);
				if_statinc(ifp, if_oerrors);
				error = ENOBUFS;
				goto out;
			}

			/*
			 * Transform the Ethernet header into an
			 * Ethernet header with 802.1Q encapsulation.
			 */
			memmove(mtod(m, void *),
			    mtod(m, char *) + mib->ifvm_encaplen,
			    sizeof(struct ether_header));
			evl = mtod(m, struct ether_vlan_header *);
			evl->evl_proto = evl->evl_encap_proto;
			evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
			evl->evl_tag = htons(mib->ifvm_tag);

			/*
			 * To cater for VLAN-aware layer 2 ethernet
			 * switches which may need to strip the tag
			 * before forwarding the packet, make sure
			 * the packet+tag is at least 68 bytes long.
			 * This is necessary because our parent will
			 * only pad to 64 bytes (ETHER_MIN_LEN) and
			 * some switches will not pad by themselves
			 * after deleting a tag.
			 */
			const size_t min_data_len = ETHER_MIN_LEN -
			    ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
			if (m->m_pkthdr.len < min_data_len) {
				m_copyback(m, m->m_pkthdr.len,
				    min_data_len - m->m_pkthdr.len,
				    vlan_zero_pad_buff);
			}
			break;
		    }

		default:
			panic("%s: impossible", __func__);
		}
	}

	/* Parent went down while we were encapsulating: drop. */
	if ((p->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto out;
	}

	error = if_transmit_lock(p, m);
	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if (error) {
		/* mbuf is already freed */
		if_statinc_ref(nsr, if_oerrors);
	} else {
		if_statinc_ref(nsr, if_opackets);
		if_statadd_ref(nsr, if_obytes, pktlen);
		if (mcast)
			if_statinc_ref(nsr, if_omcasts);
	}
	IF_STAT_PUTREF(ifp);

out:
	/* Remove reference to mib before release */
	vlan_putref_linkmib(mib, &psref);
	return error;
}
1592 1592
/*
 * Given an Ethernet frame, find a valid vlan interface corresponding to the
 * given source interface and tag, then run the real packet through the
 * parent's input routine.
 *
 * The tag either arrives pre-stripped in the mbuf header (M_VLANTAG,
 * hardware tagging) or in-line as an 802.1Q encapsulation header that we
 * must rewrite and strip ourselves.
 */
void
vlan_input(struct ifnet *ifp, struct mbuf *m)
{
	struct ifvlan *ifv;
	uint16_t vid;
	struct ifvlan_linkmib *mib;
	struct psref psref;
	bool have_vtag;

	have_vtag = vlan_has_tag(m);
	if (have_vtag) {
		/* Hardware already stripped the tag; just consume it. */
		vid = EVL_VLANOFTAG(vlan_get_tag(m));
		m->m_flags &= ~M_VLANTAG;
	} else {
		struct ether_vlan_header *evl;

		if (ifp->if_type != IFT_ETHER) {
			panic("%s: impossible", __func__);
		}

		if (m->m_len < sizeof(struct ether_vlan_header) &&
		    (m = m_pullup(m,
		     sizeof(struct ether_vlan_header))) == NULL) {
			printf("%s: no memory for VLAN header, "
			    "dropping packet.\n", ifp->if_xname);
			return;
		}

		/*
		 * We rewrite the encapsulation header in place below
		 * (and later memmove/m_adj it away), so the mbuf data
		 * must not be shared or read-only.
		 */
		if (m_makewritable(&m, 0,
		    sizeof(struct ether_vlan_header), M_DONTWAIT)) {
			m_freem(m);
			if_statinc(ifp, if_ierrors);
			return;
		}

		evl = mtod(m, struct ether_vlan_header *);
		KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);

		vid = EVL_VLANOFTAG(ntohs(evl->evl_tag));

		/*
		 * Restore the original ethertype.  We'll remove
		 * the encapsulation after we've found the vlan
		 * interface corresponding to the tag.
		 */
		evl->evl_encap_proto = evl->evl_proto;
	}

	/* Find the vlan attached to this parent with this tag. */
	mib = vlan_lookup_tag_psref(ifp, vid, &psref);
	if (mib == NULL) {
		m_freem(m);
		if_statinc(ifp, if_noproto);
		return;
	}
	KASSERT(mib->ifvm_encaplen == ETHER_VLAN_ENCAP_LEN);

	ifv = mib->ifvm_ifvlan;
	if ((ifv->ifv_if.if_flags & (IFF_UP | IFF_RUNNING)) !=
	    (IFF_UP | IFF_RUNNING)) {
		m_freem(m);
		if_statinc(ifp, if_noproto);
		goto out;
	}

	/*
	 * Now, remove the encapsulation header.  The original
	 * header has already been fixed up above.
	 */
	if (!have_vtag) {
		memmove(mtod(m, char *) + mib->ifvm_encaplen,
		    mtod(m, void *), sizeof(struct ether_header));
		m_adj(m, mib->ifvm_encaplen);
	}

	/*
	 * Drop promiscuously received packets if we are not in
	 * promiscuous mode
	 */
	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0 &&
	    (ifp->if_flags & IFF_PROMISC) &&
	    (ifv->ifv_if.if_flags & IFF_PROMISC) == 0) {
		struct ether_header *eh;

		/* Unicast frame not addressed to this vlan interface. */
		eh = mtod(m, struct ether_header *);
		if (memcmp(CLLADDR(ifv->ifv_if.if_sadl),
		    eh->ether_dhost, ETHER_ADDR_LEN) != 0) {
			m_freem(m);
			if_statinc(&ifv->ifv_if, if_ierrors);
			goto out;
		}
	}

	/* The packet now belongs to the vlan interface. */
	m_set_rcvif(m, &ifv->ifv_if);

	if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
		goto out;
	if (m == NULL)
		goto out;

	m->m_flags &= ~M_PROMISC;
	if_input(&ifv->ifv_if, m);
out:
	vlan_putref_linkmib(mib, &psref);
}
1694 1702
1695/* 1703/*
1696 * If the parent link state changed, the vlan link state should change also. 1704 * If the parent link state changed, the vlan link state should change also.
1697 */ 1705 */
1698void 1706void
1699vlan_link_state_changed(struct ifnet *p, int link_state) 1707vlan_link_state_changed(struct ifnet *p, int link_state)
1700{ 1708{
1701 struct ifvlan *ifv; 1709 struct ifvlan *ifv;
1702 struct ifvlan_linkmib *mib; 1710 struct ifvlan_linkmib *mib;
1703 struct psref psref; 1711 struct psref psref;
1704 struct ifnet *ifp; 1712 struct ifnet *ifp;
1705 1713
1706 mutex_enter(&ifv_list.lock); 1714 mutex_enter(&ifv_list.lock);
1707 1715
1708 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) { 1716 LIST_FOREACH(ifv, &ifv_list.list, ifv_list) {
1709 mib = vlan_getref_linkmib(ifv, &psref); 1717 mib = vlan_getref_linkmib(ifv, &psref);
1710 if (mib == NULL) 1718 if (mib == NULL)
1711 continue; 1719 continue;
1712 1720
1713 if (mib->ifvm_p == p) { 1721 if (mib->ifvm_p == p) {
1714 ifp = &mib->ifvm_ifvlan->ifv_if; 1722 ifp = &mib->ifvm_ifvlan->ifv_if;
1715 if_link_state_change(ifp, link_state); 1723 if_link_state_change(ifp, link_state);
1716 } 1724 }
1717 1725
1718 vlan_putref_linkmib(mib, &psref); 1726 vlan_putref_linkmib(mib, &psref);
1719 } 1727 }
1720 1728
1721 mutex_exit(&ifv_list.lock); 1729 mutex_exit(&ifv_list.lock);
1722} 1730}
1723 1731
1724/* 1732/*
1725 * Module infrastructure 1733 * Module infrastructure
1726 */ 1734 */
1727#include "if_module.h" 1735#include "if_module.h"
1728 1736
1729IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL) 1737IF_MODULE(MODULE_CLASS_DRIVER, vlan, NULL)