Tue Nov 15 10:47:39 2022 UTC ()
arp: Validate ARP source hardware address matches Ethernet source

RFC 5227 section 1.1 states that for a DAD ARP probe the sender hardware
address must match the hardware address of the interface sending the
packet.

We can now verify this by checking the mbuf tag PACKET_TAG_ETHERNET_SRC,
which ether_input() now attaches to ARP packets to record the Ethernet
source address.
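
The matching check on the receive side is in src/sys/netinet/if_arp.c
(rev 1.310 -> 1.311).  The following is only a rough sketch of what the
comparison amounts to, not a copy of the committed code: the placement in
the ARP input path, the locals m (the mbuf) and ah (the ARP header), and
the "out" label are assumptions.

	struct m_tag *mtag;

	/*
	 * ether_input() attaches this tag to ARP packets (see the
	 * if_ethersubr.c hunk below); the source address is stored
	 * directly after the m_tag header.
	 */
	mtag = m_tag_find(m, PACKET_TAG_ETHERNET_SRC);
	if (mtag != NULL && ah->ar_hln == ETHER_ADDR_LEN &&
	    memcmp(ar_sha(ah), mtag + 1, ETHER_ADDR_LEN) != 0) {
		/*
		 * The ARP sender hardware address does not match the
		 * Ethernet source address; drop the packet.
		 */
		goto out;
	}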

This fixes an obscure issue where an old router was sending out bogus
ARP probes.

Thanks to Ryo Shimizu <ryo@nerv.org> for the re-implementation.


(roy)
diff -r1.322 -r1.323 src/sys/net/if_ethersubr.c
diff -r1.310 -r1.311 src/sys/netinet/if_arp.c
diff -r1.235 -r1.236 src/sys/sys/mbuf.h

cvs diff -r1.322 -r1.323 src/sys/net/if_ethersubr.c

--- src/sys/net/if_ethersubr.c 2022/11/15 09:14:28 1.322
+++ src/sys/net/if_ethersubr.c 2022/11/15 10:47:39 1.323
@@ -1,1887 +1,1900 @@ @@ -1,1887 +1,1900 @@
1/* $NetBSD: if_ethersubr.c,v 1.322 2022/11/15 09:14:28 roy Exp $ */ 1/* $NetBSD: if_ethersubr.c,v 1.323 2022/11/15 10:47:39 roy Exp $ */
2 2
3/* 3/*
4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project. 4 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors 15 * 3. Neither the name of the project nor the names of its contributors
16 * may be used to endorse or promote products derived from this software 16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission. 17 * without specific prior written permission.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND 19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE. 29 * SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (c) 1982, 1989, 1993 33 * Copyright (c) 1982, 1989, 1993
34 * The Regents of the University of California. All rights reserved. 34 * The Regents of the University of California. All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions 37 * modification, are permitted provided that the following conditions
38 * are met: 38 * are met:
39 * 1. Redistributions of source code must retain the above copyright 39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer. 40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright 41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the 42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution. 43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors 44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software 45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission. 46 * without specific prior written permission.
47 * 47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE. 58 * SUCH DAMAGE.
59 * 59 *
60 * @(#)if_ethersubr.c 8.2 (Berkeley) 4/4/96 60 * @(#)if_ethersubr.c 8.2 (Berkeley) 4/4/96
61 */ 61 */
62 62
63#include <sys/cdefs.h> 63#include <sys/cdefs.h>
64__KERNEL_RCSID(0, "$NetBSD: if_ethersubr.c,v 1.322 2022/11/15 09:14:28 roy Exp $"); 64__KERNEL_RCSID(0, "$NetBSD: if_ethersubr.c,v 1.323 2022/11/15 10:47:39 roy Exp $");
65 65
66#ifdef _KERNEL_OPT 66#ifdef _KERNEL_OPT
67#include "opt_inet.h" 67#include "opt_inet.h"
68#include "opt_atalk.h" 68#include "opt_atalk.h"
69#include "opt_mbuftrace.h" 69#include "opt_mbuftrace.h"
70#include "opt_mpls.h" 70#include "opt_mpls.h"
71#include "opt_gateway.h" 71#include "opt_gateway.h"
72#include "opt_pppoe.h" 72#include "opt_pppoe.h"
73#include "opt_net_mpsafe.h" 73#include "opt_net_mpsafe.h"
74#endif 74#endif
75 75
76#include "vlan.h" 76#include "vlan.h"
77#include "pppoe.h" 77#include "pppoe.h"
78#include "bridge.h" 78#include "bridge.h"
79#include "arp.h" 79#include "arp.h"
80#include "agr.h" 80#include "agr.h"
81 81
82#include <sys/sysctl.h> 82#include <sys/sysctl.h>
83#include <sys/mbuf.h> 83#include <sys/mbuf.h>
84#include <sys/mutex.h> 84#include <sys/mutex.h>
85#include <sys/ioctl.h> 85#include <sys/ioctl.h>
86#include <sys/errno.h> 86#include <sys/errno.h>
87#include <sys/device.h> 87#include <sys/device.h>
88#include <sys/entropy.h> 88#include <sys/entropy.h>
89#include <sys/rndsource.h> 89#include <sys/rndsource.h>
90#include <sys/cpu.h> 90#include <sys/cpu.h>
91#include <sys/kmem.h> 91#include <sys/kmem.h>
92#include <sys/hook.h> 92#include <sys/hook.h>
93 93
94#include <net/if.h> 94#include <net/if.h>
95#include <net/route.h> 95#include <net/route.h>
96#include <net/if_llc.h> 96#include <net/if_llc.h>
97#include <net/if_dl.h> 97#include <net/if_dl.h>
98#include <net/if_types.h> 98#include <net/if_types.h>
99#include <net/pktqueue.h> 99#include <net/pktqueue.h>
100 100
101#include <net/if_media.h> 101#include <net/if_media.h>
102#include <dev/mii/mii.h> 102#include <dev/mii/mii.h>
103#include <dev/mii/miivar.h> 103#include <dev/mii/miivar.h>
104 104
105#if NARP == 0 105#if NARP == 0
106/* 106/*
107 * XXX there should really be a way to issue this warning from within config(8) 107 * XXX there should really be a way to issue this warning from within config(8)
108 */ 108 */
109#error You have included NETATALK or a pseudo-device in your configuration that depends on the presence of ethernet interfaces, but have no such interfaces configured. Check if you really need pseudo-device bridge, pppoe, vlan or options NETATALK. 109#error You have included NETATALK or a pseudo-device in your configuration that depends on the presence of ethernet interfaces, but have no such interfaces configured. Check if you really need pseudo-device bridge, pppoe, vlan or options NETATALK.
110#endif 110#endif
111 111
112#include <net/bpf.h> 112#include <net/bpf.h>
113 113
114#include <net/if_ether.h> 114#include <net/if_ether.h>
115#include <net/if_vlanvar.h> 115#include <net/if_vlanvar.h>
116 116
117#if NPPPOE > 0 117#if NPPPOE > 0
118#include <net/if_pppoe.h> 118#include <net/if_pppoe.h>
119#endif 119#endif
120 120
121#if NAGR > 0 121#if NAGR > 0
122#include <net/ether_slowprotocols.h> 122#include <net/ether_slowprotocols.h>
123#include <net/agr/ieee8023ad.h> 123#include <net/agr/ieee8023ad.h>
124#include <net/agr/if_agrvar.h> 124#include <net/agr/if_agrvar.h>
125#endif 125#endif
126 126
127#if NBRIDGE > 0 127#if NBRIDGE > 0
128#include <net/if_bridgevar.h> 128#include <net/if_bridgevar.h>
129#endif 129#endif
130 130
131#include <netinet/in.h> 131#include <netinet/in.h>
132#ifdef INET 132#ifdef INET
133#include <netinet/in_var.h> 133#include <netinet/in_var.h>
134#endif 134#endif
135#include <netinet/if_inarp.h> 135#include <netinet/if_inarp.h>
136 136
137#ifdef INET6 137#ifdef INET6
138#ifndef INET 138#ifndef INET
139#include <netinet/in.h> 139#include <netinet/in.h>
140#endif 140#endif
141#include <netinet6/in6_var.h> 141#include <netinet6/in6_var.h>
142#include <netinet6/nd6.h> 142#include <netinet6/nd6.h>
143#endif 143#endif
144 144
145#include "carp.h" 145#include "carp.h"
146#if NCARP > 0 146#if NCARP > 0
147#include <netinet/ip_carp.h> 147#include <netinet/ip_carp.h>
148#endif 148#endif
149 149
150#ifdef NETATALK 150#ifdef NETATALK
151#include <netatalk/at.h> 151#include <netatalk/at.h>
152#include <netatalk/at_var.h> 152#include <netatalk/at_var.h>
153#include <netatalk/at_extern.h> 153#include <netatalk/at_extern.h>
154 154
155#define llc_snap_org_code llc_un.type_snap.org_code 155#define llc_snap_org_code llc_un.type_snap.org_code
156#define llc_snap_ether_type llc_un.type_snap.ether_type 156#define llc_snap_ether_type llc_un.type_snap.ether_type
157 157
158extern u_char at_org_code[3]; 158extern u_char at_org_code[3];
159extern u_char aarp_org_code[3]; 159extern u_char aarp_org_code[3];
160#endif /* NETATALK */ 160#endif /* NETATALK */
161 161
162#ifdef MPLS 162#ifdef MPLS
163#include <netmpls/mpls.h> 163#include <netmpls/mpls.h>
164#include <netmpls/mpls_var.h> 164#include <netmpls/mpls_var.h>
165#endif 165#endif
166 166
167CTASSERT(sizeof(struct ether_addr) == 6); 167CTASSERT(sizeof(struct ether_addr) == 6);
168CTASSERT(sizeof(struct ether_header) == 14); 168CTASSERT(sizeof(struct ether_header) == 14);
169 169
170#ifdef DIAGNOSTIC 170#ifdef DIAGNOSTIC
171static struct timeval bigpktppslim_last; 171static struct timeval bigpktppslim_last;
172static int bigpktppslim = 2; /* XXX */ 172static int bigpktppslim = 2; /* XXX */
173static int bigpktpps_count; 173static int bigpktpps_count;
174static kmutex_t bigpktpps_lock __cacheline_aligned; 174static kmutex_t bigpktpps_lock __cacheline_aligned;
175#endif 175#endif
176 176
177const uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = 177const uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
178 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 178 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
179const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] = 179const uint8_t ethermulticastaddr_slowprotocols[ETHER_ADDR_LEN] =
180 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 }; 180 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 };
181#define senderr(e) { error = (e); goto bad;} 181#define senderr(e) { error = (e); goto bad;}
182 182
183static pktq_rps_hash_func_t ether_pktq_rps_hash_p; 183static pktq_rps_hash_func_t ether_pktq_rps_hash_p;
184 184
185static int ether_output(struct ifnet *, struct mbuf *, 185static int ether_output(struct ifnet *, struct mbuf *,
186 const struct sockaddr *, const struct rtentry *); 186 const struct sockaddr *, const struct rtentry *);
187 187
188/* 188/*
189 * Ethernet output routine. 189 * Ethernet output routine.
190 * Encapsulate a packet of type family for the local net. 190 * Encapsulate a packet of type family for the local net.
191 * Assumes that ifp is actually pointer to ethercom structure. 191 * Assumes that ifp is actually pointer to ethercom structure.
192 */ 192 */
193static int 193static int
194ether_output(struct ifnet * const ifp0, struct mbuf * const m0, 194ether_output(struct ifnet * const ifp0, struct mbuf * const m0,
195 const struct sockaddr * const dst, const struct rtentry *rt) 195 const struct sockaddr * const dst, const struct rtentry *rt)
196{ 196{
197 uint8_t esrc[ETHER_ADDR_LEN], edst[ETHER_ADDR_LEN]; 197 uint8_t esrc[ETHER_ADDR_LEN], edst[ETHER_ADDR_LEN];
198 uint16_t etype = 0; 198 uint16_t etype = 0;
199 int error = 0, hdrcmplt = 0; 199 int error = 0, hdrcmplt = 0;
200 struct mbuf *m = m0; 200 struct mbuf *m = m0;
201 struct mbuf *mcopy = NULL; 201 struct mbuf *mcopy = NULL;
202 struct ether_header *eh; 202 struct ether_header *eh;
203 struct ifnet *ifp = ifp0; 203 struct ifnet *ifp = ifp0;
204#ifdef INET 204#ifdef INET
205 struct arphdr *ah; 205 struct arphdr *ah;
206#endif 206#endif
207#ifdef NETATALK 207#ifdef NETATALK
208 struct at_ifaddr *aa; 208 struct at_ifaddr *aa;
209#endif 209#endif
210 210
211#ifdef MBUFTRACE 211#ifdef MBUFTRACE
212 m_claimm(m, ifp->if_mowner); 212 m_claimm(m, ifp->if_mowner);
213#endif 213#endif
214 214
215#if NCARP > 0 215#if NCARP > 0
216 if (ifp->if_type == IFT_CARP) { 216 if (ifp->if_type == IFT_CARP) {
217 struct ifaddr *ifa; 217 struct ifaddr *ifa;
218 int s = pserialize_read_enter(); 218 int s = pserialize_read_enter();
219 219
220 /* loop back if this is going to the carp interface */ 220 /* loop back if this is going to the carp interface */
221 if (dst != NULL && ifp0->if_link_state == LINK_STATE_UP && 221 if (dst != NULL && ifp0->if_link_state == LINK_STATE_UP &&
222 (ifa = ifa_ifwithaddr(dst)) != NULL) { 222 (ifa = ifa_ifwithaddr(dst)) != NULL) {
223 if (ifa->ifa_ifp == ifp0) { 223 if (ifa->ifa_ifp == ifp0) {
224 pserialize_read_exit(s); 224 pserialize_read_exit(s);
225 return looutput(ifp0, m, dst, rt); 225 return looutput(ifp0, m, dst, rt);
226 } 226 }
227 } 227 }
228 pserialize_read_exit(s); 228 pserialize_read_exit(s);
229 229
230 ifp = ifp->if_carpdev; 230 ifp = ifp->if_carpdev;
231 /* ac = (struct arpcom *)ifp; */ 231 /* ac = (struct arpcom *)ifp; */
232 232
233 if ((ifp0->if_flags & (IFF_UP | IFF_RUNNING)) != 233 if ((ifp0->if_flags & (IFF_UP | IFF_RUNNING)) !=
234 (IFF_UP | IFF_RUNNING)) 234 (IFF_UP | IFF_RUNNING))
235 senderr(ENETDOWN); 235 senderr(ENETDOWN);
236 } 236 }
237#endif 237#endif
238 238
239 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) 239 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
240 senderr(ENETDOWN); 240 senderr(ENETDOWN);
241 241
242 switch (dst->sa_family) { 242 switch (dst->sa_family) {
243 243
244#ifdef INET 244#ifdef INET
245 case AF_INET: 245 case AF_INET:
246 if (m->m_flags & M_BCAST) { 246 if (m->m_flags & M_BCAST) {
247 memcpy(edst, etherbroadcastaddr, sizeof(edst)); 247 memcpy(edst, etherbroadcastaddr, sizeof(edst));
248 } else if (m->m_flags & M_MCAST) { 248 } else if (m->m_flags & M_MCAST) {
249 ETHER_MAP_IP_MULTICAST(&satocsin(dst)->sin_addr, edst); 249 ETHER_MAP_IP_MULTICAST(&satocsin(dst)->sin_addr, edst);
250 } else { 250 } else {
251 error = arpresolve(ifp0, rt, m, dst, edst, sizeof(edst)); 251 error = arpresolve(ifp0, rt, m, dst, edst, sizeof(edst));
252 if (error) 252 if (error)
253 return (error == EWOULDBLOCK) ? 0 : error; 253 return (error == EWOULDBLOCK) ? 0 : error;
254 } 254 }
255 /* If broadcasting on a simplex interface, loopback a copy */ 255 /* If broadcasting on a simplex interface, loopback a copy */
256 if ((m->m_flags & M_BCAST) && (ifp->if_flags & IFF_SIMPLEX)) 256 if ((m->m_flags & M_BCAST) && (ifp->if_flags & IFF_SIMPLEX))
257 mcopy = m_copypacket(m, M_DONTWAIT); 257 mcopy = m_copypacket(m, M_DONTWAIT);
258 etype = htons(ETHERTYPE_IP); 258 etype = htons(ETHERTYPE_IP);
259 break; 259 break;
260 260
261 case AF_ARP: 261 case AF_ARP:
262 ah = mtod(m, struct arphdr *); 262 ah = mtod(m, struct arphdr *);
263 if (m->m_flags & M_BCAST) { 263 if (m->m_flags & M_BCAST) {
264 memcpy(edst, etherbroadcastaddr, sizeof(edst)); 264 memcpy(edst, etherbroadcastaddr, sizeof(edst));
265 } else { 265 } else {
266 void *tha = ar_tha(ah); 266 void *tha = ar_tha(ah);
267 267
268 if (tha == NULL) { 268 if (tha == NULL) {
269 /* fake with ARPHRD_IEEE1394 */ 269 /* fake with ARPHRD_IEEE1394 */
270 m_freem(m); 270 m_freem(m);
271 return 0; 271 return 0;
272 } 272 }
273 memcpy(edst, tha, sizeof(edst)); 273 memcpy(edst, tha, sizeof(edst));
274 } 274 }
275 275
276 ah->ar_hrd = htons(ARPHRD_ETHER); 276 ah->ar_hrd = htons(ARPHRD_ETHER);
277 277
278 switch (ntohs(ah->ar_op)) { 278 switch (ntohs(ah->ar_op)) {
279 case ARPOP_REVREQUEST: 279 case ARPOP_REVREQUEST:
280 case ARPOP_REVREPLY: 280 case ARPOP_REVREPLY:
281 etype = htons(ETHERTYPE_REVARP); 281 etype = htons(ETHERTYPE_REVARP);
282 break; 282 break;
283 283
284 case ARPOP_REQUEST: 284 case ARPOP_REQUEST:
285 case ARPOP_REPLY: 285 case ARPOP_REPLY:
286 default: 286 default:
287 etype = htons(ETHERTYPE_ARP); 287 etype = htons(ETHERTYPE_ARP);
288 } 288 }
289 break; 289 break;
290#endif 290#endif
291 291
292#ifdef INET6 292#ifdef INET6
293 case AF_INET6: 293 case AF_INET6:
294 if (m->m_flags & M_BCAST) { 294 if (m->m_flags & M_BCAST) {
295 memcpy(edst, etherbroadcastaddr, sizeof(edst)); 295 memcpy(edst, etherbroadcastaddr, sizeof(edst));
296 } else if (m->m_flags & M_MCAST) { 296 } else if (m->m_flags & M_MCAST) {
297 ETHER_MAP_IPV6_MULTICAST(&satocsin6(dst)->sin6_addr, 297 ETHER_MAP_IPV6_MULTICAST(&satocsin6(dst)->sin6_addr,
298 edst); 298 edst);
299 } else { 299 } else {
300 error = nd6_resolve(ifp0, rt, m, dst, edst, 300 error = nd6_resolve(ifp0, rt, m, dst, edst,
301 sizeof(edst)); 301 sizeof(edst));
302 if (error) 302 if (error)
303 return (error == EWOULDBLOCK) ? 0 : error; 303 return (error == EWOULDBLOCK) ? 0 : error;
304 } 304 }
305 etype = htons(ETHERTYPE_IPV6); 305 etype = htons(ETHERTYPE_IPV6);
306 break; 306 break;
307#endif 307#endif
308 308
309#ifdef NETATALK 309#ifdef NETATALK
310 case AF_APPLETALK: { 310 case AF_APPLETALK: {
311 struct ifaddr *ifa; 311 struct ifaddr *ifa;
312 int s; 312 int s;
313 313
314 KERNEL_LOCK(1, NULL); 314 KERNEL_LOCK(1, NULL);
315 315
316 if (!aarpresolve(ifp, m, (const struct sockaddr_at *)dst, edst)) { 316 if (!aarpresolve(ifp, m, (const struct sockaddr_at *)dst, edst)) {
317 KERNEL_UNLOCK_ONE(NULL); 317 KERNEL_UNLOCK_ONE(NULL);
318 return 0; 318 return 0;
319 } 319 }
320 320
321 /* 321 /*
322 * ifaddr is the first thing in at_ifaddr 322 * ifaddr is the first thing in at_ifaddr
323 */ 323 */
324 s = pserialize_read_enter(); 324 s = pserialize_read_enter();
325 ifa = at_ifawithnet((const struct sockaddr_at *)dst, ifp); 325 ifa = at_ifawithnet((const struct sockaddr_at *)dst, ifp);
326 if (ifa == NULL) { 326 if (ifa == NULL) {
327 pserialize_read_exit(s); 327 pserialize_read_exit(s);
328 KERNEL_UNLOCK_ONE(NULL); 328 KERNEL_UNLOCK_ONE(NULL);
329 senderr(EADDRNOTAVAIL); 329 senderr(EADDRNOTAVAIL);
330 } 330 }
331 aa = (struct at_ifaddr *)ifa; 331 aa = (struct at_ifaddr *)ifa;
332 332
333 /* 333 /*
334 * In the phase 2 case, we need to prepend an mbuf for the 334 * In the phase 2 case, we need to prepend an mbuf for the
335 * llc header. 335 * llc header.
336 */ 336 */
337 if (aa->aa_flags & AFA_PHASE2) { 337 if (aa->aa_flags & AFA_PHASE2) {
338 struct llc llc; 338 struct llc llc;
339 339
340 M_PREPEND(m, sizeof(struct llc), M_DONTWAIT); 340 M_PREPEND(m, sizeof(struct llc), M_DONTWAIT);
341 if (m == NULL) { 341 if (m == NULL) {
342 pserialize_read_exit(s); 342 pserialize_read_exit(s);
343 KERNEL_UNLOCK_ONE(NULL); 343 KERNEL_UNLOCK_ONE(NULL);
344 senderr(ENOBUFS); 344 senderr(ENOBUFS);
345 } 345 }
346 346
347 llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP; 347 llc.llc_dsap = llc.llc_ssap = LLC_SNAP_LSAP;
348 llc.llc_control = LLC_UI; 348 llc.llc_control = LLC_UI;
349 memcpy(llc.llc_snap_org_code, at_org_code, 349 memcpy(llc.llc_snap_org_code, at_org_code,
350 sizeof(llc.llc_snap_org_code)); 350 sizeof(llc.llc_snap_org_code));
351 llc.llc_snap_ether_type = htons(ETHERTYPE_ATALK); 351 llc.llc_snap_ether_type = htons(ETHERTYPE_ATALK);
352 memcpy(mtod(m, void *), &llc, sizeof(struct llc)); 352 memcpy(mtod(m, void *), &llc, sizeof(struct llc));
353 } else { 353 } else {
354 etype = htons(ETHERTYPE_ATALK); 354 etype = htons(ETHERTYPE_ATALK);
355 } 355 }
356 pserialize_read_exit(s); 356 pserialize_read_exit(s);
357 KERNEL_UNLOCK_ONE(NULL); 357 KERNEL_UNLOCK_ONE(NULL);
358 break; 358 break;
359 } 359 }
360#endif /* NETATALK */ 360#endif /* NETATALK */
361 361
362 case pseudo_AF_HDRCMPLT: 362 case pseudo_AF_HDRCMPLT:
363 hdrcmplt = 1; 363 hdrcmplt = 1;
364 memcpy(esrc, 364 memcpy(esrc,
365 ((const struct ether_header *)dst->sa_data)->ether_shost, 365 ((const struct ether_header *)dst->sa_data)->ether_shost,
366 sizeof(esrc)); 366 sizeof(esrc));
367 /* FALLTHROUGH */ 367 /* FALLTHROUGH */
368 368
369 case AF_UNSPEC: 369 case AF_UNSPEC:
370 memcpy(edst, 370 memcpy(edst,
371 ((const struct ether_header *)dst->sa_data)->ether_dhost, 371 ((const struct ether_header *)dst->sa_data)->ether_dhost,
372 sizeof(edst)); 372 sizeof(edst));
373 /* AF_UNSPEC doesn't swap the byte order of the ether_type. */ 373 /* AF_UNSPEC doesn't swap the byte order of the ether_type. */
374 etype = ((const struct ether_header *)dst->sa_data)->ether_type; 374 etype = ((const struct ether_header *)dst->sa_data)->ether_type;
375 break; 375 break;
376 376
377 default: 377 default:
378 printf("%s: can't handle af%d\n", ifp->if_xname, 378 printf("%s: can't handle af%d\n", ifp->if_xname,
379 dst->sa_family); 379 dst->sa_family);
380 senderr(EAFNOSUPPORT); 380 senderr(EAFNOSUPPORT);
381 } 381 }
382 382
383#ifdef MPLS 383#ifdef MPLS
384 { 384 {
385 struct m_tag *mtag; 385 struct m_tag *mtag;
386 mtag = m_tag_find(m, PACKET_TAG_MPLS); 386 mtag = m_tag_find(m, PACKET_TAG_MPLS);
387 if (mtag != NULL) { 387 if (mtag != NULL) {
388 /* Having the tag itself indicates it's MPLS */ 388 /* Having the tag itself indicates it's MPLS */
389 etype = htons(ETHERTYPE_MPLS); 389 etype = htons(ETHERTYPE_MPLS);
390 m_tag_delete(m, mtag); 390 m_tag_delete(m, mtag);
391 } 391 }
392 } 392 }
393#endif 393#endif
394 394
395 if (mcopy) 395 if (mcopy)
396 (void)looutput(ifp, mcopy, dst, rt); 396 (void)looutput(ifp, mcopy, dst, rt);
397 397
398 KASSERT((m->m_flags & M_PKTHDR) != 0); 398 KASSERT((m->m_flags & M_PKTHDR) != 0);
399 399
400 /* 400 /*
401 * If no ether type is set, this must be a 802.2 formatted packet. 401 * If no ether type is set, this must be a 802.2 formatted packet.
402 */ 402 */
403 if (etype == 0) 403 if (etype == 0)
404 etype = htons(m->m_pkthdr.len); 404 etype = htons(m->m_pkthdr.len);
405 405
406 /* 406 /*
407 * Add local net header. If no space in first mbuf, allocate another. 407 * Add local net header. If no space in first mbuf, allocate another.
408 */ 408 */
409 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT); 409 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
410 if (m == NULL) 410 if (m == NULL)
411 senderr(ENOBUFS); 411 senderr(ENOBUFS);
412 412
413 eh = mtod(m, struct ether_header *); 413 eh = mtod(m, struct ether_header *);
414 /* Note: etype is already in network byte order. */ 414 /* Note: etype is already in network byte order. */
415 memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type)); 415 memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
416 memcpy(eh->ether_dhost, edst, sizeof(edst)); 416 memcpy(eh->ether_dhost, edst, sizeof(edst));
417 if (hdrcmplt) { 417 if (hdrcmplt) {
418 memcpy(eh->ether_shost, esrc, sizeof(eh->ether_shost)); 418 memcpy(eh->ether_shost, esrc, sizeof(eh->ether_shost));
419 } else { 419 } else {
420 memcpy(eh->ether_shost, CLLADDR(ifp->if_sadl), 420 memcpy(eh->ether_shost, CLLADDR(ifp->if_sadl),
421 sizeof(eh->ether_shost)); 421 sizeof(eh->ether_shost));
422 } 422 }
423 423
424#if NCARP > 0 424#if NCARP > 0
425 if (ifp0 != ifp && ifp0->if_type == IFT_CARP) { 425 if (ifp0 != ifp && ifp0->if_type == IFT_CARP) {
426 memcpy(eh->ether_shost, CLLADDR(ifp0->if_sadl), 426 memcpy(eh->ether_shost, CLLADDR(ifp0->if_sadl),
427 sizeof(eh->ether_shost)); 427 sizeof(eh->ether_shost));
428 } 428 }
429#endif 429#endif
430 430
431 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0) 431 if ((error = pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_OUT)) != 0)
432 return error; 432 return error;
433 if (m == NULL) 433 if (m == NULL)
434 return 0; 434 return 0;
435 435
436#if NBRIDGE > 0 436#if NBRIDGE > 0
437 /* 437 /*
438 * Bridges require special output handling. 438 * Bridges require special output handling.
439 */ 439 */
440 if (ifp->if_bridge) 440 if (ifp->if_bridge)
441 return bridge_output(ifp, m, NULL, NULL); 441 return bridge_output(ifp, m, NULL, NULL);
442#endif 442#endif
443 443
444#if NCARP > 0 444#if NCARP > 0
445 if (ifp != ifp0) 445 if (ifp != ifp0)
446 if_statadd(ifp0, if_obytes, m->m_pkthdr.len + ETHER_HDR_LEN); 446 if_statadd(ifp0, if_obytes, m->m_pkthdr.len + ETHER_HDR_LEN);
447#endif 447#endif
448 448
449#ifdef ALTQ 449#ifdef ALTQ
450 KERNEL_LOCK(1, NULL); 450 KERNEL_LOCK(1, NULL);
451 /* 451 /*
452 * If ALTQ is enabled on the parent interface, do 452 * If ALTQ is enabled on the parent interface, do
453 * classification; the queueing discipline might not 453 * classification; the queueing discipline might not
454 * require classification, but might require the 454 * require classification, but might require the
455 * address family/header pointer in the pktattr. 455 * address family/header pointer in the pktattr.
456 */ 456 */
457 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 457 if (ALTQ_IS_ENABLED(&ifp->if_snd))
458 altq_etherclassify(&ifp->if_snd, m); 458 altq_etherclassify(&ifp->if_snd, m);
459 KERNEL_UNLOCK_ONE(NULL); 459 KERNEL_UNLOCK_ONE(NULL);
460#endif 460#endif
461 return ifq_enqueue(ifp, m); 461 return ifq_enqueue(ifp, m);
462 462
463bad: 463bad:
464 if_statinc(ifp, if_oerrors); 464 if_statinc(ifp, if_oerrors);
465 if (m) 465 if (m)
466 m_freem(m); 466 m_freem(m);
467 return error; 467 return error;
468} 468}
469 469
470#ifdef ALTQ 470#ifdef ALTQ
471/* 471/*
472 * This routine is a slight hack to allow a packet to be classified 472 * This routine is a slight hack to allow a packet to be classified
473 * if the Ethernet headers are present. It will go away when ALTQ's 473 * if the Ethernet headers are present. It will go away when ALTQ's
474 * classification engine understands link headers. 474 * classification engine understands link headers.
475 * 475 *
476 * XXX: We may need to do m_pullups here. First to ensure struct ether_header 476 * XXX: We may need to do m_pullups here. First to ensure struct ether_header
477 * is indeed contiguous, then to read the LLC and so on. 477 * is indeed contiguous, then to read the LLC and so on.
478 */ 478 */
479void 479void
480altq_etherclassify(struct ifaltq *ifq, struct mbuf *m) 480altq_etherclassify(struct ifaltq *ifq, struct mbuf *m)
481{ 481{
482 struct ether_header *eh; 482 struct ether_header *eh;
483 struct mbuf *mtop = m; 483 struct mbuf *mtop = m;
484 uint16_t ether_type; 484 uint16_t ether_type;
485 int hlen, af, hdrsize; 485 int hlen, af, hdrsize;
486 void *hdr; 486 void *hdr;
487 487
488 KASSERT((mtop->m_flags & M_PKTHDR) != 0); 488 KASSERT((mtop->m_flags & M_PKTHDR) != 0);
489 489
490 hlen = ETHER_HDR_LEN; 490 hlen = ETHER_HDR_LEN;
491 eh = mtod(m, struct ether_header *); 491 eh = mtod(m, struct ether_header *);
492 492
493 ether_type = htons(eh->ether_type); 493 ether_type = htons(eh->ether_type);
494 494
495 if (ether_type < ETHERMTU) { 495 if (ether_type < ETHERMTU) {
496 /* LLC/SNAP */ 496 /* LLC/SNAP */
497 struct llc *llc = (struct llc *)(eh + 1); 497 struct llc *llc = (struct llc *)(eh + 1);
498 hlen += 8; 498 hlen += 8;
499 499
500 if (m->m_len < hlen || 500 if (m->m_len < hlen ||
501 llc->llc_dsap != LLC_SNAP_LSAP || 501 llc->llc_dsap != LLC_SNAP_LSAP ||
502 llc->llc_ssap != LLC_SNAP_LSAP || 502 llc->llc_ssap != LLC_SNAP_LSAP ||
503 llc->llc_control != LLC_UI) { 503 llc->llc_control != LLC_UI) {
504 /* Not SNAP. */ 504 /* Not SNAP. */
505 goto bad; 505 goto bad;
506 } 506 }
507 507
508 ether_type = htons(llc->llc_un.type_snap.ether_type); 508 ether_type = htons(llc->llc_un.type_snap.ether_type);
509 } 509 }
510 510
511 switch (ether_type) { 511 switch (ether_type) {
512 case ETHERTYPE_IP: 512 case ETHERTYPE_IP:
513 af = AF_INET; 513 af = AF_INET;
514 hdrsize = 20; /* sizeof(struct ip) */ 514 hdrsize = 20; /* sizeof(struct ip) */
515 break; 515 break;
516 516
517 case ETHERTYPE_IPV6: 517 case ETHERTYPE_IPV6:
518 af = AF_INET6; 518 af = AF_INET6;
519 hdrsize = 40; /* sizeof(struct ip6_hdr) */ 519 hdrsize = 40; /* sizeof(struct ip6_hdr) */
520 break; 520 break;
521 521
522 default: 522 default:
523 af = AF_UNSPEC; 523 af = AF_UNSPEC;
524 hdrsize = 0; 524 hdrsize = 0;
525 break; 525 break;
526 } 526 }
527 527
528 while (m->m_len <= hlen) { 528 while (m->m_len <= hlen) {
529 hlen -= m->m_len; 529 hlen -= m->m_len;
530 m = m->m_next; 530 m = m->m_next;
531 if (m == NULL) 531 if (m == NULL)
532 goto bad; 532 goto bad;
533 } 533 }
534 534
535 if (m->m_len < (hlen + hdrsize)) { 535 if (m->m_len < (hlen + hdrsize)) {
536 /* 536 /*
537 * protocol header not in a single mbuf. 537 * protocol header not in a single mbuf.
538 * We can't cope with this situation right 538 * We can't cope with this situation right
539 * now (but it shouldn't ever happen, really, anyhow). 539 * now (but it shouldn't ever happen, really, anyhow).
540 */ 540 */
541#ifdef DEBUG 541#ifdef DEBUG
542 printf("altq_etherclassify: headers span multiple mbufs: " 542 printf("altq_etherclassify: headers span multiple mbufs: "
543 "%d < %d\n", m->m_len, (hlen + hdrsize)); 543 "%d < %d\n", m->m_len, (hlen + hdrsize));
544#endif 544#endif
545 goto bad; 545 goto bad;
546 } 546 }
547 547
548 m->m_data += hlen; 548 m->m_data += hlen;
549 m->m_len -= hlen; 549 m->m_len -= hlen;
550 550
551 hdr = mtod(m, void *); 551 hdr = mtod(m, void *);
552 552
553 if (ALTQ_NEEDS_CLASSIFY(ifq)) { 553 if (ALTQ_NEEDS_CLASSIFY(ifq)) {
554 mtop->m_pkthdr.pattr_class = 554 mtop->m_pkthdr.pattr_class =
555 (*ifq->altq_classify)(ifq->altq_clfier, m, af); 555 (*ifq->altq_classify)(ifq->altq_clfier, m, af);
556 } 556 }
557 mtop->m_pkthdr.pattr_af = af; 557 mtop->m_pkthdr.pattr_af = af;
558 mtop->m_pkthdr.pattr_hdr = hdr; 558 mtop->m_pkthdr.pattr_hdr = hdr;
559 559
560 m->m_data -= hlen; 560 m->m_data -= hlen;
561 m->m_len += hlen; 561 m->m_len += hlen;
562 562
563 return; 563 return;
564 564
565bad: 565bad:
566 mtop->m_pkthdr.pattr_class = NULL; 566 mtop->m_pkthdr.pattr_class = NULL;
567 mtop->m_pkthdr.pattr_hdr = NULL; 567 mtop->m_pkthdr.pattr_hdr = NULL;
568 mtop->m_pkthdr.pattr_af = AF_UNSPEC; 568 mtop->m_pkthdr.pattr_af = AF_UNSPEC;
569} 569}
570#endif /* ALTQ */ 570#endif /* ALTQ */
571 571
572#if defined (LLC) || defined (NETATALK) 572#if defined (LLC) || defined (NETATALK)
573static void 573static void
574ether_input_llc(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh) 574ether_input_llc(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh)
575{ 575{
576 pktqueue_t *pktq = NULL; 576 pktqueue_t *pktq = NULL;
577 struct llc *l; 577 struct llc *l;
578 578
579 if (m->m_len < sizeof(*eh) + sizeof(struct llc)) 579 if (m->m_len < sizeof(*eh) + sizeof(struct llc))
580 goto error; 580 goto error;
581 581
582 l = (struct llc *)(eh+1); 582 l = (struct llc *)(eh+1);
583 switch (l->llc_dsap) { 583 switch (l->llc_dsap) {
584#ifdef NETATALK 584#ifdef NETATALK
585 case LLC_SNAP_LSAP: 585 case LLC_SNAP_LSAP:
586 switch (l->llc_control) { 586 switch (l->llc_control) {
587 case LLC_UI: 587 case LLC_UI:
588 if (l->llc_ssap != LLC_SNAP_LSAP) 588 if (l->llc_ssap != LLC_SNAP_LSAP)
589 goto error; 589 goto error;
590 590
591 if (memcmp(&(l->llc_snap_org_code)[0], 591 if (memcmp(&(l->llc_snap_org_code)[0],
592 at_org_code, sizeof(at_org_code)) == 0 && 592 at_org_code, sizeof(at_org_code)) == 0 &&
593 ntohs(l->llc_snap_ether_type) == 593 ntohs(l->llc_snap_ether_type) ==
594 ETHERTYPE_ATALK) { 594 ETHERTYPE_ATALK) {
595 pktq = at_pktq2; 595 pktq = at_pktq2;
596 m_adj(m, sizeof(struct ether_header) 596 m_adj(m, sizeof(struct ether_header)
597 + sizeof(struct llc)); 597 + sizeof(struct llc));
598 break; 598 break;
599 } 599 }
600 600
601 if (memcmp(&(l->llc_snap_org_code)[0], 601 if (memcmp(&(l->llc_snap_org_code)[0],
602 aarp_org_code, 602 aarp_org_code,
603 sizeof(aarp_org_code)) == 0 && 603 sizeof(aarp_org_code)) == 0 &&
604 ntohs(l->llc_snap_ether_type) == 604 ntohs(l->llc_snap_ether_type) ==
605 ETHERTYPE_AARP) { 605 ETHERTYPE_AARP) {
606 m_adj(m, sizeof(struct ether_header) 606 m_adj(m, sizeof(struct ether_header)
607 + sizeof(struct llc)); 607 + sizeof(struct llc));
608 aarpinput(ifp, m); /* XXX queue? */ 608 aarpinput(ifp, m); /* XXX queue? */
609 return; 609 return;
610 } 610 }
611 611
612 default: 612 default:
613 goto error; 613 goto error;
614 } 614 }
615 break; 615 break;
616#endif 616#endif
617 default: 617 default:
618 goto noproto; 618 goto noproto;
619 } 619 }
620 620
621 KASSERT(pktq != NULL); 621 KASSERT(pktq != NULL);
622 if (__predict_false(!pktq_enqueue(pktq, m, 0))) { 622 if (__predict_false(!pktq_enqueue(pktq, m, 0))) {
623 m_freem(m); 623 m_freem(m);
624 } 624 }
625 return; 625 return;
626 626
627noproto: 627noproto:
628 m_freem(m); 628 m_freem(m);
629 if_statinc(ifp, if_noproto); 629 if_statinc(ifp, if_noproto);
630 return; 630 return;
631error: 631error:
632 m_freem(m); 632 m_freem(m);
633 if_statinc(ifp, if_ierrors); 633 if_statinc(ifp, if_ierrors);
634 return; 634 return;
635} 635}
636#endif /* defined (LLC) || defined (NETATALK) */ 636#endif /* defined (LLC) || defined (NETATALK) */
637 637
638/* 638/*
639 * Process a received Ethernet packet; 639 * Process a received Ethernet packet;
640 * the packet is in the mbuf chain m with 640 * the packet is in the mbuf chain m with
641 * the ether header. 641 * the ether header.
642 */ 642 */
643void 643void
644ether_input(struct ifnet *ifp, struct mbuf *m) 644ether_input(struct ifnet *ifp, struct mbuf *m)
645{ 645{
646#if NVLAN > 0 || defined(MBUFTRACE) 646#if NVLAN > 0 || defined(MBUFTRACE)
647 struct ethercom *ec = (struct ethercom *) ifp; 647 struct ethercom *ec = (struct ethercom *) ifp;
648#endif 648#endif
649 pktqueue_t *pktq = NULL; 649 pktqueue_t *pktq = NULL;
650 uint16_t etype; 650 uint16_t etype;
651 struct ether_header *eh; 651 struct ether_header *eh;
652 size_t ehlen; 652 size_t ehlen;
653 static int earlypkts; 653 static int earlypkts;
654 654
655 /* No RPS for not-IP. */ 655 /* No RPS for not-IP. */
656 pktq_rps_hash_func_t rps_hash = NULL; 656 pktq_rps_hash_func_t rps_hash = NULL;
657 657
658 KASSERT(!cpu_intr_p()); 658 KASSERT(!cpu_intr_p());
659 KASSERT((m->m_flags & M_PKTHDR) != 0); 659 KASSERT((m->m_flags & M_PKTHDR) != 0);
660 660
661 if ((ifp->if_flags & IFF_UP) == 0) 661 if ((ifp->if_flags & IFF_UP) == 0)
662 goto drop; 662 goto drop;
663 663
664#ifdef MBUFTRACE 664#ifdef MBUFTRACE
665 m_claimm(m, &ec->ec_rx_mowner); 665 m_claimm(m, &ec->ec_rx_mowner);
666#endif 666#endif
667 667
668 if (__predict_false(m->m_len < sizeof(*eh))) { 668 if (__predict_false(m->m_len < sizeof(*eh))) {
669 if ((m = m_pullup(m, sizeof(*eh))) == NULL) { 669 if ((m = m_pullup(m, sizeof(*eh))) == NULL) {
670 if_statinc(ifp, if_ierrors); 670 if_statinc(ifp, if_ierrors);
671 return; 671 return;
672 } 672 }
673 } 673 }
674 674
675 eh = mtod(m, struct ether_header *); 675 eh = mtod(m, struct ether_header *);
676 etype = ntohs(eh->ether_type); 676 etype = ntohs(eh->ether_type);
677 ehlen = sizeof(*eh); 677 ehlen = sizeof(*eh);
678 678
679 if (__predict_false(earlypkts < 100 || 679 if (__predict_false(earlypkts < 100 ||
680 entropy_epoch() == (unsigned)-1)) { 680 entropy_epoch() == (unsigned)-1)) {
681 rnd_add_data(NULL, eh, ehlen, 0); 681 rnd_add_data(NULL, eh, ehlen, 0);
682 earlypkts++; 682 earlypkts++;
683 } 683 }
684 684
685 /* 685 /*
686 * Determine if the packet is within its size limits. For MPLS the 686 * Determine if the packet is within its size limits. For MPLS the
687 * header length is variable, so we skip the check. 687 * header length is variable, so we skip the check.
688 */ 688 */
689 if (etype != ETHERTYPE_MPLS && m->m_pkthdr.len > 689 if (etype != ETHERTYPE_MPLS && m->m_pkthdr.len >
690 ETHER_MAX_FRAME(ifp, etype, m->m_flags & M_HASFCS)) { 690 ETHER_MAX_FRAME(ifp, etype, m->m_flags & M_HASFCS)) {
691#ifdef DIAGNOSTIC 691#ifdef DIAGNOSTIC
692 mutex_enter(&bigpktpps_lock); 692 mutex_enter(&bigpktpps_lock);
693 if (ppsratecheck(&bigpktppslim_last, &bigpktpps_count, 693 if (ppsratecheck(&bigpktppslim_last, &bigpktpps_count,
694 bigpktppslim)) { 694 bigpktppslim)) {
695 printf("%s: discarding oversize frame (len=%d)\n", 695 printf("%s: discarding oversize frame (len=%d)\n",
696 ifp->if_xname, m->m_pkthdr.len); 696 ifp->if_xname, m->m_pkthdr.len);
697 } 697 }
698 mutex_exit(&bigpktpps_lock); 698 mutex_exit(&bigpktpps_lock);
699#endif 699#endif
700 goto error; 700 goto error;
701 } 701 }
702 702
703 if (ETHER_IS_MULTICAST(eh->ether_dhost)) { 703 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
704 /* 704 /*
705 * If this is not a simplex interface, drop the packet 705 * If this is not a simplex interface, drop the packet
706 * if it came from us. 706 * if it came from us.
707 */ 707 */
708 if ((ifp->if_flags & IFF_SIMPLEX) == 0 && 708 if ((ifp->if_flags & IFF_SIMPLEX) == 0 &&
709 memcmp(CLLADDR(ifp->if_sadl), eh->ether_shost, 709 memcmp(CLLADDR(ifp->if_sadl), eh->ether_shost,
710 ETHER_ADDR_LEN) == 0) { 710 ETHER_ADDR_LEN) == 0) {
711 goto drop; 711 goto drop;
712 } 712 }
713 713
714 if (memcmp(etherbroadcastaddr, 714 if (memcmp(etherbroadcastaddr,
715 eh->ether_dhost, ETHER_ADDR_LEN) == 0) 715 eh->ether_dhost, ETHER_ADDR_LEN) == 0)
716 m->m_flags |= M_BCAST; 716 m->m_flags |= M_BCAST;
717 else 717 else
718 m->m_flags |= M_MCAST; 718 m->m_flags |= M_MCAST;
719 if_statinc(ifp, if_imcasts); 719 if_statinc(ifp, if_imcasts);
720 } 720 }
721 721
722 /* If the CRC is still on the packet, trim it off. */ 722 /* If the CRC is still on the packet, trim it off. */
723 if (m->m_flags & M_HASFCS) { 723 if (m->m_flags & M_HASFCS) {
724 m_adj(m, -ETHER_CRC_LEN); 724 m_adj(m, -ETHER_CRC_LEN);
725 m->m_flags &= ~M_HASFCS; 725 m->m_flags &= ~M_HASFCS;
726 } 726 }
727 727
728 if_statadd(ifp, if_ibytes, m->m_pkthdr.len); 728 if_statadd(ifp, if_ibytes, m->m_pkthdr.len);
729 729
730 if (!vlan_has_tag(m) && etype == ETHERTYPE_VLAN) { 730 if (!vlan_has_tag(m) && etype == ETHERTYPE_VLAN) {
731 m = ether_strip_vlantag(m); 731 m = ether_strip_vlantag(m);
732 if (m == NULL) { 732 if (m == NULL) {
733 if_statinc(ifp, if_ierrors); 733 if_statinc(ifp, if_ierrors);
734 return; 734 return;
735 } 735 }
736 736
737 eh = mtod(m, struct ether_header *); 737 eh = mtod(m, struct ether_header *);
738 etype = ntohs(eh->ether_type); 738 etype = ntohs(eh->ether_type);
739 ehlen = sizeof(*eh); 739 ehlen = sizeof(*eh);
740 } 740 }
741 741
742 if ((m->m_flags & (M_BCAST | M_MCAST | M_PROMISC)) == 0 && 742 if ((m->m_flags & (M_BCAST | M_MCAST | M_PROMISC)) == 0 &&
743 (ifp->if_flags & IFF_PROMISC) != 0 && 743 (ifp->if_flags & IFF_PROMISC) != 0 &&
744 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, 744 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
745 ETHER_ADDR_LEN) != 0) { 745 ETHER_ADDR_LEN) != 0) {
746 m->m_flags |= M_PROMISC; 746 m->m_flags |= M_PROMISC;
747 } 747 }
748 748
749 if ((m->m_flags & M_PROMISC) == 0) { 749 if ((m->m_flags & M_PROMISC) == 0) {
750 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0) 750 if (pfil_run_hooks(ifp->if_pfil, &m, ifp, PFIL_IN) != 0)
751 return; 751 return;
752 if (m == NULL) 752 if (m == NULL)
753 return; 753 return;
754 754
755 eh = mtod(m, struct ether_header *); 755 eh = mtod(m, struct ether_header *);
756 etype = ntohs(eh->ether_type); 756 etype = ntohs(eh->ether_type);
757 } 757 }
758 758
759 /* 759 /*
760 * Processing a logical interfaces that are able 760 * Processing a logical interfaces that are able
761 * to configure vlan(4). 761 * to configure vlan(4).
762 */ 762 */
763#if NAGR > 0 763#if NAGR > 0
764 if (ifp->if_lagg != NULL && 764 if (ifp->if_lagg != NULL &&
765 __predict_true(etype != ETHERTYPE_SLOWPROTOCOLS)) { 765 __predict_true(etype != ETHERTYPE_SLOWPROTOCOLS)) {
766 m->m_flags &= ~M_PROMISC; 766 m->m_flags &= ~M_PROMISC;
767 agr_input(ifp, m); 767 agr_input(ifp, m);
768 return; 768 return;
769 } 769 }
770#endif 770#endif
771 771
772 /* 772 /*
773 * VLAN processing. 773 * VLAN processing.
774 * 774 *
775 * VLAN provides service delimiting so the frames are 775 * VLAN provides service delimiting so the frames are
776 * processed before other handlings. If a VLAN interface 776 * processed before other handlings. If a VLAN interface
777 * does not exist to take those frames, they're returned 777 * does not exist to take those frames, they're returned
778 * to ether_input(). 778 * to ether_input().
779 */ 779 */
780 780
781 if (vlan_has_tag(m)) { 781 if (vlan_has_tag(m)) {
782 if (EVL_VLANOFTAG(vlan_get_tag(m)) == 0) { 782 if (EVL_VLANOFTAG(vlan_get_tag(m)) == 0) {
783 if (etype == ETHERTYPE_VLAN || 783 if (etype == ETHERTYPE_VLAN ||
784 etype == ETHERTYPE_QINQ) 784 etype == ETHERTYPE_QINQ)
785 goto drop; 785 goto drop;
786 786
787 /* XXX we should actually use the prio value? */ 787 /* XXX we should actually use the prio value? */
788 m->m_flags &= ~M_VLANTAG; 788 m->m_flags &= ~M_VLANTAG;
789 } else { 789 } else {
790#if NVLAN > 0 790#if NVLAN > 0
791 if (ec->ec_nvlans > 0) { 791 if (ec->ec_nvlans > 0) {
792 m = vlan_input(ifp, m); 792 m = vlan_input(ifp, m);
793 793
794 /* vlan_input() called ether_input() recursively */ 794 /* vlan_input() called ether_input() recursively */
795 if (m == NULL) 795 if (m == NULL)
796 return; 796 return;
797 } 797 }
798#endif 798#endif
799 /* drop VLAN frames not for this port. */ 799 /* drop VLAN frames not for this port. */
800 goto noproto; 800 goto noproto;
801 } 801 }
802 } 802 }
803 803
804#if NCARP > 0 804#if NCARP > 0
805 if (__predict_false(ifp->if_carp && ifp->if_type != IFT_CARP)) { 805 if (__predict_false(ifp->if_carp && ifp->if_type != IFT_CARP)) {
806 /* 806 /*
807 * Clear M_PROMISC, in case the packet comes from a 807 * Clear M_PROMISC, in case the packet comes from a
808 * vlan. 808 * vlan.
809 */ 809 */
810 m->m_flags &= ~M_PROMISC; 810 m->m_flags &= ~M_PROMISC;
811 if (carp_input(m, (uint8_t *)&eh->ether_shost, 811 if (carp_input(m, (uint8_t *)&eh->ether_shost,
812 (uint8_t *)&eh->ether_dhost, eh->ether_type) == 0) 812 (uint8_t *)&eh->ether_dhost, eh->ether_type) == 0)
813 return; 813 return;
814 } 814 }
815#endif 815#endif
816 816
817 /* 817 /*
818 * Handle protocols that expect to have the Ethernet header 818 * Handle protocols that expect to have the Ethernet header
819 * (and possibly FCS) intact. 819 * (and possibly FCS) intact.
820 */ 820 */
821 switch (etype) { 821 switch (etype) {
822#if NPPPOE > 0 822#if NPPPOE > 0
823 case ETHERTYPE_PPPOEDISC: 823 case ETHERTYPE_PPPOEDISC:
824 pppoedisc_input(ifp, m); 824 pppoedisc_input(ifp, m);
825 return; 825 return;
826 826
827 case ETHERTYPE_PPPOE: 827 case ETHERTYPE_PPPOE:
828 pppoe_input(ifp, m); 828 pppoe_input(ifp, m);
829 return; 829 return;
830#endif 830#endif
831 831
832 case ETHERTYPE_SLOWPROTOCOLS: { 832 case ETHERTYPE_SLOWPROTOCOLS: {
833 uint8_t subtype; 833 uint8_t subtype;
834 834
835 if (m->m_pkthdr.len < sizeof(*eh) + sizeof(subtype)) 835 if (m->m_pkthdr.len < sizeof(*eh) + sizeof(subtype))
836 goto error; 836 goto error;
837 837
838 m_copydata(m, sizeof(*eh), sizeof(subtype), &subtype); 838 m_copydata(m, sizeof(*eh), sizeof(subtype), &subtype);
839 switch (subtype) { 839 switch (subtype) {
840#if NAGR > 0 840#if NAGR > 0
841 case SLOWPROTOCOLS_SUBTYPE_LACP: 841 case SLOWPROTOCOLS_SUBTYPE_LACP:
842 if (ifp->if_lagg != NULL) { 842 if (ifp->if_lagg != NULL) {
843 ieee8023ad_lacp_input(ifp, m); 843 ieee8023ad_lacp_input(ifp, m);
844 return; 844 return;
845 } 845 }
846 break; 846 break;
847 847
848 case SLOWPROTOCOLS_SUBTYPE_MARKER: 848 case SLOWPROTOCOLS_SUBTYPE_MARKER:
849 if (ifp->if_lagg != NULL) { 849 if (ifp->if_lagg != NULL) {
850 ieee8023ad_marker_input(ifp, m); 850 ieee8023ad_marker_input(ifp, m);
851 return; 851 return;
852 } 852 }
853 break; 853 break;
854#endif 854#endif
855 855
856 default: 856 default:
857 if (subtype == 0 || subtype > 10) { 857 if (subtype == 0 || subtype > 10) {
858 /* illegal value */ 858 /* illegal value */
859 goto error; 859 goto error;
860 } 860 }
861 /* unknown subtype */ 861 /* unknown subtype */
862 break; 862 break;
863 } 863 }
864 } 864 }
865 /* FALLTHROUGH */ 865 /* FALLTHROUGH */
866 default: 866 default:
867 if (m->m_flags & M_PROMISC) 867 if (m->m_flags & M_PROMISC)
868 goto drop; 868 goto drop;
869 } 869 }
870 870
871 /* If the CRC is still on the packet, trim it off. */ 871 /* If the CRC is still on the packet, trim it off. */
872 if (m->m_flags & M_HASFCS) { 872 if (m->m_flags & M_HASFCS) {
873 m_adj(m, -ETHER_CRC_LEN); 873 m_adj(m, -ETHER_CRC_LEN);
874 m->m_flags &= ~M_HASFCS; 874 m->m_flags &= ~M_HASFCS;
875 } 875 }
876 876
877 /* etype represents the size of the payload in this case */ 877 /* etype represents the size of the payload in this case */
878 if (etype <= ETHERMTU + sizeof(struct ether_header)) { 878 if (etype <= ETHERMTU + sizeof(struct ether_header)) {
879 KASSERT(ehlen == sizeof(*eh)); 879 KASSERT(ehlen == sizeof(*eh));
880#if defined (LLC) || defined (NETATALK) 880#if defined (LLC) || defined (NETATALK)
881 ether_input_llc(ifp, m, eh); 881 ether_input_llc(ifp, m, eh);
882 return; 882 return;
883#else 883#else
884 /* ethertype of 0-1500 is regarded as noproto */ 884 /* ethertype of 0-1500 is regarded as noproto */
885 goto noproto; 885 goto noproto;
886#endif 886#endif
887 } 887 }
888 888
 889 /* For ARP packets, store the source address so that
 890 * ARP DAD probes can be validated. */
 891 if (etype == ETHERTYPE_ARP) {
 892 struct m_tag *mtag;
 893
 894 mtag = m_tag_get(PACKET_TAG_ETHERNET_SRC, ETHER_ADDR_LEN,
 895 M_NOWAIT);
 896 if (mtag != NULL) {
 897 memcpy(mtag + 1, &eh->ether_shost, ETHER_ADDR_LEN);
 898 m_tag_prepend(m, mtag);
 899 }
 900 }
 901
889 /* Strip off the Ethernet header. */ 902 /* Strip off the Ethernet header. */
890 m_adj(m, ehlen); 903 m_adj(m, ehlen);
891 904
892 switch (etype) { 905 switch (etype) {
893#ifdef INET 906#ifdef INET
894 case ETHERTYPE_IP: 907 case ETHERTYPE_IP:
895#ifdef GATEWAY 908#ifdef GATEWAY
896 if (ipflow_fastforward(m)) 909 if (ipflow_fastforward(m))
897 return; 910 return;
898#endif 911#endif
899 pktq = ip_pktq; 912 pktq = ip_pktq;
900 rps_hash = atomic_load_relaxed(&ether_pktq_rps_hash_p); 913 rps_hash = atomic_load_relaxed(&ether_pktq_rps_hash_p);
901 break; 914 break;
902 915
903 case ETHERTYPE_ARP: 916 case ETHERTYPE_ARP:
904 pktq = arp_pktq; 917 pktq = arp_pktq;
905 break; 918 break;
906 919
907 case ETHERTYPE_REVARP: 920 case ETHERTYPE_REVARP:
908 revarpinput(m); /* XXX queue? */ 921 revarpinput(m); /* XXX queue? */
909 return; 922 return;
910#endif 923#endif
911 924
912#ifdef INET6 925#ifdef INET6
913 case ETHERTYPE_IPV6: 926 case ETHERTYPE_IPV6:
914 if (__predict_false(!in6_present)) 927 if (__predict_false(!in6_present))
915 goto noproto; 928 goto noproto;
916#ifdef GATEWAY 929#ifdef GATEWAY
917 if (ip6flow_fastforward(&m)) 930 if (ip6flow_fastforward(&m))
918 return; 931 return;
919#endif 932#endif
920 pktq = ip6_pktq; 933 pktq = ip6_pktq;
921 rps_hash = atomic_load_relaxed(&ether_pktq_rps_hash_p); 934 rps_hash = atomic_load_relaxed(&ether_pktq_rps_hash_p);
922 break; 935 break;
923#endif 936#endif
924 937
925#ifdef NETATALK 938#ifdef NETATALK
926 case ETHERTYPE_ATALK: 939 case ETHERTYPE_ATALK:
927 pktq = at_pktq1; 940 pktq = at_pktq1;
928 break; 941 break;
929 942
930 case ETHERTYPE_AARP: 943 case ETHERTYPE_AARP:
931 aarpinput(ifp, m); /* XXX queue? */ 944 aarpinput(ifp, m); /* XXX queue? */
932 return; 945 return;
933#endif 946#endif
934 947
935#ifdef MPLS 948#ifdef MPLS
936 case ETHERTYPE_MPLS: 949 case ETHERTYPE_MPLS:
937 pktq = mpls_pktq; 950 pktq = mpls_pktq;
938 break; 951 break;
939#endif 952#endif
940 953
941 default: 954 default:
942 goto noproto; 955 goto noproto;
943 } 956 }
944 957
945 KASSERT(pktq != NULL); 958 KASSERT(pktq != NULL);
946 const uint32_t h = rps_hash ? pktq_rps_hash(&rps_hash, m) : 0; 959 const uint32_t h = rps_hash ? pktq_rps_hash(&rps_hash, m) : 0;
947 if (__predict_false(!pktq_enqueue(pktq, m, h))) { 960 if (__predict_false(!pktq_enqueue(pktq, m, h))) {
948 m_freem(m); 961 m_freem(m);
949 } 962 }
950 return; 963 return;
951 964
952drop: 965drop:
953 m_freem(m); 966 m_freem(m);
954 if_statinc(ifp, if_iqdrops); 967 if_statinc(ifp, if_iqdrops);
955 return; 968 return;
956noproto: 969noproto:
957 m_freem(m); 970 m_freem(m);
958 if_statinc(ifp, if_noproto); 971 if_statinc(ifp, if_noproto);
959 return; 972 return;
960error: 973error:
961 m_freem(m); 974 m_freem(m);
962 if_statinc(ifp, if_ierrors); 975 if_statinc(ifp, if_ierrors);
963 return; 976 return;
964} 977}
965 978
966static void 979static void
967ether_bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction) 980ether_bpf_mtap(struct bpf_if *bp, struct mbuf *m, u_int direction)
968{ 981{
969 struct ether_vlan_header evl; 982 struct ether_vlan_header evl;
970 struct m_hdr mh, md; 983 struct m_hdr mh, md;
971 984
972 KASSERT(bp != NULL); 985 KASSERT(bp != NULL);
973 986
974 if (!vlan_has_tag(m)) { 987 if (!vlan_has_tag(m)) {
975 bpf_mtap3(bp, m, direction); 988 bpf_mtap3(bp, m, direction);
976 return; 989 return;
977 } 990 }
978 991
979 memcpy(&evl, mtod(m, char *), ETHER_HDR_LEN); 992 memcpy(&evl, mtod(m, char *), ETHER_HDR_LEN);
980 evl.evl_proto = evl.evl_encap_proto; 993 evl.evl_proto = evl.evl_encap_proto;
981 evl.evl_encap_proto = htons(ETHERTYPE_VLAN); 994 evl.evl_encap_proto = htons(ETHERTYPE_VLAN);
982 evl.evl_tag = htons(vlan_get_tag(m)); 995 evl.evl_tag = htons(vlan_get_tag(m));
983 996
984 md.mh_flags = 0; 997 md.mh_flags = 0;
985 md.mh_data = m->m_data + ETHER_HDR_LEN; 998 md.mh_data = m->m_data + ETHER_HDR_LEN;
986 md.mh_len = m->m_len - ETHER_HDR_LEN; 999 md.mh_len = m->m_len - ETHER_HDR_LEN;
987 md.mh_next = m->m_next; 1000 md.mh_next = m->m_next;
988 1001
989 mh.mh_flags = 0; 1002 mh.mh_flags = 0;
990 mh.mh_data = (char *)&evl; 1003 mh.mh_data = (char *)&evl;
991 mh.mh_len = sizeof(evl); 1004 mh.mh_len = sizeof(evl);
992 mh.mh_next = (struct mbuf *)&md; 1005 mh.mh_next = (struct mbuf *)&md;
993 1006
994 bpf_mtap3(bp, (struct mbuf *)&mh, direction); 1007 bpf_mtap3(bp, (struct mbuf *)&mh, direction);
995} 1008}
996 1009
997/* 1010/*
998 * Convert Ethernet address to printable (loggable) representation. 1011 * Convert Ethernet address to printable (loggable) representation.
999 */ 1012 */
1000char * 1013char *
1001ether_sprintf(const u_char *ap) 1014ether_sprintf(const u_char *ap)
1002{ 1015{
1003 static char etherbuf[3 * ETHER_ADDR_LEN]; 1016 static char etherbuf[3 * ETHER_ADDR_LEN];
1004 return ether_snprintf(etherbuf, sizeof(etherbuf), ap); 1017 return ether_snprintf(etherbuf, sizeof(etherbuf), ap);
1005} 1018}
1006 1019
1007char * 1020char *
1008ether_snprintf(char *buf, size_t len, const u_char *ap) 1021ether_snprintf(char *buf, size_t len, const u_char *ap)
1009{ 1022{
1010 char *cp = buf; 1023 char *cp = buf;
1011 size_t i; 1024 size_t i;
1012 1025
1013 for (i = 0; i < len / 3; i++) { 1026 for (i = 0; i < len / 3; i++) {
1014 *cp++ = hexdigits[*ap >> 4]; 1027 *cp++ = hexdigits[*ap >> 4];
1015 *cp++ = hexdigits[*ap++ & 0xf]; 1028 *cp++ = hexdigits[*ap++ & 0xf];
1016 *cp++ = ':'; 1029 *cp++ = ':';
1017 } 1030 }
1018 *--cp = '\0'; 1031 *--cp = '\0';
1019 return buf; 1032 return buf;
1020} 1033}
1021 1034
1022/* 1035/*
1023 * Perform common duties while attaching to interface list 1036 * Perform common duties while attaching to interface list
1024 */ 1037 */
1025void 1038void
1026ether_ifattach(struct ifnet *ifp, const uint8_t *lla) 1039ether_ifattach(struct ifnet *ifp, const uint8_t *lla)
1027{ 1040{
1028 struct ethercom *ec = (struct ethercom *)ifp; 1041 struct ethercom *ec = (struct ethercom *)ifp;
1029 char xnamebuf[HOOKNAMSIZ]; 1042 char xnamebuf[HOOKNAMSIZ];
1030 1043
1031 ifp->if_type = IFT_ETHER; 1044 ifp->if_type = IFT_ETHER;
1032 ifp->if_hdrlen = ETHER_HDR_LEN; 1045 ifp->if_hdrlen = ETHER_HDR_LEN;
1033 ifp->if_dlt = DLT_EN10MB; 1046 ifp->if_dlt = DLT_EN10MB;
1034 ifp->if_mtu = ETHERMTU; 1047 ifp->if_mtu = ETHERMTU;
1035 ifp->if_output = ether_output; 1048 ifp->if_output = ether_output;
1036 ifp->_if_input = ether_input; 1049 ifp->_if_input = ether_input;
1037 ifp->if_bpf_mtap = ether_bpf_mtap; 1050 ifp->if_bpf_mtap = ether_bpf_mtap;
1038 if (ifp->if_baudrate == 0) 1051 if (ifp->if_baudrate == 0)
1039 ifp->if_baudrate = IF_Mbps(10); /* just a default */ 1052 ifp->if_baudrate = IF_Mbps(10); /* just a default */
1040 1053
1041 if (lla != NULL) 1054 if (lla != NULL)
1042 if_set_sadl(ifp, lla, ETHER_ADDR_LEN, !ETHER_IS_LOCAL(lla)); 1055 if_set_sadl(ifp, lla, ETHER_ADDR_LEN, !ETHER_IS_LOCAL(lla));
1043 1056
1044 LIST_INIT(&ec->ec_multiaddrs); 1057 LIST_INIT(&ec->ec_multiaddrs);
1045 SIMPLEQ_INIT(&ec->ec_vids); 1058 SIMPLEQ_INIT(&ec->ec_vids);
1046 ec->ec_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET); 1059 ec->ec_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1047 ec->ec_flags = 0; 1060 ec->ec_flags = 0;
1048 ifp->if_broadcastaddr = etherbroadcastaddr; 1061 ifp->if_broadcastaddr = etherbroadcastaddr;
1049 bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 1062 bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header));
1050 snprintf(xnamebuf, sizeof(xnamebuf), 1063 snprintf(xnamebuf, sizeof(xnamebuf),
1051 "%s-ether_ifdetachhooks", ifp->if_xname); 1064 "%s-ether_ifdetachhooks", ifp->if_xname);
1052 ec->ec_ifdetach_hooks = simplehook_create(IPL_NET, xnamebuf); 1065 ec->ec_ifdetach_hooks = simplehook_create(IPL_NET, xnamebuf);
1053#ifdef MBUFTRACE 1066#ifdef MBUFTRACE
1054 mowner_init_owner(&ec->ec_tx_mowner, ifp->if_xname, "tx"); 1067 mowner_init_owner(&ec->ec_tx_mowner, ifp->if_xname, "tx");
1055 mowner_init_owner(&ec->ec_rx_mowner, ifp->if_xname, "rx"); 1068 mowner_init_owner(&ec->ec_rx_mowner, ifp->if_xname, "rx");
1056 MOWNER_ATTACH(&ec->ec_tx_mowner); 1069 MOWNER_ATTACH(&ec->ec_tx_mowner);
1057 MOWNER_ATTACH(&ec->ec_rx_mowner); 1070 MOWNER_ATTACH(&ec->ec_rx_mowner);
1058 ifp->if_mowner = &ec->ec_tx_mowner; 1071 ifp->if_mowner = &ec->ec_tx_mowner;
1059#endif 1072#endif
1060} 1073}
1061 1074
1062void 1075void
1063ether_ifdetach(struct ifnet *ifp) 1076ether_ifdetach(struct ifnet *ifp)
1064{ 1077{
1065 struct ethercom *ec = (void *) ifp; 1078 struct ethercom *ec = (void *) ifp;
1066 struct ether_multi *enm; 1079 struct ether_multi *enm;
1067 1080
1068 IFNET_ASSERT_UNLOCKED(ifp); 1081 IFNET_ASSERT_UNLOCKED(ifp);
1069 /* 1082 /*
1070 * Prevent further calls to ioctl (for example turning off 1083 * Prevent further calls to ioctl (for example turning off
1071 * promiscuous mode from the bridge code), which eventually can 1084 * promiscuous mode from the bridge code), which eventually can
1072 * call if_init() which can cause panics because the interface 1085 * call if_init() which can cause panics because the interface
1073 * is in the process of being detached. Return device not configured 1086 * is in the process of being detached. Return device not configured
1074 * instead. 1087 * instead.
1075 */ 1088 */
1076 ifp->if_ioctl = __FPTRCAST(int (*)(struct ifnet *, u_long, void *), 1089 ifp->if_ioctl = __FPTRCAST(int (*)(struct ifnet *, u_long, void *),
1077 enxio); 1090 enxio);
1078 1091
1079 simplehook_dohooks(ec->ec_ifdetach_hooks); 1092 simplehook_dohooks(ec->ec_ifdetach_hooks);
1080 KASSERT(!simplehook_has_hooks(ec->ec_ifdetach_hooks)); 1093 KASSERT(!simplehook_has_hooks(ec->ec_ifdetach_hooks));
1081 simplehook_destroy(ec->ec_ifdetach_hooks); 1094 simplehook_destroy(ec->ec_ifdetach_hooks);
1082 1095
1083 bpf_detach(ifp); 1096 bpf_detach(ifp);
1084 1097
1085 ETHER_LOCK(ec); 1098 ETHER_LOCK(ec);
1086 KASSERT(ec->ec_nvlans == 0); 1099 KASSERT(ec->ec_nvlans == 0);
1087 while ((enm = LIST_FIRST(&ec->ec_multiaddrs)) != NULL) { 1100 while ((enm = LIST_FIRST(&ec->ec_multiaddrs)) != NULL) {
1088 LIST_REMOVE(enm, enm_list); 1101 LIST_REMOVE(enm, enm_list);
1089 kmem_free(enm, sizeof(*enm)); 1102 kmem_free(enm, sizeof(*enm));
1090 ec->ec_multicnt--; 1103 ec->ec_multicnt--;
1091 } 1104 }
1092 ETHER_UNLOCK(ec); 1105 ETHER_UNLOCK(ec);
1093 1106
1094 mutex_obj_free(ec->ec_lock); 1107 mutex_obj_free(ec->ec_lock);
1095 ec->ec_lock = NULL; 1108 ec->ec_lock = NULL;
1096 1109
1097 ifp->if_mowner = NULL; 1110 ifp->if_mowner = NULL;
1098 MOWNER_DETACH(&ec->ec_rx_mowner); 1111 MOWNER_DETACH(&ec->ec_rx_mowner);
1099 MOWNER_DETACH(&ec->ec_tx_mowner); 1112 MOWNER_DETACH(&ec->ec_tx_mowner);
1100} 1113}
1101 1114
1102void * 1115void *
1103ether_ifdetachhook_establish(struct ifnet *ifp, 1116ether_ifdetachhook_establish(struct ifnet *ifp,
1104 void (*fn)(void *), void *arg) 1117 void (*fn)(void *), void *arg)
1105{ 1118{
1106 struct ethercom *ec; 1119 struct ethercom *ec;
1107 khook_t *hk; 1120 khook_t *hk;
1108 1121
1109 if (ifp->if_type != IFT_ETHER) 1122 if (ifp->if_type != IFT_ETHER)
1110 return NULL; 1123 return NULL;
1111 1124
1112 ec = (struct ethercom *)ifp; 1125 ec = (struct ethercom *)ifp;
1113 hk = simplehook_establish(ec->ec_ifdetach_hooks, 1126 hk = simplehook_establish(ec->ec_ifdetach_hooks,
1114 fn, arg); 1127 fn, arg);
1115 1128
1116 return (void *)hk; 1129 return (void *)hk;
1117} 1130}
1118 1131
1119void 1132void
1120ether_ifdetachhook_disestablish(struct ifnet *ifp, 1133ether_ifdetachhook_disestablish(struct ifnet *ifp,
1121 void *vhook, kmutex_t *lock) 1134 void *vhook, kmutex_t *lock)
1122{ 1135{
1123 struct ethercom *ec; 1136 struct ethercom *ec;
1124 1137
1125 if (vhook == NULL) 1138 if (vhook == NULL)
1126 return; 1139 return;
1127 1140
1128 ec = (struct ethercom *)ifp; 1141 ec = (struct ethercom *)ifp;
1129 simplehook_disestablish(ec->ec_ifdetach_hooks, vhook, lock); 1142 simplehook_disestablish(ec->ec_ifdetach_hooks, vhook, lock);
1130} 1143}
1131 1144
1132#if 0 1145#if 0
1133/* 1146/*
1134 * This is for reference. We have a table-driven version 1147 * This is for reference. We have a table-driven version
1135 * of the little-endian crc32 generator, which is faster 1148 * of the little-endian crc32 generator, which is faster
1136 * than the double-loop. 1149 * than the double-loop.
1137 */ 1150 */
1138uint32_t 1151uint32_t
1139ether_crc32_le(const uint8_t *buf, size_t len) 1152ether_crc32_le(const uint8_t *buf, size_t len)
1140{ 1153{
1141 uint32_t c, crc, carry; 1154 uint32_t c, crc, carry;
1142 size_t i, j; 1155 size_t i, j;
1143 1156
1144 crc = 0xffffffffU; /* initial value */ 1157 crc = 0xffffffffU; /* initial value */
1145 1158
1146 for (i = 0; i < len; i++) { 1159 for (i = 0; i < len; i++) {
1147 c = buf[i]; 1160 c = buf[i];
1148 for (j = 0; j < 8; j++) { 1161 for (j = 0; j < 8; j++) {
1149 carry = ((crc & 0x01) ? 1 : 0) ^ (c & 0x01); 1162 carry = ((crc & 0x01) ? 1 : 0) ^ (c & 0x01);
1150 crc >>= 1; 1163 crc >>= 1;
1151 c >>= 1; 1164 c >>= 1;
1152 if (carry) 1165 if (carry)
1153 crc = (crc ^ ETHER_CRC_POLY_LE); 1166 crc = (crc ^ ETHER_CRC_POLY_LE);
1154 } 1167 }
1155 } 1168 }
1156 1169
1157 return (crc); 1170 return (crc);
1158} 1171}
1159#else 1172#else
1160uint32_t 1173uint32_t
1161ether_crc32_le(const uint8_t *buf, size_t len) 1174ether_crc32_le(const uint8_t *buf, size_t len)
1162{ 1175{
1163 static const uint32_t crctab[] = { 1176 static const uint32_t crctab[] = {
1164 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 1177 0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
1165 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 1178 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
1166 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 1179 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
1167 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c 1180 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
1168 }; 1181 };
1169 uint32_t crc; 1182 uint32_t crc;
1170 size_t i; 1183 size_t i;
1171 1184
1172 crc = 0xffffffffU; /* initial value */ 1185 crc = 0xffffffffU; /* initial value */
1173 1186
1174 for (i = 0; i < len; i++) { 1187 for (i = 0; i < len; i++) {
1175 crc ^= buf[i]; 1188 crc ^= buf[i];
1176 crc = (crc >> 4) ^ crctab[crc & 0xf]; 1189 crc = (crc >> 4) ^ crctab[crc & 0xf];
1177 crc = (crc >> 4) ^ crctab[crc & 0xf]; 1190 crc = (crc >> 4) ^ crctab[crc & 0xf];
1178 } 1191 }
1179 1192
1180 return (crc); 1193 return (crc);
1181} 1194}
1182#endif 1195#endif
1183 1196
1184uint32_t 1197uint32_t
1185ether_crc32_be(const uint8_t *buf, size_t len) 1198ether_crc32_be(const uint8_t *buf, size_t len)
1186{ 1199{
1187 uint32_t c, crc, carry; 1200 uint32_t c, crc, carry;
1188 size_t i, j; 1201 size_t i, j;
1189 1202
1190 crc = 0xffffffffU; /* initial value */ 1203 crc = 0xffffffffU; /* initial value */
1191 1204
1192 for (i = 0; i < len; i++) { 1205 for (i = 0; i < len; i++) {
1193 c = buf[i]; 1206 c = buf[i];
1194 for (j = 0; j < 8; j++) { 1207 for (j = 0; j < 8; j++) {
1195 carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01); 1208 carry = ((crc & 0x80000000U) ? 1 : 0) ^ (c & 0x01);
1196 crc <<= 1; 1209 crc <<= 1;
1197 c >>= 1; 1210 c >>= 1;
1198 if (carry) 1211 if (carry)
1199 crc = (crc ^ ETHER_CRC_POLY_BE) | carry; 1212 crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
1200 } 1213 }
1201 } 1214 }
1202 1215
1203 return (crc); 1216 return (crc);
1204} 1217}
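
For illustration only: the table-driven ether_crc32_le() above processes one nibble at a time with a 16-entry table for the reflected polynomial 0xedb88320. The standalone userland sketch below mirrors that technique (same table, same per-byte steps); drivers commonly feed a MAC address through this kind of CRC to pick a bit in a multicast hash filter, though which CRC bits are used is device specific. This is not kernel code and the function names are mine.

/* Nibble-at-a-time, little-endian (reflected) CRC-32 sketch. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
	static const uint32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};
	uint32_t crc = 0xffffffffU;	/* initial value, as above */

	for (size_t i = 0; i < len; i++) {
		crc ^= buf[i];
		crc = (crc >> 4) ^ crctab[crc & 0xf];	/* low nibble */
		crc = (crc >> 4) ^ crctab[crc & 0xf];	/* high nibble */
	}
	return crc;
}

int
main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("crc32_le(broadcast) = 0x%08x\n",
	    crc32_le(bcast, sizeof(bcast)));
	return 0;
}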
1205 1218
1206#ifdef INET 1219#ifdef INET
1207const uint8_t ether_ipmulticast_min[ETHER_ADDR_LEN] = 1220const uint8_t ether_ipmulticast_min[ETHER_ADDR_LEN] =
1208 { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; 1221 { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
1209const uint8_t ether_ipmulticast_max[ETHER_ADDR_LEN] = 1222const uint8_t ether_ipmulticast_max[ETHER_ADDR_LEN] =
1210 { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xff }; 1223 { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xff };
1211#endif 1224#endif
1212#ifdef INET6 1225#ifdef INET6
1213const uint8_t ether_ip6multicast_min[ETHER_ADDR_LEN] = 1226const uint8_t ether_ip6multicast_min[ETHER_ADDR_LEN] =
1214 { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; 1227 { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
1215const uint8_t ether_ip6multicast_max[ETHER_ADDR_LEN] = 1228const uint8_t ether_ip6multicast_max[ETHER_ADDR_LEN] =
1216 { 0x33, 0x33, 0xff, 0xff, 0xff, 0xff }; 1229 { 0x33, 0x33, 0xff, 0xff, 0xff, 0xff };
1217#endif 1230#endif
1218 1231
1219/* 1232/*
1220 * ether_aton implementation, not using a static buffer. 1233 * ether_aton implementation, not using a static buffer.
1221 */ 1234 */
1222int 1235int
1223ether_aton_r(u_char *dest, size_t len, const char *str) 1236ether_aton_r(u_char *dest, size_t len, const char *str)
1224{ 1237{
1225 const u_char *cp = (const void *)str; 1238 const u_char *cp = (const void *)str;
1226 u_char *ep; 1239 u_char *ep;
1227 1240
1228#define atox(c) (((c) <= '9') ? ((c) - '0') : ((toupper(c) - 'A') + 10)) 1241#define atox(c) (((c) <= '9') ? ((c) - '0') : ((toupper(c) - 'A') + 10))
1229 1242
1230 if (len < ETHER_ADDR_LEN) 1243 if (len < ETHER_ADDR_LEN)
1231 return ENOSPC; 1244 return ENOSPC;
1232 1245
1233 ep = dest + ETHER_ADDR_LEN; 1246 ep = dest + ETHER_ADDR_LEN;
1234 1247
1235 while (*cp) { 1248 while (*cp) {
1236 if (!isxdigit(*cp)) 1249 if (!isxdigit(*cp))
1237 return EINVAL; 1250 return EINVAL;
1238 1251
1239 *dest = atox(*cp); 1252 *dest = atox(*cp);
1240 cp++; 1253 cp++;
1241 if (isxdigit(*cp)) { 1254 if (isxdigit(*cp)) {
1242 *dest = (*dest << 4) | atox(*cp); 1255 *dest = (*dest << 4) | atox(*cp);
1243 cp++; 1256 cp++;
1244 } 1257 }
1245 dest++; 1258 dest++;
1246 1259
1247 if (dest == ep) 1260 if (dest == ep)
1248 return (*cp == '\0') ? 0 : ENAMETOOLONG; 1261 return (*cp == '\0') ? 0 : ENAMETOOLONG;
1249 1262
1250 switch (*cp) { 1263 switch (*cp) {
1251 case ':': 1264 case ':':
1252 case '-': 1265 case '-':
1253 case '.': 1266 case '.':
1254 cp++; 1267 cp++;
1255 break; 1268 break;
1256 } 1269 }
1257 } 1270 }
1258 return ENOBUFS; 1271 return ENOBUFS;
1259} 1272}
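
As a rough userland illustration of the parsing strategy used by ether_aton_r() above (one or two hex digits per octet, with ':', '-' or '.' accepted as separators), here is a self-contained sketch. Error handling only loosely mirrors the kernel routine, and the helper name is mine, not part of the NetBSD API.

#include <stdio.h>

#define ETHER_ADDR_LEN 6

static int
mac_parse(unsigned char *dst, const char *str)
{
	int i = 0;

	while (*str != '\0' && i < ETHER_ADDR_LEN) {
		unsigned int octet;
		int n;

		/* Read at most two hex digits for this octet. */
		if (sscanf(str, "%2x%n", &octet, &n) != 1)
			return -1;
		dst[i++] = (unsigned char)octet;
		str += n;
		/* Skip an optional separator, as the kernel routine does. */
		if (*str == ':' || *str == '-' || *str == '.')
			str++;
	}
	return (i == ETHER_ADDR_LEN && *str == '\0') ? 0 : -1;
}

int
main(void)
{
	unsigned char mac[ETHER_ADDR_LEN];

	if (mac_parse(mac, "00:1b.21-3c:4d:5e") == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}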
1260 1273
1261/* 1274/*
1262 * Convert a sockaddr into an Ethernet address or range of Ethernet 1275 * Convert a sockaddr into an Ethernet address or range of Ethernet
1263 * addresses. 1276 * addresses.
1264 */ 1277 */
1265int 1278int
1266ether_multiaddr(const struct sockaddr *sa, uint8_t addrlo[ETHER_ADDR_LEN], 1279ether_multiaddr(const struct sockaddr *sa, uint8_t addrlo[ETHER_ADDR_LEN],
1267 uint8_t addrhi[ETHER_ADDR_LEN]) 1280 uint8_t addrhi[ETHER_ADDR_LEN])
1268{ 1281{
1269#ifdef INET 1282#ifdef INET
1270 const struct sockaddr_in *sin; 1283 const struct sockaddr_in *sin;
1271#endif 1284#endif
1272#ifdef INET6 1285#ifdef INET6
1273 const struct sockaddr_in6 *sin6; 1286 const struct sockaddr_in6 *sin6;
1274#endif 1287#endif
1275 1288
1276 switch (sa->sa_family) { 1289 switch (sa->sa_family) {
1277 1290
1278 case AF_UNSPEC: 1291 case AF_UNSPEC:
1279 memcpy(addrlo, sa->sa_data, ETHER_ADDR_LEN); 1292 memcpy(addrlo, sa->sa_data, ETHER_ADDR_LEN);
1280 memcpy(addrhi, addrlo, ETHER_ADDR_LEN); 1293 memcpy(addrhi, addrlo, ETHER_ADDR_LEN);
1281 break; 1294 break;
1282 1295
1283#ifdef INET 1296#ifdef INET
1284 case AF_INET: 1297 case AF_INET:
1285 sin = satocsin(sa); 1298 sin = satocsin(sa);
1286 if (sin->sin_addr.s_addr == INADDR_ANY) { 1299 if (sin->sin_addr.s_addr == INADDR_ANY) {
1287 /* 1300 /*
1288 * An IP address of INADDR_ANY means listen to 1301 * An IP address of INADDR_ANY means listen to
1289 * or stop listening to all of the Ethernet 1302 * or stop listening to all of the Ethernet
1290 * multicast addresses used for IP. 1303 * multicast addresses used for IP.
1291 * (This is for the sake of IP multicast routers.) 1304 * (This is for the sake of IP multicast routers.)
1292 */ 1305 */
1293 memcpy(addrlo, ether_ipmulticast_min, ETHER_ADDR_LEN); 1306 memcpy(addrlo, ether_ipmulticast_min, ETHER_ADDR_LEN);
1294 memcpy(addrhi, ether_ipmulticast_max, ETHER_ADDR_LEN); 1307 memcpy(addrhi, ether_ipmulticast_max, ETHER_ADDR_LEN);
1295 } else { 1308 } else {
1296 ETHER_MAP_IP_MULTICAST(&sin->sin_addr, addrlo); 1309 ETHER_MAP_IP_MULTICAST(&sin->sin_addr, addrlo);
1297 memcpy(addrhi, addrlo, ETHER_ADDR_LEN); 1310 memcpy(addrhi, addrlo, ETHER_ADDR_LEN);
1298 } 1311 }
1299 break; 1312 break;
1300#endif 1313#endif
1301#ifdef INET6 1314#ifdef INET6
1302 case AF_INET6: 1315 case AF_INET6:
1303 sin6 = satocsin6(sa); 1316 sin6 = satocsin6(sa);
1304 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { 1317 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1305 /* 1318 /*
1306 * An IP6 address of 0 means listen to or stop 1319 * An IP6 address of 0 means listen to or stop
1307 * listening to all of the Ethernet multicast 1320 * listening to all of the Ethernet multicast
 1308 * addresses used for IP6. 1321 * addresses used for IP6.
 1308 * addresses used for IP6. 1321 * addresses used for IP6.
1309 * (This is used for multicast routers.) 1322 * (This is used for multicast routers.)
1310 */ 1323 */
1311 memcpy(addrlo, ether_ip6multicast_min, ETHER_ADDR_LEN); 1324 memcpy(addrlo, ether_ip6multicast_min, ETHER_ADDR_LEN);
1312 memcpy(addrhi, ether_ip6multicast_max, ETHER_ADDR_LEN); 1325 memcpy(addrhi, ether_ip6multicast_max, ETHER_ADDR_LEN);
1313 } else { 1326 } else {
1314 ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, addrlo); 1327 ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, addrlo);
1315 memcpy(addrhi, addrlo, ETHER_ADDR_LEN); 1328 memcpy(addrhi, addrlo, ETHER_ADDR_LEN);
1316 } 1329 }
1317 break; 1330 break;
1318#endif 1331#endif
1319 1332
1320 default: 1333 default:
1321 return EAFNOSUPPORT; 1334 return EAFNOSUPPORT;
1322 } 1335 }
1323 return 0; 1336 return 0;
1324} 1337}
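
The AF_INET branch above relies on ETHER_MAP_IP_MULTICAST(), which implements the RFC 1112 mapping: the fixed prefix 01:00:5e followed by the low 23 bits of the IPv4 group address (matching the ether_ipmulticast_min/max range declared earlier). A self-contained sketch of that mapping; the function name is mine for illustration:

#include <stdio.h>
#include <stdint.h>

static void
ip_multicast_to_ether(uint32_t group /* host byte order */, uint8_t mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (group >> 16) & 0x7f;	/* only the low 23 bits survive */
	mac[4] = (group >> 8) & 0xff;
	mac[5] = group & 0xff;
}

int
main(void)
{
	uint8_t mac[6];

	/* 224.0.0.251 (mDNS) maps to 01:00:5e:00:00:fb. */
	ip_multicast_to_ether((224u << 24) | 251u, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	    mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}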
1325 1338
1326/* 1339/*
1327 * Add an Ethernet multicast address or range of addresses to the list for a 1340 * Add an Ethernet multicast address or range of addresses to the list for a
1328 * given interface. 1341 * given interface.
1329 */ 1342 */
1330int 1343int
1331ether_addmulti(const struct sockaddr *sa, struct ethercom *ec) 1344ether_addmulti(const struct sockaddr *sa, struct ethercom *ec)
1332{ 1345{
1333 struct ether_multi *enm, *_enm; 1346 struct ether_multi *enm, *_enm;
1334 u_char addrlo[ETHER_ADDR_LEN]; 1347 u_char addrlo[ETHER_ADDR_LEN];
1335 u_char addrhi[ETHER_ADDR_LEN]; 1348 u_char addrhi[ETHER_ADDR_LEN];
1336 int error = 0; 1349 int error = 0;
1337 1350
1338 /* Allocate out of lock */ 1351 /* Allocate out of lock */
1339 enm = kmem_alloc(sizeof(*enm), KM_SLEEP); 1352 enm = kmem_alloc(sizeof(*enm), KM_SLEEP);
1340 1353
1341 ETHER_LOCK(ec); 1354 ETHER_LOCK(ec);
1342 error = ether_multiaddr(sa, addrlo, addrhi); 1355 error = ether_multiaddr(sa, addrlo, addrhi);
1343 if (error != 0) 1356 if (error != 0)
1344 goto out; 1357 goto out;
1345 1358
1346 /* 1359 /*
1347 * Verify that we have valid Ethernet multicast addresses. 1360 * Verify that we have valid Ethernet multicast addresses.
1348 */ 1361 */
1349 if (!ETHER_IS_MULTICAST(addrlo) || !ETHER_IS_MULTICAST(addrhi)) { 1362 if (!ETHER_IS_MULTICAST(addrlo) || !ETHER_IS_MULTICAST(addrhi)) {
1350 error = EINVAL; 1363 error = EINVAL;
1351 goto out; 1364 goto out;
1352 } 1365 }
1353 1366
1354 /* 1367 /*
1355 * See if the address range is already in the list. 1368 * See if the address range is already in the list.
1356 */ 1369 */
1357 _enm = ether_lookup_multi(addrlo, addrhi, ec); 1370 _enm = ether_lookup_multi(addrlo, addrhi, ec);
1358 if (_enm != NULL) { 1371 if (_enm != NULL) {
1359 /* 1372 /*
1360 * Found it; just increment the reference count. 1373 * Found it; just increment the reference count.
1361 */ 1374 */
1362 ++_enm->enm_refcount; 1375 ++_enm->enm_refcount;
1363 error = 0; 1376 error = 0;
1364 goto out; 1377 goto out;
1365 } 1378 }
1366 1379
1367 /* 1380 /*
1368 * Link a new multicast record into the interface's multicast list. 1381 * Link a new multicast record into the interface's multicast list.
1369 */ 1382 */
1370 memcpy(enm->enm_addrlo, addrlo, ETHER_ADDR_LEN); 1383 memcpy(enm->enm_addrlo, addrlo, ETHER_ADDR_LEN);
1371 memcpy(enm->enm_addrhi, addrhi, ETHER_ADDR_LEN); 1384 memcpy(enm->enm_addrhi, addrhi, ETHER_ADDR_LEN);
1372 enm->enm_refcount = 1; 1385 enm->enm_refcount = 1;
1373 LIST_INSERT_HEAD(&ec->ec_multiaddrs, enm, enm_list); 1386 LIST_INSERT_HEAD(&ec->ec_multiaddrs, enm, enm_list);
1374 ec->ec_multicnt++; 1387 ec->ec_multicnt++;
1375 1388
1376 /* 1389 /*
1377 * Return ENETRESET to inform the driver that the list has changed 1390 * Return ENETRESET to inform the driver that the list has changed
1378 * and its reception filter should be adjusted accordingly. 1391 * and its reception filter should be adjusted accordingly.
1379 */ 1392 */
1380 error = ENETRESET; 1393 error = ENETRESET;
1381 enm = NULL; 1394 enm = NULL;
1382 1395
1383out: 1396out:
1384 ETHER_UNLOCK(ec); 1397 ETHER_UNLOCK(ec);
1385 if (enm != NULL) 1398 if (enm != NULL)
1386 kmem_free(enm, sizeof(*enm)); 1399 kmem_free(enm, sizeof(*enm));
1387 return error; 1400 return error;
1388} 1401}
1389 1402
1390/* 1403/*
1391 * Delete a multicast address record. 1404 * Delete a multicast address record.
1392 */ 1405 */
1393int 1406int
1394ether_delmulti(const struct sockaddr *sa, struct ethercom *ec) 1407ether_delmulti(const struct sockaddr *sa, struct ethercom *ec)
1395{ 1408{
1396 struct ether_multi *enm; 1409 struct ether_multi *enm;
1397 u_char addrlo[ETHER_ADDR_LEN]; 1410 u_char addrlo[ETHER_ADDR_LEN];
1398 u_char addrhi[ETHER_ADDR_LEN]; 1411 u_char addrhi[ETHER_ADDR_LEN];
1399 int error; 1412 int error;
1400 1413
1401 ETHER_LOCK(ec); 1414 ETHER_LOCK(ec);
1402 error = ether_multiaddr(sa, addrlo, addrhi); 1415 error = ether_multiaddr(sa, addrlo, addrhi);
1403 if (error != 0) 1416 if (error != 0)
1404 goto error; 1417 goto error;
1405 1418
1406 /* 1419 /*
1407 * Look up the address in our list. 1420 * Look up the address in our list.
1408 */ 1421 */
1409 enm = ether_lookup_multi(addrlo, addrhi, ec); 1422 enm = ether_lookup_multi(addrlo, addrhi, ec);
1410 if (enm == NULL) { 1423 if (enm == NULL) {
1411 error = ENXIO; 1424 error = ENXIO;
1412 goto error; 1425 goto error;
1413 } 1426 }
1414 if (--enm->enm_refcount != 0) { 1427 if (--enm->enm_refcount != 0) {
1415 /* 1428 /*
1416 * Still some claims to this record. 1429 * Still some claims to this record.
1417 */ 1430 */
1418 error = 0; 1431 error = 0;
1419 goto error; 1432 goto error;
1420 } 1433 }
1421 1434
1422 /* 1435 /*
1423 * No remaining claims to this record; unlink and free it. 1436 * No remaining claims to this record; unlink and free it.
1424 */ 1437 */
1425 LIST_REMOVE(enm, enm_list); 1438 LIST_REMOVE(enm, enm_list);
1426 ec->ec_multicnt--; 1439 ec->ec_multicnt--;
1427 ETHER_UNLOCK(ec); 1440 ETHER_UNLOCK(ec);
1428 kmem_free(enm, sizeof(*enm)); 1441 kmem_free(enm, sizeof(*enm));
1429 1442
1430 /* 1443 /*
1431 * Return ENETRESET to inform the driver that the list has changed 1444 * Return ENETRESET to inform the driver that the list has changed
1432 * and its reception filter should be adjusted accordingly. 1445 * and its reception filter should be adjusted accordingly.
1433 */ 1446 */
1434 return ENETRESET; 1447 return ENETRESET;
1435 1448
1436error: 1449error:
1437 ETHER_UNLOCK(ec); 1450 ETHER_UNLOCK(ec);
1438 return error; 1451 return error;
1439} 1452}
1440 1453
1441void 1454void
1442ether_set_ifflags_cb(struct ethercom *ec, ether_cb_t cb) 1455ether_set_ifflags_cb(struct ethercom *ec, ether_cb_t cb)
1443{ 1456{
1444 ec->ec_ifflags_cb = cb; 1457 ec->ec_ifflags_cb = cb;
1445} 1458}
1446 1459
1447void 1460void
1448ether_set_vlan_cb(struct ethercom *ec, ether_vlancb_t cb) 1461ether_set_vlan_cb(struct ethercom *ec, ether_vlancb_t cb)
1449{ 1462{
1450 1463
1451 ec->ec_vlan_cb = cb; 1464 ec->ec_vlan_cb = cb;
1452} 1465}
1453 1466
1454static int 1467static int
1455ether_ioctl_reinit(struct ethercom *ec) 1468ether_ioctl_reinit(struct ethercom *ec)
1456{ 1469{
1457 struct ifnet *ifp = &ec->ec_if; 1470 struct ifnet *ifp = &ec->ec_if;
1458 int error; 1471 int error;
1459 1472
1460 KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname); 1473 KASSERTMSG(IFNET_LOCKED(ifp), "%s", ifp->if_xname);
1461 1474
1462 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) { 1475 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1463 case IFF_RUNNING: 1476 case IFF_RUNNING:
1464 /* 1477 /*
1465 * If interface is marked down and it is running, 1478 * If interface is marked down and it is running,
1466 * then stop and disable it. 1479 * then stop and disable it.
1467 */ 1480 */
1468 if_stop(ifp, 1); 1481 if_stop(ifp, 1);
1469 break; 1482 break;
1470 case IFF_UP: 1483 case IFF_UP:
1471 /* 1484 /*
1472 * If interface is marked up and it is stopped, then 1485 * If interface is marked up and it is stopped, then
1473 * start it. 1486 * start it.
1474 */ 1487 */
1475 return if_init(ifp); 1488 return if_init(ifp);
1476 case IFF_UP | IFF_RUNNING: 1489 case IFF_UP | IFF_RUNNING:
1477 error = 0; 1490 error = 0;
1478 if (ec->ec_ifflags_cb != NULL) { 1491 if (ec->ec_ifflags_cb != NULL) {
1479 error = (*ec->ec_ifflags_cb)(ec); 1492 error = (*ec->ec_ifflags_cb)(ec);
1480 if (error == ENETRESET) { 1493 if (error == ENETRESET) {
1481 /* 1494 /*
1482 * Reset the interface to pick up 1495 * Reset the interface to pick up
1483 * changes in any other flags that 1496 * changes in any other flags that
1484 * affect the hardware state. 1497 * affect the hardware state.
1485 */ 1498 */
1486 return if_init(ifp); 1499 return if_init(ifp);
1487 } 1500 }
1488 } else 1501 } else
1489 error = if_init(ifp); 1502 error = if_init(ifp);
1490 return error; 1503 return error;
1491 case 0: 1504 case 0:
1492 break; 1505 break;
1493 } 1506 }
1494 1507
1495 return 0; 1508 return 0;
1496} 1509}
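
For quick reference, the four IFF_UP/IFF_RUNNING combinations handled by the switch in ether_ioctl_reinit() above work out as follows (a summary derived from the code shown, not part of the change):

	UP	RUNNING	action
	no	yes	if_stop(ifp, 1)
	yes	no	if_init(ifp)
	yes	yes	ec_ifflags_cb() if registered, with if_init() on ENETRESET; plain if_init() when no callback is set
	no	no	nothing to do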
1497 1510
1498/* 1511/*
1499 * Common ioctls for Ethernet interfaces. Note, we must be 1512 * Common ioctls for Ethernet interfaces. Note, we must be
1500 * called at splnet(). 1513 * called at splnet().
1501 */ 1514 */
1502int 1515int
1503ether_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1516ether_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1504{ 1517{
1505 struct ethercom *ec = (void *)ifp; 1518 struct ethercom *ec = (void *)ifp;
1506 struct eccapreq *eccr; 1519 struct eccapreq *eccr;
1507 struct ifreq *ifr = (struct ifreq *)data; 1520 struct ifreq *ifr = (struct ifreq *)data;
1508 struct if_laddrreq *iflr = data; 1521 struct if_laddrreq *iflr = data;
1509 const struct sockaddr_dl *sdl; 1522 const struct sockaddr_dl *sdl;
1510 static const uint8_t zero[ETHER_ADDR_LEN]; 1523 static const uint8_t zero[ETHER_ADDR_LEN];
1511 int error; 1524 int error;
1512 1525
1513 switch (cmd) { 1526 switch (cmd) {
1514 case SIOCINITIFADDR: 1527 case SIOCINITIFADDR:
1515 { 1528 {
1516 struct ifaddr *ifa = (struct ifaddr *)data; 1529 struct ifaddr *ifa = (struct ifaddr *)data;
1517 if (ifa->ifa_addr->sa_family != AF_LINK 1530 if (ifa->ifa_addr->sa_family != AF_LINK
1518 && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) != 1531 && (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
1519 (IFF_UP | IFF_RUNNING)) { 1532 (IFF_UP | IFF_RUNNING)) {
1520 ifp->if_flags |= IFF_UP; 1533 ifp->if_flags |= IFF_UP;
1521 if ((error = if_init(ifp)) != 0) 1534 if ((error = if_init(ifp)) != 0)
1522 return error; 1535 return error;
1523 } 1536 }
1524#ifdef INET 1537#ifdef INET
1525 if (ifa->ifa_addr->sa_family == AF_INET) 1538 if (ifa->ifa_addr->sa_family == AF_INET)
1526 arp_ifinit(ifp, ifa); 1539 arp_ifinit(ifp, ifa);
1527#endif 1540#endif
1528 return 0; 1541 return 0;
1529 } 1542 }
1530 1543
1531 case SIOCSIFMTU: 1544 case SIOCSIFMTU:
1532 { 1545 {
1533 int maxmtu; 1546 int maxmtu;
1534 1547
1535 if (ec->ec_capabilities & ETHERCAP_JUMBO_MTU) 1548 if (ec->ec_capabilities & ETHERCAP_JUMBO_MTU)
1536 maxmtu = ETHERMTU_JUMBO; 1549 maxmtu = ETHERMTU_JUMBO;
1537 else 1550 else
1538 maxmtu = ETHERMTU; 1551 maxmtu = ETHERMTU;
1539 1552
1540 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu) 1553 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > maxmtu)
1541 return EINVAL; 1554 return EINVAL;
1542 else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET) 1555 else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
1543 return error; 1556 return error;
1544 else if (ifp->if_flags & IFF_UP) { 1557 else if (ifp->if_flags & IFF_UP) {
1545 /* Make sure the device notices the MTU change. */ 1558 /* Make sure the device notices the MTU change. */
1546 return if_init(ifp); 1559 return if_init(ifp);
1547 } else 1560 } else
1548 return 0; 1561 return 0;
1549 } 1562 }
1550 1563
1551 case SIOCSIFFLAGS: 1564 case SIOCSIFFLAGS:
1552 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 1565 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
1553 return error; 1566 return error;
1554 return ether_ioctl_reinit(ec); 1567 return ether_ioctl_reinit(ec);
1555 case SIOCGIFFLAGS: 1568 case SIOCGIFFLAGS:
1556 error = ifioctl_common(ifp, cmd, data); 1569 error = ifioctl_common(ifp, cmd, data);
1557 if (error == 0) { 1570 if (error == 0) {
1558 /* Set IFF_ALLMULTI for backcompat */ 1571 /* Set IFF_ALLMULTI for backcompat */
1559 ifr->ifr_flags |= (ec->ec_flags & ETHER_F_ALLMULTI) ? 1572 ifr->ifr_flags |= (ec->ec_flags & ETHER_F_ALLMULTI) ?
1560 IFF_ALLMULTI : 0; 1573 IFF_ALLMULTI : 0;
1561 } 1574 }
1562 return error; 1575 return error;
1563 case SIOCGETHERCAP: 1576 case SIOCGETHERCAP:
1564 eccr = (struct eccapreq *)data; 1577 eccr = (struct eccapreq *)data;
1565 eccr->eccr_capabilities = ec->ec_capabilities; 1578 eccr->eccr_capabilities = ec->ec_capabilities;
1566 eccr->eccr_capenable = ec->ec_capenable; 1579 eccr->eccr_capenable = ec->ec_capenable;
1567 return 0; 1580 return 0;
1568 case SIOCSETHERCAP: 1581 case SIOCSETHERCAP:
1569 eccr = (struct eccapreq *)data; 1582 eccr = (struct eccapreq *)data;
1570 if ((eccr->eccr_capenable & ~ec->ec_capabilities) != 0) 1583 if ((eccr->eccr_capenable & ~ec->ec_capabilities) != 0)
1571 return EINVAL; 1584 return EINVAL;
1572 if (eccr->eccr_capenable == ec->ec_capenable) 1585 if (eccr->eccr_capenable == ec->ec_capenable)
1573 return 0; 1586 return 0;
1574#if 0 /* notyet */ 1587#if 0 /* notyet */
1575 ec->ec_capenable = (ec->ec_capenable & ETHERCAP_CANTCHANGE) 1588 ec->ec_capenable = (ec->ec_capenable & ETHERCAP_CANTCHANGE)
1576 | (eccr->eccr_capenable & ~ETHERCAP_CANTCHANGE); 1589 | (eccr->eccr_capenable & ~ETHERCAP_CANTCHANGE);
1577#else 1590#else
1578 ec->ec_capenable = eccr->eccr_capenable; 1591 ec->ec_capenable = eccr->eccr_capenable;
1579#endif 1592#endif
1580 return ether_ioctl_reinit(ec); 1593 return ether_ioctl_reinit(ec);
1581 case SIOCADDMULTI: 1594 case SIOCADDMULTI:
1582 return ether_addmulti(ifreq_getaddr(cmd, ifr), ec); 1595 return ether_addmulti(ifreq_getaddr(cmd, ifr), ec);
1583 case SIOCDELMULTI: 1596 case SIOCDELMULTI:
1584 return ether_delmulti(ifreq_getaddr(cmd, ifr), ec); 1597 return ether_delmulti(ifreq_getaddr(cmd, ifr), ec);
1585 case SIOCSIFMEDIA: 1598 case SIOCSIFMEDIA:
1586 case SIOCGIFMEDIA: 1599 case SIOCGIFMEDIA:
1587 if (ec->ec_mii != NULL) 1600 if (ec->ec_mii != NULL)
1588 return ifmedia_ioctl(ifp, ifr, &ec->ec_mii->mii_media, 1601 return ifmedia_ioctl(ifp, ifr, &ec->ec_mii->mii_media,
1589 cmd); 1602 cmd);
1590 else if (ec->ec_ifmedia != NULL) 1603 else if (ec->ec_ifmedia != NULL)
1591 return ifmedia_ioctl(ifp, ifr, ec->ec_ifmedia, cmd); 1604 return ifmedia_ioctl(ifp, ifr, ec->ec_ifmedia, cmd);
1592 else 1605 else
1593 return ENOTTY; 1606 return ENOTTY;
1594 break; 1607 break;
1595 case SIOCALIFADDR: 1608 case SIOCALIFADDR:
1596 sdl = satocsdl(sstocsa(&iflr->addr)); 1609 sdl = satocsdl(sstocsa(&iflr->addr));
1597 if (sdl->sdl_family != AF_LINK) 1610 if (sdl->sdl_family != AF_LINK)
1598 ; 1611 ;
1599 else if (ETHER_IS_MULTICAST(CLLADDR(sdl))) 1612 else if (ETHER_IS_MULTICAST(CLLADDR(sdl)))
1600 return EINVAL; 1613 return EINVAL;
1601 else if (memcmp(zero, CLLADDR(sdl), sizeof(zero)) == 0) 1614 else if (memcmp(zero, CLLADDR(sdl), sizeof(zero)) == 0)
1602 return EINVAL; 1615 return EINVAL;
1603 /*FALLTHROUGH*/ 1616 /*FALLTHROUGH*/
1604 default: 1617 default:
1605 return ifioctl_common(ifp, cmd, data); 1618 return ifioctl_common(ifp, cmd, data);
1606 } 1619 }
1607 return 0; 1620 return 0;
1608} 1621}
1609 1622
1610/* 1623/*
1611 * Enable/disable passing VLAN packets if the parent interface supports it. 1624 * Enable/disable passing VLAN packets if the parent interface supports it.
1612 * Return: 1625 * Return:
1613 * 0: Ok 1626 * 0: Ok
1614 * -1: Parent interface does not support vlans 1627 * -1: Parent interface does not support vlans
1615 * >0: Error 1628 * >0: Error
1616 */ 1629 */
1617int 1630int
1618ether_enable_vlan_mtu(struct ifnet *ifp) 1631ether_enable_vlan_mtu(struct ifnet *ifp)
1619{ 1632{
1620 int error; 1633 int error;
1621 struct ethercom *ec = (void *)ifp; 1634 struct ethercom *ec = (void *)ifp;
1622 1635
1623 /* Parent does not support VLAN's */ 1636 /* Parent does not support VLAN's */
1624 if ((ec->ec_capabilities & ETHERCAP_VLAN_MTU) == 0) 1637 if ((ec->ec_capabilities & ETHERCAP_VLAN_MTU) == 0)
1625 return -1; 1638 return -1;
1626 1639
1627 /* 1640 /*
1628 * Parent supports the VLAN_MTU capability, 1641 * Parent supports the VLAN_MTU capability,
1629 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames; 1642 * i.e. can Tx/Rx larger than ETHER_MAX_LEN frames;
1630 * enable it. 1643 * enable it.
1631 */ 1644 */
1632 ec->ec_capenable |= ETHERCAP_VLAN_MTU; 1645 ec->ec_capenable |= ETHERCAP_VLAN_MTU;
1633 1646
1634 /* Interface is down, defer for later */ 1647 /* Interface is down, defer for later */
1635 if ((ifp->if_flags & IFF_UP) == 0) 1648 if ((ifp->if_flags & IFF_UP) == 0)
1636 return 0; 1649 return 0;
1637 1650
1638 if ((error = if_flags_set(ifp, ifp->if_flags)) == 0) 1651 if ((error = if_flags_set(ifp, ifp->if_flags)) == 0)
1639 return 0; 1652 return 0;
1640 1653
1641 ec->ec_capenable &= ~ETHERCAP_VLAN_MTU; 1654 ec->ec_capenable &= ~ETHERCAP_VLAN_MTU;
1642 return error; 1655 return error;
1643} 1656}
1644 1657
1645int 1658int
1646ether_disable_vlan_mtu(struct ifnet *ifp) 1659ether_disable_vlan_mtu(struct ifnet *ifp)
1647{ 1660{
1648 int error; 1661 int error;
1649 struct ethercom *ec = (void *)ifp; 1662 struct ethercom *ec = (void *)ifp;
1650 1663
1651 /* We still have VLAN's, defer for later */ 1664 /* We still have VLAN's, defer for later */
1652 if (ec->ec_nvlans != 0) 1665 if (ec->ec_nvlans != 0)
1653 return 0; 1666 return 0;
1654 1667
 1655 /* Parent does not support VLAN's, nothing to do. */ 1668 /* Parent does not support VLAN's, nothing to do. */
1656 if ((ec->ec_capenable & ETHERCAP_VLAN_MTU) == 0) 1669 if ((ec->ec_capenable & ETHERCAP_VLAN_MTU) == 0)
1657 return -1; 1670 return -1;
1658 1671
1659 /* 1672 /*
1660 * Disable Tx/Rx of VLAN-sized frames. 1673 * Disable Tx/Rx of VLAN-sized frames.
1661 */ 1674 */
1662 ec->ec_capenable &= ~ETHERCAP_VLAN_MTU; 1675 ec->ec_capenable &= ~ETHERCAP_VLAN_MTU;
1663 1676
1664 /* Interface is down, defer for later */ 1677 /* Interface is down, defer for later */
1665 if ((ifp->if_flags & IFF_UP) == 0) 1678 if ((ifp->if_flags & IFF_UP) == 0)
1666 return 0; 1679 return 0;
1667 1680
1668 if ((error = if_flags_set(ifp, ifp->if_flags)) == 0) 1681 if ((error = if_flags_set(ifp, ifp->if_flags)) == 0)
1669 return 0; 1682 return 0;
1670 1683
1671 ec->ec_capenable |= ETHERCAP_VLAN_MTU; 1684 ec->ec_capenable |= ETHERCAP_VLAN_MTU;
1672 return error; 1685 return error;
1673} 1686}
1674 1687
1675/* 1688/*
1676 * Add and delete VLAN TAG 1689 * Add and delete VLAN TAG
1677 */ 1690 */
1678int 1691int
1679ether_add_vlantag(struct ifnet *ifp, uint16_t vtag, bool *vlanmtu_status) 1692ether_add_vlantag(struct ifnet *ifp, uint16_t vtag, bool *vlanmtu_status)
1680{ 1693{
1681 struct ethercom *ec = (void *)ifp; 1694 struct ethercom *ec = (void *)ifp;
1682 struct vlanid_list *vidp; 1695 struct vlanid_list *vidp;
1683 bool vlanmtu_enabled; 1696 bool vlanmtu_enabled;
1684 uint16_t vid = EVL_VLANOFTAG(vtag); 1697 uint16_t vid = EVL_VLANOFTAG(vtag);
1685 int error; 1698 int error;
1686 1699
1687 vlanmtu_enabled = false; 1700 vlanmtu_enabled = false;
1688 1701
1689 /* Add a vid to the list */ 1702 /* Add a vid to the list */
1690 vidp = kmem_alloc(sizeof(*vidp), KM_SLEEP); 1703 vidp = kmem_alloc(sizeof(*vidp), KM_SLEEP);
1691 vidp->vid = vid; 1704 vidp->vid = vid;
1692 1705
1693 ETHER_LOCK(ec); 1706 ETHER_LOCK(ec);
1694 ec->ec_nvlans++; 1707 ec->ec_nvlans++;
1695 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidp, vid_list); 1708 SIMPLEQ_INSERT_TAIL(&ec->ec_vids, vidp, vid_list);
1696 ETHER_UNLOCK(ec); 1709 ETHER_UNLOCK(ec);
1697 1710
1698 if (ec->ec_nvlans == 1) { 1711 if (ec->ec_nvlans == 1) {
1699 IFNET_LOCK(ifp); 1712 IFNET_LOCK(ifp);
1700 error = ether_enable_vlan_mtu(ifp); 1713 error = ether_enable_vlan_mtu(ifp);
1701 IFNET_UNLOCK(ifp); 1714 IFNET_UNLOCK(ifp);
1702 1715
1703 if (error == 0) { 1716 if (error == 0) {
1704 vlanmtu_enabled = true; 1717 vlanmtu_enabled = true;
1705 } else if (error != -1) { 1718 } else if (error != -1) {
1706 goto fail; 1719 goto fail;
1707 } 1720 }
1708 } 1721 }
1709 1722
1710 if (ec->ec_vlan_cb != NULL) { 1723 if (ec->ec_vlan_cb != NULL) {
1711 error = (*ec->ec_vlan_cb)(ec, vid, true); 1724 error = (*ec->ec_vlan_cb)(ec, vid, true);
1712 if (error != 0) 1725 if (error != 0)
1713 goto fail; 1726 goto fail;
1714 } 1727 }
1715 1728
1716 if (vlanmtu_status != NULL) 1729 if (vlanmtu_status != NULL)
1717 *vlanmtu_status = vlanmtu_enabled; 1730 *vlanmtu_status = vlanmtu_enabled;
1718 1731
1719 return 0; 1732 return 0;
1720fail: 1733fail:
1721 ETHER_LOCK(ec); 1734 ETHER_LOCK(ec);
1722 ec->ec_nvlans--; 1735 ec->ec_nvlans--;
1723 SIMPLEQ_REMOVE(&ec->ec_vids, vidp, vlanid_list, vid_list); 1736 SIMPLEQ_REMOVE(&ec->ec_vids, vidp, vlanid_list, vid_list);
1724 ETHER_UNLOCK(ec); 1737 ETHER_UNLOCK(ec);
1725 1738
1726 if (vlanmtu_enabled) { 1739 if (vlanmtu_enabled) {
1727 IFNET_LOCK(ifp); 1740 IFNET_LOCK(ifp);
1728 (void)ether_disable_vlan_mtu(ifp); 1741 (void)ether_disable_vlan_mtu(ifp);
1729 IFNET_UNLOCK(ifp); 1742 IFNET_UNLOCK(ifp);
1730 } 1743 }
1731 1744
1732 kmem_free(vidp, sizeof(*vidp)); 1745 kmem_free(vidp, sizeof(*vidp));
1733 1746
1734 return error; 1747 return error;
1735} 1748}
1736 1749
1737int 1750int
1738ether_del_vlantag(struct ifnet *ifp, uint16_t vtag) 1751ether_del_vlantag(struct ifnet *ifp, uint16_t vtag)
1739{ 1752{
1740 struct ethercom *ec = (void *)ifp; 1753 struct ethercom *ec = (void *)ifp;
1741 struct vlanid_list *vidp; 1754 struct vlanid_list *vidp;
1742 uint16_t vid = EVL_VLANOFTAG(vtag); 1755 uint16_t vid = EVL_VLANOFTAG(vtag);
1743 1756
1744 ETHER_LOCK(ec); 1757 ETHER_LOCK(ec);
1745 SIMPLEQ_FOREACH(vidp, &ec->ec_vids, vid_list) { 1758 SIMPLEQ_FOREACH(vidp, &ec->ec_vids, vid_list) {
1746 if (vidp->vid == vid) { 1759 if (vidp->vid == vid) {
1747 SIMPLEQ_REMOVE(&ec->ec_vids, vidp, 1760 SIMPLEQ_REMOVE(&ec->ec_vids, vidp,
1748 vlanid_list, vid_list); 1761 vlanid_list, vid_list);
1749 ec->ec_nvlans--; 1762 ec->ec_nvlans--;
1750 break; 1763 break;
1751 } 1764 }
1752 } 1765 }
1753 ETHER_UNLOCK(ec); 1766 ETHER_UNLOCK(ec);
1754 1767
1755 if (vidp == NULL) 1768 if (vidp == NULL)
1756 return ENOENT; 1769 return ENOENT;
1757 1770
1758 if (ec->ec_vlan_cb != NULL) { 1771 if (ec->ec_vlan_cb != NULL) {
1759 (void)(*ec->ec_vlan_cb)(ec, vidp->vid, false); 1772 (void)(*ec->ec_vlan_cb)(ec, vidp->vid, false);
1760 } 1773 }
1761 1774
1762 if (ec->ec_nvlans == 0) { 1775 if (ec->ec_nvlans == 0) {
1763 IFNET_LOCK(ifp); 1776 IFNET_LOCK(ifp);
1764 (void)ether_disable_vlan_mtu(ifp); 1777 (void)ether_disable_vlan_mtu(ifp);
1765 IFNET_UNLOCK(ifp); 1778 IFNET_UNLOCK(ifp);
1766 } 1779 }
1767 1780
1768 kmem_free(vidp, sizeof(*vidp)); 1781 kmem_free(vidp, sizeof(*vidp));
1769 1782
1770 return 0; 1783 return 0;
1771} 1784}
1772 1785
1773int 1786int
1774ether_inject_vlantag(struct mbuf **mp, uint16_t etype, uint16_t tag) 1787ether_inject_vlantag(struct mbuf **mp, uint16_t etype, uint16_t tag)
1775{ 1788{
1776 static const size_t min_data_len = 1789 static const size_t min_data_len =
1777 ETHER_MIN_LEN - ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; 1790 ETHER_MIN_LEN - ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
1778 /* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */ 1791 /* Used to pad ethernet frames with < ETHER_MIN_LEN bytes */
1779 static const char vlan_zero_pad_buff[ETHER_MIN_LEN] = { 0 }; 1792 static const char vlan_zero_pad_buff[ETHER_MIN_LEN] = { 0 };
1780 1793
1781 struct ether_vlan_header *evl; 1794 struct ether_vlan_header *evl;
1782 struct mbuf *m = *mp; 1795 struct mbuf *m = *mp;
1783 int error; 1796 int error;
1784 1797
1785 error = 0; 1798 error = 0;
1786 1799
1787 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_DONTWAIT); 1800 M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_DONTWAIT);
1788 if (m == NULL) { 1801 if (m == NULL) {
1789 error = ENOBUFS; 1802 error = ENOBUFS;
1790 goto out; 1803 goto out;
1791 } 1804 }
1792 1805
1793 if (m->m_len < sizeof(*evl)) { 1806 if (m->m_len < sizeof(*evl)) {
1794 m = m_pullup(m, sizeof(*evl)); 1807 m = m_pullup(m, sizeof(*evl));
1795 if (m == NULL) { 1808 if (m == NULL) {
1796 error = ENOBUFS; 1809 error = ENOBUFS;
1797 goto out; 1810 goto out;
1798 } 1811 }
1799 } 1812 }
1800 1813
1801 /* 1814 /*
1802 * Transform the Ethernet header into an 1815 * Transform the Ethernet header into an
1803 * Ethernet header with 802.1Q encapsulation. 1816 * Ethernet header with 802.1Q encapsulation.
1804 */ 1817 */
1805 memmove(mtod(m, void *), 1818 memmove(mtod(m, void *),
1806 mtod(m, char *) + ETHER_VLAN_ENCAP_LEN, 1819 mtod(m, char *) + ETHER_VLAN_ENCAP_LEN,
1807 sizeof(struct ether_header)); 1820 sizeof(struct ether_header));
1808 evl = mtod(m, struct ether_vlan_header *); 1821 evl = mtod(m, struct ether_vlan_header *);
1809 evl->evl_proto = evl->evl_encap_proto; 1822 evl->evl_proto = evl->evl_encap_proto;
1810 evl->evl_encap_proto = htons(etype); 1823 evl->evl_encap_proto = htons(etype);
1811 evl->evl_tag = htons(tag); 1824 evl->evl_tag = htons(tag);
1812 1825
1813 /* 1826 /*
1814 * To cater for VLAN-aware layer 2 ethernet 1827 * To cater for VLAN-aware layer 2 ethernet
1815 * switches which may need to strip the tag 1828 * switches which may need to strip the tag
1816 * before forwarding the packet, make sure 1829 * before forwarding the packet, make sure
1817 * the packet+tag is at least 68 bytes long. 1830 * the packet+tag is at least 68 bytes long.
1818 * This is necessary because our parent will 1831 * This is necessary because our parent will
1819 * only pad to 64 bytes (ETHER_MIN_LEN) and 1832 * only pad to 64 bytes (ETHER_MIN_LEN) and
1820 * some switches will not pad by themselves 1833 * some switches will not pad by themselves
1821 * after deleting a tag. 1834 * after deleting a tag.
1822 */ 1835 */
1823 if (m->m_pkthdr.len < min_data_len) { 1836 if (m->m_pkthdr.len < min_data_len) {
1824 m_copyback(m, m->m_pkthdr.len, 1837 m_copyback(m, m->m_pkthdr.len,
1825 min_data_len - m->m_pkthdr.len, 1838 min_data_len - m->m_pkthdr.len,
1826 vlan_zero_pad_buff); 1839 vlan_zero_pad_buff);
1827 } 1840 }
1828 1841
1829 m->m_flags &= ~M_VLANTAG; 1842 m->m_flags &= ~M_VLANTAG;
1830 1843
1831out: 1844out:
1832 *mp = m; 1845 *mp = m;
1833 return error; 1846 return error;
1834} 1847}
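
Flat-buffer illustration of the 802.1Q encapsulation performed by ether_inject_vlantag() above: open a 4-byte gap after the destination and source MAC addresses, then write the TPID (0x8100) and the tag control information. This sketch works on a plain byte array rather than mbufs, omits the minimum-length padding, and the names are mine.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETHER_ADDR_LEN		6
#define ETHER_HDR_LEN		14	/* dst + src + ethertype */
#define ETHER_VLAN_ENCAP_LEN	4	/* TPID + TCI */

static size_t
vlan_encap(uint8_t *frame, size_t len, size_t cap, uint16_t tci)
{
	if (len < ETHER_HDR_LEN || len + ETHER_VLAN_ENCAP_LEN > cap)
		return 0;

	/* Shift everything after the two MAC addresses right by 4 bytes. */
	memmove(frame + 2 * ETHER_ADDR_LEN + ETHER_VLAN_ENCAP_LEN,
	    frame + 2 * ETHER_ADDR_LEN, len - 2 * ETHER_ADDR_LEN);

	/* TPID 0x8100, then the TCI, both in network byte order. */
	frame[12] = 0x81;
	frame[13] = 0x00;
	frame[14] = tci >> 8;
	frame[15] = tci & 0xff;

	return len + ETHER_VLAN_ENCAP_LEN;
}

int
main(void)
{
	uint8_t frame[64] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* dst: broadcast */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* src: locally administered */
		0x08, 0x00,				/* IPv4 ethertype */
		/* payload would follow */
	};
	size_t len = vlan_encap(frame, ETHER_HDR_LEN, sizeof(frame), 100);

	printf("tagged length %zu, TPID %02x%02x, TCI %02x%02x\n",
	    len, frame[12], frame[13], frame[14], frame[15]);
	return 0;
}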
1835 1848
1836struct mbuf * 1849struct mbuf *
1837ether_strip_vlantag(struct mbuf *m) 1850ether_strip_vlantag(struct mbuf *m)
1838{ 1851{
1839 struct ether_vlan_header *evl; 1852 struct ether_vlan_header *evl;
1840 1853
1841 if (m->m_len < sizeof(*evl) && 1854 if (m->m_len < sizeof(*evl) &&
1842 (m = m_pullup(m, sizeof(*evl))) == NULL) { 1855 (m = m_pullup(m, sizeof(*evl))) == NULL) {
1843 return NULL; 1856 return NULL;
1844 } 1857 }
1845 1858
1846 if (m_makewritable(&m, 0, sizeof(*evl), M_DONTWAIT)) { 1859 if (m_makewritable(&m, 0, sizeof(*evl), M_DONTWAIT)) {
1847 m_freem(m); 1860 m_freem(m);
1848 return NULL; 1861 return NULL;
1849 } 1862 }
1850 1863
1851 evl = mtod(m, struct ether_vlan_header *); 1864 evl = mtod(m, struct ether_vlan_header *);
1852 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN); 1865 KASSERT(ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN);
1853 1866
1854 vlan_set_tag(m, ntohs(evl->evl_tag)); 1867 vlan_set_tag(m, ntohs(evl->evl_tag));
1855 1868
1856 /* 1869 /*
1857 * Restore the original ethertype. We'll remove 1870 * Restore the original ethertype. We'll remove
1858 * the encapsulation after we've found the vlan 1871 * the encapsulation after we've found the vlan
1859 * interface corresponding to the tag. 1872 * interface corresponding to the tag.
1860 */ 1873 */
1861 evl->evl_encap_proto = evl->evl_proto; 1874 evl->evl_encap_proto = evl->evl_proto;
1862 1875
1863 /* 1876 /*
1864 * Remove the encapsulation header and append tag. 1877 * Remove the encapsulation header and append tag.
1865 * The original header has already been fixed up above. 1878 * The original header has already been fixed up above.
1866 */ 1879 */
1867 vlan_set_tag(m, ntohs(evl->evl_tag)); 1880 vlan_set_tag(m, ntohs(evl->evl_tag));
1868 memmove((char *)evl + ETHER_VLAN_ENCAP_LEN, evl, 1881 memmove((char *)evl + ETHER_VLAN_ENCAP_LEN, evl,
1869 offsetof(struct ether_vlan_header, evl_encap_proto)); 1882 offsetof(struct ether_vlan_header, evl_encap_proto));
1870 m_adj(m, ETHER_VLAN_ENCAP_LEN); 1883 m_adj(m, ETHER_VLAN_ENCAP_LEN);
1871 1884
1872 return m; 1885 return m;
1873} 1886}
1874 1887
1875static int 1888static int
1876ether_multicast_sysctl(SYSCTLFN_ARGS) 1889ether_multicast_sysctl(SYSCTLFN_ARGS)
1877{ 1890{
1878 struct ether_multi *enm; 1891 struct ether_multi *enm;
1879 struct ifnet *ifp; 1892 struct ifnet *ifp;
1880 struct ethercom *ec; 1893 struct ethercom *ec;
1881 int error = 0; 1894 int error = 0;
1882 size_t written; 1895 size_t written;
1883 struct psref psref; 1896 struct psref psref;
1884 int bound; 1897 int bound;
1885 unsigned int multicnt; 1898 unsigned int multicnt;
1886 struct ether_multi_sysctl *addrs; 1899 struct ether_multi_sysctl *addrs;
1887 int i; 1900 int i;

cvs diff -r1.310 -r1.311 src/sys/netinet/if_arp.c (switch to unified diff)

--- src/sys/netinet/if_arp.c 2022/11/15 09:15:43 1.310
+++ src/sys/netinet/if_arp.c 2022/11/15 10:47:39 1.311
@@ -1,1953 +1,1961 @@ @@ -1,1953 +1,1961 @@
1/* $NetBSD: if_arp.c,v 1.310 2022/11/15 09:15:43 roy Exp $ */ 1/* $NetBSD: if_arp.c,v 1.311 2022/11/15 10:47:39 roy Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2000, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2000, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Public Access Networks Corporation ("Panix"). It was developed under 8 * by Public Access Networks Corporation ("Panix"). It was developed under
9 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon. 9 * contract to Panix by Eric Haszlakiewicz and Thor Lancelot Simon.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1982, 1986, 1988, 1993 34 * Copyright (c) 1982, 1986, 1988, 1993
35 * The Regents of the University of California. All rights reserved. 35 * The Regents of the University of California. All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors 45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software 46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission. 47 * without specific prior written permission.
48 * 48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE. 59 * SUCH DAMAGE.
60 * 60 *
61 * @(#)if_ether.c 8.2 (Berkeley) 9/26/94 61 * @(#)if_ether.c 8.2 (Berkeley) 9/26/94
62 */ 62 */
63 63
64/* 64/*
65 * Ethernet address resolution protocol. 65 * Ethernet address resolution protocol.
66 * TODO: 66 * TODO:
67 * add "inuse/lock" bit (or ref. count) along with valid bit 67 * add "inuse/lock" bit (or ref. count) along with valid bit
68 */ 68 */
69 69
70#include <sys/cdefs.h> 70#include <sys/cdefs.h>
71__KERNEL_RCSID(0, "$NetBSD: if_arp.c,v 1.310 2022/11/15 09:15:43 roy Exp $"); 71__KERNEL_RCSID(0, "$NetBSD: if_arp.c,v 1.311 2022/11/15 10:47:39 roy Exp $");
72 72
73#ifdef _KERNEL_OPT 73#ifdef _KERNEL_OPT
74#include "opt_ddb.h" 74#include "opt_ddb.h"
75#include "opt_inet.h" 75#include "opt_inet.h"
76#include "opt_net_mpsafe.h" 76#include "opt_net_mpsafe.h"
77#endif 77#endif
78 78
79#ifdef INET 79#ifdef INET
80 80
81#include "arp.h" 81#include "arp.h"
82#include "bridge.h" 82#include "bridge.h"
83 83
84#include <sys/param.h> 84#include <sys/param.h>
85#include <sys/systm.h> 85#include <sys/systm.h>
86#include <sys/callout.h> 86#include <sys/callout.h>
87#include <sys/kmem.h> 87#include <sys/kmem.h>
88#include <sys/mbuf.h> 88#include <sys/mbuf.h>
89#include <sys/socket.h> 89#include <sys/socket.h>
90#include <sys/time.h> 90#include <sys/time.h>
91#include <sys/timetc.h> 91#include <sys/timetc.h>
92#include <sys/kernel.h> 92#include <sys/kernel.h>
93#include <sys/errno.h> 93#include <sys/errno.h>
94#include <sys/ioctl.h> 94#include <sys/ioctl.h>
95#include <sys/syslog.h> 95#include <sys/syslog.h>
96#include <sys/proc.h> 96#include <sys/proc.h>
97#include <sys/protosw.h> 97#include <sys/protosw.h>
98#include <sys/domain.h> 98#include <sys/domain.h>
99#include <sys/sysctl.h> 99#include <sys/sysctl.h>
100#include <sys/socketvar.h> 100#include <sys/socketvar.h>
101#include <sys/percpu.h> 101#include <sys/percpu.h>
102#include <sys/cprng.h> 102#include <sys/cprng.h>
103#include <sys/kmem.h> 103#include <sys/kmem.h>
104 104
105#include <net/ethertypes.h> 105#include <net/ethertypes.h>
106#include <net/if.h> 106#include <net/if.h>
107#include <net/if_dl.h> 107#include <net/if_dl.h>
108#include <net/if_types.h> 108#include <net/if_types.h>
109#include <net/if_ether.h> 109#include <net/if_ether.h>
110#include <net/if_llatbl.h> 110#include <net/if_llatbl.h>
111#include <net/nd.h> 111#include <net/nd.h>
112#include <net/route.h> 112#include <net/route.h>
113#include <net/net_stats.h> 113#include <net/net_stats.h>
114 114
115#include <netinet/in.h> 115#include <netinet/in.h>
116#include <netinet/in_systm.h> 116#include <netinet/in_systm.h>
117#include <netinet/in_var.h> 117#include <netinet/in_var.h>
118#include <netinet/ip.h> 118#include <netinet/ip.h>
119#include <netinet/if_inarp.h> 119#include <netinet/if_inarp.h>
120 120
121#include "arcnet.h" 121#include "arcnet.h"
122#if NARCNET > 0 122#if NARCNET > 0
123#include <net/if_arc.h> 123#include <net/if_arc.h>
124#endif 124#endif
125#include "carp.h" 125#include "carp.h"
126#if NCARP > 0 126#if NCARP > 0
127#include <netinet/ip_carp.h> 127#include <netinet/ip_carp.h>
128#endif 128#endif
129 129
130/* 130/*
131 * ARP trailer negotiation. Trailer protocol is not IP specific, 131 * ARP trailer negotiation. Trailer protocol is not IP specific,
132 * but ARP request/response use IP addresses. 132 * but ARP request/response use IP addresses.
133 */ 133 */
134#define ETHERTYPE_IPTRAILERS ETHERTYPE_TRAIL 134#define ETHERTYPE_IPTRAILERS ETHERTYPE_TRAIL
135 135
136/* timers */ 136/* timers */
137static int arp_reachable = REACHABLE_TIME; 137static int arp_reachable = REACHABLE_TIME;
138static int arp_retrans = RETRANS_TIMER; 138static int arp_retrans = RETRANS_TIMER;
139static int arp_perform_nud = 1; 139static int arp_perform_nud = 1;
140 140
141static bool arp_nud_enabled(struct ifnet *); 141static bool arp_nud_enabled(struct ifnet *);
142static unsigned int arp_llinfo_reachable(struct ifnet *); 142static unsigned int arp_llinfo_reachable(struct ifnet *);
143static unsigned int arp_llinfo_retrans(struct ifnet *); 143static unsigned int arp_llinfo_retrans(struct ifnet *);
144static union l3addr *arp_llinfo_holdsrc(struct llentry *, union l3addr *); 144static union l3addr *arp_llinfo_holdsrc(struct llentry *, union l3addr *);
145static void arp_llinfo_output(struct ifnet *, const union l3addr *, 145static void arp_llinfo_output(struct ifnet *, const union l3addr *,
146 const union l3addr *, const uint8_t *, const union l3addr *); 146 const union l3addr *, const uint8_t *, const union l3addr *);
147static void arp_llinfo_missed(struct ifnet *, const union l3addr *, 147static void arp_llinfo_missed(struct ifnet *, const union l3addr *,
148 int16_t, struct mbuf *); 148 int16_t, struct mbuf *);
149static void arp_free(struct llentry *, int); 149static void arp_free(struct llentry *, int);
150 150
151static struct nd_domain arp_nd_domain = { 151static struct nd_domain arp_nd_domain = {
152 .nd_family = AF_INET, 152 .nd_family = AF_INET,
 153 .nd_delay = 5, /* delay before first probe: 5 seconds */ 153 .nd_delay = 5, /* delay before first probe: 5 seconds */
154 .nd_mmaxtries = 3, /* maximum broadcast query */ 154 .nd_mmaxtries = 3, /* maximum broadcast query */
155 .nd_umaxtries = 3, /* maximum unicast query */ 155 .nd_umaxtries = 3, /* maximum unicast query */
156 .nd_retransmultiple = BACKOFF_MULTIPLE, 156 .nd_retransmultiple = BACKOFF_MULTIPLE,
157 .nd_maxretrans = MAX_RETRANS_TIMER, 157 .nd_maxretrans = MAX_RETRANS_TIMER,
158 .nd_maxnudhint = 0, /* max # of subsequent upper layer hints */ 158 .nd_maxnudhint = 0, /* max # of subsequent upper layer hints */
159 .nd_maxqueuelen = 1, /* max # of packets in unresolved ND entries */ 159 .nd_maxqueuelen = 1, /* max # of packets in unresolved ND entries */
160 .nd_nud_enabled = arp_nud_enabled, 160 .nd_nud_enabled = arp_nud_enabled,
161 .nd_reachable = arp_llinfo_reachable, 161 .nd_reachable = arp_llinfo_reachable,
162 .nd_retrans = arp_llinfo_retrans, 162 .nd_retrans = arp_llinfo_retrans,
163 .nd_holdsrc = arp_llinfo_holdsrc, 163 .nd_holdsrc = arp_llinfo_holdsrc,
164 .nd_output = arp_llinfo_output, 164 .nd_output = arp_llinfo_output,
165 .nd_missed = arp_llinfo_missed, 165 .nd_missed = arp_llinfo_missed,
166 .nd_free = arp_free, 166 .nd_free = arp_free,
167}; 167};
168 168
169int ip_dad_count = PROBE_NUM; 169int ip_dad_count = PROBE_NUM;
170#ifdef ARP_DEBUG 170#ifdef ARP_DEBUG
171int arp_debug = 1; 171int arp_debug = 1;
172#else 172#else
173int arp_debug = 0; 173int arp_debug = 0;
174#endif 174#endif
175 175
176static void arp_init(void); 176static void arp_init(void);
177static void arp_dad_init(void); 177static void arp_dad_init(void);
178 178
179static void arprequest(struct ifnet *, 179static void arprequest(struct ifnet *,
180 const struct in_addr *, const struct in_addr *, 180 const struct in_addr *, const struct in_addr *,
181 const uint8_t *, const uint8_t *); 181 const uint8_t *, const uint8_t *);
182static void arpannounce1(struct ifaddr *); 182static void arpannounce1(struct ifaddr *);
183static struct sockaddr *arp_setgate(struct rtentry *, struct sockaddr *, 183static struct sockaddr *arp_setgate(struct rtentry *, struct sockaddr *,
184 const struct sockaddr *); 184 const struct sockaddr *);
185static struct llentry *arpcreate(struct ifnet *, 185static struct llentry *arpcreate(struct ifnet *,
186 const struct in_addr *, const struct sockaddr *, int); 186 const struct in_addr *, const struct sockaddr *, int);
187static void in_arpinput(struct mbuf *); 187static void in_arpinput(struct mbuf *);
188static void in_revarpinput(struct mbuf *); 188static void in_revarpinput(struct mbuf *);
189static void revarprequest(struct ifnet *); 189static void revarprequest(struct ifnet *);
190 190
191static void arp_drainstub(void); 191static void arp_drainstub(void);
192 192
193struct dadq; 193struct dadq;
194static void arp_dad_timer(struct dadq *); 194static void arp_dad_timer(struct dadq *);
195static void arp_dad_start(struct ifaddr *); 195static void arp_dad_start(struct ifaddr *);
196static void arp_dad_stop(struct ifaddr *); 196static void arp_dad_stop(struct ifaddr *);
197static void arp_dad_duplicated(struct ifaddr *, const struct sockaddr_dl *); 197static void arp_dad_duplicated(struct ifaddr *, const struct sockaddr_dl *);
198 198
199#define ARP_MAXQLEN 50 199#define ARP_MAXQLEN 50
200pktqueue_t * arp_pktq __read_mostly; 200pktqueue_t * arp_pktq __read_mostly;
201 201
202static int useloopback = 1; /* use loopback interface for local traffic */ 202static int useloopback = 1; /* use loopback interface for local traffic */
203 203
204static percpu_t *arpstat_percpu; 204static percpu_t *arpstat_percpu;
205 205
206#define ARP_STAT_GETREF() _NET_STAT_GETREF(arpstat_percpu) 206#define ARP_STAT_GETREF() _NET_STAT_GETREF(arpstat_percpu)
207#define ARP_STAT_PUTREF() _NET_STAT_PUTREF(arpstat_percpu) 207#define ARP_STAT_PUTREF() _NET_STAT_PUTREF(arpstat_percpu)
208 208
209#define ARP_STATINC(x) _NET_STATINC(arpstat_percpu, x) 209#define ARP_STATINC(x) _NET_STATINC(arpstat_percpu, x)
210#define ARP_STATADD(x, v) _NET_STATADD(arpstat_percpu, x, v) 210#define ARP_STATADD(x, v) _NET_STATADD(arpstat_percpu, x, v)
211 211
212/* revarp state */ 212/* revarp state */
213static struct in_addr myip, srv_ip; 213static struct in_addr myip, srv_ip;
214static int myip_initialized = 0; 214static int myip_initialized = 0;
215static int revarp_in_progress = 0; 215static int revarp_in_progress = 0;
216static struct ifnet *myip_ifp = NULL; 216static struct ifnet *myip_ifp = NULL;
217 217
218static int arp_drainwanted; 218static int arp_drainwanted;
219 219
220static int log_movements = 0; 220static int log_movements = 0;
221static int log_permanent_modify = 1; 221static int log_permanent_modify = 1;
222static int log_wrong_iface = 1; 222static int log_wrong_iface = 1;
223 223
224DOMAIN_DEFINE(arpdomain); /* forward declare and add to link set */ 224DOMAIN_DEFINE(arpdomain); /* forward declare and add to link set */
225 225
226static void 226static void
227arp_fasttimo(void) 227arp_fasttimo(void)
228{ 228{
229 if (arp_drainwanted) { 229 if (arp_drainwanted) {
230 arp_drain(); 230 arp_drain();
231 arp_drainwanted = 0; 231 arp_drainwanted = 0;
232 } 232 }
233} 233}
234 234
235static const struct protosw arpsw[] = { 235static const struct protosw arpsw[] = {
236 { 236 {
237 .pr_type = 0, 237 .pr_type = 0,
238 .pr_domain = &arpdomain, 238 .pr_domain = &arpdomain,
239 .pr_protocol = 0, 239 .pr_protocol = 0,
240 .pr_flags = 0, 240 .pr_flags = 0,
241 .pr_input = 0, 241 .pr_input = 0,
242 .pr_ctlinput = 0, 242 .pr_ctlinput = 0,
243 .pr_ctloutput = 0, 243 .pr_ctloutput = 0,
244 .pr_usrreqs = 0, 244 .pr_usrreqs = 0,
245 .pr_init = arp_init, 245 .pr_init = arp_init,
246 .pr_fasttimo = arp_fasttimo, 246 .pr_fasttimo = arp_fasttimo,
247 .pr_slowtimo = 0, 247 .pr_slowtimo = 0,
248 .pr_drain = arp_drainstub, 248 .pr_drain = arp_drainstub,
249 } 249 }
250}; 250};
251 251
252struct domain arpdomain = { 252struct domain arpdomain = {
253 .dom_family = PF_ARP, 253 .dom_family = PF_ARP,
254 .dom_name = "arp", 254 .dom_name = "arp",
255 .dom_protosw = arpsw, 255 .dom_protosw = arpsw,
256 .dom_protoswNPROTOSW = &arpsw[__arraycount(arpsw)], 256 .dom_protoswNPROTOSW = &arpsw[__arraycount(arpsw)],
257#ifdef MBUFTRACE 257#ifdef MBUFTRACE
258 .dom_mowner = MOWNER_INIT("internet", "arp"), 258 .dom_mowner = MOWNER_INIT("internet", "arp"),
259#endif 259#endif
260}; 260};
261 261
262static void sysctl_net_inet_arp_setup(struct sysctllog **); 262static void sysctl_net_inet_arp_setup(struct sysctllog **);
263 263
264void 264void
265arp_init(void) 265arp_init(void)
266{ 266{
267 267
268 arp_pktq = pktq_create(ARP_MAXQLEN, arpintr, NULL); 268 arp_pktq = pktq_create(ARP_MAXQLEN, arpintr, NULL);
269 KASSERT(arp_pktq != NULL); 269 KASSERT(arp_pktq != NULL);
270 270
271 sysctl_net_inet_arp_setup(NULL); 271 sysctl_net_inet_arp_setup(NULL);
272 arpstat_percpu = percpu_alloc(sizeof(uint64_t) * ARP_NSTATS); 272 arpstat_percpu = percpu_alloc(sizeof(uint64_t) * ARP_NSTATS);
273 273
274#ifdef MBUFTRACE 274#ifdef MBUFTRACE
275 MOWNER_ATTACH(&arpdomain.dom_mowner); 275 MOWNER_ATTACH(&arpdomain.dom_mowner);
276#endif 276#endif
277 277
278 nd_attach_domain(&arp_nd_domain); 278 nd_attach_domain(&arp_nd_domain);
279 arp_dad_init(); 279 arp_dad_init();
280} 280}
281 281
282static void 282static void
283arp_drainstub(void) 283arp_drainstub(void)
284{ 284{
285 arp_drainwanted = 1; 285 arp_drainwanted = 1;
286} 286}
287 287
288/* 288/*
289 * ARP protocol drain routine. Called when memory is in short supply. 289 * ARP protocol drain routine. Called when memory is in short supply.
 290 * Called at splvm(); don't acquire softnet_lock as it can be called from 290 * Called at splvm(); don't acquire softnet_lock as it can be called from
291 * hardware interrupt handlers. 291 * hardware interrupt handlers.
292 */ 292 */
293void 293void
294arp_drain(void) 294arp_drain(void)
295{ 295{
296 296
297 lltable_drain(AF_INET); 297 lltable_drain(AF_INET);
298} 298}
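
arp_drainstub() and arp_fasttimo() above implement a deferred-drain pattern: the drain hook may fire in hard interrupt context at splvm(), so it only raises a flag, and the periodic fast timer performs the actual lltable_drain() from a safer context. A minimal stand-alone sketch of the same idea (illustrative only, not the kernel code; the names are hypothetical):

#include <stdatomic.h>

/* The interrupt-side hook merely requests work; the timer callback,
 * running in a friendlier context, performs it. */
atomic_int drain_wanted;

void
drain_hook(void)		/* may run in hard interrupt context */
{
	atomic_store(&drain_wanted, 1);
}

void
fast_timer_tick(void)		/* runs periodically outside interrupt context */
{
	if (atomic_exchange(&drain_wanted, 0)) {
		/* the expensive cleanup (e.g. freeing cached entries) goes here */
	}
}
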
299 299
300/* 300/*
301 * We set the gateway for RTF_CLONING routes to a "prototype" 301 * We set the gateway for RTF_CLONING routes to a "prototype"
302 * link-layer sockaddr whose interface type (if_type) and interface 302 * link-layer sockaddr whose interface type (if_type) and interface
303 * index (if_index) fields are prepared. 303 * index (if_index) fields are prepared.
304 */ 304 */
305static struct sockaddr * 305static struct sockaddr *
306arp_setgate(struct rtentry *rt, struct sockaddr *gate, 306arp_setgate(struct rtentry *rt, struct sockaddr *gate,
307 const struct sockaddr *netmask) 307 const struct sockaddr *netmask)
308{ 308{
309 const struct ifnet *ifp = rt->rt_ifp; 309 const struct ifnet *ifp = rt->rt_ifp;
310 uint8_t namelen = strlen(ifp->if_xname); 310 uint8_t namelen = strlen(ifp->if_xname);
311 uint8_t addrlen = ifp->if_addrlen; 311 uint8_t addrlen = ifp->if_addrlen;
312 312
313 /* 313 /*
 314 * XXX: If this is a manually added route to an interface, 314 * XXX: If this is a manually added route to an interface,
 315 * such as an older version of routed or gated might provide, 315 * such as an older version of routed or gated might provide,
 316 * restore the cloning bit. 316 * restore the cloning bit.
317 */ 317 */
318 if ((rt->rt_flags & RTF_HOST) == 0 && netmask != NULL && 318 if ((rt->rt_flags & RTF_HOST) == 0 && netmask != NULL &&
319 satocsin(netmask)->sin_addr.s_addr != 0xffffffff) 319 satocsin(netmask)->sin_addr.s_addr != 0xffffffff)
320 rt->rt_flags |= RTF_CONNECTED; 320 rt->rt_flags |= RTF_CONNECTED;
321 321
322 if ((rt->rt_flags & (RTF_CONNECTED | RTF_LOCAL))) { 322 if ((rt->rt_flags & (RTF_CONNECTED | RTF_LOCAL))) {
323 union { 323 union {
324 struct sockaddr sa; 324 struct sockaddr sa;
325 struct sockaddr_storage ss; 325 struct sockaddr_storage ss;
326 struct sockaddr_dl sdl; 326 struct sockaddr_dl sdl;
327 } u; 327 } u;
328 /* 328 /*
329 * Case 1: This route should come from a route to iface. 329 * Case 1: This route should come from a route to iface.
330 */ 330 */
331 sockaddr_dl_init(&u.sdl, sizeof(u.ss), 331 sockaddr_dl_init(&u.sdl, sizeof(u.ss),
332 ifp->if_index, ifp->if_type, NULL, namelen, NULL, addrlen); 332 ifp->if_index, ifp->if_type, NULL, namelen, NULL, addrlen);
333 rt_setgate(rt, &u.sa); 333 rt_setgate(rt, &u.sa);
334 gate = rt->rt_gateway; 334 gate = rt->rt_gateway;
335 } 335 }
336 return gate; 336 return gate;
337} 337}
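
The "prototype" gateway installed above is just an AF_LINK sockaddr_dl carrying the interface index and type, with space reserved for the name and link-layer address; the real MAC is copied in later once the entry resolves. A hedged sketch of such a prototype on a BSD system (assumes <net/if_dl.h>; illustrative only, the kernel uses sockaddr_dl_init() over a full sockaddr_storage):

#include <sys/socket.h>
#include <net/if_dl.h>
#include <string.h>

/* Build a prototype link-layer gateway: family, interface index and
 * type are set; name/address space is reserved but left empty until
 * resolution fills it in. */
void
proto_gateway(struct sockaddr_dl *sdl, unsigned short ifindex,
    unsigned char iftype, unsigned char namelen, unsigned char addrlen)
{
	memset(sdl, 0, sizeof(*sdl));
	sdl->sdl_len = sizeof(*sdl);
	sdl->sdl_family = AF_LINK;
	sdl->sdl_index = ifindex;
	sdl->sdl_type = iftype;
	sdl->sdl_nlen = namelen;	/* interface name would go in sdl_data */
	sdl->sdl_alen = addrlen;	/* link-layer address follows the name */
}
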
338 338
339/* 339/*
340 * Parallel to llc_rtrequest. 340 * Parallel to llc_rtrequest.
341 */ 341 */
342void 342void
343arp_rtrequest(int req, struct rtentry *rt, const struct rt_addrinfo *info) 343arp_rtrequest(int req, struct rtentry *rt, const struct rt_addrinfo *info)
344{ 344{
345 struct sockaddr *gate = rt->rt_gateway; 345 struct sockaddr *gate = rt->rt_gateway;
346 struct in_ifaddr *ia; 346 struct in_ifaddr *ia;
347 struct ifaddr *ifa; 347 struct ifaddr *ifa;
348 struct ifnet *ifp = rt->rt_ifp; 348 struct ifnet *ifp = rt->rt_ifp;
349 int bound; 349 int bound;
350 int s; 350 int s;
351 351
352 if (req == RTM_LLINFO_UPD) { 352 if (req == RTM_LLINFO_UPD) {
353 if ((ifa = info->rti_ifa) != NULL) 353 if ((ifa = info->rti_ifa) != NULL)
354 arpannounce1(ifa); 354 arpannounce1(ifa);
355 return; 355 return;
356 } 356 }
357 357
358 if ((rt->rt_flags & RTF_GATEWAY) != 0) { 358 if ((rt->rt_flags & RTF_GATEWAY) != 0) {
359 if (req != RTM_ADD) 359 if (req != RTM_ADD)
360 return; 360 return;
361 361
362 /* 362 /*
 363 * Link layers with particular link MTU limitations. 363 * Link layers with particular link MTU limitations.
364 */ 364 */
365 switch(ifp->if_type) { 365 switch(ifp->if_type) {
366#if NARCNET > 0 366#if NARCNET > 0
367 case IFT_ARCNET: 367 case IFT_ARCNET:
368 { 368 {
369 int arcipifmtu; 369 int arcipifmtu;
370 370
371 if (ifp->if_flags & IFF_LINK0) 371 if (ifp->if_flags & IFF_LINK0)
372 arcipifmtu = arc_ipmtu; 372 arcipifmtu = arc_ipmtu;
373 else 373 else
374 arcipifmtu = ARCMTU; 374 arcipifmtu = ARCMTU;
375 if (ifp->if_mtu > arcipifmtu) 375 if (ifp->if_mtu > arcipifmtu)
376 rt->rt_rmx.rmx_mtu = arcipifmtu; 376 rt->rt_rmx.rmx_mtu = arcipifmtu;
377 break; 377 break;
378 } 378 }
379#endif 379#endif
380 } 380 }
381 return; 381 return;
382 } 382 }
383 383
384 switch (req) { 384 switch (req) {
385 case RTM_SETGATE: 385 case RTM_SETGATE:
386 gate = arp_setgate(rt, gate, info->rti_info[RTAX_NETMASK]); 386 gate = arp_setgate(rt, gate, info->rti_info[RTAX_NETMASK]);
387 break; 387 break;
388 case RTM_ADD: 388 case RTM_ADD:
389 gate = arp_setgate(rt, gate, info->rti_info[RTAX_NETMASK]); 389 gate = arp_setgate(rt, gate, info->rti_info[RTAX_NETMASK]);
390 if (gate == NULL) { 390 if (gate == NULL) {
391 log(LOG_ERR, "%s: arp_setgate failed\n", __func__); 391 log(LOG_ERR, "%s: arp_setgate failed\n", __func__);
392 break; 392 break;
393 } 393 }
394 if ((rt->rt_flags & RTF_CONNECTED) || 394 if ((rt->rt_flags & RTF_CONNECTED) ||
395 (rt->rt_flags & RTF_LOCAL)) { 395 (rt->rt_flags & RTF_LOCAL)) {
396 /* 396 /*
 397 * Link layers with particular link MTU limitations. 397 * Link layers with particular link MTU limitations.
398 */ 398 */
399 switch (ifp->if_type) { 399 switch (ifp->if_type) {
400#if NARCNET > 0 400#if NARCNET > 0
401 case IFT_ARCNET: 401 case IFT_ARCNET:
402 { 402 {
403 int arcipifmtu; 403 int arcipifmtu;
404 if (ifp->if_flags & IFF_LINK0) 404 if (ifp->if_flags & IFF_LINK0)
405 arcipifmtu = arc_ipmtu; 405 arcipifmtu = arc_ipmtu;
406 else 406 else
407 arcipifmtu = ARCMTU; 407 arcipifmtu = ARCMTU;
408 408
409 if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0 && 409 if ((rt->rt_rmx.rmx_locks & RTV_MTU) == 0 &&
410 (rt->rt_rmx.rmx_mtu > arcipifmtu || 410 (rt->rt_rmx.rmx_mtu > arcipifmtu ||
411 (rt->rt_rmx.rmx_mtu == 0 && 411 (rt->rt_rmx.rmx_mtu == 0 &&
412 ifp->if_mtu > arcipifmtu))) 412 ifp->if_mtu > arcipifmtu)))
413 rt->rt_rmx.rmx_mtu = arcipifmtu; 413 rt->rt_rmx.rmx_mtu = arcipifmtu;
414 break; 414 break;
415 } 415 }
416#endif 416#endif
417 } 417 }
418 if (rt->rt_flags & RTF_CONNECTED) 418 if (rt->rt_flags & RTF_CONNECTED)
419 break; 419 break;
420 } 420 }
421 421
422 bound = curlwp_bind(); 422 bound = curlwp_bind();
423 /* Announce a new entry if requested. */ 423 /* Announce a new entry if requested. */
424 if (rt->rt_flags & RTF_ANNOUNCE) { 424 if (rt->rt_flags & RTF_ANNOUNCE) {
425 struct psref psref; 425 struct psref psref;
426 ia = in_get_ia_on_iface_psref( 426 ia = in_get_ia_on_iface_psref(
427 satocsin(rt_getkey(rt))->sin_addr, ifp, &psref); 427 satocsin(rt_getkey(rt))->sin_addr, ifp, &psref);
428 if (ia != NULL) { 428 if (ia != NULL) {
429 arpannounce(ifp, &ia->ia_ifa, 429 arpannounce(ifp, &ia->ia_ifa,
430 CLLADDR(satocsdl(gate))); 430 CLLADDR(satocsdl(gate)));
431 ia4_release(ia, &psref); 431 ia4_release(ia, &psref);
432 } 432 }
433 } 433 }
434 434
435 if (gate->sa_family != AF_LINK || 435 if (gate->sa_family != AF_LINK ||
436 gate->sa_len < sockaddr_dl_measure(0, ifp->if_addrlen)) { 436 gate->sa_len < sockaddr_dl_measure(0, ifp->if_addrlen)) {
437 log(LOG_DEBUG, "%s: bad gateway value\n", __func__); 437 log(LOG_DEBUG, "%s: bad gateway value\n", __func__);
438 goto out; 438 goto out;
439 } 439 }
440 440
441 satosdl(gate)->sdl_type = ifp->if_type; 441 satosdl(gate)->sdl_type = ifp->if_type;
442 satosdl(gate)->sdl_index = ifp->if_index; 442 satosdl(gate)->sdl_index = ifp->if_index;
443 443
444 /* 444 /*
445 * If the route is for a broadcast address mark it as such. 445 * If the route is for a broadcast address mark it as such.
446 * This way we can avoid an expensive call to in_broadcast() 446 * This way we can avoid an expensive call to in_broadcast()
447 * in ip_output() most of the time (because the route passed 447 * in ip_output() most of the time (because the route passed
448 * to ip_output() is almost always a host route). 448 * to ip_output() is almost always a host route).
449 */ 449 */
450 if (rt->rt_flags & RTF_HOST && 450 if (rt->rt_flags & RTF_HOST &&
451 !(rt->rt_flags & RTF_BROADCAST) && 451 !(rt->rt_flags & RTF_BROADCAST) &&
452 in_broadcast(satocsin(rt_getkey(rt))->sin_addr, rt->rt_ifp)) 452 in_broadcast(satocsin(rt_getkey(rt))->sin_addr, rt->rt_ifp))
453 rt->rt_flags |= RTF_BROADCAST; 453 rt->rt_flags |= RTF_BROADCAST;
454 /* There is little point in resolving the broadcast address */ 454 /* There is little point in resolving the broadcast address */
455 if (rt->rt_flags & RTF_BROADCAST) 455 if (rt->rt_flags & RTF_BROADCAST)
456 goto out; 456 goto out;
457 457
458 /* 458 /*
459 * When called from rt_ifa_addlocal, we cannot depend on that 459 * When called from rt_ifa_addlocal, we cannot depend on that
460 * the address (rt_getkey(rt)) exits in the address list of the 460 * the address (rt_getkey(rt)) exits in the address list of the
461 * interface. So check RTF_LOCAL instead. 461 * interface. So check RTF_LOCAL instead.
462 */ 462 */
463 if (rt->rt_flags & RTF_LOCAL) { 463 if (rt->rt_flags & RTF_LOCAL) {
464 if (useloopback) { 464 if (useloopback) {
465 rt->rt_ifp = lo0ifp; 465 rt->rt_ifp = lo0ifp;
466 rt->rt_rmx.rmx_mtu = 0; 466 rt->rt_rmx.rmx_mtu = 0;
467 } 467 }
468 goto out; 468 goto out;
469 } 469 }
470 470
471 s = pserialize_read_enter(); 471 s = pserialize_read_enter();
472 ia = in_get_ia_on_iface(satocsin(rt_getkey(rt))->sin_addr, ifp); 472 ia = in_get_ia_on_iface(satocsin(rt_getkey(rt))->sin_addr, ifp);
473 if (ia == NULL) { 473 if (ia == NULL) {
474 pserialize_read_exit(s); 474 pserialize_read_exit(s);
475 goto out; 475 goto out;
476 } 476 }
477 477
478 if (useloopback) { 478 if (useloopback) {
479 rt->rt_ifp = lo0ifp; 479 rt->rt_ifp = lo0ifp;
480 rt->rt_rmx.rmx_mtu = 0; 480 rt->rt_rmx.rmx_mtu = 0;
481 } 481 }
482 rt->rt_flags |= RTF_LOCAL; 482 rt->rt_flags |= RTF_LOCAL;
483 483
484 if (ISSET(info->rti_flags, RTF_DONTCHANGEIFA)) { 484 if (ISSET(info->rti_flags, RTF_DONTCHANGEIFA)) {
485 pserialize_read_exit(s); 485 pserialize_read_exit(s);
486 goto out; 486 goto out;
487 } 487 }
488 /* 488 /*
489 * make sure to set rt->rt_ifa to the interface 489 * make sure to set rt->rt_ifa to the interface
490 * address we are using, otherwise we will have trouble 490 * address we are using, otherwise we will have trouble
491 * with source address selection. 491 * with source address selection.
492 */ 492 */
493 ifa = &ia->ia_ifa; 493 ifa = &ia->ia_ifa;
494 if (ifa != rt->rt_ifa) 494 if (ifa != rt->rt_ifa)
495 /* Assume it doesn't sleep */ 495 /* Assume it doesn't sleep */
496 rt_replace_ifa(rt, ifa); 496 rt_replace_ifa(rt, ifa);
497 pserialize_read_exit(s); 497 pserialize_read_exit(s);
498 out: 498 out:
499 curlwp_bindx(bound); 499 curlwp_bindx(bound);
500 break; 500 break;
501 } 501 }
502} 502}
503 503
504/* 504/*
505 * Broadcast an ARP request. Caller specifies: 505 * Broadcast an ARP request. Caller specifies:
506 * - arp header source ip address 506 * - arp header source ip address
507 * - arp header target ip address 507 * - arp header target ip address
508 * - arp header source ethernet address 508 * - arp header source ethernet address
509 */ 509 */
510static void 510static void
511arprequest(struct ifnet *ifp, 511arprequest(struct ifnet *ifp,
512 const struct in_addr *sip, const struct in_addr *tip, 512 const struct in_addr *sip, const struct in_addr *tip,
513 const uint8_t *saddr, const uint8_t *taddr) 513 const uint8_t *saddr, const uint8_t *taddr)
514{ 514{
515 struct mbuf *m; 515 struct mbuf *m;
516 struct arphdr *ah; 516 struct arphdr *ah;
517 struct sockaddr sa; 517 struct sockaddr sa;
518 uint64_t *arps; 518 uint64_t *arps;
519 519
520 KASSERT(sip != NULL); 520 KASSERT(sip != NULL);
521 KASSERT(tip != NULL); 521 KASSERT(tip != NULL);
522 KASSERT(saddr != NULL); 522 KASSERT(saddr != NULL);
523 523
524 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL) 524 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
525 return; 525 return;
526 MCLAIM(m, &arpdomain.dom_mowner); 526 MCLAIM(m, &arpdomain.dom_mowner);
527 switch (ifp->if_type) { 527 switch (ifp->if_type) {
528 case IFT_IEEE1394: 528 case IFT_IEEE1394:
529 m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) + 529 m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) +
530 ifp->if_addrlen; 530 ifp->if_addrlen;
531 break; 531 break;
532 default: 532 default:
533 m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) + 533 m->m_len = sizeof(*ah) + 2 * sizeof(struct in_addr) +
534 2 * ifp->if_addrlen; 534 2 * ifp->if_addrlen;
535 break; 535 break;
536 } 536 }
537 m->m_pkthdr.len = m->m_len; 537 m->m_pkthdr.len = m->m_len;
538 m_align(m, m->m_len); 538 m_align(m, m->m_len);
539 ah = mtod(m, struct arphdr *); 539 ah = mtod(m, struct arphdr *);
540 memset(ah, 0, m->m_len); 540 memset(ah, 0, m->m_len);
541 switch (ifp->if_type) { 541 switch (ifp->if_type) {
542 case IFT_IEEE1394: /* RFC2734 */ 542 case IFT_IEEE1394: /* RFC2734 */
543 /* fill it now for ar_tpa computation */ 543 /* fill it now for ar_tpa computation */
544 ah->ar_hrd = htons(ARPHRD_IEEE1394); 544 ah->ar_hrd = htons(ARPHRD_IEEE1394);
545 break; 545 break;
546 default: 546 default:
547 /* ifp->if_output will fill ar_hrd */ 547 /* ifp->if_output will fill ar_hrd */
548 break; 548 break;
549 } 549 }
550 ah->ar_pro = htons(ETHERTYPE_IP); 550 ah->ar_pro = htons(ETHERTYPE_IP);
551 ah->ar_hln = ifp->if_addrlen; /* hardware address length */ 551 ah->ar_hln = ifp->if_addrlen; /* hardware address length */
552 ah->ar_pln = sizeof(struct in_addr); /* protocol address length */ 552 ah->ar_pln = sizeof(struct in_addr); /* protocol address length */
553 ah->ar_op = htons(ARPOP_REQUEST); 553 ah->ar_op = htons(ARPOP_REQUEST);
554 memcpy(ar_sha(ah), saddr, ah->ar_hln); 554 memcpy(ar_sha(ah), saddr, ah->ar_hln);
555 if (taddr == NULL) 555 if (taddr == NULL)
556 m->m_flags |= M_BCAST; 556 m->m_flags |= M_BCAST;
557 else 557 else
558 memcpy(ar_tha(ah), taddr, ah->ar_hln); 558 memcpy(ar_tha(ah), taddr, ah->ar_hln);
559 memcpy(ar_spa(ah), sip, ah->ar_pln); 559 memcpy(ar_spa(ah), sip, ah->ar_pln);
560 memcpy(ar_tpa(ah), tip, ah->ar_pln); 560 memcpy(ar_tpa(ah), tip, ah->ar_pln);
561 sa.sa_family = AF_ARP; 561 sa.sa_family = AF_ARP;
562 sa.sa_len = 2; 562 sa.sa_len = 2;
563 arps = ARP_STAT_GETREF(); 563 arps = ARP_STAT_GETREF();
564 arps[ARP_STAT_SNDTOTAL]++; 564 arps[ARP_STAT_SNDTOTAL]++;
565 arps[ARP_STAT_SENDREQUEST]++; 565 arps[ARP_STAT_SENDREQUEST]++;
566 ARP_STAT_PUTREF(); 566 ARP_STAT_PUTREF();
567 if_output_lock(ifp, ifp, m, &sa, NULL); 567 if_output_lock(ifp, ifp, m, &sa, NULL);
568} 568}
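
For an Ethernet/IPv4 request the length computed above works out to sizeof(*ah) + 2 * 4 + 2 * 6 = 28 bytes, and ar_sha()/ar_spa()/ar_tha()/ar_tpa() are plain offsets derived from ar_hln and ar_pln. A stand-alone illustration of that arithmetic (the kernel uses the macros from <net/if_arp.h>; the struct below only mirrors the fixed header):

#include <stdint.h>
#include <stdio.h>

/* Fixed part of an ARP header (8 bytes on the wire). */
struct arp_fixed {
	uint16_t ar_hrd;	/* hardware type */
	uint16_t ar_pro;	/* protocol type */
	uint8_t  ar_hln;	/* hardware address length */
	uint8_t  ar_pln;	/* protocol address length */
	uint16_t ar_op;		/* request/reply */
};

int
main(void)
{
	const unsigned hln = 6, pln = 4;		/* Ethernet + IPv4 */
	const unsigned hdr = sizeof(struct arp_fixed);

	/* Offsets of the variable fields: sender hw, sender proto,
	 * target hw, target proto. */
	printf("ar_sha at %u\n", hdr);
	printf("ar_spa at %u\n", hdr + hln);
	printf("ar_tha at %u\n", hdr + hln + pln);
	printf("ar_tpa at %u\n", hdr + 2 * hln + pln);
	printf("total   %u bytes\n", hdr + 2 * (hln + pln));	/* 28 */
	return 0;
}
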
569 569
570void 570void
571arpannounce(struct ifnet *ifp, struct ifaddr *ifa, const uint8_t *enaddr) 571arpannounce(struct ifnet *ifp, struct ifaddr *ifa, const uint8_t *enaddr)
572{ 572{
573 struct in_ifaddr *ia = ifatoia(ifa); 573 struct in_ifaddr *ia = ifatoia(ifa);
574 struct in_addr *ip = &IA_SIN(ifa)->sin_addr; 574 struct in_addr *ip = &IA_SIN(ifa)->sin_addr;
575 575
576 if (ia->ia4_flags & (IN_IFF_NOTREADY | IN_IFF_DETACHED)) { 576 if (ia->ia4_flags & (IN_IFF_NOTREADY | IN_IFF_DETACHED)) {
577 ARPLOG(LOG_DEBUG, "%s not ready\n", ARPLOGADDR(ip)); 577 ARPLOG(LOG_DEBUG, "%s not ready\n", ARPLOGADDR(ip));
578 return; 578 return;
579 } 579 }
580 arprequest(ifp, ip, ip, enaddr, NULL); 580 arprequest(ifp, ip, ip, enaddr, NULL);
581} 581}
582 582
583static void 583static void
584arpannounce1(struct ifaddr *ifa) 584arpannounce1(struct ifaddr *ifa)
585{ 585{
586 586
587 arpannounce(ifa->ifa_ifp, ifa, CLLADDR(ifa->ifa_ifp->if_sadl)); 587 arpannounce(ifa->ifa_ifp, ifa, CLLADDR(ifa->ifa_ifp->if_sadl));
588} 588}
589 589
590/* 590/*
 591 * Resolve an IP address into an Ethernet address. On success, desten is 591 * Resolve an IP address into an Ethernet address. On success, desten is
592 * filled in. If there is no entry in arptab, set one up and broadcast a 592 * filled in. If there is no entry in arptab, set one up and broadcast a
593 * request for the IP address. Hold onto this mbuf and resend it once the 593 * request for the IP address. Hold onto this mbuf and resend it once the
594 * address is finally resolved. 594 * address is finally resolved.
595 * 595 *
596 * A return value of 0 indicates that desten has been filled in and the packet 596 * A return value of 0 indicates that desten has been filled in and the packet
597 * should be sent normally; a return value of EWOULDBLOCK indicates that the 597 * should be sent normally; a return value of EWOULDBLOCK indicates that the
598 * packet has been held pending resolution. Any other value indicates an 598 * packet has been held pending resolution. Any other value indicates an
599 * error. 599 * error.
600 */ 600 */
601int 601int
602arpresolve(struct ifnet *ifp, const struct rtentry *rt, struct mbuf *m, 602arpresolve(struct ifnet *ifp, const struct rtentry *rt, struct mbuf *m,
603 const struct sockaddr *dst, void *desten, size_t destlen) 603 const struct sockaddr *dst, void *desten, size_t destlen)
604{ 604{
605 struct llentry *la; 605 struct llentry *la;
606 const char *create_lookup; 606 const char *create_lookup;
607 int error; 607 int error;
608 608
609#if NCARP > 0 609#if NCARP > 0
610 if (rt != NULL && rt->rt_ifp->if_type == IFT_CARP) 610 if (rt != NULL && rt->rt_ifp->if_type == IFT_CARP)
611 ifp = rt->rt_ifp; 611 ifp = rt->rt_ifp;
612#endif 612#endif
613 613
614 KASSERT(m != NULL); 614 KASSERT(m != NULL);
615 615
616 la = arplookup(ifp, NULL, dst, 0); 616 la = arplookup(ifp, NULL, dst, 0);
617 if (la == NULL) 617 if (la == NULL)
618 goto notfound; 618 goto notfound;
619 619
620 if (la->la_flags & LLE_VALID && la->ln_state == ND_LLINFO_REACHABLE) { 620 if (la->la_flags & LLE_VALID && la->ln_state == ND_LLINFO_REACHABLE) {
621 KASSERT(destlen >= ifp->if_addrlen); 621 KASSERT(destlen >= ifp->if_addrlen);
622 memcpy(desten, &la->ll_addr, ifp->if_addrlen); 622 memcpy(desten, &la->ll_addr, ifp->if_addrlen);
623 LLE_RUNLOCK(la); 623 LLE_RUNLOCK(la);
624 return 0; 624 return 0;
625 } 625 }
626 626
627notfound: 627notfound:
628 if (ifp->if_flags & IFF_NOARP) { 628 if (ifp->if_flags & IFF_NOARP) {
629 if (la != NULL) 629 if (la != NULL)
630 LLE_RUNLOCK(la); 630 LLE_RUNLOCK(la);
631 error = ENOTSUP; 631 error = ENOTSUP;
632 goto bad; 632 goto bad;
633 } 633 }
634 634
635 if (la == NULL) { 635 if (la == NULL) {
636 struct rtentry *_rt; 636 struct rtentry *_rt;
637 637
638 create_lookup = "create"; 638 create_lookup = "create";
639 _rt = rtalloc1(dst, 0); 639 _rt = rtalloc1(dst, 0);
640 IF_AFDATA_WLOCK(ifp); 640 IF_AFDATA_WLOCK(ifp);
641 la = lla_create(LLTABLE(ifp), LLE_EXCLUSIVE, dst, _rt); 641 la = lla_create(LLTABLE(ifp), LLE_EXCLUSIVE, dst, _rt);
642 IF_AFDATA_WUNLOCK(ifp); 642 IF_AFDATA_WUNLOCK(ifp);
643 if (_rt != NULL) 643 if (_rt != NULL)
644 rt_unref(_rt); 644 rt_unref(_rt);
645 if (la == NULL) 645 if (la == NULL)
646 ARP_STATINC(ARP_STAT_ALLOCFAIL); 646 ARP_STATINC(ARP_STAT_ALLOCFAIL);
647 else 647 else
648 la->ln_state = ND_LLINFO_NOSTATE; 648 la->ln_state = ND_LLINFO_NOSTATE;
649 } else if (LLE_TRY_UPGRADE(la) == 0) { 649 } else if (LLE_TRY_UPGRADE(la) == 0) {
650 create_lookup = "lookup"; 650 create_lookup = "lookup";
651 LLE_RUNLOCK(la); 651 LLE_RUNLOCK(la);
652 IF_AFDATA_RLOCK(ifp); 652 IF_AFDATA_RLOCK(ifp);
653 la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst); 653 la = lla_lookup(LLTABLE(ifp), LLE_EXCLUSIVE, dst);
654 IF_AFDATA_RUNLOCK(ifp); 654 IF_AFDATA_RUNLOCK(ifp);
655 } 655 }
656 656
657 error = EINVAL; 657 error = EINVAL;
658 if (la == NULL) { 658 if (la == NULL) {
659 log(LOG_DEBUG, 659 log(LOG_DEBUG,
660 "%s: failed to %s llentry for %s on %s\n", 660 "%s: failed to %s llentry for %s on %s\n",
661 __func__, create_lookup, inet_ntoa(satocsin(dst)->sin_addr), 661 __func__, create_lookup, inet_ntoa(satocsin(dst)->sin_addr),
662 ifp->if_xname); 662 ifp->if_xname);
663 goto bad; 663 goto bad;
664 } 664 }
665 665
666 error = nd_resolve(la, rt, m, desten, destlen); 666 error = nd_resolve(la, rt, m, desten, destlen);
667 return error; 667 return error;
668 668
669bad: 669bad:
670 m_freem(m); 670 m_freem(m);
671 return error; 671 return error;
672} 672}
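
The return contract documented above means a caller must treat EWOULDBLOCK as "queued, not failed" and must not free the mbuf on error, since arpresolve() already did. A stand-alone sketch of the caller-side handling, with a stub resolver standing in for the kernel routine (all names here are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stub resolver: returns EWOULDBLOCK while "unresolved", otherwise
 * fills in a pretend MAC address and returns 0. */
int
stub_resolve(const char *dst, unsigned char *eaddr, size_t len, int resolved)
{
	(void)dst;
	if (!resolved)
		return EWOULDBLOCK;
	memset(eaddr, 0xaa, len);
	return 0;
}

int
send_one(const char *dst, int resolved)
{
	unsigned char edst[6];
	int error = stub_resolve(dst, edst, sizeof(edst), resolved);

	if (error == EWOULDBLOCK)
		return 0;	/* held; it will be sent once ARP answers */
	if (error != 0)
		return error;	/* the packet was already dropped */
	printf("sending, first octet %02x\n", edst[0]);
	return 0;
}

int
main(void)
{
	send_one("192.0.2.1", 0);	/* unresolved: queued */
	send_one("192.0.2.1", 1);	/* resolved: transmitted */
	return 0;
}
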
673 673
674/* 674/*
675 * Common length and type checks are done here, 675 * Common length and type checks are done here,
676 * then the protocol-specific routine is called. 676 * then the protocol-specific routine is called.
677 */ 677 */
678void 678void
679arpintr(void *arg __unused) 679arpintr(void *arg __unused)
680{ 680{
681 struct mbuf *m; 681 struct mbuf *m;
682 struct arphdr *ar; 682 struct arphdr *ar;
683 int s; 683 int s;
684 int arplen; 684 int arplen;
685 struct ifnet *rcvif; 685 struct ifnet *rcvif;
686 bool badhrd; 686 bool badhrd;
687 687
688 SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE(); 688 SOFTNET_KERNEL_LOCK_UNLESS_NET_MPSAFE();
689 while ((m = pktq_dequeue(arp_pktq)) != NULL) { 689 while ((m = pktq_dequeue(arp_pktq)) != NULL) {
690 if ((m->m_flags & M_PKTHDR) == 0) 690 if ((m->m_flags & M_PKTHDR) == 0)
691 panic("arpintr"); 691 panic("arpintr");
692 692
693 MCLAIM(m, &arpdomain.dom_mowner); 693 MCLAIM(m, &arpdomain.dom_mowner);
694 ARP_STATINC(ARP_STAT_RCVTOTAL); 694 ARP_STATINC(ARP_STAT_RCVTOTAL);
695 695
696 if (__predict_false(m->m_len < sizeof(*ar))) { 696 if (__predict_false(m->m_len < sizeof(*ar))) {
697 if ((m = m_pullup(m, sizeof(*ar))) == NULL) 697 if ((m = m_pullup(m, sizeof(*ar))) == NULL)
698 goto badlen; 698 goto badlen;
699 } 699 }
700 ar = mtod(m, struct arphdr *); 700 ar = mtod(m, struct arphdr *);
701 KASSERT(ACCESSIBLE_POINTER(ar, struct arphdr)); 701 KASSERT(ACCESSIBLE_POINTER(ar, struct arphdr));
702 702
703 rcvif = m_get_rcvif(m, &s); 703 rcvif = m_get_rcvif(m, &s);
704 if (__predict_false(rcvif == NULL)) { 704 if (__predict_false(rcvif == NULL)) {
705 ARP_STATINC(ARP_STAT_RCVNOINT); 705 ARP_STATINC(ARP_STAT_RCVNOINT);
706 goto free; 706 goto free;
707 } 707 }
708 708
709 /* 709 /*
710 * We don't want non-IEEE1394 ARP packets on IEEE1394 710 * We don't want non-IEEE1394 ARP packets on IEEE1394
711 * interfaces, and vice versa. Our life depends on that. 711 * interfaces, and vice versa. Our life depends on that.
712 */ 712 */
713 if (ntohs(ar->ar_hrd) == ARPHRD_IEEE1394) 713 if (ntohs(ar->ar_hrd) == ARPHRD_IEEE1394)
714 badhrd = rcvif->if_type != IFT_IEEE1394; 714 badhrd = rcvif->if_type != IFT_IEEE1394;
715 else 715 else
716 badhrd = rcvif->if_type == IFT_IEEE1394; 716 badhrd = rcvif->if_type == IFT_IEEE1394;
717 717
718 m_put_rcvif(rcvif, &s); 718 m_put_rcvif(rcvif, &s);
719 719
720 if (badhrd) { 720 if (badhrd) {
721 ARP_STATINC(ARP_STAT_RCVBADPROTO); 721 ARP_STATINC(ARP_STAT_RCVBADPROTO);
722 goto free; 722 goto free;
723 } 723 }
724 724
725 arplen = sizeof(*ar) + 2 * ar->ar_hln + 2 * ar->ar_pln; 725 arplen = sizeof(*ar) + 2 * ar->ar_hln + 2 * ar->ar_pln;
726 if (__predict_false(m->m_len < arplen)) { 726 if (__predict_false(m->m_len < arplen)) {
727 if ((m = m_pullup(m, arplen)) == NULL) 727 if ((m = m_pullup(m, arplen)) == NULL)
728 goto badlen; 728 goto badlen;
729 ar = mtod(m, struct arphdr *); 729 ar = mtod(m, struct arphdr *);
730 KASSERT(ACCESSIBLE_POINTER(ar, struct arphdr)); 730 KASSERT(ACCESSIBLE_POINTER(ar, struct arphdr));
731 } 731 }
732 732
733 switch (ntohs(ar->ar_pro)) { 733 switch (ntohs(ar->ar_pro)) {
734 case ETHERTYPE_IP: 734 case ETHERTYPE_IP:
735 case ETHERTYPE_IPTRAILERS: 735 case ETHERTYPE_IPTRAILERS:
736 in_arpinput(m); 736 in_arpinput(m);
737 continue; 737 continue;
738 default: 738 default:
739 ARP_STATINC(ARP_STAT_RCVBADPROTO); 739 ARP_STATINC(ARP_STAT_RCVBADPROTO);
740 goto free; 740 goto free;
741 } 741 }
742 742
743badlen: 743badlen:
744 ARP_STATINC(ARP_STAT_RCVBADLEN); 744 ARP_STATINC(ARP_STAT_RCVBADLEN);
745free: 745free:
746 m_freem(m); 746 m_freem(m);
747 } 747 }
748 SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 748 SOFTNET_KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
749 return; /* XXX gcc */ 749 return; /* XXX gcc */
750} 750}
751 751
752/* 752/*
753 * ARP for Internet protocols on 10 Mb/s Ethernet. Algorithm is that given in 753 * ARP for Internet protocols on 10 Mb/s Ethernet. Algorithm is that given in
754 * RFC 826. In addition, a sanity check is performed on the sender protocol 754 * RFC 826. In addition, a sanity check is performed on the sender protocol
755 * address, to catch impersonators. 755 * address, to catch impersonators.
756 * 756 *
757 * We no longer handle negotiations for use of trailer protocol: formerly, ARP 757 * We no longer handle negotiations for use of trailer protocol: formerly, ARP
758 * replied for protocol type ETHERTYPE_TRAIL sent along with IP replies if we 758 * replied for protocol type ETHERTYPE_TRAIL sent along with IP replies if we
759 * wanted trailers sent to us, and also sent them in response to IP replies. 759 * wanted trailers sent to us, and also sent them in response to IP replies.
760 * This allowed either end to announce the desire to receive trailer packets. 760 * This allowed either end to announce the desire to receive trailer packets.
761 * 761 *
762 * We no longer reply to requests for ETHERTYPE_TRAIL protocol either, but 762 * We no longer reply to requests for ETHERTYPE_TRAIL protocol either, but
763 * formerly didn't normally send requests. 763 * formerly didn't normally send requests.
764 */ 764 */
765static void 765static void
766in_arpinput(struct mbuf *m) 766in_arpinput(struct mbuf *m)
767{ 767{
768 struct arphdr *ah; 768 struct arphdr *ah;
769 struct ifnet *ifp, *rcvif = NULL; 769 struct ifnet *ifp, *rcvif = NULL;
770 struct llentry *la = NULL; 770 struct llentry *la = NULL;
771 struct in_ifaddr *ia = NULL; 771 struct in_ifaddr *ia = NULL;
772#if NBRIDGE > 0 772#if NBRIDGE > 0
773 struct in_ifaddr *bridge_ia = NULL; 773 struct in_ifaddr *bridge_ia = NULL;
774#endif 774#endif
775#if NCARP > 0 775#if NCARP > 0
776 uint32_t count = 0, index = 0; 776 uint32_t count = 0, index = 0;
777#endif 777#endif
778 struct sockaddr sa; 778 struct sockaddr sa;
779 struct in_addr isaddr, itaddr, myaddr; 779 struct in_addr isaddr, itaddr, myaddr;
780 int op, rt_cmd, new_state = 0; 780 int op, rt_cmd, new_state = 0;
781 void *tha; 781 void *tha;
782 uint64_t *arps; 782 uint64_t *arps;
783 struct psref psref, psref_ia; 783 struct psref psref, psref_ia;
784 int s; 784 int s;
785 char ipbuf[INET_ADDRSTRLEN]; 785 char ipbuf[INET_ADDRSTRLEN];
786 bool find_source, do_dad; 786 bool find_source, do_dad;
787 787
788 if (__predict_false(m_makewritable(&m, 0, m->m_pkthdr.len, M_DONTWAIT))) 788 if (__predict_false(m_makewritable(&m, 0, m->m_pkthdr.len, M_DONTWAIT)))
789 goto out; 789 goto out;
790 ah = mtod(m, struct arphdr *); 790 ah = mtod(m, struct arphdr *);
791 op = ntohs(ah->ar_op); 791 op = ntohs(ah->ar_op);
792 792
793 if (ah->ar_pln != sizeof(struct in_addr)) 793 if (ah->ar_pln != sizeof(struct in_addr))
794 goto out; 794 goto out;
795 795
796 ifp = if_get_bylla(ar_sha(ah), ah->ar_hln, &psref); 796 ifp = if_get_bylla(ar_sha(ah), ah->ar_hln, &psref);
797 if (ifp) { 797 if (ifp) {
798 /* it's from me, ignore it. */ 798 /* it's from me, ignore it. */
799 if_put(ifp, &psref); 799 if_put(ifp, &psref);
800 ARP_STATINC(ARP_STAT_RCVLOCALSHA); 800 ARP_STATINC(ARP_STAT_RCVLOCALSHA);
801 goto out; 801 goto out;
802 } 802 }
803 803
804 rcvif = ifp = m_get_rcvif_psref(m, &psref); 804 rcvif = ifp = m_get_rcvif_psref(m, &psref);
805 if (__predict_false(rcvif == NULL)) 805 if (__predict_false(rcvif == NULL))
806 goto out; 806 goto out;
807 if (rcvif->if_flags & IFF_NOARP) 807 if (rcvif->if_flags & IFF_NOARP)
808 goto out; 808 goto out;
809 809
810 memcpy(&isaddr, ar_spa(ah), sizeof(isaddr)); 810 memcpy(&isaddr, ar_spa(ah), sizeof(isaddr));
811 memcpy(&itaddr, ar_tpa(ah), sizeof(itaddr)); 811 memcpy(&itaddr, ar_tpa(ah), sizeof(itaddr));
812 812
813 if (m->m_flags & (M_BCAST|M_MCAST)) 813 if (m->m_flags & (M_BCAST|M_MCAST))
814 ARP_STATINC(ARP_STAT_RCVMCAST); 814 ARP_STATINC(ARP_STAT_RCVMCAST);
815 815
816 /* 816 /*
817 * Search for a matching interface address 817 * Search for a matching interface address
818 * or any address on the interface to use 818 * or any address on the interface to use
819 * as a dummy address in the rest of this function. 819 * as a dummy address in the rest of this function.
820 * 820 *
 821 * First try to find the source address for early 821 * First try to find the source address for early
822 * duplicate address detection. 822 * duplicate address detection.
823 */ 823 */
824 if (in_nullhost(isaddr)) { 824 if (in_nullhost(isaddr)) {
825 if (in_nullhost(itaddr)) /* very bogus ARP */ 825 if (in_nullhost(itaddr)) /* very bogus ARP */
826 goto out; 826 goto out;
827 find_source = false; 827 find_source = false;
828 myaddr = itaddr; 828 myaddr = itaddr;
829 } else { 829 } else {
830 find_source = true; 830 find_source = true;
831 myaddr = isaddr; 831 myaddr = isaddr;
832 } 832 }
833 s = pserialize_read_enter(); 833 s = pserialize_read_enter();
834again: 834again:
835 IN_ADDRHASH_READER_FOREACH(ia, myaddr.s_addr) { 835 IN_ADDRHASH_READER_FOREACH(ia, myaddr.s_addr) {
836 if (!in_hosteq(ia->ia_addr.sin_addr, myaddr)) 836 if (!in_hosteq(ia->ia_addr.sin_addr, myaddr))
837 continue; 837 continue;
838#if NCARP > 0 838#if NCARP > 0
839 if (ia->ia_ifp->if_type == IFT_CARP && 839 if (ia->ia_ifp->if_type == IFT_CARP &&
840 ((ia->ia_ifp->if_flags & (IFF_UP|IFF_RUNNING)) == 840 ((ia->ia_ifp->if_flags & (IFF_UP|IFF_RUNNING)) ==
841 (IFF_UP|IFF_RUNNING))) { 841 (IFF_UP|IFF_RUNNING))) {
842 index++; 842 index++;
843 /* XXX: ar_hln? */ 843 /* XXX: ar_hln? */
844 if (ia->ia_ifp == rcvif && (ah->ar_hln >= 6) && 844 if (ia->ia_ifp == rcvif && (ah->ar_hln >= 6) &&
845 carp_iamatch(ia, ar_sha(ah), 845 carp_iamatch(ia, ar_sha(ah),
846 &count, index)) { 846 &count, index)) {
847 break; 847 break;
848 } 848 }
849 } else 849 } else
850#endif 850#endif
851 if (ia->ia_ifp == rcvif) 851 if (ia->ia_ifp == rcvif)
852 break; 852 break;
853#if NBRIDGE > 0 853#if NBRIDGE > 0
854 /* 854 /*
855 * If the interface we received the packet on 855 * If the interface we received the packet on
856 * is part of a bridge, check to see if we need 856 * is part of a bridge, check to see if we need
857 * to "bridge" the packet to ourselves at this 857 * to "bridge" the packet to ourselves at this
858 * layer. Note we still prefer a perfect match, 858 * layer. Note we still prefer a perfect match,
859 * but allow this weaker match if necessary. 859 * but allow this weaker match if necessary.
860 */ 860 */
861 if (rcvif->if_bridge != NULL && 861 if (rcvif->if_bridge != NULL &&
862 rcvif->if_bridge == ia->ia_ifp->if_bridge) 862 rcvif->if_bridge == ia->ia_ifp->if_bridge)
863 bridge_ia = ia; 863 bridge_ia = ia;
864#endif 864#endif
865 } 865 }
866 866
867#if NBRIDGE > 0 867#if NBRIDGE > 0
868 if (ia == NULL && bridge_ia != NULL) { 868 if (ia == NULL && bridge_ia != NULL) {
869 ia = bridge_ia; 869 ia = bridge_ia;
870 m_put_rcvif_psref(rcvif, &psref); 870 m_put_rcvif_psref(rcvif, &psref);
871 rcvif = NULL; 871 rcvif = NULL;
872 /* FIXME */ 872 /* FIXME */
873 ifp = bridge_ia->ia_ifp; 873 ifp = bridge_ia->ia_ifp;
874 } 874 }
875#endif 875#endif
876 876
877 /* If we failed to find the source address then find 877 /* If we failed to find the source address then find
878 * the target address. */ 878 * the target address. */
879 if (ia == NULL && find_source && !in_nullhost(itaddr)) { 879 if (ia == NULL && find_source && !in_nullhost(itaddr)) {
880 find_source = false; 880 find_source = false;
881 myaddr = itaddr; 881 myaddr = itaddr;
882 goto again; 882 goto again;
883 } 883 }
884 884
885 if (ia != NULL) 885 if (ia != NULL)
886 ia4_acquire(ia, &psref_ia); 886 ia4_acquire(ia, &psref_ia);
887 pserialize_read_exit(s); 887 pserialize_read_exit(s);
888 888
889 if (ah->ar_hln != ifp->if_addrlen) { 889 if (ah->ar_hln != ifp->if_addrlen) {
890 ARP_STATINC(ARP_STAT_RCVBADLEN); 890 ARP_STATINC(ARP_STAT_RCVBADLEN);
891 log(LOG_WARNING, 891 log(LOG_WARNING,
892 "arp from %s: addr len: new %d, i/f %d (ignored)\n", 892 "arp from %s: addr len: new %d, i/f %d (ignored)\n",
893 IN_PRINT(ipbuf, &isaddr), ah->ar_hln, ifp->if_addrlen); 893 IN_PRINT(ipbuf, &isaddr), ah->ar_hln, ifp->if_addrlen);
894 goto out; 894 goto out;
895 } 895 }
896 896
897 /* Only do DaD if we have a matching address. */ 897 /* Only do DaD if we have a matching address. */
898 do_dad = (ia != NULL); 898 do_dad = (ia != NULL);
899 899
900 if (ia == NULL) { 900 if (ia == NULL) {
901 ia = in_get_ia_on_iface_psref(isaddr, rcvif, &psref_ia); 901 ia = in_get_ia_on_iface_psref(isaddr, rcvif, &psref_ia);
902 if (ia == NULL) { 902 if (ia == NULL) {
903 ia = in_get_ia_from_ifp_psref(ifp, &psref_ia); 903 ia = in_get_ia_from_ifp_psref(ifp, &psref_ia);
904 if (ia == NULL) { 904 if (ia == NULL) {
905 ARP_STATINC(ARP_STAT_RCVNOINT); 905 ARP_STATINC(ARP_STAT_RCVNOINT);
906 goto out; 906 goto out;
907 } 907 }
908 } 908 }
909 } 909 }
910 910
911 myaddr = ia->ia_addr.sin_addr; 911 myaddr = ia->ia_addr.sin_addr;
912 912
913 /* XXX checks for bridge case? */ 913 /* XXX checks for bridge case? */
914 if (!memcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) { 914 if (!memcmp(ar_sha(ah), ifp->if_broadcastaddr, ifp->if_addrlen)) {
915 ARP_STATINC(ARP_STAT_RCVBCASTSHA); 915 ARP_STATINC(ARP_STAT_RCVBCASTSHA);
916 log(LOG_ERR, 916 log(LOG_ERR,
917 "%s: arp: link address is broadcast for IP address %s!\n", 917 "%s: arp: link address is broadcast for IP address %s!\n",
918 ifp->if_xname, IN_PRINT(ipbuf, &isaddr)); 918 ifp->if_xname, IN_PRINT(ipbuf, &isaddr));
919 goto out; 919 goto out;
920 } 920 }
921 921
922 /* 922 /*
923 * If the source IP address is zero, this is an RFC 5227 ARP probe 923 * If the source IP address is zero, this is an RFC 5227 ARP probe
924 */ 924 */
925 if (in_nullhost(isaddr)) 925 if (in_nullhost(isaddr))
926 ARP_STATINC(ARP_STAT_RCVZEROSPA); 926 ARP_STATINC(ARP_STAT_RCVZEROSPA);
927 else if (in_hosteq(isaddr, myaddr)) 927 else if (in_hosteq(isaddr, myaddr))
928 ARP_STATINC(ARP_STAT_RCVLOCALSPA); 928 ARP_STATINC(ARP_STAT_RCVLOCALSPA);
929 929
930 if (in_nullhost(itaddr)) 930 if (in_nullhost(itaddr))
931 ARP_STATINC(ARP_STAT_RCVZEROTPA); 931 ARP_STATINC(ARP_STAT_RCVZEROTPA);
932 932
933 /* 933 /*
934 * DAD check, RFC 5227. 934 * DAD check, RFC 5227.
  935 * ARP sender hardware address must match the hardware
  936 * address of the interface sending the packet.
935 * Collision on sender address is always a duplicate. 937 * Collision on sender address is always a duplicate.
936 * Collision on target address is only a duplicate 938 * Collision on target address is only a duplicate
937 * IF the sender address is the null host (ie a DAD probe) 939 * IF the sender address is the null host (ie a DAD probe)
938 * AND the message was broadcast 940 * AND the message was broadcast
939 * AND our address is either tentative or duplicated 941 * AND our address is either tentative or duplicated
940 * If it was unicast then it's a valid Unicast Poll from RFC 1122. 942 * If it was unicast then it's a valid Unicast Poll from RFC 1122.
941 */ 943 */
942 if (do_dad && 944 if (do_dad &&
943 (in_hosteq(isaddr, myaddr) || 945 (in_hosteq(isaddr, myaddr) ||
944 (in_nullhost(isaddr) && in_hosteq(itaddr, myaddr) && 946 (in_nullhost(isaddr) && in_hosteq(itaddr, myaddr) &&
945 m->m_flags & M_BCAST && 947 m->m_flags & M_BCAST &&
946 ia->ia4_flags & (IN_IFF_TENTATIVE | IN_IFF_DUPLICATED)))) 948 ia->ia4_flags & (IN_IFF_TENTATIVE | IN_IFF_DUPLICATED))))
947 { 949 {
948 struct sockaddr_dl sdl, *sdlp; 950 struct m_tag *mtag;
949 951
950 sdlp = sockaddr_dl_init(&sdl, sizeof(sdl), 952 mtag = m_tag_find(m, PACKET_TAG_ETHERNET_SRC);
951 ifp->if_index, ifp->if_type, 953 if (mtag == NULL || (ah->ar_hln == ETHER_ADDR_LEN &&
952 NULL, 0, ar_sha(ah), ah->ar_hln); 954 memcmp(mtag + 1, ar_sha(ah), ah->ar_hln) == 0)) {
953 arp_dad_duplicated((struct ifaddr *)ia, sdlp); 955 struct sockaddr_dl sdl, *sdlp;
954 goto out; 956
 957 sdlp = sockaddr_dl_init(&sdl, sizeof(sdl),
 958 ifp->if_index, ifp->if_type,
 959 NULL, 0, ar_sha(ah), ah->ar_hln);
 960 arp_dad_duplicated((struct ifaddr *)ia, sdlp);
 961 goto out;
 962 }
955 } 963 }
956 964
957 /* 965 /*
958 * If the target IP address is zero, ignore the packet. 966 * If the target IP address is zero, ignore the packet.
959 * This prevents the code below from trying to answer 967 * This prevents the code below from trying to answer
960 * when we are using IP address zero (booting). 968 * when we are using IP address zero (booting).
961 */ 969 */
962 if (in_nullhost(itaddr)) 970 if (in_nullhost(itaddr))
963 goto out; 971 goto out;
964 972
965 if (in_nullhost(isaddr)) 973 if (in_nullhost(isaddr))
966 goto reply; 974 goto reply;
967 975
968 if (in_hosteq(itaddr, myaddr)) 976 if (in_hosteq(itaddr, myaddr))
969 la = arpcreate(ifp, &isaddr, NULL, 1); 977 la = arpcreate(ifp, &isaddr, NULL, 1);
970 else 978 else
971 la = arplookup(ifp, &isaddr, NULL, 1); 979 la = arplookup(ifp, &isaddr, NULL, 1);
972 if (la == NULL) 980 if (la == NULL)
973 goto reply; 981 goto reply;
974 982
975 if ((la->la_flags & LLE_VALID) && 983 if ((la->la_flags & LLE_VALID) &&
976 memcmp(ar_sha(ah), &la->ll_addr, ifp->if_addrlen)) 984 memcmp(ar_sha(ah), &la->ll_addr, ifp->if_addrlen))
977 { 985 {
978 char llabuf[LLA_ADDRSTRLEN], *llastr; 986 char llabuf[LLA_ADDRSTRLEN], *llastr;
979 987
980 llastr = lla_snprintf(llabuf, sizeof(llabuf), 988 llastr = lla_snprintf(llabuf, sizeof(llabuf),
981 ar_sha(ah), ah->ar_hln); 989 ar_sha(ah), ah->ar_hln);
982 990
983 if (la->la_flags & LLE_STATIC) { 991 if (la->la_flags & LLE_STATIC) {
984 ARP_STATINC(ARP_STAT_RCVOVERPERM); 992 ARP_STATINC(ARP_STAT_RCVOVERPERM);
985 if (!log_permanent_modify) 993 if (!log_permanent_modify)
986 goto out; 994 goto out;
987 log(LOG_INFO, 995 log(LOG_INFO,
988 "%s tried to overwrite permanent arp info" 996 "%s tried to overwrite permanent arp info"
989 " for %s\n", llastr, IN_PRINT(ipbuf, &isaddr)); 997 " for %s\n", llastr, IN_PRINT(ipbuf, &isaddr));
990 goto out; 998 goto out;
991 } else if (la->lle_tbl->llt_ifp != ifp) { 999 } else if (la->lle_tbl->llt_ifp != ifp) {
992 /* XXX should not happen? */ 1000 /* XXX should not happen? */
993 ARP_STATINC(ARP_STAT_RCVOVERINT); 1001 ARP_STATINC(ARP_STAT_RCVOVERINT);
994 if (!log_wrong_iface) 1002 if (!log_wrong_iface)
995 goto out; 1003 goto out;
996 log(LOG_INFO, 1004 log(LOG_INFO,
997 "%s on %s tried to overwrite " 1005 "%s on %s tried to overwrite "
998 "arp info for %s on %s\n", 1006 "arp info for %s on %s\n",
999 llastr, 1007 llastr,
1000 ifp->if_xname, IN_PRINT(ipbuf, &isaddr), 1008 ifp->if_xname, IN_PRINT(ipbuf, &isaddr),
1001 la->lle_tbl->llt_ifp->if_xname); 1009 la->lle_tbl->llt_ifp->if_xname);
1002 goto out; 1010 goto out;
1003 } else { 1011 } else {
1004 ARP_STATINC(ARP_STAT_RCVOVER); 1012 ARP_STATINC(ARP_STAT_RCVOVER);
1005 if (log_movements) 1013 if (log_movements)
1006 log(LOG_INFO, "arp info overwritten " 1014 log(LOG_INFO, "arp info overwritten "
1007 "for %s by %s\n", 1015 "for %s by %s\n",
1008 IN_PRINT(ipbuf, &isaddr), llastr); 1016 IN_PRINT(ipbuf, &isaddr), llastr);
1009 } 1017 }
1010 rt_cmd = RTM_CHANGE; 1018 rt_cmd = RTM_CHANGE;
1011 new_state = ND_LLINFO_STALE; 1019 new_state = ND_LLINFO_STALE;
1012 } else { 1020 } else {
1013 if (op == ARPOP_REPLY && in_hosteq(itaddr, myaddr)) { 1021 if (op == ARPOP_REPLY && in_hosteq(itaddr, myaddr)) {
1014 /* This was a solicited ARP reply. */ 1022 /* This was a solicited ARP reply. */
1015 la->ln_byhint = 0; 1023 la->ln_byhint = 0;
1016 new_state = ND_LLINFO_REACHABLE; 1024 new_state = ND_LLINFO_REACHABLE;
1017 } 1025 }
1018 rt_cmd = la->la_flags & LLE_VALID ? 0 : RTM_ADD; 1026 rt_cmd = la->la_flags & LLE_VALID ? 0 : RTM_ADD;
1019 } 1027 }
1020 1028
1021 KASSERT(ifp->if_sadl->sdl_alen == ifp->if_addrlen); 1029 KASSERT(ifp->if_sadl->sdl_alen == ifp->if_addrlen);
1022 1030
1023 KASSERT(sizeof(la->ll_addr) >= ifp->if_addrlen); 1031 KASSERT(sizeof(la->ll_addr) >= ifp->if_addrlen);
1024 memcpy(&la->ll_addr, ar_sha(ah), ifp->if_addrlen); 1032 memcpy(&la->ll_addr, ar_sha(ah), ifp->if_addrlen);
1025 la->la_flags |= LLE_VALID; 1033 la->la_flags |= LLE_VALID;
1026 la->ln_asked = 0; 1034 la->ln_asked = 0;
1027 if (new_state != 0) { 1035 if (new_state != 0) {
1028 la->ln_state = new_state; 1036 la->ln_state = new_state;
1029 1037
1030 if (new_state != ND_LLINFO_REACHABLE || 1038 if (new_state != ND_LLINFO_REACHABLE ||
1031 !(la->la_flags & LLE_STATIC)) 1039 !(la->la_flags & LLE_STATIC))
1032 { 1040 {
1033 int timer = ND_TIMER_GC; 1041 int timer = ND_TIMER_GC;
1034 1042
1035 if (new_state == ND_LLINFO_REACHABLE) 1043 if (new_state == ND_LLINFO_REACHABLE)
1036 timer = ND_TIMER_REACHABLE; 1044 timer = ND_TIMER_REACHABLE;
1037 nd_set_timer(la, timer); 1045 nd_set_timer(la, timer);
1038 } 1046 }
1039 } 1047 }
1040 1048
1041 if (rt_cmd != 0) { 1049 if (rt_cmd != 0) {
1042 struct sockaddr_in sin; 1050 struct sockaddr_in sin;
1043 1051
1044 sockaddr_in_init(&sin, &la->r_l3addr.addr4, 0); 1052 sockaddr_in_init(&sin, &la->r_l3addr.addr4, 0);
1045 rt_clonedmsg(rt_cmd, NULL, sintosa(&sin), ar_sha(ah), ifp); 1053 rt_clonedmsg(rt_cmd, NULL, sintosa(&sin), ar_sha(ah), ifp);
1046 } 1054 }
1047 1055
1048 if (la->la_hold != NULL) { 1056 if (la->la_hold != NULL) {
1049 int n = la->la_numheld; 1057 int n = la->la_numheld;
1050 struct mbuf *m_hold, *m_hold_next; 1058 struct mbuf *m_hold, *m_hold_next;
1051 struct sockaddr_in sin; 1059 struct sockaddr_in sin;
1052 1060
1053 sockaddr_in_init(&sin, &la->r_l3addr.addr4, 0); 1061 sockaddr_in_init(&sin, &la->r_l3addr.addr4, 0);
1054 1062
1055 m_hold = la->la_hold; 1063 m_hold = la->la_hold;
1056 la->la_hold = NULL; 1064 la->la_hold = NULL;
1057 la->la_numheld = 0; 1065 la->la_numheld = 0;
1058 /* 1066 /*
1059 * We have to unlock here because if_output would call 1067 * We have to unlock here because if_output would call
1060 * arpresolve 1068 * arpresolve
1061 */ 1069 */
1062 LLE_WUNLOCK(la); 1070 LLE_WUNLOCK(la);
1063 ARP_STATADD(ARP_STAT_DFRSENT, n); 1071 ARP_STATADD(ARP_STAT_DFRSENT, n);
1064 ARP_STATADD(ARP_STAT_DFRTOTAL, n); 1072 ARP_STATADD(ARP_STAT_DFRTOTAL, n);
1065 for (; m_hold != NULL; m_hold = m_hold_next) { 1073 for (; m_hold != NULL; m_hold = m_hold_next) {
1066 m_hold_next = m_hold->m_nextpkt; 1074 m_hold_next = m_hold->m_nextpkt;
1067 m_hold->m_nextpkt = NULL; 1075 m_hold->m_nextpkt = NULL;
1068 if_output_lock(ifp, ifp, m_hold, sintosa(&sin), NULL); 1076 if_output_lock(ifp, ifp, m_hold, sintosa(&sin), NULL);
1069 } 1077 }
1070 } else 1078 } else
1071 LLE_WUNLOCK(la); 1079 LLE_WUNLOCK(la);
1072 la = NULL; 1080 la = NULL;
1073 1081
1074reply: 1082reply:
1075 if (la != NULL) { 1083 if (la != NULL) {
1076 LLE_WUNLOCK(la); 1084 LLE_WUNLOCK(la);
1077 la = NULL; 1085 la = NULL;
1078 } 1086 }
1079 if (op != ARPOP_REQUEST) { 1087 if (op != ARPOP_REQUEST) {
1080 if (op == ARPOP_REPLY) 1088 if (op == ARPOP_REPLY)
1081 ARP_STATINC(ARP_STAT_RCVREPLY); 1089 ARP_STATINC(ARP_STAT_RCVREPLY);
1082 goto out; 1090 goto out;
1083 } 1091 }
1084 ARP_STATINC(ARP_STAT_RCVREQUEST); 1092 ARP_STATINC(ARP_STAT_RCVREQUEST);
1085 if (in_hosteq(itaddr, myaddr)) { 1093 if (in_hosteq(itaddr, myaddr)) {
1086 /* If our address is unusable, don't reply */ 1094 /* If our address is unusable, don't reply */
1087 if (ia->ia4_flags & (IN_IFF_NOTREADY | IN_IFF_DETACHED)) 1095 if (ia->ia4_flags & (IN_IFF_NOTREADY | IN_IFF_DETACHED))
1088 goto out; 1096 goto out;
1089 /* I am the target */ 1097 /* I am the target */
1090 tha = ar_tha(ah); 1098 tha = ar_tha(ah);
1091 if (tha) 1099 if (tha)
1092 memcpy(tha, ar_sha(ah), ah->ar_hln); 1100 memcpy(tha, ar_sha(ah), ah->ar_hln);
1093 memcpy(ar_sha(ah), CLLADDR(ifp->if_sadl), ah->ar_hln); 1101 memcpy(ar_sha(ah), CLLADDR(ifp->if_sadl), ah->ar_hln);
1094 } else { 1102 } else {
1095 /* Proxy ARP */ 1103 /* Proxy ARP */
1096 struct llentry *lle = NULL; 1104 struct llentry *lle = NULL;
1097 struct sockaddr_in sin; 1105 struct sockaddr_in sin;
1098 1106
1099#if NCARP > 0 1107#if NCARP > 0
1100 if (ifp->if_type == IFT_CARP) { 1108 if (ifp->if_type == IFT_CARP) {
1101 struct ifnet *_rcvif = m_get_rcvif(m, &s); 1109 struct ifnet *_rcvif = m_get_rcvif(m, &s);
1102 int iftype = 0; 1110 int iftype = 0;
1103 if (__predict_true(_rcvif != NULL)) 1111 if (__predict_true(_rcvif != NULL))
1104 iftype = _rcvif->if_type; 1112 iftype = _rcvif->if_type;
1105 m_put_rcvif(_rcvif, &s); 1113 m_put_rcvif(_rcvif, &s);
1106 if (iftype != IFT_CARP) 1114 if (iftype != IFT_CARP)
1107 goto out; 1115 goto out;
1108 } 1116 }
1109#endif 1117#endif
1110 1118
1111 tha = ar_tha(ah); 1119 tha = ar_tha(ah);
1112 1120
1113 sockaddr_in_init(&sin, &itaddr, 0); 1121 sockaddr_in_init(&sin, &itaddr, 0);
1114 1122
1115 IF_AFDATA_RLOCK(ifp); 1123 IF_AFDATA_RLOCK(ifp);
1116 lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin); 1124 lle = lla_lookup(LLTABLE(ifp), 0, (struct sockaddr *)&sin);
1117 IF_AFDATA_RUNLOCK(ifp); 1125 IF_AFDATA_RUNLOCK(ifp);
1118 1126
1119 if ((lle != NULL) && (lle->la_flags & LLE_PUB)) { 1127 if ((lle != NULL) && (lle->la_flags & LLE_PUB)) {
1120 if (tha) 1128 if (tha)
1121 memcpy(tha, ar_sha(ah), ah->ar_hln); 1129 memcpy(tha, ar_sha(ah), ah->ar_hln);
1122 memcpy(ar_sha(ah), &lle->ll_addr, ah->ar_hln); 1130 memcpy(ar_sha(ah), &lle->ll_addr, ah->ar_hln);
1123 LLE_RUNLOCK(lle); 1131 LLE_RUNLOCK(lle);
1124 } else { 1132 } else {
1125 if (lle != NULL) 1133 if (lle != NULL)
1126 LLE_RUNLOCK(lle); 1134 LLE_RUNLOCK(lle);
1127 goto out; 1135 goto out;
1128 } 1136 }
1129 } 1137 }
1130 ia4_release(ia, &psref_ia); 1138 ia4_release(ia, &psref_ia);
1131 1139
1132 /* 1140 /*
1133 * XXX XXX: Here we're recycling the mbuf. But the mbuf could have 1141 * XXX XXX: Here we're recycling the mbuf. But the mbuf could have
1134 * other mbufs in its chain, and just overwriting m->m_pkthdr.len 1142 * other mbufs in its chain, and just overwriting m->m_pkthdr.len
1135 * would be wrong in this case (the length becomes smaller than the 1143 * would be wrong in this case (the length becomes smaller than the
1136 * real chain size). 1144 * real chain size).
1137 * 1145 *
1138 * This can theoretically cause bugs in the lower layers (drivers, 1146 * This can theoretically cause bugs in the lower layers (drivers,
1139 * and L2encap), in some corner cases. 1147 * and L2encap), in some corner cases.
1140 */ 1148 */
1141 memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln); 1149 memcpy(ar_tpa(ah), ar_spa(ah), ah->ar_pln);
1142 memcpy(ar_spa(ah), &itaddr, ah->ar_pln); 1150 memcpy(ar_spa(ah), &itaddr, ah->ar_pln);
1143 ah->ar_op = htons(ARPOP_REPLY); 1151 ah->ar_op = htons(ARPOP_REPLY);
1144 ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */ 1152 ah->ar_pro = htons(ETHERTYPE_IP); /* let's be sure! */
1145 switch (ifp->if_type) { 1153 switch (ifp->if_type) {
1146 case IFT_IEEE1394: 1154 case IFT_IEEE1394:
1147 /* ieee1394 arp reply is broadcast */ 1155 /* ieee1394 arp reply is broadcast */
1148 m->m_flags &= ~M_MCAST; 1156 m->m_flags &= ~M_MCAST;
1149 m->m_flags |= M_BCAST; 1157 m->m_flags |= M_BCAST;
1150 m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + ah->ar_hln; 1158 m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + ah->ar_hln;
1151 break; 1159 break;
1152 default: 1160 default:
1153 m->m_flags &= ~(M_BCAST|M_MCAST); /* never reply by broadcast */ 1161 m->m_flags &= ~(M_BCAST|M_MCAST); /* never reply by broadcast */
1154 m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln); 1162 m->m_len = sizeof(*ah) + (2 * ah->ar_pln) + (2 * ah->ar_hln);
1155 break; 1163 break;
1156 } 1164 }
1157 m->m_pkthdr.len = m->m_len; 1165 m->m_pkthdr.len = m->m_len;
1158 sa.sa_family = AF_ARP; 1166 sa.sa_family = AF_ARP;
1159 sa.sa_len = 2; 1167 sa.sa_len = 2;
1160 arps = ARP_STAT_GETREF(); 1168 arps = ARP_STAT_GETREF();
1161 arps[ARP_STAT_SNDTOTAL]++; 1169 arps[ARP_STAT_SNDTOTAL]++;
1162 arps[ARP_STAT_SNDREPLY]++; 1170 arps[ARP_STAT_SNDREPLY]++;
1163 ARP_STAT_PUTREF(); 1171 ARP_STAT_PUTREF();
1164 if_output_lock(ifp, ifp, m, &sa, NULL); 1172 if_output_lock(ifp, ifp, m, &sa, NULL);
1165 if (rcvif != NULL) 1173 if (rcvif != NULL)
1166 m_put_rcvif_psref(rcvif, &psref); 1174 m_put_rcvif_psref(rcvif, &psref);
1167 return; 1175 return;
1168 1176
1169out: 1177out:
1170 if (la != NULL) 1178 if (la != NULL)
1171 LLE_WUNLOCK(la); 1179 LLE_WUNLOCK(la);
1172 if (ia != NULL) 1180 if (ia != NULL)
1173 ia4_release(ia, &psref_ia); 1181 ia4_release(ia, &psref_ia);
1174 if (rcvif != NULL) 1182 if (rcvif != NULL)
1175 m_put_rcvif_psref(rcvif, &psref); 1183 m_put_rcvif_psref(rcvif, &psref);
1176 m_freem(m); 1184 m_freem(m);
1177} 1185}
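
The new guard in the DAD block above only accepts a conflict when the claim is credible: either no Ethernet source address was recorded for the frame (no PACKET_TAG_ETHERNET_SRC tag, so there is nothing to check against) or the ARP sender hardware address equals the recorded Ethernet source, as RFC 5227 section 1.1 requires of a genuine probe. A stand-alone sketch of that predicate (illustrative only, not the kernel code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ETHER_ADDR_LEN 6

/* Accept a duplicate-address claim only when it is credible: no
 * recorded Ethernet source, or the ARP sender hardware address equals
 * the Ethernet source address of the frame. */
bool
dad_claim_credible(const uint8_t *ether_src,	/* NULL if untagged */
    const uint8_t *sha, size_t hln)
{
	if (ether_src == NULL)
		return true;
	return hln == ETHER_ADDR_LEN &&
	    memcmp(ether_src, sha, hln) == 0;
}
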
1178 1186
1179/* 1187/*
1180 * Look up or enter a new address in arptab. 1188 * Look up or enter a new address in arptab.
1181 */ 1189 */
1182struct llentry * 1190struct llentry *
1183arplookup(struct ifnet *ifp, const struct in_addr *addr, 1191arplookup(struct ifnet *ifp, const struct in_addr *addr,
1184 const struct sockaddr *sa, int wlock) 1192 const struct sockaddr *sa, int wlock)
1185{ 1193{
1186 struct sockaddr_in sin; 1194 struct sockaddr_in sin;
1187 struct llentry *la; 1195 struct llentry *la;
1188 int flags = wlock ? LLE_EXCLUSIVE : 0; 1196 int flags = wlock ? LLE_EXCLUSIVE : 0;
1189 1197
1190 if (sa == NULL) { 1198 if (sa == NULL) {
1191 KASSERT(addr != NULL); 1199 KASSERT(addr != NULL);
1192 sockaddr_in_init(&sin, addr, 0); 1200 sockaddr_in_init(&sin, addr, 0);
1193 sa = sintocsa(&sin); 1201 sa = sintocsa(&sin);
1194 } 1202 }
1195 1203
1196 IF_AFDATA_RLOCK(ifp); 1204 IF_AFDATA_RLOCK(ifp);
1197 la = lla_lookup(LLTABLE(ifp), flags, sa); 1205 la = lla_lookup(LLTABLE(ifp), flags, sa);
1198 IF_AFDATA_RUNLOCK(ifp); 1206 IF_AFDATA_RUNLOCK(ifp);
1199 1207
1200 return la; 1208 return la;
1201} 1209}
1202 1210
1203static struct llentry * 1211static struct llentry *
1204arpcreate(struct ifnet *ifp, const struct in_addr *addr, 1212arpcreate(struct ifnet *ifp, const struct in_addr *addr,
1205 const struct sockaddr *sa, int wlock) 1213 const struct sockaddr *sa, int wlock)
1206{ 1214{
1207 struct sockaddr_in sin; 1215 struct sockaddr_in sin;
1208 struct llentry *la; 1216 struct llentry *la;
1209 int flags = wlock ? LLE_EXCLUSIVE : 0; 1217 int flags = wlock ? LLE_EXCLUSIVE : 0;
1210 1218
1211 if (sa == NULL) { 1219 if (sa == NULL) {
1212 KASSERT(addr != NULL); 1220 KASSERT(addr != NULL);
1213 sockaddr_in_init(&sin, addr, 0); 1221 sockaddr_in_init(&sin, addr, 0);
1214 sa = sintocsa(&sin); 1222 sa = sintocsa(&sin);
1215 } 1223 }
1216 1224
1217 la = arplookup(ifp, addr, sa, wlock); 1225 la = arplookup(ifp, addr, sa, wlock);
1218 1226
1219 if (la == NULL) { 1227 if (la == NULL) {
1220 struct rtentry *rt; 1228 struct rtentry *rt;
1221 1229
1222 rt = rtalloc1(sa, 0); 1230 rt = rtalloc1(sa, 0);
1223 IF_AFDATA_WLOCK(ifp); 1231 IF_AFDATA_WLOCK(ifp);
1224 la = lla_create(LLTABLE(ifp), flags, sa, rt); 1232 la = lla_create(LLTABLE(ifp), flags, sa, rt);
1225 IF_AFDATA_WUNLOCK(ifp); 1233 IF_AFDATA_WUNLOCK(ifp);
1226 if (rt != NULL) 1234 if (rt != NULL)
1227 rt_unref(rt); 1235 rt_unref(rt);
1228 1236
1229 if (la != NULL) 1237 if (la != NULL)
1230 la->ln_state = ND_LLINFO_NOSTATE; 1238 la->ln_state = ND_LLINFO_NOSTATE;
1231 } 1239 }
1232 1240
1233 return la; 1241 return la;
1234} 1242}
1235 1243
1236int 1244int
1237arpioctl(u_long cmd, void *data) 1245arpioctl(u_long cmd, void *data)
1238{ 1246{
1239 1247
1240 return EOPNOTSUPP; 1248 return EOPNOTSUPP;
1241} 1249}
1242 1250
1243void 1251void
1244arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa) 1252arp_ifinit(struct ifnet *ifp, struct ifaddr *ifa)
1245{ 1253{
1246 struct in_ifaddr *ia = (struct in_ifaddr *)ifa; 1254 struct in_ifaddr *ia = (struct in_ifaddr *)ifa;
1247 1255
1248 ifa->ifa_rtrequest = arp_rtrequest; 1256 ifa->ifa_rtrequest = arp_rtrequest;
1249 ifa->ifa_flags |= RTF_CONNECTED; 1257 ifa->ifa_flags |= RTF_CONNECTED;
1250 1258
1251 /* ARP will handle DAD for this address. */ 1259 /* ARP will handle DAD for this address. */
1252 if (in_nullhost(IA_SIN(ifa)->sin_addr)) { 1260 if (in_nullhost(IA_SIN(ifa)->sin_addr)) {
1253 if (ia->ia_dad_stop != NULL) /* safety */ 1261 if (ia->ia_dad_stop != NULL) /* safety */
1254 ia->ia_dad_stop(ifa); 1262 ia->ia_dad_stop(ifa);
1255 ia->ia_dad_start = NULL; 1263 ia->ia_dad_start = NULL;
1256 ia->ia_dad_stop = NULL; 1264 ia->ia_dad_stop = NULL;
1257 ia->ia4_flags &= ~IN_IFF_TENTATIVE; 1265 ia->ia4_flags &= ~IN_IFF_TENTATIVE;
1258 } else { 1266 } else {
1259 ia->ia_dad_start = arp_dad_start; 1267 ia->ia_dad_start = arp_dad_start;
1260 ia->ia_dad_stop = arp_dad_stop; 1268 ia->ia_dad_stop = arp_dad_stop;
1261 if (ia->ia4_flags & IN_IFF_TRYTENTATIVE && ip_dad_enabled()) 1269 if (ia->ia4_flags & IN_IFF_TRYTENTATIVE && ip_dad_enabled())
1262 ia->ia4_flags |= IN_IFF_TENTATIVE; 1270 ia->ia4_flags |= IN_IFF_TENTATIVE;
1263 else 1271 else
1264 arpannounce1(ifa); 1272 arpannounce1(ifa);
1265 } 1273 }
1266} 1274}
1267 1275
1268static bool 1276static bool
1269arp_nud_enabled(__unused struct ifnet *ifp) 1277arp_nud_enabled(__unused struct ifnet *ifp)
1270{ 1278{
1271 1279
1272 return arp_perform_nud != 0; 1280 return arp_perform_nud != 0;
1273} 1281}
1274 1282
1275static unsigned int 1283static unsigned int
1276arp_llinfo_reachable(__unused struct ifnet *ifp) 1284arp_llinfo_reachable(__unused struct ifnet *ifp)
1277{ 1285{
1278 1286
1279 return arp_reachable; 1287 return arp_reachable;
1280} 1288}
1281 1289
1282static unsigned int 1290static unsigned int
1283arp_llinfo_retrans(__unused struct ifnet *ifp) 1291arp_llinfo_retrans(__unused struct ifnet *ifp)
1284{ 1292{
1285 1293
1286 return arp_retrans; 1294 return arp_retrans;
1287} 1295}
1288 1296
1289/* 1297/*
1290 * Gets source address of the first packet in hold queue 1298 * Gets source address of the first packet in hold queue
1291 * and stores it in @src. 1299 * and stores it in @src.
1292 * Returns pointer to @src (if hold queue is not empty) or NULL. 1300 * Returns pointer to @src (if hold queue is not empty) or NULL.
1293 */ 1301 */
1294static union l3addr * 1302static union l3addr *
1295arp_llinfo_holdsrc(struct llentry *ln, union l3addr *src) 1303arp_llinfo_holdsrc(struct llentry *ln, union l3addr *src)
1296{ 1304{
1297 struct ip *ip; 1305 struct ip *ip;
1298 1306
1299 if (ln == NULL || ln->ln_hold == NULL) 1307 if (ln == NULL || ln->ln_hold == NULL)
1300 return NULL; 1308 return NULL;
1301 1309
1302 /* 1310 /*
1303 * assuming every packet in ln_hold has the same IP header 1311 * assuming every packet in ln_hold has the same IP header
1304 */ 1312 */
1305 ip = mtod(ln->ln_hold, struct ip *); 1313 ip = mtod(ln->ln_hold, struct ip *);
1306 /* XXX pullup? */ 1314 /* XXX pullup? */
1307 if (sizeof(*ip) < ln->ln_hold->m_len) 1315 if (sizeof(*ip) < ln->ln_hold->m_len)
1308 src->addr4 = ip->ip_src; 1316 src->addr4 = ip->ip_src;
1309 else 1317 else
1310 src = NULL; 1318 src = NULL;
1311 1319
1312 return src; 1320 return src;
1313} 1321}
1314 1322
1315static void 1323static void
1316arp_llinfo_output(struct ifnet *ifp, __unused const union l3addr *daddr, 1324arp_llinfo_output(struct ifnet *ifp, __unused const union l3addr *daddr,
1317 const union l3addr *taddr, const uint8_t *tlladdr, 1325 const union l3addr *taddr, const uint8_t *tlladdr,
1318 const union l3addr *hsrc) 1326 const union l3addr *hsrc)
1319{ 1327{
1320 struct in_addr tip = taddr->addr4, sip = zeroin_addr; 1328 struct in_addr tip = taddr->addr4, sip = zeroin_addr;
1321 const uint8_t *slladdr = CLLADDR(ifp->if_sadl); 1329 const uint8_t *slladdr = CLLADDR(ifp->if_sadl);
1322 1330
1323 if (hsrc != NULL) { 1331 if (hsrc != NULL) {
1324 struct in_ifaddr *ia; 1332 struct in_ifaddr *ia;
1325 struct psref psref; 1333 struct psref psref;
1326 1334
1327 ia = in_get_ia_on_iface_psref(hsrc->addr4, ifp, &psref); 1335 ia = in_get_ia_on_iface_psref(hsrc->addr4, ifp, &psref);
1328 if (ia != NULL) { 1336 if (ia != NULL) {
1329 sip = hsrc->addr4; 1337 sip = hsrc->addr4;
1330 ia4_release(ia, &psref); 1338 ia4_release(ia, &psref);
1331 } 1339 }
1332 } 1340 }
1333 1341
1334 if (sip.s_addr == INADDR_ANY) { 1342 if (sip.s_addr == INADDR_ANY) {
1335 struct sockaddr_in dst; 1343 struct sockaddr_in dst;
1336 struct rtentry *rt; 1344 struct rtentry *rt;
1337 1345
1338 sockaddr_in_init(&dst, &tip, 0); 1346 sockaddr_in_init(&dst, &tip, 0);
1339 rt = rtalloc1(sintosa(&dst), 0); 1347 rt = rtalloc1(sintosa(&dst), 0);
1340 if (rt != NULL) { 1348 if (rt != NULL) {
1341 if (rt->rt_ifp == ifp && 1349 if (rt->rt_ifp == ifp &&
1342 rt->rt_ifa != NULL && 1350 rt->rt_ifa != NULL &&
1343 rt->rt_ifa->ifa_addr->sa_family == AF_INET) 1351 rt->rt_ifa->ifa_addr->sa_family == AF_INET)
1344 sip = satosin(rt->rt_ifa->ifa_addr)->sin_addr; 1352 sip = satosin(rt->rt_ifa->ifa_addr)->sin_addr;
1345 rt_unref(rt); 1353 rt_unref(rt);
1346 } 1354 }
1347 if (sip.s_addr == INADDR_ANY) { 1355 if (sip.s_addr == INADDR_ANY) {
1348 char ipbuf[INET_ADDRSTRLEN]; 1356 char ipbuf[INET_ADDRSTRLEN];
1349 1357
1350 log(LOG_DEBUG, "source can't be " 1358 log(LOG_DEBUG, "source can't be "
1351 "determined: dst=%s\n", 1359 "determined: dst=%s\n",
1352 IN_PRINT(ipbuf, &tip)); 1360 IN_PRINT(ipbuf, &tip));
1353 return; 1361 return;
1354 } 1362 }
1355 } 1363 }
1356 1364
1357 arprequest(ifp, &sip, &tip, slladdr, tlladdr); 1365 arprequest(ifp, &sip, &tip, slladdr, tlladdr);
1358} 1366}
1359 1367
1360 1368
1361static void 1369static void
1362arp_llinfo_missed(struct ifnet *ifp, const union l3addr *taddr, 1370arp_llinfo_missed(struct ifnet *ifp, const union l3addr *taddr,
1363 __unused int16_t type, struct mbuf *m) 1371 __unused int16_t type, struct mbuf *m)
1364{ 1372{
1365 struct in_addr mdaddr = zeroin_addr; 1373 struct in_addr mdaddr = zeroin_addr;
1366 struct sockaddr_in dsin, tsin; 1374 struct sockaddr_in dsin, tsin;
1367 struct sockaddr *sa; 1375 struct sockaddr *sa;
1368 1376
1369 if (m != NULL) { 1377 if (m != NULL) {
1370 struct ip *ip = mtod(m, struct ip *); 1378 struct ip *ip = mtod(m, struct ip *);
1371 1379
1372 if (sizeof(*ip) < m->m_len) 1380 if (sizeof(*ip) < m->m_len)
1373 mdaddr = ip->ip_src; 1381 mdaddr = ip->ip_src;
1374 1382
1375 /* ip_input() will send ICMP_UNREACH_HOST, not us. */ 1383 /* ip_input() will send ICMP_UNREACH_HOST, not us. */
1376 m_freem(m); 1384 m_freem(m);
1377 } 1385 }
1378 1386
1379 if (mdaddr.s_addr != INADDR_ANY) { 1387 if (mdaddr.s_addr != INADDR_ANY) {
1380 sockaddr_in_init(&dsin, &mdaddr, 0); 1388 sockaddr_in_init(&dsin, &mdaddr, 0);
1381 sa = sintosa(&dsin); 1389 sa = sintosa(&dsin);
1382 } else 1390 } else
1383 sa = NULL; 1391 sa = NULL;
1384 1392
1385 sockaddr_in_init(&tsin, &taddr->addr4, 0); 1393 sockaddr_in_init(&tsin, &taddr->addr4, 0);
1386 rt_clonedmsg(RTM_MISS, sa, sintosa(&tsin), NULL, ifp); 1394 rt_clonedmsg(RTM_MISS, sa, sintosa(&tsin), NULL, ifp);
1387} 1395}
1388 1396
1389static void 1397static void
1390arp_free(struct llentry *ln, int gc) 1398arp_free(struct llentry *ln, int gc)
1391{ 1399{
1392 struct ifnet *ifp; 1400 struct ifnet *ifp;
1393 1401
1394 KASSERT(ln != NULL); 1402 KASSERT(ln != NULL);
1395 LLE_WLOCK_ASSERT(ln); 1403 LLE_WLOCK_ASSERT(ln);
1396 1404
1397 ifp = ln->lle_tbl->llt_ifp; 1405 ifp = ln->lle_tbl->llt_ifp;
1398 1406
1399 if (ln->la_flags & LLE_VALID || gc) { 1407 if (ln->la_flags & LLE_VALID || gc) {
1400 struct sockaddr_in sin; 1408 struct sockaddr_in sin;
1401 const char *lladdr; 1409 const char *lladdr;
1402 1410
1403 sockaddr_in_init(&sin, &ln->r_l3addr.addr4, 0); 1411 sockaddr_in_init(&sin, &ln->r_l3addr.addr4, 0);
1404 lladdr = ln->la_flags & LLE_VALID ? 1412 lladdr = ln->la_flags & LLE_VALID ?
1405 (const char *)&ln->ll_addr : NULL; 1413 (const char *)&ln->ll_addr : NULL;
1406 rt_clonedmsg(RTM_DELETE, NULL, sintosa(&sin), lladdr, ifp); 1414 rt_clonedmsg(RTM_DELETE, NULL, sintosa(&sin), lladdr, ifp);
1407 } 1415 }
1408 1416
1409 /* 1417 /*
1410 * Save to unlock. We still hold an extra reference and will not 1418 * Save to unlock. We still hold an extra reference and will not
1411 * free(9) in llentry_free() if someone else holds one as well. 1419 * free(9) in llentry_free() if someone else holds one as well.
1412 */ 1420 */
1413 LLE_WUNLOCK(ln); 1421 LLE_WUNLOCK(ln);
1414 IF_AFDATA_LOCK(ifp); 1422 IF_AFDATA_LOCK(ifp);
1415 LLE_WLOCK(ln); 1423 LLE_WLOCK(ln);
1416 1424
1417 lltable_free_entry(LLTABLE(ifp), ln); 1425 lltable_free_entry(LLTABLE(ifp), ln);
1418 1426
1419 IF_AFDATA_UNLOCK(ifp); 1427 IF_AFDATA_UNLOCK(ifp);
1420} 1428}
1421 1429
1422/* 1430/*
1423 * Upper-layer reachability hint for Neighbor Unreachability Detection. 1431 * Upper-layer reachability hint for Neighbor Unreachability Detection.
1424 * 1432 *
1425 * XXX cost-effective methods? 1433 * XXX cost-effective methods?
1426 */ 1434 */
1427void 1435void
1428arp_nud_hint(struct rtentry *rt) 1436arp_nud_hint(struct rtentry *rt)
1429{ 1437{
1430 struct llentry *ln; 1438 struct llentry *ln;
1431 struct ifnet *ifp; 1439 struct ifnet *ifp;
1432 1440
1433 if (rt == NULL) 1441 if (rt == NULL)
1434 return; 1442 return;
1435 1443
1436 ifp = rt->rt_ifp; 1444 ifp = rt->rt_ifp;
1437 ln = arplookup(ifp, NULL, rt_getkey(rt), 1); 1445 ln = arplookup(ifp, NULL, rt_getkey(rt), 1);
1438 nd_nud_hint(ln); 1446 nd_nud_hint(ln);
1439} 1447}
1440 1448
1441TAILQ_HEAD(dadq_head, dadq); 1449TAILQ_HEAD(dadq_head, dadq);
1442struct dadq { 1450struct dadq {
1443 TAILQ_ENTRY(dadq) dad_list; 1451 TAILQ_ENTRY(dadq) dad_list;
1444 struct ifaddr *dad_ifa; 1452 struct ifaddr *dad_ifa;
1445 int dad_count; /* max ARP to send */ 1453 int dad_count; /* max ARP to send */
1446 int dad_arp_tcount; /* # of trials to send ARP */ 1454 int dad_arp_tcount; /* # of trials to send ARP */
1447 int dad_arp_ocount; /* ARP sent so far */ 1455 int dad_arp_ocount; /* ARP sent so far */
1448 int dad_arp_announce; /* max ARP announcements */ 1456 int dad_arp_announce; /* max ARP announcements */
1449 int dad_arp_acount; /* # of announcements */ 1457 int dad_arp_acount; /* # of announcements */
1450 struct callout dad_timer_ch; 1458 struct callout dad_timer_ch;
1451}; 1459};
1452 1460
1453static struct dadq_head dadq; 1461static struct dadq_head dadq;
1454static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */ 1462static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */
1455static kmutex_t arp_dad_lock; 1463static kmutex_t arp_dad_lock;
1456 1464
1457static void 1465static void
1458arp_dad_init(void) 1466arp_dad_init(void)
1459{ 1467{
1460 1468
1461 TAILQ_INIT(&dadq); 1469 TAILQ_INIT(&dadq);
1462 mutex_init(&arp_dad_lock, MUTEX_DEFAULT, IPL_NONE); 1470 mutex_init(&arp_dad_lock, MUTEX_DEFAULT, IPL_NONE);
1463} 1471}
1464 1472
1465static struct dadq * 1473static struct dadq *
1466arp_dad_find(struct ifaddr *ifa) 1474arp_dad_find(struct ifaddr *ifa)
1467{ 1475{
1468 struct dadq *dp; 1476 struct dadq *dp;
1469 1477
1470 KASSERT(mutex_owned(&arp_dad_lock)); 1478 KASSERT(mutex_owned(&arp_dad_lock));
1471 1479
1472 TAILQ_FOREACH(dp, &dadq, dad_list) { 1480 TAILQ_FOREACH(dp, &dadq, dad_list) {
1473 if (dp->dad_ifa == ifa) 1481 if (dp->dad_ifa == ifa)
1474 return dp; 1482 return dp;
1475 } 1483 }
1476 return NULL; 1484 return NULL;
1477} 1485}
1478 1486
1479static void 1487static void
1480arp_dad_starttimer(struct dadq *dp, int ticks) 1488arp_dad_starttimer(struct dadq *dp, int ticks)
1481{ 1489{
1482 1490
1483 callout_reset(&dp->dad_timer_ch, ticks, 1491 callout_reset(&dp->dad_timer_ch, ticks,
1484 (void (*)(void *))arp_dad_timer, dp); 1492 (void (*)(void *))arp_dad_timer, dp);
1485} 1493}
1486 1494
1487static void 1495static void
1488arp_dad_stoptimer(struct dadq *dp) 1496arp_dad_stoptimer(struct dadq *dp)
1489{ 1497{
1490 1498
1491 KASSERT(mutex_owned(&arp_dad_lock)); 1499 KASSERT(mutex_owned(&arp_dad_lock));
1492 1500
1493 TAILQ_REMOVE(&dadq, dp, dad_list); 1501 TAILQ_REMOVE(&dadq, dp, dad_list);
1494 /* Tell the timer that dp is being destroyed. */ 1502 /* Tell the timer that dp is being destroyed. */
1495 dp->dad_ifa = NULL; 1503 dp->dad_ifa = NULL;
1496 callout_halt(&dp->dad_timer_ch, &arp_dad_lock); 1504 callout_halt(&dp->dad_timer_ch, &arp_dad_lock);
1497} 1505}
1498 1506
1499static void 1507static void
1500arp_dad_destroytimer(struct dadq *dp) 1508arp_dad_destroytimer(struct dadq *dp)
1501{ 1509{
1502 1510
1503 callout_destroy(&dp->dad_timer_ch); 1511 callout_destroy(&dp->dad_timer_ch);
1504 KASSERT(dp->dad_ifa == NULL); 1512 KASSERT(dp->dad_ifa == NULL);
1505 kmem_intr_free(dp, sizeof(*dp)); 1513 kmem_intr_free(dp, sizeof(*dp));
1506} 1514}
1507 1515
1508static void 1516static void
1509arp_dad_output(struct dadq *dp, struct ifaddr *ifa) 1517arp_dad_output(struct dadq *dp, struct ifaddr *ifa)
1510{ 1518{
1511 struct in_ifaddr *ia = (struct in_ifaddr *)ifa; 1519 struct in_ifaddr *ia = (struct in_ifaddr *)ifa;
1512 struct ifnet *ifp = ifa->ifa_ifp; 1520 struct ifnet *ifp = ifa->ifa_ifp;
1513 struct in_addr sip; 1521 struct in_addr sip;
1514 1522
1515 dp->dad_arp_tcount++; 1523 dp->dad_arp_tcount++;
1516 if ((ifp->if_flags & IFF_UP) == 0) 1524 if ((ifp->if_flags & IFF_UP) == 0)
1517 return; 1525 return;
1518 if ((ifp->if_flags & IFF_RUNNING) == 0) 1526 if ((ifp->if_flags & IFF_RUNNING) == 0)
1519 return; 1527 return;
1520 1528
1521 dp->dad_arp_tcount = 0; 1529 dp->dad_arp_tcount = 0;
1522 dp->dad_arp_ocount++; 1530 dp->dad_arp_ocount++;
1523 1531
1524 memset(&sip, 0, sizeof(sip)); 1532 memset(&sip, 0, sizeof(sip));
1525 arprequest(ifa->ifa_ifp, &sip, &ia->ia_addr.sin_addr, 1533 arprequest(ifa->ifa_ifp, &sip, &ia->ia_addr.sin_addr,
1526 CLLADDR(ifa->ifa_ifp->if_sadl), NULL); 1534 CLLADDR(ifa->ifa_ifp->if_sadl), NULL);
1527} 1535}
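/*
 * Illustrative sketch, not part of this diff: the on-the-wire shape of the
 * RFC 5227 probe that arp_dad_output() asks arprequest() to build -- sender
 * protocol address all-zeros, sender hardware address equal to the interface
 * MAC, target protocol address set to the tentative address being probed.
 * The helper name and the use of the userland <netinet/if_ether.h> headers
 * are assumptions made for this example only.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if_arp.h>
#include <netinet/if_ether.h>

static void
dad_probe_fill(struct ether_arp *ea, const uint8_t *my_mac,
    struct in_addr tentative)
{
	memset(ea, 0, sizeof(*ea));
	ea->arp_hrd = htons(ARPHRD_ETHER);
	ea->arp_pro = htons(ETHERTYPE_IP);
	ea->arp_hln = ETHER_ADDR_LEN;
	ea->arp_pln = sizeof(struct in_addr);
	ea->arp_op  = htons(ARPOP_REQUEST);
	memcpy(ea->arp_sha, my_mac, ETHER_ADDR_LEN);
	/* arp_spa stays 0.0.0.0 so the probe cannot pollute ARP caches. */
	memcpy(ea->arp_tpa, &tentative, sizeof(tentative));
	/* arp_tha stays zeroed: the target hardware address is unknown. */
}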
1528 1536
1529/* 1537/*
1530 * Start Duplicate Address Detection (DAD) for specified interface address. 1538 * Start Duplicate Address Detection (DAD) for specified interface address.
1531 */ 1539 */
1532static void 1540static void
1533arp_dad_start(struct ifaddr *ifa) 1541arp_dad_start(struct ifaddr *ifa)
1534{ 1542{
1535 struct in_ifaddr *ia = (struct in_ifaddr *)ifa; 1543 struct in_ifaddr *ia = (struct in_ifaddr *)ifa;
1536 struct dadq *dp; 1544 struct dadq *dp;
1537 char ipbuf[INET_ADDRSTRLEN]; 1545 char ipbuf[INET_ADDRSTRLEN];
1538 1546
1539 /* 1547 /*
1540 * If we don't need DAD, don't do it. 1548 * If we don't need DAD, don't do it.
1541 * - DAD is disabled 1549 * - DAD is disabled
1542 */ 1550 */
1543 if (!(ia->ia4_flags & IN_IFF_TENTATIVE)) { 1551 if (!(ia->ia4_flags & IN_IFF_TENTATIVE)) {
1544 log(LOG_DEBUG, 1552 log(LOG_DEBUG,
1545 "%s: called with non-tentative address %s(%s)\n", __func__, 1553 "%s: called with non-tentative address %s(%s)\n", __func__,
1546 IN_PRINT(ipbuf, &ia->ia_addr.sin_addr), 1554 IN_PRINT(ipbuf, &ia->ia_addr.sin_addr),
1547 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); 1555 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
1548 return; 1556 return;
1549 } 1557 }
1550 if (!ip_dad_enabled()) { 1558 if (!ip_dad_enabled()) {
1551 ia->ia4_flags &= ~IN_IFF_TENTATIVE; 1559 ia->ia4_flags &= ~IN_IFF_TENTATIVE;
1552 rt_addrmsg(RTM_NEWADDR, ifa); 1560 rt_addrmsg(RTM_NEWADDR, ifa);
1553 arpannounce1(ifa); 1561 arpannounce1(ifa);
1554 return; 1562 return;
1555 } 1563 }
1556 KASSERT(ifa->ifa_ifp != NULL); 1564 KASSERT(ifa->ifa_ifp != NULL);
1557 if (!(ifa->ifa_ifp->if_flags & IFF_UP)) 1565 if (!(ifa->ifa_ifp->if_flags & IFF_UP))
1558 return; 1566 return;
1559 1567
1560 dp = kmem_intr_alloc(sizeof(*dp), KM_NOSLEEP); 1568 dp = kmem_intr_alloc(sizeof(*dp), KM_NOSLEEP);
1561 1569
1562 mutex_enter(&arp_dad_lock); 1570 mutex_enter(&arp_dad_lock);
1563 if (arp_dad_find(ifa) != NULL) { 1571 if (arp_dad_find(ifa) != NULL) {
1564 mutex_exit(&arp_dad_lock); 1572 mutex_exit(&arp_dad_lock);
1565 /* DAD already in progress */ 1573 /* DAD already in progress */
1566 if (dp != NULL) 1574 if (dp != NULL)
1567 kmem_intr_free(dp, sizeof(*dp)); 1575 kmem_intr_free(dp, sizeof(*dp));
1568 return; 1576 return;
1569 } 1577 }
1570 1578
1571 if (dp == NULL) { 1579 if (dp == NULL) {
1572 mutex_exit(&arp_dad_lock); 1580 mutex_exit(&arp_dad_lock);
1573 log(LOG_ERR, "%s: memory allocation failed for %s(%s)\n", 1581 log(LOG_ERR, "%s: memory allocation failed for %s(%s)\n",
1574 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr), 1582 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr),
1575 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); 1583 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
1576 return; 1584 return;
1577 } 1585 }
1578 1586
1579 /* 1587 /*
1580 * Send ARP packet for DAD, ip_dad_count times. 1588 * Send ARP packet for DAD, ip_dad_count times.
1581 * Note that we must delay the first transmission. 1589 * Note that we must delay the first transmission.
1582 */ 1590 */
1583 callout_init(&dp->dad_timer_ch, CALLOUT_MPSAFE); 1591 callout_init(&dp->dad_timer_ch, CALLOUT_MPSAFE);
1584 dp->dad_ifa = ifa; 1592 dp->dad_ifa = ifa;
1585 ifaref(ifa); /* just for safety */ 1593 ifaref(ifa); /* just for safety */
1586 dp->dad_count = ip_dad_count; 1594 dp->dad_count = ip_dad_count;
1587 dp->dad_arp_announce = 0; /* Will be set when starting to announce */ 1595 dp->dad_arp_announce = 0; /* Will be set when starting to announce */
1588 dp->dad_arp_acount = dp->dad_arp_ocount = dp->dad_arp_tcount = 0; 1596 dp->dad_arp_acount = dp->dad_arp_ocount = dp->dad_arp_tcount = 0;
1589 TAILQ_INSERT_TAIL(&dadq, (struct dadq *)dp, dad_list); 1597 TAILQ_INSERT_TAIL(&dadq, (struct dadq *)dp, dad_list);
1590 1598
1591 ARPLOG(LOG_DEBUG, "%s: starting DAD for %s\n", if_name(ifa->ifa_ifp), 1599 ARPLOG(LOG_DEBUG, "%s: starting DAD for %s\n", if_name(ifa->ifa_ifp),
1592 ARPLOGADDR(&ia->ia_addr.sin_addr)); 1600 ARPLOGADDR(&ia->ia_addr.sin_addr));
1593 1601
1594 arp_dad_starttimer(dp, cprng_fast32() % (PROBE_WAIT * hz)); 1602 arp_dad_starttimer(dp, cprng_fast32() % (PROBE_WAIT * hz));
1595 1603
1596 mutex_exit(&arp_dad_lock); 1604 mutex_exit(&arp_dad_lock);
1597} 1605}
1598 1606
1599/* 1607/*
1600 * terminate DAD unconditionally. used for address removals. 1608 * terminate DAD unconditionally. used for address removals.
1601 */ 1609 */
1602static void 1610static void
1603arp_dad_stop(struct ifaddr *ifa) 1611arp_dad_stop(struct ifaddr *ifa)
1604{ 1612{
1605 struct dadq *dp; 1613 struct dadq *dp;
1606 1614
1607 mutex_enter(&arp_dad_lock); 1615 mutex_enter(&arp_dad_lock);
1608 dp = arp_dad_find(ifa); 1616 dp = arp_dad_find(ifa);
1609 if (dp == NULL) { 1617 if (dp == NULL) {
1610 mutex_exit(&arp_dad_lock); 1618 mutex_exit(&arp_dad_lock);
1611 /* DAD wasn't started yet */ 1619 /* DAD wasn't started yet */
1612 return; 1620 return;
1613 } 1621 }
1614 1622
1615 arp_dad_stoptimer(dp); 1623 arp_dad_stoptimer(dp);
1616 1624
1617 mutex_exit(&arp_dad_lock); 1625 mutex_exit(&arp_dad_lock);
1618 1626
1619 arp_dad_destroytimer(dp); 1627 arp_dad_destroytimer(dp);
1620 ifafree(ifa); 1628 ifafree(ifa);
1621} 1629}
1622 1630
1623static void 1631static void
1624arp_dad_timer(struct dadq *dp) 1632arp_dad_timer(struct dadq *dp)
1625{ 1633{
1626 struct ifaddr *ifa; 1634 struct ifaddr *ifa;
1627 struct in_ifaddr *ia; 1635 struct in_ifaddr *ia;
1628 char ipbuf[INET_ADDRSTRLEN]; 1636 char ipbuf[INET_ADDRSTRLEN];
1629 bool need_free = false; 1637 bool need_free = false;
1630 1638
1631 KERNEL_LOCK_UNLESS_NET_MPSAFE(); 1639 KERNEL_LOCK_UNLESS_NET_MPSAFE();
1632 mutex_enter(&arp_dad_lock); 1640 mutex_enter(&arp_dad_lock);
1633 1641
1634 ifa = dp->dad_ifa; 1642 ifa = dp->dad_ifa;
1635 if (ifa == NULL) { 1643 if (ifa == NULL) {
1636 /* dp is being destroyed by someone. Do nothing. */ 1644 /* dp is being destroyed by someone. Do nothing. */
1637 goto done; 1645 goto done;
1638 } 1646 }
1639 1647
1640 ia = (struct in_ifaddr *)ifa; 1648 ia = (struct in_ifaddr *)ifa;
1641 if (ia->ia4_flags & IN_IFF_DUPLICATED) { 1649 if (ia->ia4_flags & IN_IFF_DUPLICATED) {
1642 log(LOG_ERR, "%s: called with duplicate address %s(%s)\n", 1650 log(LOG_ERR, "%s: called with duplicate address %s(%s)\n",
1643 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr), 1651 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr),
1644 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); 1652 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
1645 goto done; 1653 goto done;
1646 } 1654 }
1647 if ((ia->ia4_flags & IN_IFF_TENTATIVE) == 0 && dp->dad_arp_acount == 0) 1655 if ((ia->ia4_flags & IN_IFF_TENTATIVE) == 0 && dp->dad_arp_acount == 0)
1648 { 1656 {
1649 log(LOG_ERR, "%s: called with non-tentative address %s(%s)\n", 1657 log(LOG_ERR, "%s: called with non-tentative address %s(%s)\n",
1650 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr), 1658 __func__, IN_PRINT(ipbuf, &ia->ia_addr.sin_addr),
1651 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???"); 1659 ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???");
1652 goto done; 1660 goto done;
1653 } 1661 }
1654 1662
1655 /* timed out with IFF_{RUNNING,UP} check */ 1663 /* timed out with IFF_{RUNNING,UP} check */
1656 if (dp->dad_arp_tcount > dad_maxtry) { 1664 if (dp->dad_arp_tcount > dad_maxtry) {
1657 ARPLOG(LOG_INFO, "%s: could not run DAD, driver problem?\n", 1665 ARPLOG(LOG_INFO, "%s: could not run DAD, driver problem?\n",
1658 if_name(ifa->ifa_ifp)); 1666 if_name(ifa->ifa_ifp));
1659 1667
1660 arp_dad_stoptimer(dp); 1668 arp_dad_stoptimer(dp);
1661 need_free = true; 1669 need_free = true;
1662 goto done; 1670 goto done;
1663 } 1671 }
1664 1672
1665 /* Need more checks? */ 1673 /* Need more checks? */
1666 if (dp->dad_arp_ocount < dp->dad_count) { 1674 if (dp->dad_arp_ocount < dp->dad_count) {
1667 int adelay; 1675 int adelay;
1668 1676
1669 /* 1677 /*
1670 * We have more ARP to go. Send ARP packet for DAD. 1678 * We have more ARP to go. Send ARP packet for DAD.
1671 */ 1679 */
1672 arp_dad_output(dp, ifa); 1680 arp_dad_output(dp, ifa);
1673 if (dp->dad_arp_ocount < dp->dad_count) 1681 if (dp->dad_arp_ocount < dp->dad_count)
1674 adelay = (PROBE_MIN * hz) + 1682 adelay = (PROBE_MIN * hz) +
1675 (cprng_fast32() % 1683 (cprng_fast32() %
1676 ((PROBE_MAX * hz) - (PROBE_MIN * hz))); 1684 ((PROBE_MAX * hz) - (PROBE_MIN * hz)));
1677 else 1685 else
1678 adelay = ANNOUNCE_WAIT * hz; 1686 adelay = ANNOUNCE_WAIT * hz;
1679 arp_dad_starttimer(dp, adelay); 1687 arp_dad_starttimer(dp, adelay);
1680 goto done; 1688 goto done;
1681 } else if (dp->dad_arp_acount == 0) { 1689 } else if (dp->dad_arp_acount == 0) {
1682 /* 1690 /*
1683 * We are done with DAD. 1691 * We are done with DAD.
1684 * No duplicate address found. 1692 * No duplicate address found.
1685 */ 1693 */
1686 ia->ia4_flags &= ~IN_IFF_TENTATIVE; 1694 ia->ia4_flags &= ~IN_IFF_TENTATIVE;
1687 rt_addrmsg(RTM_NEWADDR, ifa); 1695 rt_addrmsg(RTM_NEWADDR, ifa);
1688 ARPLOG(LOG_DEBUG, 1696 ARPLOG(LOG_DEBUG,
1689 "%s: DAD complete for %s - no duplicates found\n", 1697 "%s: DAD complete for %s - no duplicates found\n",
1690 if_name(ifa->ifa_ifp), ARPLOGADDR(&ia->ia_addr.sin_addr)); 1698 if_name(ifa->ifa_ifp), ARPLOGADDR(&ia->ia_addr.sin_addr));
1691 dp->dad_arp_announce = ANNOUNCE_NUM; 1699 dp->dad_arp_announce = ANNOUNCE_NUM;
1692 goto announce; 1700 goto announce;
1693 } else if (dp->dad_arp_acount < dp->dad_arp_announce) { 1701 } else if (dp->dad_arp_acount < dp->dad_arp_announce) {
1694announce: 1702announce:
1695 /* 1703 /*
1696 * Announce the address. 1704 * Announce the address.
1697 */ 1705 */
1698 arpannounce1(ifa); 1706 arpannounce1(ifa);
1699 dp->dad_arp_acount++; 1707 dp->dad_arp_acount++;
1700 if (dp->dad_arp_acount < dp->dad_arp_announce) { 1708 if (dp->dad_arp_acount < dp->dad_arp_announce) {
1701 arp_dad_starttimer(dp, ANNOUNCE_INTERVAL * hz); 1709 arp_dad_starttimer(dp, ANNOUNCE_INTERVAL * hz);
1702 goto done; 1710 goto done;
1703 } 1711 }
1704 ARPLOG(LOG_DEBUG, 1712 ARPLOG(LOG_DEBUG,
1705 "%s: ARP announcement complete for %s\n", 1713 "%s: ARP announcement complete for %s\n",
1706 if_name(ifa->ifa_ifp), ARPLOGADDR(&ia->ia_addr.sin_addr)); 1714 if_name(ifa->ifa_ifp), ARPLOGADDR(&ia->ia_addr.sin_addr));
1707 } 1715 }
1708 1716
1709 arp_dad_stoptimer(dp); 1717 arp_dad_stoptimer(dp);
1710 need_free = true; 1718 need_free = true;
1711done: 1719done:
1712 mutex_exit(&arp_dad_lock); 1720 mutex_exit(&arp_dad_lock);
1713 1721
1714 if (need_free) { 1722 if (need_free) {
1715 arp_dad_destroytimer(dp); 1723 arp_dad_destroytimer(dp);
1716 KASSERT(ifa != NULL); 1724 KASSERT(ifa != NULL);
1717 ifafree(ifa); 1725 ifafree(ifa);
1718 } 1726 }
1719 1727
1720 KERNEL_UNLOCK_UNLESS_NET_MPSAFE(); 1728 KERNEL_UNLOCK_UNLESS_NET_MPSAFE();
1721} 1729}
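/*
 * Illustrative sketch, not part of this diff: the probe/announce schedule
 * arp_dad_timer() steps through, in plain C.  The constants shown are the
 * RFC 5227 values (PROBE_WAIT 1s, PROBE_MIN 1s, PROBE_MAX 2s, ANNOUNCE_WAIT
 * 2s, ANNOUNCE_INTERVAL 2s) and are assumed to match the kernel's defines;
 * rand() stands in for cprng_fast32().
 */
#include <stdlib.h>

#define PROBE_WAIT		1	/* s: max delay before the first probe */
#define PROBE_MIN		1	/* s: min delay between probes */
#define PROBE_MAX		2	/* s: max delay between probes */
#define ANNOUNCE_WAIT		2	/* s: delay before the first announcement */
#define ANNOUNCE_INTERVAL	2	/* s: delay between announcements */

static int
next_dad_delay(int probes_sent, int probe_count, int hz)
{
	if (probes_sent == 0)		/* before the first probe */
		return rand() % (PROBE_WAIT * hz);
	if (probes_sent < probe_count)	/* between probes */
		return (PROBE_MIN * hz) +
		    rand() % ((PROBE_MAX * hz) - (PROBE_MIN * hz));
	/* later announcements follow ANNOUNCE_INTERVAL * hz apart */
	return ANNOUNCE_WAIT * hz;	/* before the first announcement */
}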
1722 1730
1723static void 1731static void
1724arp_dad_duplicated(struct ifaddr *ifa, const struct sockaddr_dl *from) 1732arp_dad_duplicated(struct ifaddr *ifa, const struct sockaddr_dl *from)
1725{ 1733{
1726 struct in_ifaddr *ia = ifatoia(ifa); 1734 struct in_ifaddr *ia = ifatoia(ifa);
1727 struct ifnet *ifp = ifa->ifa_ifp; 1735 struct ifnet *ifp = ifa->ifa_ifp;
1728 char ipbuf[INET_ADDRSTRLEN], llabuf[LLA_ADDRSTRLEN]; 1736 char ipbuf[INET_ADDRSTRLEN], llabuf[LLA_ADDRSTRLEN];
1729 const char *iastr, *llastr; 1737 const char *iastr, *llastr;
1730 1738
1731 iastr = IN_PRINT(ipbuf, &ia->ia_addr.sin_addr); 1739 iastr = IN_PRINT(ipbuf, &ia->ia_addr.sin_addr);
1732 if (__predict_false(from == NULL)) 1740 if (__predict_false(from == NULL))
1733 llastr = NULL; 1741 llastr = NULL;
1734 else 1742 else
1735 llastr = lla_snprintf(llabuf, sizeof(llabuf), 1743 llastr = lla_snprintf(llabuf, sizeof(llabuf),
1736 CLLADDR(from), from->sdl_alen); 1744 CLLADDR(from), from->sdl_alen);
1737 1745
1738 if (ia->ia4_flags & (IN_IFF_TENTATIVE|IN_IFF_DUPLICATED)) { 1746 if (ia->ia4_flags & (IN_IFF_TENTATIVE|IN_IFF_DUPLICATED)) {
1739 log(LOG_ERR, 1747 log(LOG_ERR,
1740 "%s: DAD duplicate address %s from %s\n", 1748 "%s: DAD duplicate address %s from %s\n",
1741 if_name(ifp), iastr, llastr); 1749 if_name(ifp), iastr, llastr);
1742 } else if (ia->ia_dad_defended == 0 || 1750 } else if (ia->ia_dad_defended == 0 ||
1743 ia->ia_dad_defended < time_uptime - DEFEND_INTERVAL) { 1751 ia->ia_dad_defended < time_uptime - DEFEND_INTERVAL) {
1744 ia->ia_dad_defended = time_uptime; 1752 ia->ia_dad_defended = time_uptime;
1745 arpannounce1(ifa); 1753 arpannounce1(ifa);
1746 log(LOG_ERR, 1754 log(LOG_ERR,
1747 "%s: DAD defended address %s from %s\n", 1755 "%s: DAD defended address %s from %s\n",
1748 if_name(ifp), iastr, llastr); 1756 if_name(ifp), iastr, llastr);
1749 return; 1757 return;
1750 } else { 1758 } else {
1751 /* If DAD is disabled, just report the duplicate. */ 1759 /* If DAD is disabled, just report the duplicate. */
1752 if (!ip_dad_enabled()) { 1760 if (!ip_dad_enabled()) {
1753 log(LOG_ERR, 1761 log(LOG_ERR,
1754 "%s: DAD ignoring duplicate address %s from %s\n", 1762 "%s: DAD ignoring duplicate address %s from %s\n",
1755 if_name(ifp), iastr, llastr); 1763 if_name(ifp), iastr, llastr);
1756 return; 1764 return;
1757 } 1765 }
1758 log(LOG_ERR, 1766 log(LOG_ERR,
1759 "%s: DAD defence failed for %s from %s\n", 1767 "%s: DAD defence failed for %s from %s\n",
1760 if_name(ifp), iastr, llastr); 1768 if_name(ifp), iastr, llastr);
1761 } 1769 }
1762 1770
1763 arp_dad_stop(ifa); 1771 arp_dad_stop(ifa);
1764 1772
1765 ia->ia4_flags &= ~IN_IFF_TENTATIVE; 1773 ia->ia4_flags &= ~IN_IFF_TENTATIVE;
1766 if ((ia->ia4_flags & IN_IFF_DUPLICATED) == 0) { 1774 if ((ia->ia4_flags & IN_IFF_DUPLICATED) == 0) {
1767 ia->ia4_flags |= IN_IFF_DUPLICATED; 1775 ia->ia4_flags |= IN_IFF_DUPLICATED;
1768 /* Inform the routing socket of the duplicate address */ 1776 /* Inform the routing socket of the duplicate address */
1769 rt_addrmsg_src(RTM_NEWADDR, ifa, (const struct sockaddr *)from); 1777 rt_addrmsg_src(RTM_NEWADDR, ifa, (const struct sockaddr *)from);
1770 } 1778 }
1771} 1779}
1772 1780
1773/* 1781/*
1774 * Called from 10 Mb/s Ethernet interrupt handlers 1782 * Called from 10 Mb/s Ethernet interrupt handlers
1775 * when ether packet type ETHERTYPE_REVARP 1783 * when ether packet type ETHERTYPE_REVARP
1776 * is received. Common length and type checks are done here, 1784 * is received. Common length and type checks are done here,
1777 * then the protocol-specific routine is called. 1785 * then the protocol-specific routine is called.
1778 */ 1786 */
1779void 1787void
1780revarpinput(struct mbuf *m) 1788revarpinput(struct mbuf *m)
1781{ 1789{
1782 struct arphdr *ar; 1790 struct arphdr *ar;
1783 int arplen; 1791 int arplen;
1784 1792
1785 arplen = sizeof(struct arphdr); 1793 arplen = sizeof(struct arphdr);
1786 if (m->m_len < arplen && (m = m_pullup(m, arplen)) == NULL) 1794 if (m->m_len < arplen && (m = m_pullup(m, arplen)) == NULL)
1787 return; 1795 return;
1788 ar = mtod(m, struct arphdr *); 1796 ar = mtod(m, struct arphdr *);
1789 1797
1790 if (ntohs(ar->ar_hrd) == ARPHRD_IEEE1394) { 1798 if (ntohs(ar->ar_hrd) == ARPHRD_IEEE1394) {
1791 goto out; 1799 goto out;
1792 } 1800 }
1793 1801
1794 arplen = sizeof(struct arphdr) + 2 * (ar->ar_hln + ar->ar_pln); 1802 arplen = sizeof(struct arphdr) + 2 * (ar->ar_hln + ar->ar_pln);
1795 if (m->m_len < arplen && (m = m_pullup(m, arplen)) == NULL) 1803 if (m->m_len < arplen && (m = m_pullup(m, arplen)) == NULL)
1796 return; 1804 return;
1797 ar = mtod(m, struct arphdr *); 1805 ar = mtod(m, struct arphdr *);
1798 1806
1799 switch (ntohs(ar->ar_pro)) { 1807 switch (ntohs(ar->ar_pro)) {
1800 case ETHERTYPE_IP: 1808 case ETHERTYPE_IP:
1801 case ETHERTYPE_IPTRAILERS: 1809 case ETHERTYPE_IPTRAILERS:
1802 in_revarpinput(m); 1810 in_revarpinput(m);
1803 return; 1811 return;
1804 1812
1805 default: 1813 default:
1806 break; 1814 break;
1807 } 1815 }
1808 1816
1809out: 1817out:
1810 m_freem(m); 1818 m_freem(m);
1811} 1819}
1812 1820
1813/* 1821/*
1814 * RARP for Internet protocols on 10 Mb/s Ethernet. 1822 * RARP for Internet protocols on 10 Mb/s Ethernet.
1815 * Algorithm is that given in RFC 903. 1823 * Algorithm is that given in RFC 903.
1816 * We are only using it for bootstrap purposes to get an ip address for one of 1824 * We are only using it for bootstrap purposes to get an ip address for one of
1817 * our interfaces. Thus we support no user-interface. 1825 * our interfaces. Thus we support no user-interface.
1818 * 1826 *
1819 * Since the contents of the RARP reply are specific to the interface that 1827 * Since the contents of the RARP reply are specific to the interface that
1820 * sent the request, this code must ensure that they are properly associated. 1828 * sent the request, this code must ensure that they are properly associated.
1821 * 1829 *
1822 * Note: also supports ARP via RARP packets, per the RFC. 1830 * Note: also supports ARP via RARP packets, per the RFC.
1823 */ 1831 */
1824void 1832void
1825in_revarpinput(struct mbuf *m) 1833in_revarpinput(struct mbuf *m)
1826{ 1834{
1827 struct arphdr *ah; 1835 struct arphdr *ah;
1828 void *tha; 1836 void *tha;
1829 int op; 1837 int op;
1830 struct ifnet *rcvif; 1838 struct ifnet *rcvif;
1831 int s; 1839 int s;
1832 1840
1833 ah = mtod(m, struct arphdr *); 1841 ah = mtod(m, struct arphdr *);
1834 op = ntohs(ah->ar_op); 1842 op = ntohs(ah->ar_op);
1835 1843
1836 rcvif = m_get_rcvif(m, &s); 1844 rcvif = m_get_rcvif(m, &s);
1837 if (__predict_false(rcvif == NULL)) 1845 if (__predict_false(rcvif == NULL))
1838 goto out; 1846 goto out;
1839 if (rcvif->if_flags & IFF_NOARP) 1847 if (rcvif->if_flags & IFF_NOARP)
1840 goto out; 1848 goto out;
1841 1849
1842 switch (rcvif->if_type) { 1850 switch (rcvif->if_type) {
1843 case IFT_IEEE1394: 1851 case IFT_IEEE1394:
1844 /* ARP without target hardware address is not supported */ 1852 /* ARP without target hardware address is not supported */
1845 goto out; 1853 goto out;
1846 default: 1854 default:
1847 break; 1855 break;
1848 } 1856 }
1849 1857
1850 switch (op) { 1858 switch (op) {
1851 case ARPOP_REQUEST: 1859 case ARPOP_REQUEST:
1852 case ARPOP_REPLY: /* per RFC */ 1860 case ARPOP_REPLY: /* per RFC */
1853 m_put_rcvif(rcvif, &s); 1861 m_put_rcvif(rcvif, &s);
1854 in_arpinput(m); 1862 in_arpinput(m);
1855 return; 1863 return;
1856 case ARPOP_REVREPLY: 1864 case ARPOP_REVREPLY:
1857 break; 1865 break;
1858 case ARPOP_REVREQUEST: /* handled by rarpd(8) */ 1866 case ARPOP_REVREQUEST: /* handled by rarpd(8) */
1859 default: 1867 default:
1860 goto out; 1868 goto out;
1861 } 1869 }
1862 if (!revarp_in_progress) 1870 if (!revarp_in_progress)
1863 goto out; 1871 goto out;
1864 if (rcvif != myip_ifp) /* !same interface */ 1872 if (rcvif != myip_ifp) /* !same interface */
1865 goto out; 1873 goto out;
1866 if (myip_initialized) 1874 if (myip_initialized)
1867 goto wake; 1875 goto wake;
1868 tha = ar_tha(ah); 1876 tha = ar_tha(ah);
1869 if (tha == NULL) 1877 if (tha == NULL)
1870 goto out; 1878 goto out;
1871 if (ah->ar_pln != sizeof(struct in_addr)) 1879 if (ah->ar_pln != sizeof(struct in_addr))
1872 goto out; 1880 goto out;
1873 if (ah->ar_hln != rcvif->if_sadl->sdl_alen) 1881 if (ah->ar_hln != rcvif->if_sadl->sdl_alen)
1874 goto out; 1882 goto out;
1875 if (memcmp(tha, CLLADDR(rcvif->if_sadl), rcvif->if_sadl->sdl_alen)) 1883 if (memcmp(tha, CLLADDR(rcvif->if_sadl), rcvif->if_sadl->sdl_alen))
1876 goto out; 1884 goto out;
1877 memcpy(&srv_ip, ar_spa(ah), sizeof(srv_ip)); 1885 memcpy(&srv_ip, ar_spa(ah), sizeof(srv_ip));
1878 memcpy(&myip, ar_tpa(ah), sizeof(myip)); 1886 memcpy(&myip, ar_tpa(ah), sizeof(myip));
1879 myip_initialized = 1; 1887 myip_initialized = 1;
1880wake: /* Do wakeup every time in case it was missed. */ 1888wake: /* Do wakeup every time in case it was missed. */
1881 wakeup((void *)&myip); 1889 wakeup((void *)&myip);
1882 1890
1883out: 1891out:
1884 m_put_rcvif(rcvif, &s); 1892 m_put_rcvif(rcvif, &s);
1885 m_freem(m); 1893 m_freem(m);
1886} 1894}
1887 1895
1888/* 1896/*
1889 * Send a RARP request for the ip address of the specified interface. 1897 * Send a RARP request for the ip address of the specified interface.
1890 * The request should be RFC 903-compliant. 1898 * The request should be RFC 903-compliant.
1891 */ 1899 */
1892static void 1900static void
1893revarprequest(struct ifnet *ifp) 1901revarprequest(struct ifnet *ifp)
1894{ 1902{
1895 struct sockaddr sa; 1903 struct sockaddr sa;
1896 struct mbuf *m; 1904 struct mbuf *m;
1897 struct arphdr *ah; 1905 struct arphdr *ah;
1898 void *tha; 1906 void *tha;
1899 1907
1900 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL) 1908 if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
1901 return; 1909 return;
1902 MCLAIM(m, &arpdomain.dom_mowner); 1910 MCLAIM(m, &arpdomain.dom_mowner);
1903 m->m_len = sizeof(*ah) + 2*sizeof(struct in_addr) + 1911 m->m_len = sizeof(*ah) + 2*sizeof(struct in_addr) +
1904 2*ifp->if_addrlen; 1912 2*ifp->if_addrlen;
1905 m->m_pkthdr.len = m->m_len; 1913 m->m_pkthdr.len = m->m_len;
1906 m_align(m, m->m_len); 1914 m_align(m, m->m_len);
1907 ah = mtod(m, struct arphdr *); 1915 ah = mtod(m, struct arphdr *);
1908 memset(ah, 0, m->m_len); 1916 memset(ah, 0, m->m_len);
1909 ah->ar_pro = htons(ETHERTYPE_IP); 1917 ah->ar_pro = htons(ETHERTYPE_IP);
1910 ah->ar_hln = ifp->if_addrlen; /* hardware address length */ 1918 ah->ar_hln = ifp->if_addrlen; /* hardware address length */
1911 ah->ar_pln = sizeof(struct in_addr); /* protocol address length */ 1919 ah->ar_pln = sizeof(struct in_addr); /* protocol address length */
1912 ah->ar_op = htons(ARPOP_REVREQUEST); 1920 ah->ar_op = htons(ARPOP_REVREQUEST);
1913 1921
1914 memcpy(ar_sha(ah), CLLADDR(ifp->if_sadl), ah->ar_hln); 1922 memcpy(ar_sha(ah), CLLADDR(ifp->if_sadl), ah->ar_hln);
1915 tha = ar_tha(ah); 1923 tha = ar_tha(ah);
1916 if (tha == NULL) { 1924 if (tha == NULL) {
1917 m_free(m); 1925 m_free(m);
1918 return; 1926 return;
1919 } 1927 }
1920 memcpy(tha, CLLADDR(ifp->if_sadl), ah->ar_hln); 1928 memcpy(tha, CLLADDR(ifp->if_sadl), ah->ar_hln);
1921 1929
1922 sa.sa_family = AF_ARP; 1930 sa.sa_family = AF_ARP;
1923 sa.sa_len = 2; 1931 sa.sa_len = 2;
1924 m->m_flags |= M_BCAST; 1932 m->m_flags |= M_BCAST;
1925 1933
1926 if_output_lock(ifp, ifp, m, &sa, NULL); 1934 if_output_lock(ifp, ifp, m, &sa, NULL);
1927} 1935}
1928 1936
1929/* 1937/*
1930 * RARP for the ip address of the specified interface, but also 1938 * RARP for the ip address of the specified interface, but also
1931 * save the ip address of the server that sent the answer. 1939 * save the ip address of the server that sent the answer.
1932 * Timeout if no response is received. 1940 * Timeout if no response is received.
1933 */ 1941 */
1934int 1942int
1935revarpwhoarewe(struct ifnet *ifp, struct in_addr *serv_in, 1943revarpwhoarewe(struct ifnet *ifp, struct in_addr *serv_in,
1936 struct in_addr *clnt_in) 1944 struct in_addr *clnt_in)
1937{ 1945{
1938 int result, count = 20; 1946 int result, count = 20;
1939 1947
1940 myip_initialized = 0; 1948 myip_initialized = 0;
1941 myip_ifp = ifp; 1949 myip_ifp = ifp;
1942 1950
1943 revarp_in_progress = 1; 1951 revarp_in_progress = 1;
1944 while (count--) { 1952 while (count--) {
1945 revarprequest(ifp); 1953 revarprequest(ifp);
1946 result = tsleep((void *)&myip, PSOCK, "revarp", hz/2); 1954 result = tsleep((void *)&myip, PSOCK, "revarp", hz/2);
1947 if (result != EWOULDBLOCK) 1955 if (result != EWOULDBLOCK)
1948 break; 1956 break;
1949 } 1957 }
1950 revarp_in_progress = 0; 1958 revarp_in_progress = 0;
1951 1959
1952 if (!myip_initialized) 1960 if (!myip_initialized)
1953 return ENETUNREACH; 1961 return ENETUNREACH;
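The RARP client path above funnels into one blocking entry point, revarpwhoarewe(), which broadcasts up to 20 requests at half-second intervals before giving up. A minimal usage sketch, assuming a netboot-style caller that already holds the interface pointer; configure_address() is a hypothetical helper:

	struct in_addr server, client;
	int error;

	error = revarpwhoarewe(ifp, &server, &client);
	if (error != 0)
		printf("revarp: no RARP reply on %s (error %d)\n",
		    if_name(ifp), error);
	else
		configure_address(ifp, client, server);	/* hypothetical */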

cvs diff -r1.235 -r1.236 src/sys/sys/mbuf.h (switch to unified diff)

--- src/sys/sys/mbuf.h 2022/11/15 09:13:43 1.235
+++ src/sys/sys/mbuf.h 2022/11/15 10:47:39 1.236
@@ -1,963 +1,964 @@ @@ -1,963 +1,964 @@
1/* $NetBSD: mbuf.h,v 1.235 2022/11/15 09:13:43 roy Exp $ */ 1/* $NetBSD: mbuf.h,v 1.236 2022/11/15 10:47:39 roy Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996, 1997, 1999, 2001, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1996, 1997, 1999, 2001, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center and Matt Thomas of 3am Software Foundry. 9 * NASA Ames Research Center and Matt Thomas of 3am Software Foundry.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1982, 1986, 1988, 1993 34 * Copyright (c) 1982, 1986, 1988, 1993
35 * The Regents of the University of California. All rights reserved. 35 * The Regents of the University of California. All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. Neither the name of the University nor the names of its contributors 45 * 3. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software 46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission. 47 * without specific prior written permission.
48 * 48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE. 59 * SUCH DAMAGE.
60 * 60 *
61 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95 61 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
62 */ 62 */
63 63
64#ifndef _SYS_MBUF_H_ 64#ifndef _SYS_MBUF_H_
65#define _SYS_MBUF_H_ 65#define _SYS_MBUF_H_
66 66
67#ifdef _KERNEL_OPT 67#ifdef _KERNEL_OPT
68#include "opt_mbuftrace.h" 68#include "opt_mbuftrace.h"
69#endif 69#endif
70 70
71#ifndef M_WAITOK 71#ifndef M_WAITOK
72#include <sys/malloc.h> 72#include <sys/malloc.h>
73#endif 73#endif
74#include <sys/pool.h> 74#include <sys/pool.h>
75#include <sys/queue.h> 75#include <sys/queue.h>
76#if defined(_KERNEL) 76#if defined(_KERNEL)
77#include <sys/percpu_types.h> 77#include <sys/percpu_types.h>
78#include <sys/socket.h> /* for AF_UNSPEC */ 78#include <sys/socket.h> /* for AF_UNSPEC */
79#include <sys/psref.h> 79#include <sys/psref.h>
80#endif /* defined(_KERNEL) */ 80#endif /* defined(_KERNEL) */
81 81
82/* For offsetof() */ 82/* For offsetof() */
83#if defined(_KERNEL) || defined(_STANDALONE) 83#if defined(_KERNEL) || defined(_STANDALONE)
84#include <sys/systm.h> 84#include <sys/systm.h>
85#else 85#else
86#include <stddef.h> 86#include <stddef.h>
87#endif 87#endif
88 88
89#include <uvm/uvm_param.h> /* for MIN_PAGE_SIZE */ 89#include <uvm/uvm_param.h> /* for MIN_PAGE_SIZE */
90 90
91#include <net/if.h> 91#include <net/if.h>
92 92
93/* 93/*
94 * Mbufs are of a single size, MSIZE (machine/param.h), which 94 * Mbufs are of a single size, MSIZE (machine/param.h), which
95 * includes overhead. An mbuf may add a single "mbuf cluster" of size 95 * includes overhead. An mbuf may add a single "mbuf cluster" of size
96 * MCLBYTES (also in machine/param.h), which has no additional overhead 96 * MCLBYTES (also in machine/param.h), which has no additional overhead
97 * and is used instead of the internal data area; this is done when 97 * and is used instead of the internal data area; this is done when
98 * at least MINCLSIZE of data must be stored. 98 * at least MINCLSIZE of data must be stored.
99 */ 99 */
100 100
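/*
 * Illustrative usage sketch, not part of this header: the allocation idiom
 * that follows from the sizes described above -- take a packet-header mbuf
 * and attach a cluster when the payload will not fit in the internal data
 * area.  "len" is a hypothetical payload length.
 */
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL && len > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);		/* no cluster was available */
			m = NULL;
		}
	}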
101/* Packet tags structure */ 101/* Packet tags structure */
102struct m_tag { 102struct m_tag {
103 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ 103 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
104 uint16_t m_tag_id; /* Tag ID */ 104 uint16_t m_tag_id; /* Tag ID */
105 uint16_t m_tag_len; /* Length of data */ 105 uint16_t m_tag_len; /* Length of data */
106}; 106};
107 107
108/* mbuf ownership structure */ 108/* mbuf ownership structure */
109struct mowner { 109struct mowner {
110 char mo_name[16]; /* owner name (fxp0) */ 110 char mo_name[16]; /* owner name (fxp0) */
111 char mo_descr[16]; /* owner description (input) */ 111 char mo_descr[16]; /* owner description (input) */
112 LIST_ENTRY(mowner) mo_link; /* */ 112 LIST_ENTRY(mowner) mo_link; /* */
113 struct percpu *mo_counters; 113 struct percpu *mo_counters;
114}; 114};
115 115
116#define MOWNER_INIT(x, y) { .mo_name = x, .mo_descr = y } 116#define MOWNER_INIT(x, y) { .mo_name = x, .mo_descr = y }
117 117
118enum mowner_counter_index { 118enum mowner_counter_index {
119 MOWNER_COUNTER_CLAIMS, /* # of small mbuf claimed */ 119 MOWNER_COUNTER_CLAIMS, /* # of small mbuf claimed */
120 MOWNER_COUNTER_RELEASES, /* # of small mbuf released */ 120 MOWNER_COUNTER_RELEASES, /* # of small mbuf released */
121 MOWNER_COUNTER_CLUSTER_CLAIMS, /* # of cluster mbuf claimed */ 121 MOWNER_COUNTER_CLUSTER_CLAIMS, /* # of cluster mbuf claimed */
122 MOWNER_COUNTER_CLUSTER_RELEASES,/* # of cluster mbuf released */ 122 MOWNER_COUNTER_CLUSTER_RELEASES,/* # of cluster mbuf released */
123 MOWNER_COUNTER_EXT_CLAIMS, /* # of M_EXT mbuf claimed */ 123 MOWNER_COUNTER_EXT_CLAIMS, /* # of M_EXT mbuf claimed */
124 MOWNER_COUNTER_EXT_RELEASES, /* # of M_EXT mbuf released */ 124 MOWNER_COUNTER_EXT_RELEASES, /* # of M_EXT mbuf released */
125 125
126 MOWNER_COUNTER_NCOUNTERS, 126 MOWNER_COUNTER_NCOUNTERS,
127}; 127};
128 128
129#if defined(_KERNEL) 129#if defined(_KERNEL)
130struct mowner_counter { 130struct mowner_counter {
131 u_long mc_counter[MOWNER_COUNTER_NCOUNTERS]; 131 u_long mc_counter[MOWNER_COUNTER_NCOUNTERS];
132}; 132};
133#endif 133#endif
134 134
135/* userland-exported version of struct mowner */ 135/* userland-exported version of struct mowner */
136struct mowner_user { 136struct mowner_user {
137 char mo_name[16]; /* owner name (fxp0) */ 137 char mo_name[16]; /* owner name (fxp0) */
138 char mo_descr[16]; /* owner description (input) */ 138 char mo_descr[16]; /* owner description (input) */
139 LIST_ENTRY(mowner) mo_link; /* unused padding; for compatibility */ 139 LIST_ENTRY(mowner) mo_link; /* unused padding; for compatibility */
140 u_long mo_counter[MOWNER_COUNTER_NCOUNTERS]; /* counters */ 140 u_long mo_counter[MOWNER_COUNTER_NCOUNTERS]; /* counters */
141}; 141};
142 142
143/* 143/*
144 * Macros for type conversion 144 * Macros for type conversion
145 * mtod(m,t) - convert mbuf pointer to data pointer of correct type 145 * mtod(m,t) - convert mbuf pointer to data pointer of correct type
146 */ 146 */
147#define mtod(m, t) ((t)((m)->m_data)) 147#define mtod(m, t) ((t)((m)->m_data))
148 148
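/*
 * Illustrative usage sketch, not part of this header: mtod() only casts
 * m_data, so callers must first ensure the object they want is contiguous
 * in this mbuf, typically with m_pullup(9).  struct ip is just an example
 * payload type here.
 */
	struct ip *ip;

	if (m->m_len < (int)sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		return;			/* chain was freed by m_pullup() */
	ip = mtod(m, struct ip *);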
149/* header at beginning of each mbuf */ 149/* header at beginning of each mbuf */
150struct m_hdr { 150struct m_hdr {
151 struct mbuf *mh_next; /* next buffer in chain */ 151 struct mbuf *mh_next; /* next buffer in chain */
152 struct mbuf *mh_nextpkt; /* next chain in queue/record */ 152 struct mbuf *mh_nextpkt; /* next chain in queue/record */
153 char *mh_data; /* location of data */ 153 char *mh_data; /* location of data */
154 struct mowner *mh_owner; /* mbuf owner */ 154 struct mowner *mh_owner; /* mbuf owner */
155 int mh_len; /* amount of data in this mbuf */ 155 int mh_len; /* amount of data in this mbuf */
156 int mh_flags; /* flags; see below */ 156 int mh_flags; /* flags; see below */
157 paddr_t mh_paddr; /* physical address of mbuf */ 157 paddr_t mh_paddr; /* physical address of mbuf */
158 short mh_type; /* type of data in this mbuf */ 158 short mh_type; /* type of data in this mbuf */
159}; 159};
160 160
161/* 161/*
162 * record/packet header in first mbuf of chain; valid if M_PKTHDR set 162 * record/packet header in first mbuf of chain; valid if M_PKTHDR set
163 * 163 *
164 * A note about csum_data: 164 * A note about csum_data:
165 * 165 *
166 * o For the out-bound direction, the low 16 bits indicates the offset after 166 * o For the out-bound direction, the low 16 bits indicates the offset after
167 * the L4 header where the final L4 checksum value is to be stored and the 167 * the L4 header where the final L4 checksum value is to be stored and the
168 * high 16 bits is the length of the L3 header (the start of the data to 168 * high 16 bits is the length of the L3 header (the start of the data to
169 * be checksummed). 169 * be checksummed).
170 * 170 *
171 * o For the in-bound direction, it is only valid if the M_CSUM_DATA flag is 171 * o For the in-bound direction, it is only valid if the M_CSUM_DATA flag is
172 * set. In this case, an L4 checksum has been calculated by hardware and 172 * set. In this case, an L4 checksum has been calculated by hardware and
173 * is stored in csum_data, but it is up to software to perform final 173 * is stored in csum_data, but it is up to software to perform final
174 * verification. 174 * verification.
175 * 175 *
176 * Note for in-bound TCP/UDP checksums: we expect the csum_data to NOT 176 * Note for in-bound TCP/UDP checksums: we expect the csum_data to NOT
177 * be bit-wise inverted (the final step in the calculation of an IP 177 * be bit-wise inverted (the final step in the calculation of an IP
178 * checksum) -- this is so we can accumulate the checksum for fragmented 178 * checksum) -- this is so we can accumulate the checksum for fragmented
179 * packets during reassembly. 179 * packets during reassembly.
180 * 180 *
181 * Size ILP32: 40 181 * Size ILP32: 40
182 * LP64: 56 182 * LP64: 56
183 */ 183 */
184struct pkthdr { 184struct pkthdr {
185 union { 185 union {
186 void *ctx; /* for M_GETCTX/M_SETCTX */ 186 void *ctx; /* for M_GETCTX/M_SETCTX */
187 if_index_t index; /* rcv interface index */ 187 if_index_t index; /* rcv interface index */
188 } _rcvif; 188 } _rcvif;
189#define rcvif_index _rcvif.index 189#define rcvif_index _rcvif.index
190 SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */ 190 SLIST_HEAD(packet_tags, m_tag) tags; /* list of packet tags */
191 int len; /* total packet length */ 191 int len; /* total packet length */
192 int csum_flags; /* checksum flags */ 192 int csum_flags; /* checksum flags */
193 uint32_t csum_data; /* checksum data */ 193 uint32_t csum_data; /* checksum data */
194 u_int segsz; /* segment size */ 194 u_int segsz; /* segment size */
195 uint16_t ether_vtag; /* ethernet 802.1p+q vlan tag */ 195 uint16_t ether_vtag; /* ethernet 802.1p+q vlan tag */
196 uint16_t pkthdr_flags; /* flags for pkthdr, see below */ 196 uint16_t pkthdr_flags; /* flags for pkthdr, see below */
197#define PKTHDR_FLAG_IPSEC_SKIP_PFIL 0x0001 /* skip pfil_run_hooks() after ipsec decrypt */ 197#define PKTHDR_FLAG_IPSEC_SKIP_PFIL 0x0001 /* skip pfil_run_hooks() after ipsec decrypt */
198 198
199 /* 199 /*
200 * Following three fields are open-coded struct altq_pktattr 200 * Following three fields are open-coded struct altq_pktattr
201 * to rearrange struct pkthdr fields flexibly. 201 * to rearrange struct pkthdr fields flexibly.
202 */ 202 */
203 int pattr_af; /* ALTQ: address family */ 203 int pattr_af; /* ALTQ: address family */
204 void *pattr_class; /* ALTQ: sched class set by classifier */ 204 void *pattr_class; /* ALTQ: sched class set by classifier */
205 void *pattr_hdr; /* ALTQ: saved header position in mbuf */ 205 void *pattr_hdr; /* ALTQ: saved header position in mbuf */
206}; 206};
207 207
208/* Checksumming flags (csum_flags). */ 208/* Checksumming flags (csum_flags). */
209#define M_CSUM_TCPv4 0x00000001 /* TCP header/payload */ 209#define M_CSUM_TCPv4 0x00000001 /* TCP header/payload */
210#define M_CSUM_UDPv4 0x00000002 /* UDP header/payload */ 210#define M_CSUM_UDPv4 0x00000002 /* UDP header/payload */
211#define M_CSUM_TCP_UDP_BAD 0x00000004 /* TCP/UDP checksum bad */ 211#define M_CSUM_TCP_UDP_BAD 0x00000004 /* TCP/UDP checksum bad */
212#define M_CSUM_DATA 0x00000008 /* consult csum_data */ 212#define M_CSUM_DATA 0x00000008 /* consult csum_data */
213#define M_CSUM_TCPv6 0x00000010 /* IPv6 TCP header/payload */ 213#define M_CSUM_TCPv6 0x00000010 /* IPv6 TCP header/payload */
214#define M_CSUM_UDPv6 0x00000020 /* IPv6 UDP header/payload */ 214#define M_CSUM_UDPv6 0x00000020 /* IPv6 UDP header/payload */
215#define M_CSUM_IPv4 0x00000040 /* IPv4 header */ 215#define M_CSUM_IPv4 0x00000040 /* IPv4 header */
216#define M_CSUM_IPv4_BAD 0x00000080 /* IPv4 header checksum bad */ 216#define M_CSUM_IPv4_BAD 0x00000080 /* IPv4 header checksum bad */
217#define M_CSUM_TSOv4 0x00000100 /* TCPv4 segmentation offload */ 217#define M_CSUM_TSOv4 0x00000100 /* TCPv4 segmentation offload */
218#define M_CSUM_TSOv6 0x00000200 /* TCPv6 segmentation offload */ 218#define M_CSUM_TSOv6 0x00000200 /* TCPv6 segmentation offload */
219 219
220/* Checksum-assist quirks: keep separate from jump-table bits. */ 220/* Checksum-assist quirks: keep separate from jump-table bits. */
221#define M_CSUM_BLANK 0x40000000 /* csum is missing */ 221#define M_CSUM_BLANK 0x40000000 /* csum is missing */
222#define M_CSUM_NO_PSEUDOHDR 0x80000000 /* Rx csum_data does not include 222#define M_CSUM_NO_PSEUDOHDR 0x80000000 /* Rx csum_data does not include
223 * the UDP/TCP pseudo-hdr, and 223 * the UDP/TCP pseudo-hdr, and
224 * is not yet 1s-complemented. 224 * is not yet 1s-complemented.
225 */ 225 */
226 226
227#define M_CSUM_BITS \ 227#define M_CSUM_BITS \
228 "\20\1TCPv4\2UDPv4\3TCP_UDP_BAD\4DATA\5TCPv6\6UDPv6\7IPv4\10IPv4_BAD" \ 228 "\20\1TCPv4\2UDPv4\3TCP_UDP_BAD\4DATA\5TCPv6\6UDPv6\7IPv4\10IPv4_BAD" \
229 "\11TSOv4\12TSOv6\39BLANK\40NO_PSEUDOHDR" 229 "\11TSOv4\12TSOv6\39BLANK\40NO_PSEUDOHDR"
230 230
231/* 231/*
232 * Macros for manipulating csum_data on outgoing packets. These are 232 * Macros for manipulating csum_data on outgoing packets. These are
233 * used to pass information down from the L4/L3 to the L2. 233 * used to pass information down from the L4/L3 to the L2.
234 * 234 *
235 * _IPHL: Length of the IPv{4/6} header, plus the options; in other 235 * _IPHL: Length of the IPv{4/6} header, plus the options; in other
236 * words the offset of the UDP/TCP header in the packet. 236 * words the offset of the UDP/TCP header in the packet.
237 * _OFFSET: Offset of the checksum field in the UDP/TCP header. 237 * _OFFSET: Offset of the checksum field in the UDP/TCP header.
238 */ 238 */
239#define M_CSUM_DATA_IPv4_IPHL(x) ((x) >> 16) 239#define M_CSUM_DATA_IPv4_IPHL(x) ((x) >> 16)
240#define M_CSUM_DATA_IPv4_OFFSET(x) ((x) & 0xffff) 240#define M_CSUM_DATA_IPv4_OFFSET(x) ((x) & 0xffff)
241#define M_CSUM_DATA_IPv6_IPHL(x) ((x) >> 16) 241#define M_CSUM_DATA_IPv6_IPHL(x) ((x) >> 16)
242#define M_CSUM_DATA_IPv6_OFFSET(x) ((x) & 0xffff) 242#define M_CSUM_DATA_IPv6_OFFSET(x) ((x) & 0xffff)
243#define M_CSUM_DATA_IPv6_SET(x, v) (x) = ((x) & 0xffff) | ((v) << 16) 243#define M_CSUM_DATA_IPv6_SET(x, v) (x) = ((x) & 0xffff) | ((v) << 16)
244 244
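/*
 * Illustrative sketch, not part of this header: how an outbound IPv4 UDP
 * packet would encode its checksum request so that the accessor macros
 * above recover it -- IP header length in the high 16 bits, offset of the
 * checksum field within the UDP header in the low 16 bits.  The real call
 * sites in the stack are not shown in this diff; this only demonstrates
 * the packing implied by the macros.
 */
	m->m_pkthdr.csum_flags = M_CSUM_UDPv4;
	m->m_pkthdr.csum_data =
	    (sizeof(struct ip) << 16) | offsetof(struct udphdr, uh_sum);

	/* A driver can later recover both halves: */
	int iphl   = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
	int offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data);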
245/* 245/*
246 * Max # of pages we can attach to m_ext. This is carefully chosen 246 * Max # of pages we can attach to m_ext. This is carefully chosen
247 * to be able to handle SOSEND_LOAN_CHUNK with our minimum sized page. 247 * to be able to handle SOSEND_LOAN_CHUNK with our minimum sized page.
248 */ 248 */
249#ifdef MIN_PAGE_SIZE 249#ifdef MIN_PAGE_SIZE
250#define M_EXT_MAXPAGES ((65536 / MIN_PAGE_SIZE) + 1) 250#define M_EXT_MAXPAGES ((65536 / MIN_PAGE_SIZE) + 1)
251#endif 251#endif
252 252
253/* 253/*
254 * Description of external storage mapped into mbuf, valid if M_EXT set. 254 * Description of external storage mapped into mbuf, valid if M_EXT set.
255 */ 255 */
256struct _m_ext_storage { 256struct _m_ext_storage {
257 unsigned int ext_refcnt; 257 unsigned int ext_refcnt;
258 char *ext_buf; /* start of buffer */ 258 char *ext_buf; /* start of buffer */
259 void (*ext_free) /* free routine if not the usual */ 259 void (*ext_free) /* free routine if not the usual */
260 (struct mbuf *, void *, size_t, void *); 260 (struct mbuf *, void *, size_t, void *);
261 void *ext_arg; /* argument for ext_free */ 261 void *ext_arg; /* argument for ext_free */
262 size_t ext_size; /* size of buffer, for ext_free */ 262 size_t ext_size; /* size of buffer, for ext_free */
263 263
264 union { 264 union {
265 /* M_EXT_CLUSTER: physical address */ 265 /* M_EXT_CLUSTER: physical address */
266 paddr_t extun_paddr; 266 paddr_t extun_paddr;
267#ifdef M_EXT_MAXPAGES 267#ifdef M_EXT_MAXPAGES
268 /* M_EXT_PAGES: pages */ 268 /* M_EXT_PAGES: pages */
269 struct vm_page *extun_pgs[M_EXT_MAXPAGES]; 269 struct vm_page *extun_pgs[M_EXT_MAXPAGES];
270#endif 270#endif
271 } ext_un; 271 } ext_un;
272#define ext_paddr ext_un.extun_paddr 272#define ext_paddr ext_un.extun_paddr
273#define ext_pgs ext_un.extun_pgs 273#define ext_pgs ext_un.extun_pgs
274}; 274};
275 275
276struct _m_ext { 276struct _m_ext {
277 struct mbuf *ext_ref; 277 struct mbuf *ext_ref;
278 struct _m_ext_storage ext_storage; 278 struct _m_ext_storage ext_storage;
279}; 279};
280 280
281#define M_PADDR_INVALID POOL_PADDR_INVALID 281#define M_PADDR_INVALID POOL_PADDR_INVALID
282 282
283/* 283/*
284 * Definition of "struct mbuf". 284 * Definition of "struct mbuf".
285 * Don't change this without understanding how MHLEN/MLEN are defined. 285 * Don't change this without understanding how MHLEN/MLEN are defined.
286 */ 286 */
287#define MBUF_DEFINE(name, mhlen, mlen) \ 287#define MBUF_DEFINE(name, mhlen, mlen) \
288 struct name { \ 288 struct name { \
289 struct m_hdr m_hdr; \ 289 struct m_hdr m_hdr; \
290 union { \ 290 union { \
291 struct { \ 291 struct { \
292 struct pkthdr MH_pkthdr; \ 292 struct pkthdr MH_pkthdr; \
293 union { \ 293 union { \
294 struct _m_ext MH_ext; \ 294 struct _m_ext MH_ext; \
295 char MH_databuf[(mhlen)]; \ 295 char MH_databuf[(mhlen)]; \
296 } MH_dat; \ 296 } MH_dat; \
297 } MH; \ 297 } MH; \
298 char M_databuf[(mlen)]; \ 298 char M_databuf[(mlen)]; \
299 } M_dat; \ 299 } M_dat; \
300 } 300 }
301#define m_next m_hdr.mh_next 301#define m_next m_hdr.mh_next
302#define m_len m_hdr.mh_len 302#define m_len m_hdr.mh_len
303#define m_data m_hdr.mh_data 303#define m_data m_hdr.mh_data
304#define m_owner m_hdr.mh_owner 304#define m_owner m_hdr.mh_owner
305#define m_type m_hdr.mh_type 305#define m_type m_hdr.mh_type
306#define m_flags m_hdr.mh_flags 306#define m_flags m_hdr.mh_flags
307#define m_nextpkt m_hdr.mh_nextpkt 307#define m_nextpkt m_hdr.mh_nextpkt
308#define m_paddr m_hdr.mh_paddr 308#define m_paddr m_hdr.mh_paddr
309#define m_pkthdr M_dat.MH.MH_pkthdr 309#define m_pkthdr M_dat.MH.MH_pkthdr
310#define m_ext_storage M_dat.MH.MH_dat.MH_ext.ext_storage 310#define m_ext_storage M_dat.MH.MH_dat.MH_ext.ext_storage
311#define m_ext_ref M_dat.MH.MH_dat.MH_ext.ext_ref 311#define m_ext_ref M_dat.MH.MH_dat.MH_ext.ext_ref
312#define m_ext m_ext_ref->m_ext_storage 312#define m_ext m_ext_ref->m_ext_storage
313#define m_pktdat M_dat.MH.MH_dat.MH_databuf 313#define m_pktdat M_dat.MH.MH_dat.MH_databuf
314#define m_dat M_dat.M_databuf 314#define m_dat M_dat.M_databuf
315 315
316/* 316/*
317 * Dummy mbuf structure to calculate the right values for MLEN/MHLEN, taking 317 * Dummy mbuf structure to calculate the right values for MLEN/MHLEN, taking
318 * into account inter-structure padding. 318 * into account inter-structure padding.
319 */ 319 */
320MBUF_DEFINE(_mbuf_dummy, 1, 1); 320MBUF_DEFINE(_mbuf_dummy, 1, 1);
321 321
322/* normal data len */ 322/* normal data len */
323#define MLEN ((int)(MSIZE - offsetof(struct _mbuf_dummy, m_dat))) 323#define MLEN ((int)(MSIZE - offsetof(struct _mbuf_dummy, m_dat)))
324/* data len w/pkthdr */ 324/* data len w/pkthdr */
325#define MHLEN ((int)(MSIZE - offsetof(struct _mbuf_dummy, m_pktdat))) 325#define MHLEN ((int)(MSIZE - offsetof(struct _mbuf_dummy, m_pktdat)))
326 326
327#define MINCLSIZE (MHLEN+MLEN+1) /* smallest amount to put in cluster */ 327#define MINCLSIZE (MHLEN+MLEN+1) /* smallest amount to put in cluster */
328 328
329/* 329/*
330 * The *real* struct mbuf 330 * The *real* struct mbuf
331 */ 331 */
332MBUF_DEFINE(mbuf, MHLEN, MLEN); 332MBUF_DEFINE(mbuf, MHLEN, MLEN);
333 333
334/* mbuf flags */ 334/* mbuf flags */
335#define M_EXT 0x00000001 /* has associated external storage */ 335#define M_EXT 0x00000001 /* has associated external storage */
336#define M_PKTHDR 0x00000002 /* start of record */ 336#define M_PKTHDR 0x00000002 /* start of record */
337#define M_EOR 0x00000004 /* end of record */ 337#define M_EOR 0x00000004 /* end of record */
338#define M_PROTO1 0x00000008 /* protocol-specific */ 338#define M_PROTO1 0x00000008 /* protocol-specific */
339 339
340/* mbuf pkthdr flags, also in m_flags */ 340/* mbuf pkthdr flags, also in m_flags */
341#define M_AUTHIPHDR 0x00000010 /* authenticated (IPsec) */ 341#define M_AUTHIPHDR 0x00000010 /* authenticated (IPsec) */
342#define M_DECRYPTED 0x00000020 /* decrypted (IPsec) */ 342#define M_DECRYPTED 0x00000020 /* decrypted (IPsec) */
343#define M_LOOP 0x00000040 /* received on loopback */ 343#define M_LOOP 0x00000040 /* received on loopback */
344#define M_BCAST 0x00000100 /* send/received as L2 broadcast */ 344#define M_BCAST 0x00000100 /* send/received as L2 broadcast */
345#define M_MCAST 0x00000200 /* send/received as L2 multicast */ 345#define M_MCAST 0x00000200 /* send/received as L2 multicast */
346#define M_CANFASTFWD 0x00000400 /* packet can be fast-forwarded */ 346#define M_CANFASTFWD 0x00000400 /* packet can be fast-forwarded */
347#define M_ANYCAST6 0x00000800 /* received as IPv6 anycast */ 347#define M_ANYCAST6 0x00000800 /* received as IPv6 anycast */
348 348
349#define M_LINK0 0x00001000 /* link layer specific flag */ 349#define M_LINK0 0x00001000 /* link layer specific flag */
350#define M_LINK1 0x00002000 /* link layer specific flag */ 350#define M_LINK1 0x00002000 /* link layer specific flag */
351#define M_LINK2 0x00004000 /* link layer specific flag */ 351#define M_LINK2 0x00004000 /* link layer specific flag */
352#define M_LINK3 0x00008000 /* link layer specific flag */ 352#define M_LINK3 0x00008000 /* link layer specific flag */
353#define M_LINK4 0x00010000 /* link layer specific flag */ 353#define M_LINK4 0x00010000 /* link layer specific flag */
354#define M_LINK5 0x00020000 /* link layer specific flag */ 354#define M_LINK5 0x00020000 /* link layer specific flag */
355#define M_LINK6 0x00040000 /* link layer specific flag */ 355#define M_LINK6 0x00040000 /* link layer specific flag */
356#define M_LINK7 0x00080000 /* link layer specific flag */ 356#define M_LINK7 0x00080000 /* link layer specific flag */
357 357
358#define M_VLANTAG 0x00100000 /* ether_vtag is valid */ 358#define M_VLANTAG 0x00100000 /* ether_vtag is valid */
359 359
360/* additional flags for M_EXT mbufs */ 360/* additional flags for M_EXT mbufs */
361#define M_EXT_FLAGS 0xff000000 361#define M_EXT_FLAGS 0xff000000
362#define M_EXT_CLUSTER 0x01000000 /* ext is a cluster */ 362#define M_EXT_CLUSTER 0x01000000 /* ext is a cluster */
363#define M_EXT_PAGES 0x02000000 /* ext_pgs is valid */ 363#define M_EXT_PAGES 0x02000000 /* ext_pgs is valid */
364#define M_EXT_ROMAP 0x04000000 /* ext mapping is r-o at MMU */ 364#define M_EXT_ROMAP 0x04000000 /* ext mapping is r-o at MMU */
365#define M_EXT_RW 0x08000000 /* ext storage is writable */ 365#define M_EXT_RW 0x08000000 /* ext storage is writable */
366 366
367/* for source-level compatibility */ 367/* for source-level compatibility */
368#define M_NOTIFICATION M_PROTO1 368#define M_NOTIFICATION M_PROTO1
369 369
370#define M_FLAGS_BITS \ 370#define M_FLAGS_BITS \
371 "\20\1EXT\2PKTHDR\3EOR\4PROTO1\5AUTHIPHDR\6DECRYPTED\7LOOP\10NONE" \ 371 "\20\1EXT\2PKTHDR\3EOR\4PROTO1\5AUTHIPHDR\6DECRYPTED\7LOOP\10NONE" \
372 "\11BCAST\12MCAST\13CANFASTFWD\14ANYCAST6\15LINK0\16LINK1\17LINK2\20LINK3" \ 372 "\11BCAST\12MCAST\13CANFASTFWD\14ANYCAST6\15LINK0\16LINK1\17LINK2\20LINK3" \
373 "\21LINK4\22LINK5\23LINK6\24LINK7" \ 373 "\21LINK4\22LINK5\23LINK6\24LINK7" \
374 "\25VLANTAG" \ 374 "\25VLANTAG" \
375 "\31EXT_CLUSTER\32EXT_PAGES\33EXT_ROMAP\34EXT_RW" 375 "\31EXT_CLUSTER\32EXT_PAGES\33EXT_ROMAP\34EXT_RW"
376 376
377/* flags copied when copying m_pkthdr */ 377/* flags copied when copying m_pkthdr */
378#define M_COPYFLAGS (M_PKTHDR|M_EOR|M_BCAST|M_MCAST|M_CANFASTFWD| \ 378#define M_COPYFLAGS (M_PKTHDR|M_EOR|M_BCAST|M_MCAST|M_CANFASTFWD| \
379 M_ANYCAST6|M_LINK0|M_LINK1|M_LINK2|M_AUTHIPHDR|M_DECRYPTED|M_LOOP| \ 379 M_ANYCAST6|M_LINK0|M_LINK1|M_LINK2|M_AUTHIPHDR|M_DECRYPTED|M_LOOP| \
380 M_VLANTAG) 380 M_VLANTAG)
381 381
382/* flag copied when shallow-copying external storage */ 382/* flag copied when shallow-copying external storage */
383#define M_EXTCOPYFLAGS (M_EXT|M_EXT_FLAGS) 383#define M_EXTCOPYFLAGS (M_EXT|M_EXT_FLAGS)
384 384
385/* mbuf types */ 385/* mbuf types */
386#define MT_FREE 0 /* should be on free list */ 386#define MT_FREE 0 /* should be on free list */
387#define MT_DATA 1 /* dynamic (data) allocation */ 387#define MT_DATA 1 /* dynamic (data) allocation */
388#define MT_HEADER 2 /* packet header */ 388#define MT_HEADER 2 /* packet header */
389#define MT_SONAME 3 /* socket name */ 389#define MT_SONAME 3 /* socket name */
390#define MT_SOOPTS 4 /* socket options */ 390#define MT_SOOPTS 4 /* socket options */
391#define MT_FTABLE 5 /* fragment reassembly header */ 391#define MT_FTABLE 5 /* fragment reassembly header */
392#define MT_CONTROL 6 /* extra-data protocol message */ 392#define MT_CONTROL 6 /* extra-data protocol message */
393#define MT_OOBDATA 7 /* expedited data */ 393#define MT_OOBDATA 7 /* expedited data */
394 394
395#ifdef MBUFTYPES 395#ifdef MBUFTYPES
396const char * const mbuftypes[] = { 396const char * const mbuftypes[] = {
397 "mbfree", 397 "mbfree",
398 "mbdata", 398 "mbdata",
399 "mbheader", 399 "mbheader",
400 "mbsoname", 400 "mbsoname",
401 "mbsopts", 401 "mbsopts",
402 "mbftable", 402 "mbftable",
403 "mbcontrol", 403 "mbcontrol",
404 "mboobdata", 404 "mboobdata",
405}; 405};
406#else 406#else
407extern const char * const mbuftypes[]; 407extern const char * const mbuftypes[];
408#endif 408#endif
409 409
410/* flags to m_get/MGET */ 410/* flags to m_get/MGET */
411#define M_DONTWAIT M_NOWAIT 411#define M_DONTWAIT M_NOWAIT
412#define M_WAIT M_WAITOK 412#define M_WAIT M_WAITOK
413 413
414#ifdef MBUFTRACE 414#ifdef MBUFTRACE
415/* Mbuf allocation tracing. */ 415/* Mbuf allocation tracing. */
416void mowner_init_owner(struct mowner *, const char *, const char *); 416void mowner_init_owner(struct mowner *, const char *, const char *);
417void mowner_init(struct mbuf *, int); 417void mowner_init(struct mbuf *, int);
418void mowner_ref(struct mbuf *, int); 418void mowner_ref(struct mbuf *, int);
419void m_claim(struct mbuf *, struct mowner *); 419void m_claim(struct mbuf *, struct mowner *);
420void mowner_revoke(struct mbuf *, bool, int); 420void mowner_revoke(struct mbuf *, bool, int);
421void mowner_attach(struct mowner *); 421void mowner_attach(struct mowner *);
422void mowner_detach(struct mowner *); 422void mowner_detach(struct mowner *);
423void m_claimm(struct mbuf *, struct mowner *); 423void m_claimm(struct mbuf *, struct mowner *);
424#else 424#else
425#define mowner_init_owner(mo, n, d) __nothing 425#define mowner_init_owner(mo, n, d) __nothing
426#define mowner_init(m, type) __nothing 426#define mowner_init(m, type) __nothing
427#define mowner_ref(m, flags) __nothing 427#define mowner_ref(m, flags) __nothing
428#define mowner_revoke(m, all, flags) __nothing 428#define mowner_revoke(m, all, flags) __nothing
429#define m_claim(m, mowner) __nothing 429#define m_claim(m, mowner) __nothing
430#define mowner_attach(mo) __nothing 430#define mowner_attach(mo) __nothing
431#define mowner_detach(mo) __nothing 431#define mowner_detach(mo) __nothing
432#define m_claimm(m, mo) __nothing 432#define m_claimm(m, mo) __nothing
433#endif 433#endif
434 434
435#define MCLAIM(m, mo) m_claim((m), (mo)) 435#define MCLAIM(m, mo) m_claim((m), (mo))
436#define MOWNER_ATTACH(mo) mowner_attach(mo) 436#define MOWNER_ATTACH(mo) mowner_attach(mo)
437#define MOWNER_DETACH(mo) mowner_detach(mo) 437#define MOWNER_DETACH(mo) mowner_detach(mo)
438 438
439/* 439/*
440 * mbuf allocation/deallocation macros: 440 * mbuf allocation/deallocation macros:
441 * 441 *
442 * MGET(struct mbuf *m, int how, int type) 442 * MGET(struct mbuf *m, int how, int type)
443 * allocates an mbuf and initializes it to contain internal data. 443 * allocates an mbuf and initializes it to contain internal data.
444 * 444 *
445 * MGETHDR(struct mbuf *m, int how, int type) 445 * MGETHDR(struct mbuf *m, int how, int type)
446 * allocates an mbuf and initializes it to contain a packet header 446 * allocates an mbuf and initializes it to contain a packet header
447 * and internal data. 447 * and internal data.
448 * 448 *
449 * If 'how' is M_WAIT, these macros (and the corresponding functions) 449 * If 'how' is M_WAIT, these macros (and the corresponding functions)
450 * are guaranteed to return successfully. 450 * are guaranteed to return successfully.
451 */ 451 */
452#define MGET(m, how, type) m = m_get((how), (type)) 452#define MGET(m, how, type) m = m_get((how), (type))
453#define MGETHDR(m, how, type) m = m_gethdr((how), (type)) 453#define MGETHDR(m, how, type) m = m_gethdr((how), (type))
454 454
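A minimal allocation sketch using MGETHDR together with MCLGET (defined below) and m_freem, following the usual "add a cluster when the payload does not fit in MHLEN" pattern; "len" is an assumed local from the surrounding code.

	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	if (len > MHLEN) {
		/* payload does not fit in the header mbuf; add a cluster */
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return ENOBUFS;
		}
	}
	m->m_len = m->m_pkthdr.len = len;
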
455#if defined(_KERNEL) 455#if defined(_KERNEL)
456 456
457#define MCLINITREFERENCE(m) \ 457#define MCLINITREFERENCE(m) \
458do { \ 458do { \
459 KASSERT(((m)->m_flags & M_EXT) == 0); \ 459 KASSERT(((m)->m_flags & M_EXT) == 0); \
460 (m)->m_ext_ref = (m); \ 460 (m)->m_ext_ref = (m); \
461 (m)->m_ext.ext_refcnt = 1; \ 461 (m)->m_ext.ext_refcnt = 1; \
462} while (/* CONSTCOND */ 0) 462} while (/* CONSTCOND */ 0)
463 463
464/* 464/*
465 * Macros for mbuf external storage. 465 * Macros for mbuf external storage.
466 * 466 *
467 * MCLGET allocates and adds an mbuf cluster to a normal mbuf; 467 * MCLGET allocates and adds an mbuf cluster to a normal mbuf;
468 * the flag M_EXT is set upon success. 468 * the flag M_EXT is set upon success.
469 * 469 *
470 * MEXTMALLOC allocates external storage and adds it to 470 * MEXTMALLOC allocates external storage and adds it to
471 * a normal mbuf; the flag M_EXT is set upon success. 471 * a normal mbuf; the flag M_EXT is set upon success.
472 * 472 *
473 * MEXTADD adds pre-allocated external storage to 473 * MEXTADD adds pre-allocated external storage to
474 * a normal mbuf; the flag M_EXT is set upon success. 474 * a normal mbuf; the flag M_EXT is set upon success.
475 */ 475 */
476 476
477#define MCLGET(m, how) m_clget((m), (how)) 477#define MCLGET(m, how) m_clget((m), (how))
478 478
479#define MEXTMALLOC(m, size, how) \ 479#define MEXTMALLOC(m, size, how) \
480do { \ 480do { \
481 (m)->m_ext_storage.ext_buf = malloc((size), 0, (how)); \ 481 (m)->m_ext_storage.ext_buf = malloc((size), 0, (how)); \
482 if ((m)->m_ext_storage.ext_buf != NULL) { \ 482 if ((m)->m_ext_storage.ext_buf != NULL) { \
483 MCLINITREFERENCE(m); \ 483 MCLINITREFERENCE(m); \
484 (m)->m_data = (m)->m_ext.ext_buf; \ 484 (m)->m_data = (m)->m_ext.ext_buf; \
485 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | \ 485 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | \
486 M_EXT|M_EXT_RW; \ 486 M_EXT|M_EXT_RW; \
487 (m)->m_ext.ext_size = (size); \ 487 (m)->m_ext.ext_size = (size); \
488 (m)->m_ext.ext_free = NULL; \ 488 (m)->m_ext.ext_free = NULL; \
489 (m)->m_ext.ext_arg = NULL; \ 489 (m)->m_ext.ext_arg = NULL; \
490 mowner_ref((m), M_EXT); \ 490 mowner_ref((m), M_EXT); \
491 } \ 491 } \
492} while (/* CONSTCOND */ 0) 492} while (/* CONSTCOND */ 0)
493 493
494#define MEXTADD(m, buf, size, type, free, arg) \ 494#define MEXTADD(m, buf, size, type, free, arg) \
495do { \ 495do { \
496 MCLINITREFERENCE(m); \ 496 MCLINITREFERENCE(m); \
497 (m)->m_data = (m)->m_ext.ext_buf = (char *)(buf); \ 497 (m)->m_data = (m)->m_ext.ext_buf = (char *)(buf); \
498 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | M_EXT; \ 498 (m)->m_flags = ((m)->m_flags & ~M_EXTCOPYFLAGS) | M_EXT; \
499 (m)->m_ext.ext_size = (size); \ 499 (m)->m_ext.ext_size = (size); \
500 (m)->m_ext.ext_free = (free); \ 500 (m)->m_ext.ext_free = (free); \
501 (m)->m_ext.ext_arg = (arg); \ 501 (m)->m_ext.ext_arg = (arg); \
502 mowner_ref((m), M_EXT); \ 502 mowner_ref((m), M_EXT); \
503} while (/* CONSTCOND */ 0) 503} while (/* CONSTCOND */ 0)
504 504
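A sketch of attaching driver-owned storage with MEXTADD. The free routine must match the ext_free signature shown in struct _m_ext_storage above; example_softc, example_buf_put(), "buf" and "buflen" are hypothetical, and the pool_cache_put() of the mbuf follows the common in-tree convention that an ext_free callback also disposes of the mbuf it is handed.

	static void
	example_ext_free(struct mbuf *m, void *buf, size_t size, void *arg)
	{
		struct example_softc *sc = arg;

		example_buf_put(sc, buf, size);		/* return buffer to driver pool */
		if (m != NULL)
			pool_cache_put(mb_cache, m);	/* callback frees the mbuf too */
	}

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MEXTADD(m, buf, buflen, MT_DATA, example_ext_free, sc);
		m->m_len = m->m_pkthdr.len = buflen;
	}
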
505#define M_BUFADDR(m) \ 505#define M_BUFADDR(m) \
506 (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \ 506 (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \
507 ((m)->m_flags & M_PKTHDR) ? (m)->m_pktdat : (m)->m_dat) 507 ((m)->m_flags & M_PKTHDR) ? (m)->m_pktdat : (m)->m_dat)
508 508
509#define M_BUFSIZE(m) \ 509#define M_BUFSIZE(m) \
510 (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ 510 (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \
511 ((m)->m_flags & M_PKTHDR) ? MHLEN : MLEN) 511 ((m)->m_flags & M_PKTHDR) ? MHLEN : MLEN)
512 512
513#define MRESETDATA(m) (m)->m_data = M_BUFADDR(m) 513#define MRESETDATA(m) (m)->m_data = M_BUFADDR(m)
514 514
515/* 515/*
516 * Compute the offset of the beginning of the data buffer of a non-ext 516 * Compute the offset of the beginning of the data buffer of a non-ext
517 * mbuf. 517 * mbuf.
518 */ 518 */
519#define M_BUFOFFSET(m) \ 519#define M_BUFOFFSET(m) \
520 (((m)->m_flags & M_PKTHDR) ? \ 520 (((m)->m_flags & M_PKTHDR) ? \
521 offsetof(struct mbuf, m_pktdat) : offsetof(struct mbuf, m_dat)) 521 offsetof(struct mbuf, m_pktdat) : offsetof(struct mbuf, m_dat))
522 522
523/* 523/*
524 * Determine if an mbuf's data area is read-only. This is true 524 * Determine if an mbuf's data area is read-only. This is true
525 * if external storage is read-only mapped, or not marked as R/W, 525 * if external storage is read-only mapped, or not marked as R/W,
526 * or referenced by more than one mbuf. 526 * or referenced by more than one mbuf.
527 */ 527 */
528#define M_READONLY(m) \ 528#define M_READONLY(m) \
529 (((m)->m_flags & M_EXT) != 0 && \ 529 (((m)->m_flags & M_EXT) != 0 && \
530 (((m)->m_flags & (M_EXT_ROMAP|M_EXT_RW)) != M_EXT_RW || \ 530 (((m)->m_flags & (M_EXT_ROMAP|M_EXT_RW)) != M_EXT_RW || \
531 (m)->m_ext.ext_refcnt > 1)) 531 (m)->m_ext.ext_refcnt > 1))
532 532
533#define M_UNWRITABLE(__m, __len) \ 533#define M_UNWRITABLE(__m, __len) \
534 ((__m)->m_len < (__len) || M_READONLY((__m))) 534 ((__m)->m_len < (__len) || M_READONLY((__m)))
535 535
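A sketch of the usual check before modifying a header in place. When the test fires, m_copyup (declared further down, and used the same way by m_get_aligned_hdr below) is one way to obtain a private, writable, contiguous copy; the IP-header example and the "chain is gone on failure" assumption mirror how this header's own helpers treat a NULL return.

	struct ip *ip;

	/* get a private, writable, contiguous copy of the header if needed */
	if (M_UNWRITABLE(m, sizeof(struct ip))) {
		m = m_copyup(m, sizeof(struct ip), 0);
		if (m == NULL)
			return;		/* allocation failed, chain is gone */
	}
	ip = mtod(m, struct ip *);
	ip->ip_ttl--;			/* safe to modify in place now */
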
536/* 536/*
537 * Determine if an mbuf's data area is read-only at the MMU. 537 * Determine if an mbuf's data area is read-only at the MMU.
538 */ 538 */
539#define M_ROMAP(m) \ 539#define M_ROMAP(m) \
540 (((m)->m_flags & (M_EXT|M_EXT_ROMAP)) == (M_EXT|M_EXT_ROMAP)) 540 (((m)->m_flags & (M_EXT|M_EXT_ROMAP)) == (M_EXT|M_EXT_ROMAP))
541 541
542/* 542/*
543 * Compute the amount of space available before the current start of 543 * Compute the amount of space available before the current start of
544 * data in an mbuf. 544 * data in an mbuf.
545 */ 545 */
546#define M_LEADINGSPACE(m) \ 546#define M_LEADINGSPACE(m) \
547 (M_READONLY((m)) ? 0 : ((m)->m_data - M_BUFADDR(m))) 547 (M_READONLY((m)) ? 0 : ((m)->m_data - M_BUFADDR(m)))
548 548
549/* 549/*
550 * Compute the amount of space available 550 * Compute the amount of space available
551 * after the end of data in an mbuf. 551 * after the end of data in an mbuf.
552 */ 552 */
553#define _M_TRAILINGSPACE(m) \ 553#define _M_TRAILINGSPACE(m) \
554 ((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size - \ 554 ((m)->m_flags & M_EXT ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size - \
555 ((m)->m_data + (m)->m_len) : \ 555 ((m)->m_data + (m)->m_len) : \
556 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len)) 556 &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
557 557
558#define M_TRAILINGSPACE(m) \ 558#define M_TRAILINGSPACE(m) \
559 (M_READONLY((m)) ? 0 : _M_TRAILINGSPACE((m))) 559 (M_READONLY((m)) ? 0 : _M_TRAILINGSPACE((m)))
560 560
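A sketch of appending data in place only when the mbuf is writable and has room, assuming a single-mbuf packet; "pad" and "padlen" are illustrative.

	if (M_TRAILINGSPACE(m) >= padlen) {
		memcpy(mtod(m, char *) + m->m_len, pad, padlen);
		m->m_len += padlen;
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len += padlen;
	}
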
561/* 561/*
562 * Arrange to prepend space of size plen to mbuf m. 562 * Arrange to prepend space of size plen to mbuf m.
563 * If a new mbuf must be allocated, how specifies whether to wait. 563 * If a new mbuf must be allocated, how specifies whether to wait.
564 * If how is M_DONTWAIT and allocation fails, the original mbuf chain 564 * If how is M_DONTWAIT and allocation fails, the original mbuf chain
565 * is freed and m is set to NULL. 565 * is freed and m is set to NULL.
566 */ 566 */
567#define M_PREPEND(m, plen, how) \ 567#define M_PREPEND(m, plen, how) \
568do { \ 568do { \
569 if (M_LEADINGSPACE(m) >= (plen)) { \ 569 if (M_LEADINGSPACE(m) >= (plen)) { \
570 (m)->m_data -= (plen); \ 570 (m)->m_data -= (plen); \
571 (m)->m_len += (plen); \ 571 (m)->m_len += (plen); \
572 } else \ 572 } else \
573 (m) = m_prepend((m), (plen), (how)); \ 573 (m) = m_prepend((m), (plen), (how)); \
574 if ((m) && (m)->m_flags & M_PKTHDR) \ 574 if ((m) && (m)->m_flags & M_PKTHDR) \
575 (m)->m_pkthdr.len += (plen); \ 575 (m)->m_pkthdr.len += (plen); \
576} while (/* CONSTCOND */ 0) 576} while (/* CONSTCOND */ 0)
577 577
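The canonical use of M_PREPEND is adding a link-layer header on output; a minimal sketch, with the Ethernet header fill-in elided.

	struct ether_header *eh;

	M_PREPEND(m, sizeof(*eh), M_DONTWAIT);
	if (m == NULL)
		return ENOBUFS;		/* original chain already freed */
	eh = mtod(m, struct ether_header *);
	/* ... fill in eh->ether_dhost, eh->ether_shost, eh->ether_type ... */
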
578/* change mbuf to new type */ 578/* change mbuf to new type */
579#define MCHTYPE(m, t) \ 579#define MCHTYPE(m, t) \
580do { \ 580do { \
581 KASSERT((t) != MT_FREE); \ 581 KASSERT((t) != MT_FREE); \
582 mbstat_type_add((m)->m_type, -1); \ 582 mbstat_type_add((m)->m_type, -1); \
583 mbstat_type_add(t, 1); \ 583 mbstat_type_add(t, 1); \
584 (m)->m_type = t; \ 584 (m)->m_type = t; \
585} while (/* CONSTCOND */ 0) 585} while (/* CONSTCOND */ 0)
586 586
587#ifdef DIAGNOSTIC 587#ifdef DIAGNOSTIC
588#define M_VERIFY_PACKET(m) m_verify_packet(m) 588#define M_VERIFY_PACKET(m) m_verify_packet(m)
589#else 589#else
590#define M_VERIFY_PACKET(m) __nothing 590#define M_VERIFY_PACKET(m) __nothing
591#endif 591#endif
592 592
593/* The "copy all" special length. */ 593/* The "copy all" special length. */
594#define M_COPYALL -1 594#define M_COPYALL -1
595 595
596/* 596/*
597 * Allow drivers and/or protocols to store private context information. 597 * Allow drivers and/or protocols to store private context information.
598 */ 598 */
599#define M_GETCTX(m, t) ((t)(m)->m_pkthdr._rcvif.ctx) 599#define M_GETCTX(m, t) ((t)(m)->m_pkthdr._rcvif.ctx)
600#define M_SETCTX(m, c) ((void)((m)->m_pkthdr._rcvif.ctx = (void *)(c))) 600#define M_SETCTX(m, c) ((void)((m)->m_pkthdr._rcvif.ctx = (void *)(c)))
601#define M_CLEARCTX(m) M_SETCTX((m), NULL) 601#define M_CLEARCTX(m) M_SETCTX((m), NULL)
602 602
603/* 603/*
604 * M_REGION_GET ensures that the "len"-sized region of type "typ" starting 604 * M_REGION_GET ensures that the "len"-sized region of type "typ" starting
605 * from "off" within "m" is located in a single mbuf, contiguously. 605 * from "off" within "m" is located in a single mbuf, contiguously.
606 * 606 *
607 * The pointer to the region is returned via the pointer variable "val". 608 * The pointer to the region is returned via the pointer variable "val".
608 */ 608 */
609#define M_REGION_GET(val, typ, m, off, len) \ 609#define M_REGION_GET(val, typ, m, off, len) \
610do { \ 610do { \
611 struct mbuf *_t; \ 611 struct mbuf *_t; \
612 int _tmp; \ 612 int _tmp; \
613 if ((m)->m_len >= (off) + (len)) \ 613 if ((m)->m_len >= (off) + (len)) \
614 (val) = (typ)(mtod((m), char *) + (off)); \ 614 (val) = (typ)(mtod((m), char *) + (off)); \
615 else { \ 615 else { \
616 _t = m_pulldown((m), (off), (len), &_tmp); \ 616 _t = m_pulldown((m), (off), (len), &_tmp); \
617 if (_t) { \ 617 if (_t) { \
618 if (_t->m_len < _tmp + (len)) \ 618 if (_t->m_len < _tmp + (len)) \
619 panic("m_pulldown malfunction"); \ 619 panic("m_pulldown malfunction"); \
620 (val) = (typ)(mtod(_t, char *) + _tmp); \ 620 (val) = (typ)(mtod(_t, char *) + _tmp); \
621 } else { \ 621 } else { \
622 (val) = (typ)NULL; \ 622 (val) = (typ)NULL; \
623 (m) = NULL; \ 623 (m) = NULL; \
624 } \ 624 } \
625 } \ 625 } \
626} while (/*CONSTCOND*/ 0) 626} while (/*CONSTCOND*/ 0)
627 627
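A sketch of pulling a contiguous TCP header out of a chain with M_REGION_GET; per the macro above, on failure both "val" and "m" come back NULL (m_pulldown has disposed of the chain). "iphlen" is an assumed local.

	struct tcphdr *th;

	M_REGION_GET(th, struct tcphdr *, m, iphlen, sizeof(*th));
	if (th == NULL)
		return;			/* m is NULL too */
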
628#endif /* defined(_KERNEL) */ 628#endif /* defined(_KERNEL) */
629 629
630/* 630/*
631 * Simple mbuf queueing system 631 * Simple mbuf queueing system
632 * 632 *
633 * this is basically a SIMPLEQ adapted to mbuf use (ie using 633 * this is basically a SIMPLEQ adapted to mbuf use (ie using
634 * m_nextpkt instead of field.sqe_next). 634 * m_nextpkt instead of field.sqe_next).
635 * 635 *
636 * m_next is ignored, so queueing chains of mbufs is possible 636 * m_next is ignored, so queueing chains of mbufs is possible
637 */ 637 */
638#define MBUFQ_HEAD(name) \ 638#define MBUFQ_HEAD(name) \
639struct name { \ 639struct name { \
640 struct mbuf *mq_first; \ 640 struct mbuf *mq_first; \
641 struct mbuf **mq_last; \ 641 struct mbuf **mq_last; \
642} 642}
643 643
644#define MBUFQ_INIT(q) do { \ 644#define MBUFQ_INIT(q) do { \
645 (q)->mq_first = NULL; \ 645 (q)->mq_first = NULL; \
646 (q)->mq_last = &(q)->mq_first; \ 646 (q)->mq_last = &(q)->mq_first; \
647} while (/*CONSTCOND*/0) 647} while (/*CONSTCOND*/0)
648 648
649#define MBUFQ_ENQUEUE(q, m) do { \ 649#define MBUFQ_ENQUEUE(q, m) do { \
650 (m)->m_nextpkt = NULL; \ 650 (m)->m_nextpkt = NULL; \
651 *(q)->mq_last = (m); \ 651 *(q)->mq_last = (m); \
652 (q)->mq_last = &(m)->m_nextpkt; \ 652 (q)->mq_last = &(m)->m_nextpkt; \
653} while (/*CONSTCOND*/0) 653} while (/*CONSTCOND*/0)
654 654
655#define MBUFQ_PREPEND(q, m) do { \ 655#define MBUFQ_PREPEND(q, m) do { \
656 if (((m)->m_nextpkt = (q)->mq_first) == NULL) \ 656 if (((m)->m_nextpkt = (q)->mq_first) == NULL) \
657 (q)->mq_last = &(m)->m_nextpkt; \ 657 (q)->mq_last = &(m)->m_nextpkt; \
658 (q)->mq_first = (m); \ 658 (q)->mq_first = (m); \
659} while (/*CONSTCOND*/0) 659} while (/*CONSTCOND*/0)
660 660
661#define MBUFQ_DEQUEUE(q, m) do { \ 661#define MBUFQ_DEQUEUE(q, m) do { \
662 if (((m) = (q)->mq_first) != NULL) { \ 662 if (((m) = (q)->mq_first) != NULL) { \
663 if (((q)->mq_first = (m)->m_nextpkt) == NULL) \ 663 if (((q)->mq_first = (m)->m_nextpkt) == NULL) \
664 (q)->mq_last = &(q)->mq_first; \ 664 (q)->mq_last = &(q)->mq_first; \
665 else \ 665 else \
666 (m)->m_nextpkt = NULL; \ 666 (m)->m_nextpkt = NULL; \
667 } \ 667 } \
668} while (/*CONSTCOND*/0) 668} while (/*CONSTCOND*/0)
669 669
670#define MBUFQ_DRAIN(q) do { \ 670#define MBUFQ_DRAIN(q) do { \
671 struct mbuf *__m0; \ 671 struct mbuf *__m0; \
672 while ((__m0 = (q)->mq_first) != NULL) { \ 672 while ((__m0 = (q)->mq_first) != NULL) { \
673 (q)->mq_first = __m0->m_nextpkt; \ 673 (q)->mq_first = __m0->m_nextpkt; \
674 m_freem(__m0); \ 674 m_freem(__m0); \
675 } \ 675 } \
676 (q)->mq_last = &(q)->mq_first; \ 676 (q)->mq_last = &(q)->mq_first; \
677} while (/*CONSTCOND*/0) 677} while (/*CONSTCOND*/0)
678 678
679#define MBUFQ_FIRST(q) ((q)->mq_first) 679#define MBUFQ_FIRST(q) ((q)->mq_first)
680#define MBUFQ_NEXT(m) ((m)->m_nextpkt) 680#define MBUFQ_NEXT(m) ((m)->m_nextpkt)
681#define MBUFQ_LAST(q) (*(q)->mq_last) 681#define MBUFQ_LAST(q) (*(q)->mq_last)
682 682
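A minimal queueing sketch; the queue name and the deferred packet "m0" are illustrative.

	MBUFQ_HEAD(example_mbufq) q;
	struct mbuf *m;

	MBUFQ_INIT(&q);
	MBUFQ_ENQUEUE(&q, m0);		/* m0: a packet deferred for later */

	for (;;) {
		MBUFQ_DEQUEUE(&q, m);
		if (m == NULL)
			break;
		/* process m; its m_next data chain is untouched by the queue */
		m_freem(m);
	}
	/* on teardown, MBUFQ_DRAIN(&q) frees anything still queued */
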
683/* 683/*
684 * Mbuf statistics. 684 * Mbuf statistics.
685 * For statistics related to mbuf and cluster allocations, see also the 685 * For statistics related to mbuf and cluster allocations, see also the
686 * pool headers (mb_cache and mcl_cache). 686 * pool headers (mb_cache and mcl_cache).
687 */ 687 */
688struct mbstat { 688struct mbstat {
689 u_long _m_spare; /* formerly m_mbufs */ 689 u_long _m_spare; /* formerly m_mbufs */
690 u_long _m_spare1; /* formerly m_clusters */ 690 u_long _m_spare1; /* formerly m_clusters */
691 u_long _m_spare2; /* spare field */ 691 u_long _m_spare2; /* spare field */
692 u_long _m_spare3; /* formerly m_clfree - free clusters */ 692 u_long _m_spare3; /* formerly m_clfree - free clusters */
693 u_long m_drops; /* times failed to find space */ 693 u_long m_drops; /* times failed to find space */
694 u_long m_wait; /* times waited for space */ 694 u_long m_wait; /* times waited for space */
695 u_long m_drain; /* times drained protocols for space */ 695 u_long m_drain; /* times drained protocols for space */
696 u_short m_mtypes[256]; /* type specific mbuf allocations */ 696 u_short m_mtypes[256]; /* type specific mbuf allocations */
697}; 697};
698 698
699struct mbstat_cpu { 699struct mbstat_cpu {
700 u_int m_mtypes[256]; /* type specific mbuf allocations */ 700 u_int m_mtypes[256]; /* type specific mbuf allocations */
701}; 701};
702 702
703/* 703/*
704 * Mbuf sysctl variables. 704 * Mbuf sysctl variables.
705 */ 705 */
706#define MBUF_MSIZE 1 /* int: mbuf base size */ 706#define MBUF_MSIZE 1 /* int: mbuf base size */
707#define MBUF_MCLBYTES 2 /* int: mbuf cluster size */ 707#define MBUF_MCLBYTES 2 /* int: mbuf cluster size */
708#define MBUF_NMBCLUSTERS 3 /* int: limit on the # of clusters */ 708#define MBUF_NMBCLUSTERS 3 /* int: limit on the # of clusters */
709#define MBUF_MBLOWAT 4 /* int: mbuf low water mark */ 709#define MBUF_MBLOWAT 4 /* int: mbuf low water mark */
710#define MBUF_MCLLOWAT 5 /* int: mbuf cluster low water mark */ 710#define MBUF_MCLLOWAT 5 /* int: mbuf cluster low water mark */
711#define MBUF_STATS 6 /* struct: mbstat */ 711#define MBUF_STATS 6 /* struct: mbstat */
712#define MBUF_MOWNERS 7 /* struct: m_owner[] */ 712#define MBUF_MOWNERS 7 /* struct: m_owner[] */
713 713
714#ifdef _KERNEL 714#ifdef _KERNEL
715extern struct mbstat mbstat; 715extern struct mbstat mbstat;
716extern int nmbclusters; /* limit on the # of clusters */ 716extern int nmbclusters; /* limit on the # of clusters */
717extern int mblowat; /* mbuf low water mark */ 717extern int mblowat; /* mbuf low water mark */
718extern int mcllowat; /* mbuf cluster low water mark */ 718extern int mcllowat; /* mbuf cluster low water mark */
719extern int max_linkhdr; /* largest link-level header */ 719extern int max_linkhdr; /* largest link-level header */
720extern int max_protohdr; /* largest protocol header */ 720extern int max_protohdr; /* largest protocol header */
721extern int max_hdr; /* largest link+protocol header */ 721extern int max_hdr; /* largest link+protocol header */
722extern int max_datalen; /* MHLEN - max_hdr */ 722extern int max_datalen; /* MHLEN - max_hdr */
723extern const int msize; /* mbuf base size */ 723extern const int msize; /* mbuf base size */
724extern const int mclbytes; /* mbuf cluster size */ 724extern const int mclbytes; /* mbuf cluster size */
725extern pool_cache_t mb_cache; 725extern pool_cache_t mb_cache;
726#ifdef MBUFTRACE 726#ifdef MBUFTRACE
727LIST_HEAD(mownerhead, mowner); 727LIST_HEAD(mownerhead, mowner);
728extern struct mownerhead mowners; 728extern struct mownerhead mowners;
729extern struct mowner unknown_mowners[]; 729extern struct mowner unknown_mowners[];
730extern struct mowner revoked_mowner; 730extern struct mowner revoked_mowner;
731#endif 731#endif
732 732
733MALLOC_DECLARE(M_MBUF); 733MALLOC_DECLARE(M_MBUF);
734MALLOC_DECLARE(M_SONAME); 734MALLOC_DECLARE(M_SONAME);
735 735
736struct mbuf *m_copym(struct mbuf *, int, int, int); 736struct mbuf *m_copym(struct mbuf *, int, int, int);
737struct mbuf *m_copypacket(struct mbuf *, int); 737struct mbuf *m_copypacket(struct mbuf *, int);
738struct mbuf *m_devget(char *, int, int, struct ifnet *); 738struct mbuf *m_devget(char *, int, int, struct ifnet *);
739struct mbuf *m_dup(struct mbuf *, int, int, int); 739struct mbuf *m_dup(struct mbuf *, int, int, int);
740struct mbuf *m_get(int, int); 740struct mbuf *m_get(int, int);
741struct mbuf *m_gethdr(int, int); 741struct mbuf *m_gethdr(int, int);
742struct mbuf *m_prepend(struct mbuf *,int, int); 742struct mbuf *m_prepend(struct mbuf *,int, int);
743struct mbuf *m_pulldown(struct mbuf *, int, int, int *); 743struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
744struct mbuf *m_pullup(struct mbuf *, int); 744struct mbuf *m_pullup(struct mbuf *, int);
745struct mbuf *m_copyup(struct mbuf *, int, int); 745struct mbuf *m_copyup(struct mbuf *, int, int);
746struct mbuf *m_split(struct mbuf *,int, int); 746struct mbuf *m_split(struct mbuf *,int, int);
747struct mbuf *m_getptr(struct mbuf *, int, int *); 747struct mbuf *m_getptr(struct mbuf *, int, int *);
748void m_adj(struct mbuf *, int); 748void m_adj(struct mbuf *, int);
749struct mbuf *m_defrag(struct mbuf *, int); 749struct mbuf *m_defrag(struct mbuf *, int);
750int m_apply(struct mbuf *, int, int, 750int m_apply(struct mbuf *, int, int,
751 int (*)(void *, void *, unsigned int), void *); 751 int (*)(void *, void *, unsigned int), void *);
752void m_cat(struct mbuf *,struct mbuf *); 752void m_cat(struct mbuf *,struct mbuf *);
753void m_clget(struct mbuf *, int); 753void m_clget(struct mbuf *, int);
754void m_copyback(struct mbuf *, int, int, const void *); 754void m_copyback(struct mbuf *, int, int, const void *);
755struct mbuf *m_copyback_cow(struct mbuf *, int, int, const void *, int); 755struct mbuf *m_copyback_cow(struct mbuf *, int, int, const void *, int);
756int m_makewritable(struct mbuf **, int, int, int); 756int m_makewritable(struct mbuf **, int, int, int);
757struct mbuf *m_getcl(int, int, int); 757struct mbuf *m_getcl(int, int, int);
758void m_copydata(struct mbuf *, int, int, void *); 758void m_copydata(struct mbuf *, int, int, void *);
759void m_verify_packet(struct mbuf *); 759void m_verify_packet(struct mbuf *);
760struct mbuf *m_free(struct mbuf *); 760struct mbuf *m_free(struct mbuf *);
761void m_freem(struct mbuf *); 761void m_freem(struct mbuf *);
762void mbinit(void); 762void mbinit(void);
763void m_remove_pkthdr(struct mbuf *); 763void m_remove_pkthdr(struct mbuf *);
764void m_copy_pkthdr(struct mbuf *, struct mbuf *); 764void m_copy_pkthdr(struct mbuf *, struct mbuf *);
765void m_move_pkthdr(struct mbuf *, struct mbuf *); 765void m_move_pkthdr(struct mbuf *, struct mbuf *);
766void m_align(struct mbuf *, int); 766void m_align(struct mbuf *, int);
767 767
768bool m_ensure_contig(struct mbuf **, int); 768bool m_ensure_contig(struct mbuf **, int);
769struct mbuf *m_add(struct mbuf *, struct mbuf *); 769struct mbuf *m_add(struct mbuf *, struct mbuf *);
770 770
771/* Inline routines. */ 771/* Inline routines. */
772static __inline u_int m_length(const struct mbuf *) __unused; 772static __inline u_int m_length(const struct mbuf *) __unused;
773 773
774/* Statistics */ 774/* Statistics */
775void mbstat_type_add(int, int); 775void mbstat_type_add(int, int);
776 776
777/* Packet tag routines */ 777/* Packet tag routines */
778struct m_tag *m_tag_get(int, int, int); 778struct m_tag *m_tag_get(int, int, int);
779void m_tag_free(struct m_tag *); 779void m_tag_free(struct m_tag *);
780void m_tag_prepend(struct mbuf *, struct m_tag *); 780void m_tag_prepend(struct mbuf *, struct m_tag *);
781void m_tag_unlink(struct mbuf *, struct m_tag *); 781void m_tag_unlink(struct mbuf *, struct m_tag *);
782void m_tag_delete(struct mbuf *, struct m_tag *); 782void m_tag_delete(struct mbuf *, struct m_tag *);
783void m_tag_delete_chain(struct mbuf *); 783void m_tag_delete_chain(struct mbuf *);
784struct m_tag *m_tag_find(const struct mbuf *, int); 784struct m_tag *m_tag_find(const struct mbuf *, int);
785struct m_tag *m_tag_copy(struct m_tag *); 785struct m_tag *m_tag_copy(struct m_tag *);
786int m_tag_copy_chain(struct mbuf *, struct mbuf *); 786int m_tag_copy_chain(struct mbuf *, struct mbuf *);
787 787
788/* Packet tag types */ 788/* Packet tag types */
789#define PACKET_TAG_NONE 0 /* Nothing */ 789#define PACKET_TAG_NONE 0 /* Nothing */
790#define PACKET_TAG_SO 4 /* sending socket pointer */ 790#define PACKET_TAG_SO 4 /* sending socket pointer */
791#define PACKET_TAG_NPF 10 /* packet filter */ 791#define PACKET_TAG_NPF 10 /* packet filter */
792#define PACKET_TAG_PF 11 /* packet filter */ 792#define PACKET_TAG_PF 11 /* packet filter */
793#define PACKET_TAG_ALTQ_QID 12 /* ALTQ queue id */ 793#define PACKET_TAG_ALTQ_QID 12 /* ALTQ queue id */
794#define PACKET_TAG_IPSEC_OUT_DONE 18 794#define PACKET_TAG_IPSEC_OUT_DONE 18
795#define PACKET_TAG_IPSEC_NAT_T_PORTS 25 /* two uint16_t */ 795#define PACKET_TAG_IPSEC_NAT_T_PORTS 25 /* two uint16_t */
796#define PACKET_TAG_INET6 26 /* IPv6 info */ 796#define PACKET_TAG_INET6 26 /* IPv6 info */
797#define PACKET_TAG_TUNNEL_INFO 28 /* tunnel identification and 797#define PACKET_TAG_TUNNEL_INFO 28 /* tunnel identification and
798 * protocol callback, for loop 798 * protocol callback, for loop
799 * detection/recovery 799 * detection/recovery
800 */ 800 */
801#define PACKET_TAG_MPLS 29 /* Indicate it's for MPLS */ 801#define PACKET_TAG_MPLS 29 /* Indicate it's for MPLS */
802#define PACKET_TAG_SRCROUTE 30 /* IPv4 source routing */ 802#define PACKET_TAG_SRCROUTE 30 /* IPv4 source routing */
 803#define PACKET_TAG_ETHERNET_SRC 31 /* Ethernet source address */
803 804
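A sketch of how the new PACKET_TAG_ETHERNET_SRC tag can carry a frame's source MAC from the link layer up to a consumer, using only the tag routines declared above. This is an illustration, not the actual if_ethersubr.c/if_arp.c hunks; "eh", "sha" and the "drop" label are assumed names from hypothetical surrounding code.

	struct m_tag *mtag;

	/* link layer: record the Ethernet source address on the packet */
	mtag = m_tag_get(PACKET_TAG_ETHERNET_SRC, ETHER_ADDR_LEN, M_NOWAIT);
	if (mtag != NULL) {
		memcpy(mtag + 1, eh->ether_shost, ETHER_ADDR_LEN);
		m_tag_prepend(m, mtag);
	}

	/* consumer: compare the recorded address with a protocol-level one */
	mtag = m_tag_find(m, PACKET_TAG_ETHERNET_SRC);
	if (mtag != NULL && memcmp(mtag + 1, sha, ETHER_ADDR_LEN) != 0)
		goto drop;	/* sender hardware address does not match */
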
804/* 805/*
805 * Return the number of bytes in the mbuf chain, m. 806 * Return the number of bytes in the mbuf chain, m.
806 */ 807 */
807static __inline u_int 808static __inline u_int
808m_length(const struct mbuf *m) 809m_length(const struct mbuf *m)
809{ 810{
810 const struct mbuf *m0; 811 const struct mbuf *m0;
811 u_int pktlen; 812 u_int pktlen;
812 813
813 if ((m->m_flags & M_PKTHDR) != 0) 814 if ((m->m_flags & M_PKTHDR) != 0)
814 return m->m_pkthdr.len; 815 return m->m_pkthdr.len;
815 816
816 pktlen = 0; 817 pktlen = 0;
817 for (m0 = m; m0 != NULL; m0 = m0->m_next) 818 for (m0 = m; m0 != NULL; m0 = m0->m_next)
818 pktlen += m0->m_len; 819 pktlen += m0->m_len;
819 return pktlen; 820 return pktlen;
820} 821}
821 822
822static __inline void 823static __inline void
823m_set_rcvif(struct mbuf *m, const struct ifnet *ifp) 824m_set_rcvif(struct mbuf *m, const struct ifnet *ifp)
824{ 825{
825 KASSERT(m->m_flags & M_PKTHDR); 826 KASSERT(m->m_flags & M_PKTHDR);
826 m->m_pkthdr.rcvif_index = ifp->if_index; 827 m->m_pkthdr.rcvif_index = ifp->if_index;
827} 828}
828 829
829static __inline void 830static __inline void
830m_reset_rcvif(struct mbuf *m) 831m_reset_rcvif(struct mbuf *m)
831{ 832{
832 KASSERT(m->m_flags & M_PKTHDR); 833 KASSERT(m->m_flags & M_PKTHDR);
833 /* A caller may expect the whole _rcvif union to be zeroed */ 834 /* A caller may expect the whole _rcvif union to be zeroed */
834 /* m->m_pkthdr.rcvif_index = 0; */ 835 /* m->m_pkthdr.rcvif_index = 0; */
835 m->m_pkthdr._rcvif.ctx = NULL; 836 m->m_pkthdr._rcvif.ctx = NULL;
836} 837}
837 838
838static __inline void 839static __inline void
839m_copy_rcvif(struct mbuf *m, const struct mbuf *n) 840m_copy_rcvif(struct mbuf *m, const struct mbuf *n)
840{ 841{
841 KASSERT(m->m_flags & M_PKTHDR); 842 KASSERT(m->m_flags & M_PKTHDR);
842 KASSERT(n->m_flags & M_PKTHDR); 843 KASSERT(n->m_flags & M_PKTHDR);
843 m->m_pkthdr.rcvif_index = n->m_pkthdr.rcvif_index; 844 m->m_pkthdr.rcvif_index = n->m_pkthdr.rcvif_index;
844} 845}
845 846
846#define M_GET_ALIGNED_HDR(m, type, linkhdr) \ 847#define M_GET_ALIGNED_HDR(m, type, linkhdr) \
847 m_get_aligned_hdr((m), __alignof(type) - 1, sizeof(type), (linkhdr)) 848 m_get_aligned_hdr((m), __alignof(type) - 1, sizeof(type), (linkhdr))
848 849
849static __inline int 850static __inline int
850m_get_aligned_hdr(struct mbuf **m, int mask, size_t hlen, bool linkhdr) 851m_get_aligned_hdr(struct mbuf **m, int mask, size_t hlen, bool linkhdr)
851{ 852{
852#ifndef __NO_STRICT_ALIGNMENT 853#ifndef __NO_STRICT_ALIGNMENT
853 if (((uintptr_t)mtod(*m, void *) & mask) != 0) 854 if (((uintptr_t)mtod(*m, void *) & mask) != 0)
854 *m = m_copyup(*m, hlen,  855 *m = m_copyup(*m, hlen,
855 linkhdr ? (max_linkhdr + mask) & ~mask : 0); 856 linkhdr ? (max_linkhdr + mask) & ~mask : 0);
856 else 857 else
857#endif 858#endif
858 if (__predict_false((size_t)(*m)->m_len < hlen)) 859 if (__predict_false((size_t)(*m)->m_len < hlen))
859 *m = m_pullup(*m, hlen); 860 *m = m_pullup(*m, hlen);
860 861
861 return *m == NULL; 862 return *m == NULL;
862} 863}
863 864
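A sketch of using the alignment helper from a protocol input routine; when it returns nonzero the chain has already been disposed of and "m" is NULL, so the caller simply bails out.

	struct ip *ip;

	if (M_GET_ALIGNED_HDR(&m, struct ip, false))
		return;			/* m_copyup/m_pullup failed; m is NULL */
	ip = mtod(m, struct ip *);
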
864void m_print(const struct mbuf *, const char *, void (*)(const char *, ...) 865void m_print(const struct mbuf *, const char *, void (*)(const char *, ...)
865 __printflike(1, 2)); 866 __printflike(1, 2));
866 867
867/* from uipc_mbufdebug.c */ 868/* from uipc_mbufdebug.c */
868void m_examine(const struct mbuf *, int, const char *, 869void m_examine(const struct mbuf *, int, const char *,
869 void (*)(const char *, ...) __printflike(1, 2)); 870 void (*)(const char *, ...) __printflike(1, 2));
870 871
871/* parsers for m_examine() */ 872/* parsers for m_examine() */
872void m_examine_ether(const struct mbuf *, int, const char *, 873void m_examine_ether(const struct mbuf *, int, const char *,
873 void (*)(const char *, ...) __printflike(1, 2)); 874 void (*)(const char *, ...) __printflike(1, 2));
874void m_examine_pppoe(const struct mbuf *, int, const char *, 875void m_examine_pppoe(const struct mbuf *, int, const char *,
875 void (*)(const char *, ...) __printflike(1, 2)); 876 void (*)(const char *, ...) __printflike(1, 2));
876void m_examine_ppp(const struct mbuf *, int, const char *, 877void m_examine_ppp(const struct mbuf *, int, const char *,
877 void (*)(const char *, ...) __printflike(1, 2)); 878 void (*)(const char *, ...) __printflike(1, 2));
878void m_examine_arp(const struct mbuf *, int, const char *, 879void m_examine_arp(const struct mbuf *, int, const char *,
879 void (*)(const char *, ...) __printflike(1, 2)); 880 void (*)(const char *, ...) __printflike(1, 2));
880void m_examine_ip(const struct mbuf *, int, const char *, 881void m_examine_ip(const struct mbuf *, int, const char *,
881 void (*)(const char *, ...) __printflike(1, 2)); 882 void (*)(const char *, ...) __printflike(1, 2));
882void m_examine_icmp(const struct mbuf *, int, const char *, 883void m_examine_icmp(const struct mbuf *, int, const char *,
883 void (*)(const char *, ...) __printflike(1, 2)); 884 void (*)(const char *, ...) __printflike(1, 2));
884void m_examine_ip6(const struct mbuf *, int, const char *, 885void m_examine_ip6(const struct mbuf *, int, const char *,
885 void (*)(const char *, ...) __printflike(1, 2)); 886 void (*)(const char *, ...) __printflike(1, 2));
886void m_examine_icmp6(const struct mbuf *, int, const char *, 887void m_examine_icmp6(const struct mbuf *, int, const char *,
887 void (*)(const char *, ...) __printflike(1, 2)); 888 void (*)(const char *, ...) __printflike(1, 2));
888void m_examine_tcp(const struct mbuf *, int, const char *, 889void m_examine_tcp(const struct mbuf *, int, const char *,
889 void (*)(const char *, ...) __printflike(1, 2)); 890 void (*)(const char *, ...) __printflike(1, 2));
890void m_examine_udp(const struct mbuf *, int, const char *, 891void m_examine_udp(const struct mbuf *, int, const char *,
891 void (*)(const char *, ...) __printflike(1, 2)); 892 void (*)(const char *, ...) __printflike(1, 2));
892void m_examine_hex(const struct mbuf *, int, const char *, 893void m_examine_hex(const struct mbuf *, int, const char *,
893 void (*)(const char *, ...) __printflike(1, 2)); 894 void (*)(const char *, ...) __printflike(1, 2));
894 895
895/* 896/*
896 * Get rcvif of a mbuf. 897 * Get rcvif of a mbuf.
897 * 898 *
898 * The caller must call m_put_rcvif after using rcvif if the returned rcvif 899 * The caller must call m_put_rcvif after using rcvif if the returned rcvif
899 * isn't NULL. If the returned rcvif is NULL, the caller doesn't need to call 900 * isn't NULL. If the returned rcvif is NULL, the caller doesn't need to call
900 * m_put_rcvif (although calling it is safe). 901 * m_put_rcvif (although calling it is safe).
901 * 902 *
902 * The caller must not block or sleep while using rcvif. The API ensures a 903 * The caller must not block or sleep while using rcvif. The API ensures a
903 * returned rcvif isn't freed until m_put_rcvif is called. 904 * returned rcvif isn't freed until m_put_rcvif is called.
904 */ 905 */
905static __inline struct ifnet * 906static __inline struct ifnet *
906m_get_rcvif(const struct mbuf *m, int *s) 907m_get_rcvif(const struct mbuf *m, int *s)
907{ 908{
908 struct ifnet *ifp; 909 struct ifnet *ifp;
909 910
910 KASSERT(m->m_flags & M_PKTHDR); 911 KASSERT(m->m_flags & M_PKTHDR);
911 *s = pserialize_read_enter(); 912 *s = pserialize_read_enter();
912 ifp = if_byindex(m->m_pkthdr.rcvif_index); 913 ifp = if_byindex(m->m_pkthdr.rcvif_index);
913 if (__predict_false(ifp == NULL)) 914 if (__predict_false(ifp == NULL))
914 pserialize_read_exit(*s); 915 pserialize_read_exit(*s);
915 916
916 return ifp; 917 return ifp;
917} 918}
918 919
919static __inline void 920static __inline void
920m_put_rcvif(struct ifnet *ifp, int *s) 921m_put_rcvif(struct ifnet *ifp, int *s)
921{ 922{
922 923
923 if (ifp == NULL) 924 if (ifp == NULL)
924 return; 925 return;
925 pserialize_read_exit(*s); 926 pserialize_read_exit(*s);
926} 927}
927 928
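A sketch of the pserialize-protected access pattern described above; no blocking is allowed between get and put.

	struct ifnet *ifp;
	int s;

	ifp = m_get_rcvif(m, &s);
	if (ifp == NULL) {
		/* the receiving interface has gone away */
		m_freem(m);
		return;
	}
	/* brief, non-sleeping use of ifp only, e.g. reading ifp->if_mtu */
	m_put_rcvif(ifp, &s);
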
928/* 929/*
929 * Get rcvif of a mbuf. 930 * Get rcvif of a mbuf.
930 * 931 *
931 * The caller must call m_put_rcvif_psref after using rcvif. The API ensures 932 * The caller must call m_put_rcvif_psref after using rcvif. The API ensures
932 * the returned rcvif isn't freed until m_put_rcvif_psref is called. 933 * the returned rcvif isn't freed until m_put_rcvif_psref is called.
933 */ 934 */
934static __inline struct ifnet * 935static __inline struct ifnet *
935m_get_rcvif_psref(const struct mbuf *m, struct psref *psref) 936m_get_rcvif_psref(const struct mbuf *m, struct psref *psref)
936{ 937{
937 KASSERT(m->m_flags & M_PKTHDR); 938 KASSERT(m->m_flags & M_PKTHDR);
938 return if_get_byindex(m->m_pkthdr.rcvif_index, psref); 939 return if_get_byindex(m->m_pkthdr.rcvif_index, psref);
939} 940}
940 941
941static __inline void 942static __inline void
942m_put_rcvif_psref(struct ifnet *ifp, struct psref *psref) 943m_put_rcvif_psref(struct ifnet *ifp, struct psref *psref)
943{ 944{
944 945
945 if (ifp == NULL) 946 if (ifp == NULL)
946 return; 947 return;
947 if_put(ifp, psref); 948 if_put(ifp, psref);
948} 949}
949 950
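A sketch of the psref variant; the reference can outlive a pserialize read section, which is the reason to prefer it when the caller may sleep.

	struct psref psref;
	struct ifnet *ifp;

	ifp = m_get_rcvif_psref(m, &psref);
	if (ifp == NULL) {
		m_freem(m);
		return;
	}
	/* unlike m_get_rcvif, this reference may be held across sleeps */
	m_put_rcvif_psref(ifp, &psref);
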
950/* 951/*
951 * Get rcvif of a mbuf. 952 * Get rcvif of a mbuf.
952 * 953 *
953 * This is NOT an MP-safe API and shouldn't be used where MP-safety is required. 954 * This is NOT an MP-safe API and shouldn't be used where MP-safety is required.
954 */ 955 */
955static __inline struct ifnet * 956static __inline struct ifnet *
956m_get_rcvif_NOMPSAFE(const struct mbuf *m) 957m_get_rcvif_NOMPSAFE(const struct mbuf *m)
957{ 958{
958 KASSERT(m->m_flags & M_PKTHDR); 959 KASSERT(m->m_flags & M_PKTHDR);
959 return if_byindex(m->m_pkthdr.rcvif_index); 960 return if_byindex(m->m_pkthdr.rcvif_index);
960} 961}
961 962
962#endif /* _KERNEL */ 963#endif /* _KERNEL */
963#endif /* !_SYS_MBUF_H_ */ 964#endif /* !_SYS_MBUF_H_ */