Thu Apr 23 15:06:50 2020 UTC
Also let the backend know that feature-ipv6-csum-offload is supported.


(jdolecek)
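In short: during the xenbus handshake in xennet_talk_to_backend(), the frontend now also writes a feature-ipv6-csum-offload node, in the same transaction that publishes the ring references and event channel, so the backend knows the frontend handles IPv6 checksum offload. Condensed from the hunk in the diff below (xbt, sc and errmsg are the function's existing locals):

	/* advertise IPv6 csum offload support to the backend via xenstore */
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "feature-ipv6-csum-offload", "%u", 1);
	if (error) {
		errmsg = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}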
diff -r1.115 -r1.116 src/sys/arch/xen/xen/if_xennet_xenbus.c

cvs diff -r1.115 -r1.116 src/sys/arch/xen/xen/if_xennet_xenbus.c

--- src/sys/arch/xen/xen/if_xennet_xenbus.c 2020/04/23 14:54:48 1.115
+++ src/sys/arch/xen/xen/if_xennet_xenbus.c 2020/04/23 15:06:49 1.116
@@ -1,1230 +1,1236 @@
1/* $NetBSD: if_xennet_xenbus.c,v 1.115 2020/04/23 14:54:48 jdolecek Exp $ */ 1/* $NetBSD: if_xennet_xenbus.c,v 1.116 2020/04/23 15:06:49 jdolecek Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */ 25 */
26 26
27/* 27/*
28 * Copyright (c) 2004 Christian Limpach. 28 * Copyright (c) 2004 Christian Limpach.
29 * All rights reserved. 29 * All rights reserved.
30 * 30 *
31 * Redistribution and use in source and binary forms, with or without 31 * Redistribution and use in source and binary forms, with or without
32 * modification, are permitted provided that the following conditions 32 * modification, are permitted provided that the following conditions
33 * are met: 33 * are met:
34 * 1. Redistributions of source code must retain the above copyright 34 * 1. Redistributions of source code must retain the above copyright
35 * notice, this list of conditions and the following disclaimer. 35 * notice, this list of conditions and the following disclaimer.
36 * 2. Redistributions in binary form must reproduce the above copyright 36 * 2. Redistributions in binary form must reproduce the above copyright
37 * notice, this list of conditions and the following disclaimer in the 37 * notice, this list of conditions and the following disclaimer in the
38 * documentation and/or other materials provided with the distribution. 38 * documentation and/or other materials provided with the distribution.
39 * 39 *
40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
41 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 41 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
42 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 42 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
43 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 43 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
44 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 44 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
45 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 45 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
46 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 46 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
47 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 47 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
48 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 48 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
49 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 49 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50 */ 50 */
51 51
52/* 52/*
53 * This file contains the xennet frontend code required for the network 53 * This file contains the xennet frontend code required for the network
54 * communication between two Xen domains. 54 * communication between two Xen domains.
55 * It ressembles xbd, but is a little more complex as it must deal with two 55 * It ressembles xbd, but is a little more complex as it must deal with two
56 * rings: 56 * rings:
57 * - the TX ring, to transmit packets to backend (inside => outside) 57 * - the TX ring, to transmit packets to backend (inside => outside)
58 * - the RX ring, to receive packets from backend (outside => inside) 58 * - the RX ring, to receive packets from backend (outside => inside)
59 * 59 *
60 * Principles are following. 60 * Principles are following.
61 * 61 *
62 * For TX: 62 * For TX:
63 * Purpose is to transmit packets to the outside. The start of day is in 63 * Purpose is to transmit packets to the outside. The start of day is in
64 * xennet_start() (output routine of xennet) scheduled via a softint. 64 * xennet_start() (output routine of xennet) scheduled via a softint.
65 * xennet_start() generates the requests associated 65 * xennet_start() generates the requests associated
66 * to the TX mbufs queued (see altq(9)). 66 * to the TX mbufs queued (see altq(9)).
67 * The backend's responses are processed by xennet_tx_complete(), called 67 * The backend's responses are processed by xennet_tx_complete(), called
68 * from xennet_start() 68 * from xennet_start()
69 * 69 *
70 * for RX: 70 * for RX:
71 * Purpose is to process the packets received from the outside. RX buffers 71 * Purpose is to process the packets received from the outside. RX buffers
72 * are pre-allocated through xennet_alloc_rx_buffer(), during xennet autoconf 72 * are pre-allocated through xennet_alloc_rx_buffer(), during xennet autoconf
73 * attach. During pre-allocation, frontend pushes requests in the I/O ring, in 73 * attach. During pre-allocation, frontend pushes requests in the I/O ring, in
74 * preparation for incoming packets from backend. 74 * preparation for incoming packets from backend.
75 * When RX packets need to be processed, backend takes the requests previously 75 * When RX packets need to be processed, backend takes the requests previously
76 * offered by frontend and pushes the associated responses inside the I/O ring. 76 * offered by frontend and pushes the associated responses inside the I/O ring.
77 * When done, it notifies frontend through an event notification, which will 77 * When done, it notifies frontend through an event notification, which will
78 * asynchronously call xennet_handler() in frontend. 78 * asynchronously call xennet_handler() in frontend.
79 * xennet_handler() processes the responses, generates the associated mbuf, and 79 * xennet_handler() processes the responses, generates the associated mbuf, and
80 * passes it to the MI layer for further processing. 80 * passes it to the MI layer for further processing.
81 */ 81 */
82 82
83#include <sys/cdefs.h> 83#include <sys/cdefs.h>
84__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.115 2020/04/23 14:54:48 jdolecek Exp $"); 84__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.116 2020/04/23 15:06:49 jdolecek Exp $");
85 85
86#include "opt_xen.h" 86#include "opt_xen.h"
87#include "opt_nfs_boot.h" 87#include "opt_nfs_boot.h"
88 88
89#include <sys/param.h> 89#include <sys/param.h>
90#include <sys/device.h> 90#include <sys/device.h>
91#include <sys/conf.h> 91#include <sys/conf.h>
92#include <sys/kernel.h> 92#include <sys/kernel.h>
93#include <sys/proc.h> 93#include <sys/proc.h>
94#include <sys/systm.h> 94#include <sys/systm.h>
95#include <sys/intr.h> 95#include <sys/intr.h>
96#include <sys/rndsource.h> 96#include <sys/rndsource.h>
97 97
98#include <net/if.h> 98#include <net/if.h>
99#include <net/if_dl.h> 99#include <net/if_dl.h>
100#include <net/if_ether.h> 100#include <net/if_ether.h>
101#include <net/bpf.h> 101#include <net/bpf.h>
102 102
103#if defined(NFS_BOOT_BOOTSTATIC) 103#if defined(NFS_BOOT_BOOTSTATIC)
104#include <sys/fstypes.h> 104#include <sys/fstypes.h>
105#include <sys/mount.h> 105#include <sys/mount.h>
106#include <sys/statvfs.h> 106#include <sys/statvfs.h>
107#include <netinet/in.h> 107#include <netinet/in.h>
108#include <nfs/rpcv2.h> 108#include <nfs/rpcv2.h>
109#include <nfs/nfsproto.h> 109#include <nfs/nfsproto.h>
110#include <nfs/nfs.h> 110#include <nfs/nfs.h>
111#include <nfs/nfsmount.h> 111#include <nfs/nfsmount.h>
112#include <nfs/nfsdiskless.h> 112#include <nfs/nfsdiskless.h>
113#include <xen/if_xennetvar.h> 113#include <xen/if_xennetvar.h>
114#endif /* defined(NFS_BOOT_BOOTSTATIC) */ 114#endif /* defined(NFS_BOOT_BOOTSTATIC) */
115 115
116#include <xen/xennet_checksum.h> 116#include <xen/xennet_checksum.h>
117 117
118#include <uvm/uvm.h> 118#include <uvm/uvm.h>
119 119
120#include <xen/hypervisor.h> 120#include <xen/hypervisor.h>
121#include <xen/evtchn.h> 121#include <xen/evtchn.h>
122#include <xen/granttables.h> 122#include <xen/granttables.h>
123#include <xen/include/public/io/netif.h> 123#include <xen/include/public/io/netif.h>
124#include <xen/xenpmap.h> 124#include <xen/xenpmap.h>
125 125
126#include <xen/xenbus.h> 126#include <xen/xenbus.h>
127#include "locators.h" 127#include "locators.h"
128 128
129#undef XENNET_DEBUG_DUMP 129#undef XENNET_DEBUG_DUMP
130#undef XENNET_DEBUG 130#undef XENNET_DEBUG
131 131
132#ifdef XENNET_DEBUG 132#ifdef XENNET_DEBUG
133#define XEDB_FOLLOW 0x01 133#define XEDB_FOLLOW 0x01
134#define XEDB_INIT 0x02 134#define XEDB_INIT 0x02
135#define XEDB_EVENT 0x04 135#define XEDB_EVENT 0x04
136#define XEDB_MBUF 0x08 136#define XEDB_MBUF 0x08
137#define XEDB_MEM 0x10 137#define XEDB_MEM 0x10
138int xennet_debug = 0xff; 138int xennet_debug = 0xff;
139#define DPRINTF(x) if (xennet_debug) printf x; 139#define DPRINTF(x) if (xennet_debug) printf x;
140#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x; 140#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
141#else 141#else
142#define DPRINTF(x) 142#define DPRINTF(x)
143#define DPRINTFN(n,x) 143#define DPRINTFN(n,x)
144#endif 144#endif
145 145
146#define GRANT_INVALID_REF -1 /* entry is free */ 146#define GRANT_INVALID_REF -1 /* entry is free */
147 147
148#define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE) 148#define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
149#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE) 149#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
150 150
151struct xennet_txreq { 151struct xennet_txreq {
152 SLIST_ENTRY(xennet_txreq) txreq_next; 152 SLIST_ENTRY(xennet_txreq) txreq_next;
153 uint16_t txreq_id; /* ID passed to backend */ 153 uint16_t txreq_id; /* ID passed to backend */
154 grant_ref_t txreq_gntref; /* grant ref of this request */ 154 grant_ref_t txreq_gntref; /* grant ref of this request */
155 struct mbuf *txreq_m; /* mbuf being transmitted */ 155 struct mbuf *txreq_m; /* mbuf being transmitted */
156 bus_dmamap_t txreq_dmamap; 156 bus_dmamap_t txreq_dmamap;
157}; 157};
158 158
159struct xennet_rxreq { 159struct xennet_rxreq {
160 SLIST_ENTRY(xennet_rxreq) rxreq_next; 160 SLIST_ENTRY(xennet_rxreq) rxreq_next;
161 uint16_t rxreq_id; /* ID passed to backend */ 161 uint16_t rxreq_id; /* ID passed to backend */
162 grant_ref_t rxreq_gntref; /* grant ref of this request */ 162 grant_ref_t rxreq_gntref; /* grant ref of this request */
163 struct mbuf *rxreq_m; 163 struct mbuf *rxreq_m;
164 bus_dmamap_t rxreq_dmamap; 164 bus_dmamap_t rxreq_dmamap;
165}; 165};
166 166
167struct xennet_xenbus_softc { 167struct xennet_xenbus_softc {
168 device_t sc_dev; 168 device_t sc_dev;
169 struct ethercom sc_ethercom; 169 struct ethercom sc_ethercom;
170 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 170 uint8_t sc_enaddr[ETHER_ADDR_LEN];
171 struct xenbus_device *sc_xbusd; 171 struct xenbus_device *sc_xbusd;
172 172
173 netif_tx_front_ring_t sc_tx_ring; 173 netif_tx_front_ring_t sc_tx_ring;
174 netif_rx_front_ring_t sc_rx_ring; 174 netif_rx_front_ring_t sc_rx_ring;
175 175
176 unsigned int sc_evtchn; 176 unsigned int sc_evtchn;
177 struct intrhand *sc_ih; 177 struct intrhand *sc_ih;
178  178
179 grant_ref_t sc_tx_ring_gntref; 179 grant_ref_t sc_tx_ring_gntref;
180 grant_ref_t sc_rx_ring_gntref; 180 grant_ref_t sc_rx_ring_gntref;
181 181
182 kmutex_t sc_tx_lock; /* protects free TX list, TX ring */ 182 kmutex_t sc_tx_lock; /* protects free TX list, TX ring */
183 kmutex_t sc_rx_lock; /* protects free RX list, RX ring, rxreql */ 183 kmutex_t sc_rx_lock; /* protects free RX list, RX ring, rxreql */
184 struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE]; 184 struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
185 struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE]; 185 struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
186 SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */ 186 SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
187 SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */ 187 SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
188 int sc_free_rxreql; /* number of free receive request struct */ 188 int sc_free_rxreql; /* number of free receive request struct */
189 189
190 int sc_backend_status; /* our status with backend */ 190 int sc_backend_status; /* our status with backend */
191#define BEST_CLOSED 0 191#define BEST_CLOSED 0
192#define BEST_DISCONNECTED 1 192#define BEST_DISCONNECTED 1
193#define BEST_CONNECTED 2 193#define BEST_CONNECTED 2
194#define BEST_SUSPENDED 3 194#define BEST_SUSPENDED 3
195 bool sc_ipv6_csum; /* whether backend support IPv6 csum offload */ 195 bool sc_ipv6_csum; /* whether backend support IPv6 csum offload */
196 krndsource_t sc_rnd_source; 196 krndsource_t sc_rnd_source;
197}; 197};
198 198
199static pool_cache_t if_xennetrxbuf_cache; 199static pool_cache_t if_xennetrxbuf_cache;
200static int if_xennetrxbuf_cache_inited=0; 200static int if_xennetrxbuf_cache_inited=0;
201 201
202static int xennet_xenbus_match(device_t, cfdata_t, void *); 202static int xennet_xenbus_match(device_t, cfdata_t, void *);
203static void xennet_xenbus_attach(device_t, device_t, void *); 203static void xennet_xenbus_attach(device_t, device_t, void *);
204static int xennet_xenbus_detach(device_t, int); 204static int xennet_xenbus_detach(device_t, int);
205static void xennet_backend_changed(void *, XenbusState); 205static void xennet_backend_changed(void *, XenbusState);
206 206
207static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *); 207static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
208static void xennet_free_rx_buffer(struct xennet_xenbus_softc *); 208static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
209static void xennet_tx_complete(struct xennet_xenbus_softc *); 209static void xennet_tx_complete(struct xennet_xenbus_softc *);
210static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *); 210static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *);
211static int xennet_handler(void *); 211static int xennet_handler(void *);
212static bool xennet_talk_to_backend(struct xennet_xenbus_softc *); 212static bool xennet_talk_to_backend(struct xennet_xenbus_softc *);
213#ifdef XENNET_DEBUG_DUMP 213#ifdef XENNET_DEBUG_DUMP
214static void xennet_hex_dump(const unsigned char *, size_t, const char *, int); 214static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
215#endif 215#endif
216 216
217static int xennet_init(struct ifnet *); 217static int xennet_init(struct ifnet *);
218static void xennet_stop(struct ifnet *, int); 218static void xennet_stop(struct ifnet *, int);
219static void xennet_start(struct ifnet *); 219static void xennet_start(struct ifnet *);
220static int xennet_ioctl(struct ifnet *, u_long, void *); 220static int xennet_ioctl(struct ifnet *, u_long, void *);
221 221
222static bool xennet_xenbus_suspend(device_t dev, const pmf_qual_t *); 222static bool xennet_xenbus_suspend(device_t dev, const pmf_qual_t *);
223static bool xennet_xenbus_resume (device_t dev, const pmf_qual_t *); 223static bool xennet_xenbus_resume (device_t dev, const pmf_qual_t *);
224 224
225CFATTACH_DECL3_NEW(xennet, sizeof(struct xennet_xenbus_softc), 225CFATTACH_DECL3_NEW(xennet, sizeof(struct xennet_xenbus_softc),
226 xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL, 226 xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL,
227 NULL, NULL, DVF_DETACH_SHUTDOWN); 227 NULL, NULL, DVF_DETACH_SHUTDOWN);
228 228
229static int 229static int
230xennet_xenbus_match(device_t parent, cfdata_t match, void *aux) 230xennet_xenbus_match(device_t parent, cfdata_t match, void *aux)
231{ 231{
232 struct xenbusdev_attach_args *xa = aux; 232 struct xenbusdev_attach_args *xa = aux;
233 233
234 if (strcmp(xa->xa_type, "vif") != 0) 234 if (strcmp(xa->xa_type, "vif") != 0)
235 return 0; 235 return 0;
236 236
237 if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT && 237 if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
238 match->cf_loc[XENBUSCF_ID] != xa->xa_id) 238 match->cf_loc[XENBUSCF_ID] != xa->xa_id)
239 return 0; 239 return 0;
240 240
241 return 1; 241 return 1;
242} 242}
243 243
244static void 244static void
245xennet_xenbus_attach(device_t parent, device_t self, void *aux) 245xennet_xenbus_attach(device_t parent, device_t self, void *aux)
246{ 246{
247 struct xennet_xenbus_softc *sc = device_private(self); 247 struct xennet_xenbus_softc *sc = device_private(self);
248 struct xenbusdev_attach_args *xa = aux; 248 struct xenbusdev_attach_args *xa = aux;
249 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 249 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
250 int err; 250 int err;
251 netif_tx_sring_t *tx_ring; 251 netif_tx_sring_t *tx_ring;
252 netif_rx_sring_t *rx_ring; 252 netif_rx_sring_t *rx_ring;
253 RING_IDX i; 253 RING_IDX i;
254 char *e, *p; 254 char *e, *p;
255 unsigned long uval; 255 unsigned long uval;
256 extern int ifqmaxlen; /* XXX */ 256 extern int ifqmaxlen; /* XXX */
257 char mac[32]; 257 char mac[32];
258 258
259 aprint_normal(": Xen Virtual Network Interface\n"); 259 aprint_normal(": Xen Virtual Network Interface\n");
260 sc->sc_dev = self; 260 sc->sc_dev = self;
261 261
262 sc->sc_xbusd = xa->xa_xbusd; 262 sc->sc_xbusd = xa->xa_xbusd;
263 sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed; 263 sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
264 264
265 /* xenbus ensure 2 devices can't be probed at the same time */ 265 /* xenbus ensure 2 devices can't be probed at the same time */
266 if (if_xennetrxbuf_cache_inited == 0) { 266 if (if_xennetrxbuf_cache_inited == 0) {
267 if_xennetrxbuf_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, 267 if_xennetrxbuf_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0,
268 "xnfrx", NULL, IPL_NET, NULL, NULL, NULL); 268 "xnfrx", NULL, IPL_NET, NULL, NULL, NULL);
269 if_xennetrxbuf_cache_inited = 1; 269 if_xennetrxbuf_cache_inited = 1;
270 } 270 }
271 271
272 /* initialize free RX and RX request lists */ 272 /* initialize free RX and RX request lists */
273 mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET); 273 mutex_init(&sc->sc_tx_lock, MUTEX_DEFAULT, IPL_NET);
274 SLIST_INIT(&sc->sc_txreq_head); 274 SLIST_INIT(&sc->sc_txreq_head);
275 for (i = 0; i < NET_TX_RING_SIZE; i++) { 275 for (i = 0; i < NET_TX_RING_SIZE; i++) {
276 struct xennet_txreq *txreq = &sc->sc_txreqs[i]; 276 struct xennet_txreq *txreq = &sc->sc_txreqs[i];
277  277
278 txreq->txreq_id = i; 278 txreq->txreq_id = i;
279 if (bus_dmamap_create(sc->sc_xbusd->xbusd_dmat, PAGE_SIZE, 1, 279 if (bus_dmamap_create(sc->sc_xbusd->xbusd_dmat, PAGE_SIZE, 1,
280 PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 280 PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
281 &txreq->txreq_dmamap) != 0) 281 &txreq->txreq_dmamap) != 0)
282 break; 282 break;
283 283
284 SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i], 284 SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
285 txreq_next); 285 txreq_next);
286 } 286 }
287 287
288 mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET); 288 mutex_init(&sc->sc_rx_lock, MUTEX_DEFAULT, IPL_NET);
289 SLIST_INIT(&sc->sc_rxreq_head); 289 SLIST_INIT(&sc->sc_rxreq_head);
290 for (i = 0; i < NET_RX_RING_SIZE; i++) { 290 for (i = 0; i < NET_RX_RING_SIZE; i++) {
291 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 291 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
292 rxreq->rxreq_id = i; 292 rxreq->rxreq_id = i;
293 if (bus_dmamap_create(sc->sc_xbusd->xbusd_dmat, PAGE_SIZE, 1, 293 if (bus_dmamap_create(sc->sc_xbusd->xbusd_dmat, PAGE_SIZE, 1,
294 PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, 294 PAGE_SIZE, PAGE_SIZE, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
295 &rxreq->rxreq_dmamap) != 0) 295 &rxreq->rxreq_dmamap) != 0)
296 break; 296 break;
297 rxreq->rxreq_gntref = GRANT_INVALID_REF; 297 rxreq->rxreq_gntref = GRANT_INVALID_REF;
298 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next); 298 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
299 } 299 }
300 sc->sc_free_rxreql = i; 300 sc->sc_free_rxreql = i;
301 if (sc->sc_free_rxreql == 0) { 301 if (sc->sc_free_rxreql == 0) {
302 aprint_error_dev(self, "failed to allocate rx memory\n"); 302 aprint_error_dev(self, "failed to allocate rx memory\n");
303 return; 303 return;
304 } 304 }
305 305
306 /* read mac address */ 306 /* read mac address */
307 err = xenbus_read(NULL, sc->sc_xbusd->xbusd_path, "mac", 307 err = xenbus_read(NULL, sc->sc_xbusd->xbusd_path, "mac",
308 mac, sizeof(mac)); 308 mac, sizeof(mac));
309 if (err) { 309 if (err) {
310 aprint_error_dev(self, "can't read mac address, err %d\n", err); 310 aprint_error_dev(self, "can't read mac address, err %d\n", err);
311 return; 311 return;
312 } 312 }
313 for (i = 0, p = mac; i < ETHER_ADDR_LEN; i++) { 313 for (i = 0, p = mac; i < ETHER_ADDR_LEN; i++) {
314 sc->sc_enaddr[i] = strtoul(p, &e, 16); 314 sc->sc_enaddr[i] = strtoul(p, &e, 16);
315 if ((e[0] == '\0' && i != 5) && e[0] != ':') { 315 if ((e[0] == '\0' && i != 5) && e[0] != ':') {
316 aprint_error_dev(self, 316 aprint_error_dev(self,
317 "%s is not a valid mac address\n", mac); 317 "%s is not a valid mac address\n", mac);
318 return; 318 return;
319 } 319 }
320 p = &e[1]; 320 p = &e[1];
321 } 321 }
322 aprint_normal_dev(self, "MAC address %s\n", 322 aprint_normal_dev(self, "MAC address %s\n",
323 ether_sprintf(sc->sc_enaddr)); 323 ether_sprintf(sc->sc_enaddr));
324 324
325 /* read ipv6 csum support flag */ 325 /* read ipv6 csum support flag */
326 err = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend, 326 err = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend,
327 "feature-ipv6-csum-offload", &uval, 10); 327 "feature-ipv6-csum-offload", &uval, 10);
328 sc->sc_ipv6_csum = (!err && uval == 1); 328 sc->sc_ipv6_csum = (!err && uval == 1);
329 329
330 /* Initialize ifnet structure and attach interface */ 330 /* Initialize ifnet structure and attach interface */
331 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 331 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
332 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 332 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
333 ifp->if_softc = sc; 333 ifp->if_softc = sc;
334 ifp->if_start = xennet_start; 334 ifp->if_start = xennet_start;
335 ifp->if_ioctl = xennet_ioctl; 335 ifp->if_ioctl = xennet_ioctl;
336 ifp->if_init = xennet_init; 336 ifp->if_init = xennet_init;
337 ifp->if_stop = xennet_stop; 337 ifp->if_stop = xennet_stop;
338 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 338 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
339 ifp->if_extflags = IFEF_MPSAFE; 339 ifp->if_extflags = IFEF_MPSAFE;
340 ifp->if_snd.ifq_maxlen = uimax(ifqmaxlen, NET_TX_RING_SIZE * 2); 340 ifp->if_snd.ifq_maxlen = uimax(ifqmaxlen, NET_TX_RING_SIZE * 2);
341 ifp->if_capabilities = 341 ifp->if_capabilities =
342 IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx 342 IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_IPv4_Tx
343 | IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx 343 | IFCAP_CSUM_UDPv4_Rx | IFCAP_CSUM_UDPv4_Tx
344 | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx 344 | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_TCPv4_Tx
345 | IFCAP_CSUM_UDPv6_Rx 345 | IFCAP_CSUM_UDPv6_Rx
346 | IFCAP_CSUM_TCPv6_Rx; 346 | IFCAP_CSUM_TCPv6_Rx;
347#define XN_M_CSUM_SUPPORTED ( \ 347#define XN_M_CSUM_SUPPORTED ( \
348 M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_IPv4 \ 348 M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_IPv4 \
349 | M_CSUM_TCPv6 | M_CSUM_UDPv6 \ 349 | M_CSUM_TCPv6 | M_CSUM_UDPv6 \
350 ) 350 )
351 if (sc->sc_ipv6_csum) { 351 if (sc->sc_ipv6_csum) {
352 /* 352 /*
353 * If backend supports IPv6 csum offloading, we can skip 353 * If backend supports IPv6 csum offloading, we can skip
354 * IPv6 csum for Tx packets. Rx packet validation can 354 * IPv6 csum for Tx packets. Rx packet validation can
355 * be skipped regardless. 355 * be skipped regardless.
356 */ 356 */
357 ifp->if_capabilities |= 357 ifp->if_capabilities |=
358 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_TCPv6_Tx; 358 IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_TCPv6_Tx;
359 } 359 }
360 360
361 IFQ_SET_READY(&ifp->if_snd); 361 IFQ_SET_READY(&ifp->if_snd);
362 if_attach(ifp); 362 if_attach(ifp);
363 if_deferred_start_init(ifp, NULL); 363 if_deferred_start_init(ifp, NULL);
364 ether_ifattach(ifp, sc->sc_enaddr); 364 ether_ifattach(ifp, sc->sc_enaddr);
365 365
366 /* alloc shared rings */ 366 /* alloc shared rings */
367 tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 367 tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
368 UVM_KMF_WIRED); 368 UVM_KMF_WIRED);
369 rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 369 rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
370 UVM_KMF_WIRED); 370 UVM_KMF_WIRED);
371 if (tx_ring == NULL || rx_ring == NULL) 371 if (tx_ring == NULL || rx_ring == NULL)
372 panic("%s: can't alloc rings", device_xname(self)); 372 panic("%s: can't alloc rings", device_xname(self));
373 373
374 sc->sc_tx_ring.sring = tx_ring; 374 sc->sc_tx_ring.sring = tx_ring;
375 sc->sc_rx_ring.sring = rx_ring; 375 sc->sc_rx_ring.sring = rx_ring;
376 376
377 /* resume shared structures and tell backend that we are ready */ 377 /* resume shared structures and tell backend that we are ready */
378 if (xennet_xenbus_resume(self, PMF_Q_NONE) == false) { 378 if (xennet_xenbus_resume(self, PMF_Q_NONE) == false) {
379 uvm_km_free(kernel_map, (vaddr_t)tx_ring, PAGE_SIZE, 379 uvm_km_free(kernel_map, (vaddr_t)tx_ring, PAGE_SIZE,
380 UVM_KMF_WIRED); 380 UVM_KMF_WIRED);
381 uvm_km_free(kernel_map, (vaddr_t)rx_ring, PAGE_SIZE, 381 uvm_km_free(kernel_map, (vaddr_t)rx_ring, PAGE_SIZE,
382 UVM_KMF_WIRED); 382 UVM_KMF_WIRED);
383 return; 383 return;
384 } 384 }
385 385
386 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 386 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
387 RND_TYPE_NET, RND_FLAG_DEFAULT); 387 RND_TYPE_NET, RND_FLAG_DEFAULT);
388 388
389 if (!pmf_device_register(self, xennet_xenbus_suspend, 389 if (!pmf_device_register(self, xennet_xenbus_suspend,
390 xennet_xenbus_resume)) 390 xennet_xenbus_resume))
391 aprint_error_dev(self, "couldn't establish power handler\n"); 391 aprint_error_dev(self, "couldn't establish power handler\n");
392 else 392 else
393 pmf_class_network_register(self, ifp); 393 pmf_class_network_register(self, ifp);
394} 394}
395 395
396static int 396static int
397xennet_xenbus_detach(device_t self, int flags) 397xennet_xenbus_detach(device_t self, int flags)
398{ 398{
399 struct xennet_xenbus_softc *sc = device_private(self); 399 struct xennet_xenbus_softc *sc = device_private(self);
400 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 400 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
401 RING_IDX i; 401 RING_IDX i;
402 402
403 if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == DETACH_SHUTDOWN) { 403 if ((flags & (DETACH_SHUTDOWN | DETACH_FORCE)) == DETACH_SHUTDOWN) {
404 /* Trigger state transition with backend */ 404 /* Trigger state transition with backend */
405 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosing); 405 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosing);
406 return EBUSY; 406 return EBUSY;
407 } 407 }
408 408
409 DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self))); 409 DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self)));
410 410
411 /* stop interface */ 411 /* stop interface */
412 IFNET_LOCK(ifp); 412 IFNET_LOCK(ifp);
413 xennet_stop(ifp, 1); 413 xennet_stop(ifp, 1);
414 IFNET_UNLOCK(ifp); 414 IFNET_UNLOCK(ifp);
415 if (sc->sc_ih != NULL) { 415 if (sc->sc_ih != NULL) {
416 xen_intr_disestablish(sc->sc_ih); 416 xen_intr_disestablish(sc->sc_ih);
417 sc->sc_ih = NULL; 417 sc->sc_ih = NULL;
418 } 418 }
419 419
420 /* collect any outstanding TX responses */ 420 /* collect any outstanding TX responses */
421 mutex_enter(&sc->sc_tx_lock); 421 mutex_enter(&sc->sc_tx_lock);
422 xennet_tx_complete(sc); 422 xennet_tx_complete(sc);
423 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) { 423 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
424 kpause("xndetach", true, hz/2, &sc->sc_tx_lock); 424 kpause("xndetach", true, hz/2, &sc->sc_tx_lock);
425 xennet_tx_complete(sc); 425 xennet_tx_complete(sc);
426 } 426 }
427 mutex_exit(&sc->sc_tx_lock); 427 mutex_exit(&sc->sc_tx_lock);
428 428
429 mutex_enter(&sc->sc_rx_lock); 429 mutex_enter(&sc->sc_rx_lock);
430 xennet_free_rx_buffer(sc); 430 xennet_free_rx_buffer(sc);
431 for (i = 0; i < NET_RX_RING_SIZE; i++) { 431 for (i = 0; i < NET_RX_RING_SIZE; i++) {
432 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 432 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
433 if (rxreq->rxreq_m != NULL) { 433 if (rxreq->rxreq_m != NULL) {
434 m_freem(rxreq->rxreq_m); 434 m_freem(rxreq->rxreq_m);
435 rxreq->rxreq_m = NULL; 435 rxreq->rxreq_m = NULL;
436 } 436 }
437 } 437 }
438 mutex_exit(&sc->sc_rx_lock); 438 mutex_exit(&sc->sc_rx_lock);
439 439
440 ether_ifdetach(ifp); 440 ether_ifdetach(ifp);
441 if_detach(ifp); 441 if_detach(ifp);
442 442
443 /* Unhook the entropy source. */ 443 /* Unhook the entropy source. */
444 rnd_detach_source(&sc->sc_rnd_source); 444 rnd_detach_source(&sc->sc_rnd_source);
445 445
446 /* Wait until the tx/rx rings stop being used by backend */ 446 /* Wait until the tx/rx rings stop being used by backend */
447 mutex_enter(&sc->sc_tx_lock); 447 mutex_enter(&sc->sc_tx_lock);
448 while (xengnt_status(sc->sc_tx_ring_gntref)) 448 while (xengnt_status(sc->sc_tx_ring_gntref))
449 kpause("xntxref", true, hz/2, &sc->sc_tx_lock); 449 kpause("xntxref", true, hz/2, &sc->sc_tx_lock);
450 xengnt_revoke_access(sc->sc_tx_ring_gntref); 450 xengnt_revoke_access(sc->sc_tx_ring_gntref);
451 mutex_exit(&sc->sc_tx_lock); 451 mutex_exit(&sc->sc_tx_lock);
452 uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE, 452 uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
453 UVM_KMF_WIRED); 453 UVM_KMF_WIRED);
454 mutex_enter(&sc->sc_rx_lock); 454 mutex_enter(&sc->sc_rx_lock);
455 while (xengnt_status(sc->sc_rx_ring_gntref)) 455 while (xengnt_status(sc->sc_rx_ring_gntref))
456 kpause("xnrxref", true, hz/2, &sc->sc_rx_lock); 456 kpause("xnrxref", true, hz/2, &sc->sc_rx_lock);
457 xengnt_revoke_access(sc->sc_rx_ring_gntref); 457 xengnt_revoke_access(sc->sc_rx_ring_gntref);
458 mutex_exit(&sc->sc_rx_lock); 458 mutex_exit(&sc->sc_rx_lock);
459 uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE, 459 uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
460 UVM_KMF_WIRED); 460 UVM_KMF_WIRED);
461 461
462 pmf_device_deregister(self); 462 pmf_device_deregister(self);
463 463
464 sc->sc_backend_status = BEST_DISCONNECTED; 464 sc->sc_backend_status = BEST_DISCONNECTED;
465 465
466 DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self))); 466 DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self)));
467 return 0; 467 return 0;
468} 468}
469 469
470static bool 470static bool
471xennet_xenbus_resume(device_t dev, const pmf_qual_t *qual) 471xennet_xenbus_resume(device_t dev, const pmf_qual_t *qual)
472{ 472{
473 struct xennet_xenbus_softc *sc = device_private(dev); 473 struct xennet_xenbus_softc *sc = device_private(dev);
474 int error; 474 int error;
475 netif_tx_sring_t *tx_ring; 475 netif_tx_sring_t *tx_ring;
476 netif_rx_sring_t *rx_ring; 476 netif_rx_sring_t *rx_ring;
477 paddr_t ma; 477 paddr_t ma;
478 478
479 /* invalidate the RX and TX rings */ 479 /* invalidate the RX and TX rings */
480 if (sc->sc_backend_status == BEST_SUSPENDED) { 480 if (sc->sc_backend_status == BEST_SUSPENDED) {
481 /* 481 /*
482 * Device was suspended, so ensure that access associated to 482 * Device was suspended, so ensure that access associated to
483 * the previous RX and TX rings are revoked. 483 * the previous RX and TX rings are revoked.
484 */ 484 */
485 xengnt_revoke_access(sc->sc_tx_ring_gntref); 485 xengnt_revoke_access(sc->sc_tx_ring_gntref);
486 xengnt_revoke_access(sc->sc_rx_ring_gntref); 486 xengnt_revoke_access(sc->sc_rx_ring_gntref);
487 } 487 }
488 488
489 sc->sc_tx_ring_gntref = GRANT_INVALID_REF; 489 sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
490 sc->sc_rx_ring_gntref = GRANT_INVALID_REF; 490 sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
491 491
492 tx_ring = sc->sc_tx_ring.sring; 492 tx_ring = sc->sc_tx_ring.sring;
493 rx_ring = sc->sc_rx_ring.sring; 493 rx_ring = sc->sc_rx_ring.sring;
494 494
495 /* Initialize rings */ 495 /* Initialize rings */
496 memset(tx_ring, 0, PAGE_SIZE); 496 memset(tx_ring, 0, PAGE_SIZE);
497 SHARED_RING_INIT(tx_ring); 497 SHARED_RING_INIT(tx_ring);
498 FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE); 498 FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
499 499
500 memset(rx_ring, 0, PAGE_SIZE); 500 memset(rx_ring, 0, PAGE_SIZE);
501 SHARED_RING_INIT(rx_ring); 501 SHARED_RING_INIT(rx_ring);
502 FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE); 502 FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
503 503
504 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma); 504 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
505 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref); 505 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
506 if (error) 506 if (error)
507 goto abort_resume; 507 goto abort_resume;
508 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma); 508 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
509 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref); 509 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
510 if (error) 510 if (error)
511 goto abort_resume; 511 goto abort_resume;
512 error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn); 512 error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
513 if (error) 513 if (error)
514 goto abort_resume; 514 goto abort_resume;
515 aprint_verbose_dev(dev, "using event channel %d\n", 515 aprint_verbose_dev(dev, "using event channel %d\n",
516 sc->sc_evtchn); 516 sc->sc_evtchn);
517 sc->sc_ih = xen_intr_establish_xname(-1, &xen_pic, sc->sc_evtchn, 517 sc->sc_ih = xen_intr_establish_xname(-1, &xen_pic, sc->sc_evtchn,
518 IST_LEVEL, IPL_NET, &xennet_handler, sc, true, device_xname(dev)); 518 IST_LEVEL, IPL_NET, &xennet_handler, sc, true, device_xname(dev));
519 KASSERT(sc->sc_ih != NULL); 519 KASSERT(sc->sc_ih != NULL);
520 return true; 520 return true;
521 521
522abort_resume: 522abort_resume:
523 xenbus_dev_fatal(sc->sc_xbusd, error, "resuming device"); 523 xenbus_dev_fatal(sc->sc_xbusd, error, "resuming device");
524 return false; 524 return false;
525} 525}
526 526
527static bool 527static bool
528xennet_talk_to_backend(struct xennet_xenbus_softc *sc) 528xennet_talk_to_backend(struct xennet_xenbus_softc *sc)
529{ 529{
530 int error; 530 int error;
531 unsigned long rx_copy; 531 unsigned long rx_copy;
532 struct xenbus_transaction *xbt; 532 struct xenbus_transaction *xbt;
533 const char *errmsg; 533 const char *errmsg;
534 534
535 error = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend, 535 error = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend,
536 "feature-rx-copy", &rx_copy, 10); 536 "feature-rx-copy", &rx_copy, 10);
537 if (error || !rx_copy) { 537 if (error || !rx_copy) {
538 xenbus_dev_fatal(sc->sc_xbusd, error, 538 xenbus_dev_fatal(sc->sc_xbusd, error,
539 "feature-rx-copy not supported"); 539 "feature-rx-copy not supported");
540 return false; 540 return false;
541 } 541 }
542 aprint_normal_dev(sc->sc_dev, "using RX copy mode\n"); 542 aprint_normal_dev(sc->sc_dev, "using RX copy mode\n");
543 543
544again: 544again:
545 xbt = xenbus_transaction_start(); 545 xbt = xenbus_transaction_start();
546 if (xbt == NULL) 546 if (xbt == NULL)
547 return false; 547 return false;
548 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 548 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
549 "vifname", "%s", device_xname(sc->sc_dev)); 549 "vifname", "%s", device_xname(sc->sc_dev));
550 if (error) { 550 if (error) {
551 errmsg = "vifname"; 551 errmsg = "vifname";
552 goto abort_transaction; 552 goto abort_transaction;
553 } 553 }
554 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 554 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
555 "tx-ring-ref","%u", sc->sc_tx_ring_gntref); 555 "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
556 if (error) { 556 if (error) {
557 errmsg = "writing tx ring-ref"; 557 errmsg = "writing tx ring-ref";
558 goto abort_transaction; 558 goto abort_transaction;
559 } 559 }
560 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 560 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
561 "rx-ring-ref","%u", sc->sc_rx_ring_gntref); 561 "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
562 if (error) { 562 if (error) {
563 errmsg = "writing rx ring-ref"; 563 errmsg = "writing rx ring-ref";
564 goto abort_transaction; 564 goto abort_transaction;
565 } 565 }
566 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 566 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
567 "request-rx-copy", "%lu", rx_copy); 567 "request-rx-copy", "%lu", rx_copy);
568 if (error) { 568 if (error) {
569 errmsg = "writing request-rx-copy"; 569 errmsg = "writing request-rx-copy";
570 goto abort_transaction; 570 goto abort_transaction;
571 } 571 }
572 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 572 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
573 "feature-rx-notify", "%u", 1); 573 "feature-rx-notify", "%u", 1);
574 if (error) { 574 if (error) {
575 errmsg = "writing feature-rx-notify"; 575 errmsg = "writing feature-rx-notify";
576 goto abort_transaction; 576 goto abort_transaction;
577 } 577 }
578 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 578 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
 579 "feature-ipv6-csum-offload", "%u", 1);
 580 if (error) {
 581 errmsg = "writing feature-ipv6-csum-offload";
 582 goto abort_transaction;
 583 }
 584 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
579 "event-channel", "%u", sc->sc_evtchn); 585 "event-channel", "%u", sc->sc_evtchn);
580 if (error) { 586 if (error) {
581 errmsg = "writing event channel"; 587 errmsg = "writing event channel";
582 goto abort_transaction; 588 goto abort_transaction;
583 } 589 }
584 error = xenbus_transaction_end(xbt, 0); 590 error = xenbus_transaction_end(xbt, 0);
585 if (error == EAGAIN) 591 if (error == EAGAIN)
586 goto again; 592 goto again;
587 if (error) { 593 if (error) {
588 xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction"); 594 xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
589 return false; 595 return false;
590 } 596 }
591 mutex_enter(&sc->sc_rx_lock); 597 mutex_enter(&sc->sc_rx_lock);
592 xennet_alloc_rx_buffer(sc); 598 xennet_alloc_rx_buffer(sc);
593 mutex_exit(&sc->sc_rx_lock); 599 mutex_exit(&sc->sc_rx_lock);
594 600
595 if (sc->sc_backend_status == BEST_SUSPENDED) { 601 if (sc->sc_backend_status == BEST_SUSPENDED) {
596 xenbus_device_resume(sc->sc_xbusd); 602 xenbus_device_resume(sc->sc_xbusd);
597 } 603 }
598 604
599 sc->sc_backend_status = BEST_CONNECTED; 605 sc->sc_backend_status = BEST_CONNECTED;
600 606
601 return true; 607 return true;
602 608
603abort_transaction: 609abort_transaction:
604 xenbus_transaction_end(xbt, 1); 610 xenbus_transaction_end(xbt, 1);
605 xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg); 611 xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
606 return false; 612 return false;
607} 613}
608 614
609static bool 615static bool
610xennet_xenbus_suspend(device_t dev, const pmf_qual_t *qual) 616xennet_xenbus_suspend(device_t dev, const pmf_qual_t *qual)
611{ 617{
612 struct xennet_xenbus_softc *sc = device_private(dev); 618 struct xennet_xenbus_softc *sc = device_private(dev);
613 619
614 /* 620 /*
615 * xennet_stop() is called by pmf(9) before xennet_xenbus_suspend(), 621 * xennet_stop() is called by pmf(9) before xennet_xenbus_suspend(),
616 * so we do not mask event channel here 622 * so we do not mask event channel here
617 */ 623 */
618 624
619 /* collect any outstanding TX responses */ 625 /* collect any outstanding TX responses */
620 mutex_enter(&sc->sc_tx_lock); 626 mutex_enter(&sc->sc_tx_lock);
621 xennet_tx_complete(sc); 627 xennet_tx_complete(sc);
622 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) { 628 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
623 kpause("xnsuspend", true, hz/2, &sc->sc_tx_lock); 629 kpause("xnsuspend", true, hz/2, &sc->sc_tx_lock);
624 xennet_tx_complete(sc); 630 xennet_tx_complete(sc);
625 } 631 }
626 mutex_exit(&sc->sc_tx_lock); 632 mutex_exit(&sc->sc_tx_lock);
627 633
628 /* 634 /*
629 * dom0 may still use references to the grants we gave away 635 * dom0 may still use references to the grants we gave away
630 * earlier during RX buffers allocation. So we do not free RX buffers 636 * earlier during RX buffers allocation. So we do not free RX buffers
631 * here, as dom0 does not expect the guest domain to suddenly revoke 637 * here, as dom0 does not expect the guest domain to suddenly revoke
632 * access to these grants. 638 * access to these grants.
633 */ 639 */
634 640
635 sc->sc_backend_status = BEST_SUSPENDED; 641 sc->sc_backend_status = BEST_SUSPENDED;
636 if (sc->sc_ih != NULL) { 642 if (sc->sc_ih != NULL) {
637 /* event already disabled */ 643 /* event already disabled */
638 xen_intr_disestablish(sc->sc_ih); 644 xen_intr_disestablish(sc->sc_ih);
639 sc->sc_ih = NULL; 645 sc->sc_ih = NULL;
640 } 646 }
641 647
642 xenbus_device_suspend(sc->sc_xbusd); 648 xenbus_device_suspend(sc->sc_xbusd);
643 aprint_verbose_dev(dev, "removed event channel %d\n", sc->sc_evtchn); 649 aprint_verbose_dev(dev, "removed event channel %d\n", sc->sc_evtchn);
644 650
645 return true; 651 return true;
646} 652}
647 653
648static void xennet_backend_changed(void *arg, XenbusState new_state) 654static void xennet_backend_changed(void *arg, XenbusState new_state)
649{ 655{
650 struct xennet_xenbus_softc *sc = device_private((device_t)arg); 656 struct xennet_xenbus_softc *sc = device_private((device_t)arg);
651 DPRINTF(("%s: new backend state %d\n", 657 DPRINTF(("%s: new backend state %d\n",
652 device_xname(sc->sc_dev), new_state)); 658 device_xname(sc->sc_dev), new_state));
653 659
654 switch (new_state) { 660 switch (new_state) {
655 case XenbusStateInitialising: 661 case XenbusStateInitialising:
656 case XenbusStateInitialised: 662 case XenbusStateInitialised:
657 case XenbusStateConnected: 663 case XenbusStateConnected:
658 break; 664 break;
659 case XenbusStateClosing: 665 case XenbusStateClosing:
660 sc->sc_backend_status = BEST_CLOSED; 666 sc->sc_backend_status = BEST_CLOSED;
661 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed); 667 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
662 break; 668 break;
663 case XenbusStateInitWait: 669 case XenbusStateInitWait:
664 if (sc->sc_backend_status == BEST_CONNECTED) 670 if (sc->sc_backend_status == BEST_CONNECTED)
665 break; 671 break;
666 if (xennet_talk_to_backend(sc)) 672 if (xennet_talk_to_backend(sc))
667 xenbus_switch_state(sc->sc_xbusd, NULL, 673 xenbus_switch_state(sc->sc_xbusd, NULL,
668 XenbusStateConnected); 674 XenbusStateConnected);
669 break; 675 break;
670 case XenbusStateUnknown: 676 case XenbusStateUnknown:
671 default: 677 default:
672 panic("bad backend state %d", new_state); 678 panic("bad backend state %d", new_state);
673 } 679 }
674} 680}
675 681
676/* 682/*
677 * Allocate RX buffers and put the associated request structures 683 * Allocate RX buffers and put the associated request structures
678 * in the ring. This allows the backend to use them to communicate with 684 * in the ring. This allows the backend to use them to communicate with
679 * frontend when some data is destined to frontend 685 * frontend when some data is destined to frontend
680 */ 686 */
681static void 687static void
682xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc) 688xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
683{ 689{
684 RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt; 690 RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
685 RING_IDX i; 691 RING_IDX i;
686 struct xennet_rxreq *req; 692 struct xennet_rxreq *req;
687 int otherend_id, notify; 693 int otherend_id, notify;
688 struct mbuf *m; 694 struct mbuf *m;
689 vaddr_t va; 695 vaddr_t va;
690 paddr_t pa, ma; 696 paddr_t pa, ma;
691 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 697 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
692 698
693 KASSERT(mutex_owned(&sc->sc_rx_lock)); 699 KASSERT(mutex_owned(&sc->sc_rx_lock));
694 700
695 otherend_id = sc->sc_xbusd->xbusd_otherend_id; 701 otherend_id = sc->sc_xbusd->xbusd_otherend_id;
696 702
697 for (i = 0; sc->sc_free_rxreql != 0; i++) { 703 for (i = 0; sc->sc_free_rxreql != 0; i++) {
698 req = SLIST_FIRST(&sc->sc_rxreq_head); 704 req = SLIST_FIRST(&sc->sc_rxreq_head);
699 KASSERT(req != NULL); 705 KASSERT(req != NULL);
700 KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]); 706 KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
701 KASSERT(req->rxreq_m == NULL); 707 KASSERT(req->rxreq_m == NULL);
702 KASSERT(req->rxreq_gntref = GRANT_INVALID_REF); 708 KASSERT(req->rxreq_gntref = GRANT_INVALID_REF);
703 709
704 MGETHDR(m, M_DONTWAIT, MT_DATA); 710 MGETHDR(m, M_DONTWAIT, MT_DATA);
705 if (__predict_false(m == NULL)) { 711 if (__predict_false(m == NULL)) {
706 printf("%s: rx no mbuf\n", ifp->if_xname); 712 printf("%s: rx no mbuf\n", ifp->if_xname);
707 break; 713 break;
708 } 714 }
709  715
710 va = (vaddr_t)pool_cache_get_paddr( 716 va = (vaddr_t)pool_cache_get_paddr(
711 if_xennetrxbuf_cache, PR_NOWAIT, &pa); 717 if_xennetrxbuf_cache, PR_NOWAIT, &pa);
712 if (__predict_false(va == 0)) { 718 if (__predict_false(va == 0)) {
713 printf("%s: rx no cluster\n", ifp->if_xname); 719 printf("%s: rx no cluster\n", ifp->if_xname);
714 m_freem(m); 720 m_freem(m);
715 break; 721 break;
716 } 722 }
717 723
718 MEXTADD(m, va, PAGE_SIZE, 724 MEXTADD(m, va, PAGE_SIZE,
719 M_DEVBUF, xennet_rx_mbuf_free, NULL); 725 M_DEVBUF, xennet_rx_mbuf_free, NULL);
720 m->m_len = m->m_pkthdr.len = PAGE_SIZE; 726 m->m_len = m->m_pkthdr.len = PAGE_SIZE;
721 m->m_ext.ext_paddr = pa; 727 m->m_ext.ext_paddr = pa;
722 m->m_flags |= M_EXT_RW; /* we own the buffer */ 728 m->m_flags |= M_EXT_RW; /* we own the buffer */
723 729
724 /* Set M_EXT_CLUSTER so that load_mbuf uses m_ext.ext_paddr */ 730 /* Set M_EXT_CLUSTER so that load_mbuf uses m_ext.ext_paddr */
725 m->m_flags |= M_EXT_CLUSTER; 731 m->m_flags |= M_EXT_CLUSTER;
726 if (__predict_false(bus_dmamap_load_mbuf( 732 if (__predict_false(bus_dmamap_load_mbuf(
727 sc->sc_xbusd->xbusd_dmat, 733 sc->sc_xbusd->xbusd_dmat,
728 req->rxreq_dmamap, m, BUS_DMA_NOWAIT) != 0)) { 734 req->rxreq_dmamap, m, BUS_DMA_NOWAIT) != 0)) {
729 printf("%s: rx mbuf load failed", ifp->if_xname); 735 printf("%s: rx mbuf load failed", ifp->if_xname);
730 m->m_flags &= ~M_EXT_CLUSTER; 736 m->m_flags &= ~M_EXT_CLUSTER;
731 m_freem(m); 737 m_freem(m);
732 break; 738 break;
733 } 739 }
734 m->m_flags &= ~M_EXT_CLUSTER; 740 m->m_flags &= ~M_EXT_CLUSTER;
735 741
736 KASSERT(req->rxreq_dmamap->dm_nsegs == 1); 742 KASSERT(req->rxreq_dmamap->dm_nsegs == 1);
737 ma = req->rxreq_dmamap->dm_segs[0].ds_addr; 743 ma = req->rxreq_dmamap->dm_segs[0].ds_addr;
738 744
739 if (xengnt_grant_access(otherend_id, trunc_page(ma), 745 if (xengnt_grant_access(otherend_id, trunc_page(ma),
740 0, &req->rxreq_gntref) != 0) { 746 0, &req->rxreq_gntref) != 0) {
741 m_freem(m); 747 m_freem(m);
742 break; 748 break;
743 } 749 }
744 750
745 req->rxreq_m = m; 751 req->rxreq_m = m;
746 752
747 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id = 753 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
748 req->rxreq_id; 754 req->rxreq_id;
749 755
750 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref = 756 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
751 req->rxreq_gntref; 757 req->rxreq_gntref;
752 758
753 SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next); 759 SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
754 sc->sc_free_rxreql--; 760 sc->sc_free_rxreql--;
755 } 761 }
756 762
757 /* Notify backend if more Rx is possible */ 763 /* Notify backend if more Rx is possible */
758 if (i > 0) { 764 if (i > 0) {
759 sc->sc_rx_ring.req_prod_pvt = req_prod + i; 765 sc->sc_rx_ring.req_prod_pvt = req_prod + i;
760 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_rx_ring, notify); 766 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_rx_ring, notify);
761 if (notify) 767 if (notify)
762 hypervisor_notify_via_evtchn(sc->sc_evtchn); 768 hypervisor_notify_via_evtchn(sc->sc_evtchn);
763 } 769 }
764} 770}
765 771
766/* 772/*
767 * Reclaim all RX buffers used by the I/O ring between frontend and backend 773 * Reclaim all RX buffers used by the I/O ring between frontend and backend
768 */ 774 */
769static void 775static void
770xennet_free_rx_buffer(struct xennet_xenbus_softc *sc) 776xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
771{ 777{
772 RING_IDX i; 778 RING_IDX i;
773 779
774 KASSERT(mutex_owned(&sc->sc_rx_lock)); 780 KASSERT(mutex_owned(&sc->sc_rx_lock));
775 781
776 DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev))); 782 DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
777 /* get back memory from RX ring */ 783 /* get back memory from RX ring */
778 for (i = 0; i < NET_RX_RING_SIZE; i++) { 784 for (i = 0; i < NET_RX_RING_SIZE; i++) {
779 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 785 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
780 786
781 if (rxreq->rxreq_gntref != GRANT_INVALID_REF) { 787 if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
782 /* 788 /*
783 * this req is still granted. Get back the page or 789 * this req is still granted. Get back the page or
784 * allocate a new one, and remap it. 790 * allocate a new one, and remap it.
785 */ 791 */
786 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, 792 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
787 rxreq_next); 793 rxreq_next);
788 sc->sc_free_rxreql++; 794 sc->sc_free_rxreql++;
789 795
790 xengnt_revoke_access(rxreq->rxreq_gntref); 796 xengnt_revoke_access(rxreq->rxreq_gntref);
791 rxreq->rxreq_gntref = GRANT_INVALID_REF; 797 rxreq->rxreq_gntref = GRANT_INVALID_REF;
792 } 798 }
793 799
794 if (rxreq->rxreq_m != NULL) { 800 if (rxreq->rxreq_m != NULL) {
795 m_freem(rxreq->rxreq_m); 801 m_freem(rxreq->rxreq_m);
796 rxreq->rxreq_m = NULL; 802 rxreq->rxreq_m = NULL;
797 } 803 }
798 } 804 }
799 DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev))); 805 DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev)));
800} 806}
801 807
802/* 808/*
803 * Clears a used RX request when its associated mbuf has been processed 809 * Clears a used RX request when its associated mbuf has been processed
804 */ 810 */
805static void 811static void
806xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg) 812xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg)
807{ 813{
808 KASSERT(buf == m->m_ext.ext_buf); 814 KASSERT(buf == m->m_ext.ext_buf);
809 KASSERT(arg == NULL); 815 KASSERT(arg == NULL);
810 KASSERT(m != NULL); 816 KASSERT(m != NULL);
811 vaddr_t va = (vaddr_t)(buf) & ~((vaddr_t)PAGE_MASK); 817 vaddr_t va = (vaddr_t)(buf) & ~((vaddr_t)PAGE_MASK);
812 pool_cache_put_paddr(if_xennetrxbuf_cache, 818 pool_cache_put_paddr(if_xennetrxbuf_cache,
813 (void *)va, m->m_ext.ext_paddr); 819 (void *)va, m->m_ext.ext_paddr);
814 pool_cache_put(mb_cache, m); 820 pool_cache_put(mb_cache, m);
815}; 821};
816 822
817static void 823static void
818xennet_rx_free_req(struct xennet_xenbus_softc *sc, struct xennet_rxreq *req) 824xennet_rx_free_req(struct xennet_xenbus_softc *sc, struct xennet_rxreq *req)
819{ 825{
820 KASSERT(mutex_owned(&sc->sc_rx_lock)); 826 KASSERT(mutex_owned(&sc->sc_rx_lock));
821 827
822 /* puts back the RX request in the list of free RX requests */ 828 /* puts back the RX request in the list of free RX requests */
823 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next); 829 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
824 sc->sc_free_rxreql++; 830 sc->sc_free_rxreql++;
825 831
826 /* 832 /*
827 * ring needs more requests to be pushed in, allocate some 833 * ring needs more requests to be pushed in, allocate some
828 * RX buffers to catch-up with backend's consumption 834 * RX buffers to catch-up with backend's consumption
829 */ 835 */
830 if (sc->sc_free_rxreql >= (NET_RX_RING_SIZE * 4 / 5) && 836 if (sc->sc_free_rxreql >= (NET_RX_RING_SIZE * 4 / 5) &&
831 __predict_true(sc->sc_backend_status == BEST_CONNECTED)) { 837 __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
832 xennet_alloc_rx_buffer(sc); 838 xennet_alloc_rx_buffer(sc);
833 } 839 }
834} 840}

/*
 * Process responses associated with the TX mbufs sent previously through
 * xennet_start().
 * Called at splsoftnet.
 */
static void
xennet_tx_complete(struct xennet_xenbus_softc *sc)
{
        struct xennet_txreq *req;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        RING_IDX resp_prod, i;

        DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
            sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));

        KASSERT(mutex_owned(&sc->sc_tx_lock));
again:
        resp_prod = sc->sc_tx_ring.sring->rsp_prod;
        xen_rmb();
        for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
                req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
                KASSERT(req->txreq_id ==
                    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
                KASSERT(xengnt_status(req->txreq_gntref) == 0);
                KASSERT(req->txreq_m != NULL);

                if (__predict_false(
                    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
                    NETIF_RSP_OKAY))
                        if_statinc(ifp, if_oerrors);
                else
                        if_statinc(ifp, if_opackets);
                xengnt_revoke_access(req->txreq_gntref);
                bus_dmamap_unload(sc->sc_xbusd->xbusd_dmat, req->txreq_dmamap);
                m_freem(req->txreq_m);
                req->txreq_m = NULL;
                SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
        }

        sc->sc_tx_ring.rsp_cons = resp_prod;
        /*
         * Set the new event threshold halfway towards the outstanding
         * requests, then re-check rsp_prod in case the backend produced
         * more responses while rsp_cons was being updated.
         */
        sc->sc_tx_ring.sring->rsp_event =
            resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
        xen_wmb();
        if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
                goto again;
}

/*
 * Xennet event handler.
 * Get outstanding responses of TX packets, then collect all responses of
 * pending RX packets.
 * Called at splnet.
 */
static int
xennet_handler(void *arg)
{
        struct xennet_xenbus_softc *sc = arg;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        RING_IDX resp_prod, i;
        struct xennet_rxreq *req;
        struct mbuf *m;
        int more_to_do;

        if (sc->sc_backend_status != BEST_CONNECTED)
                return 1;

        /* Poke the Tx queue in case we ran out of Tx buffers earlier */
        if_schedule_deferred_start(ifp);

        rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);

again:
        DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
            sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));

        mutex_enter(&sc->sc_rx_lock);
        resp_prod = sc->sc_rx_ring.sring->rsp_prod;
        xen_rmb(); /* ensure we see replies up to resp_prod */

        for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
                netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
                req = &sc->sc_rxreqs[rx->id];
                KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
                KASSERT(req->rxreq_id == rx->id);

                xengnt_revoke_access(req->rxreq_gntref);
                req->rxreq_gntref = GRANT_INVALID_REF;

                m = req->rxreq_m;
                req->rxreq_m = NULL;

                m->m_len = m->m_pkthdr.len = rx->status;
                bus_dmamap_sync(sc->sc_xbusd->xbusd_dmat, req->rxreq_dmamap, 0,
                    m->m_pkthdr.len, BUS_DMASYNC_PREREAD);

                MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
                m_set_rcvif(m, ifp);

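                /*
                 * NETRXF_csum_blank means the sender left the checksum
                 * field blank (typically checksum offload from another
                 * local domain), so let xennet_checksum_fill() handle it;
                 * NETRXF_data_validated means the backend already verified
                 * the data, so just mark the mbuf as checksum-good.
                 */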
                if (rx->flags & NETRXF_csum_blank)
                        xennet_checksum_fill(ifp, m);
                else if (rx->flags & NETRXF_data_validated)
                        m->m_pkthdr.csum_flags = XN_M_CSUM_SUPPORTED;
                /* Freeing the req may overwrite *rx, so do it late */
                xennet_rx_free_req(sc, req);

                /* Pass the packet up. */
                if_percpuq_enqueue(ifp->if_percpuq, m);
        }
        xen_rmb();
        sc->sc_rx_ring.rsp_cons = i;
        RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
        mutex_exit(&sc->sc_rx_lock);

        if (more_to_do) {
                DPRINTF(("%s: %s more_to_do\n", ifp->if_xname, __func__));
                goto again;
        }

        return 1;
}

/*
 * The output routine of a xennet interface.  Prepares mbufs for TX,
 * and notifies the backend when finished.
 * Called at splsoftnet.
 */
void
xennet_start(struct ifnet *ifp)
{
        struct xennet_xenbus_softc *sc = ifp->if_softc;
        struct mbuf *m;
        netif_tx_request_t *txreq;
        RING_IDX req_prod;
        paddr_t ma;
        struct xennet_txreq *req;
        int notify;
        int do_notify = 0;

        mutex_enter(&sc->sc_tx_lock);

        rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);

        xennet_tx_complete(sc);

        req_prod = sc->sc_tx_ring.req_prod_pvt;
        while (/*CONSTCOND*/1) {
                uint16_t txflags;

                req = SLIST_FIRST(&sc->sc_txreq_head);
                if (__predict_false(req == NULL)) {
                        break;
                }
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

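                /*
                 * If the stack deferred checksumming for this packet,
                 * tell the backend the checksum field is blank so it gets
                 * computed later; otherwise mark the data as already
                 * validated.
                 */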
                if ((m->m_pkthdr.csum_flags & XN_M_CSUM_SUPPORTED) != 0) {
                        txflags = NETTXF_csum_blank;
                } else {
                        txflags = NETTXF_data_validated;
                }

                /* Try to load the mbuf as-is; if that fails, copy it into a new one */
                if (__predict_false(bus_dmamap_load_mbuf(
                    sc->sc_xbusd->xbusd_dmat,
                    req->txreq_dmamap, m, BUS_DMA_NOWAIT) != 0)) {
                        struct mbuf *new_m;

                        MGETHDR(new_m, M_DONTWAIT, MT_DATA);
                        if (__predict_false(new_m == NULL)) {
                                printf("%s: cannot allocate new mbuf\n",
                                    device_xname(sc->sc_dev));
                                m_freem(m);
                                break;
                        }
                        if (m->m_pkthdr.len > MHLEN) {
                                MCLGET(new_m, M_DONTWAIT);
                                if (__predict_false(
                                    (new_m->m_flags & M_EXT) == 0)) {
                                        DPRINTF(("%s: no mbuf cluster\n",
                                            device_xname(sc->sc_dev)));
                                        m_freem(new_m);
                                        m_freem(m);
                                        break;
                                }
                        }

                        m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, void *));
                        new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;
                        m_freem(m);
                        m = new_m;

                        if (__predict_false(bus_dmamap_load_mbuf(
                            sc->sc_xbusd->xbusd_dmat,
                            req->txreq_dmamap, m, BUS_DMA_NOWAIT) != 0)) {
                                printf("%s: cannot load new mbuf\n",
                                    device_xname(sc->sc_dev));
                                m_freem(m);
                                break;
                        }
                }

                KASSERT(req->txreq_dmamap->dm_nsegs == 1);
                ma = req->txreq_dmamap->dm_segs[0].ds_addr;
                KASSERT(((ma ^ (ma + m->m_pkthdr.len - 1)) & PTE_4KFRAME) == 0);

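                /*
                 * Grant the backend read-only access to the page holding
                 * the frame; on failure, undo the DMA load and drop the
                 * packet.
                 */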
                if (__predict_false(xengnt_grant_access(
                    sc->sc_xbusd->xbusd_otherend_id,
                    trunc_page(ma),
                    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
                        bus_dmamap_unload(sc->sc_xbusd->xbusd_dmat,
                            req->txreq_dmamap);
                        m_freem(m);
                        break;
                }

                /* We are now committed to transmit the packet */
                bus_dmamap_sync(sc->sc_xbusd->xbusd_dmat, req->txreq_dmamap, 0,
                    m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
                MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);

                SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
                req->txreq_m = m;

                DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
                    "mbuf %p, buf %p/%p, size %d\n",
                    req->txreq_id, m, mtod(m, void *), (void *)ma,
                    m->m_pkthdr.len));

#ifdef XENNET_DEBUG_DUMP
                xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s",
                    req->txreq_id);
#endif

                txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
                txreq->id = req->txreq_id;
                txreq->gref = req->txreq_gntref;
                txreq->offset = ma & ~PTE_4KFRAME;
                txreq->size = m->m_pkthdr.len;
                txreq->flags = txflags;

                req_prod++;
                sc->sc_tx_ring.req_prod_pvt = req_prod;
                RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
                if (notify)
                        do_notify = 1;

                /*
                 * Pass packet to bpf if there is a listener.
                 */
                bpf_mtap(ifp, m, BPF_D_OUT);
        }

        if (do_notify)
                hypervisor_notify_via_evtchn(sc->sc_evtchn);

        mutex_exit(&sc->sc_tx_lock);

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
            device_xname(sc->sc_dev)));
}

int
xennet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
#ifdef XENNET_DEBUG
        struct xennet_xenbus_softc *sc = ifp->if_softc;
#endif
        int error = 0;

        KASSERT(IFNET_LOCKED(ifp));

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n",
            device_xname(sc->sc_dev)));
        error = ether_ioctl(ifp, cmd, data);
        if (error == ENETRESET)
                error = 0;

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
            device_xname(sc->sc_dev), error));

        return error;
}

int
xennet_init(struct ifnet *ifp)
{
        struct xennet_xenbus_softc *sc = ifp->if_softc;

        KASSERT(IFNET_LOCKED(ifp));

        DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n",
            device_xname(sc->sc_dev)));

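        /*
         * If the interface was not yet running, re-arm the RX ring's
         * response event and unmask/kick the event channel so pending
         * responses get processed.
         */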
        if ((ifp->if_flags & IFF_RUNNING) == 0) {
                mutex_enter(&sc->sc_rx_lock);
                sc->sc_rx_ring.sring->rsp_event =
                    sc->sc_rx_ring.rsp_cons + 1;
                mutex_exit(&sc->sc_rx_lock);
                hypervisor_unmask_event(sc->sc_evtchn);
                hypervisor_notify_via_evtchn(sc->sc_evtchn);
        }
        ifp->if_flags |= IFF_RUNNING;

        return 0;
}

void
xennet_stop(struct ifnet *ifp, int disable)
{
        struct xennet_xenbus_softc *sc = ifp->if_softc;

        KASSERT(IFNET_LOCKED(ifp));

        ifp->if_flags &= ~IFF_RUNNING;
        hypervisor_mask_event(sc->sc_evtchn);
}

#if defined(NFS_BOOT_BOOTSTATIC)
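/*
 * Fill in the NFS diskless boot parameters (local, gateway and server
 * addresses, netmask, root path) from the Xen kernel command line.
 */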
int
xennet_bootstatic_callback(struct nfs_diskless *nd)
{
#if 0
        struct ifnet *ifp = nd->nd_ifp;
        struct xennet_xenbus_softc *sc =
            (struct xennet_xenbus_softc *)ifp->if_softc;
#endif
        int flags = 0;
        union xen_cmdline_parseinfo xcp;
        struct sockaddr_in *sin;

        memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
        xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
        xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
        xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);

        if (xcp.xcp_netinfo.xi_root[0] != '\0') {
                flags |= NFS_BOOT_HAS_SERVER;
                if (strchr(xcp.xcp_netinfo.xi_root, ':') != NULL)
                        flags |= NFS_BOOT_HAS_ROOTPATH;
        }

        nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
        nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
        nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);

        sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
        memset((void *)sin, 0, sizeof(*sin));
        sin->sin_len = sizeof(*sin);
        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);

        if (nd->nd_myip.s_addr)
                flags |= NFS_BOOT_HAS_MYIP;
        if (nd->nd_gwip.s_addr)
                flags |= NFS_BOOT_HAS_GWIP;
        if (nd->nd_mask.s_addr)
                flags |= NFS_BOOT_HAS_MASK;
        if (sin->sin_addr.s_addr)
                flags |= NFS_BOOT_HAS_SERVADDR;

        return flags;
}
#endif /* defined(NFS_BOOT_BOOTSTATIC) */

#ifdef XENNET_DEBUG_DUMP
#define XCHR(x) hexdigits[(x) & 0xf]
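/*
 * Dump a packet in a hexdump(1)-like layout: an offset column, sixteen
 * hex bytes per line and an ASCII column.
 */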
static void
xennet_hex_dump(const unsigned char *pkt, size_t len, const char *type, int id)
{
        size_t i, j;

        printf("pkt %p len %zd/%zx type %s id %d\n", pkt, len, len, type, id);
        printf("00000000 ");
        for(i=0; i<len; i++) {
                printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
                if ((i+1) % 16 == 8)
                        printf(" ");
                if ((i+1) % 16 == 0) {
                        printf(" %c", '|');
                        for(j=0; j<16; j++)
                                printf("%c", pkt[i-15+j]>=32 &&
                                    pkt[i-15+j]<127?pkt[i-15+j]:'.');
                        printf("%c\n%c%c%c%c%c%c%c%c ", '|',
                            XCHR((i+1)>>28), XCHR((i+1)>>24),
                            XCHR((i+1)>>20), XCHR((i+1)>>16),
                            XCHR((i+1)>>12), XCHR((i+1)>>8),
                            XCHR((i+1)>>4), XCHR(i+1));
                }
        }
        printf("\n");
}
#undef XCHR
#endif