Mon Sep 28 01:31:46 2009 UTC
Pull up following revision(s) (requested by bouyer in ticket #1027):
	sys/arch/xen/xen/if_xennet_xenbus.c: revision 1.36
Patch from Brian Marcotte on port-xen@:
write a "feature-rx-notify" to the xenstore, which is used by recent
linux dom0 kernels. This reduce packet loss when using a NetBSD domU
on such linux dom0.
This entry is ignored by NetBSD and older linux dom0 kernels.


(snj)
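For reference, the substance of the change is a single new xenstore node written
from xennet_xenbus_resume(), inside the existing xenbus transaction and just
before the "event-channel" entry. The added code is reproduced from the diff
below; the leading comment is an editorial gloss on why this reduces packet
loss, not part of the commit itself:

	/*
	 * Advertise that this frontend notifies the backend when it
	 * refills the RX ring; recent linux backends can then queue
	 * rather than drop packets while the ring is temporarily empty.
	 */
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "feature-rx-notify", "%u", 1);
	if (error) {
		errmsg = "writing feature-rx-notify";
		goto abort_transaction;
	}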
diff -r1.29.2.2 -r1.29.2.3 src/sys/arch/xen/xen/if_xennet_xenbus.c


--- src/sys/arch/xen/xen/if_xennet_xenbus.c 2009/05/13 01:05:20 1.29.2.2
+++ src/sys/arch/xen/xen/if_xennet_xenbus.c 2009/09/28 01:31:46 1.29.2.3
@@ -1,1244 +1,1250 @@
1/* $NetBSD: if_xennet_xenbus.c,v 1.29.2.2 2009/05/13 01:05:20 snj Exp $ */ 1/* $NetBSD: if_xennet_xenbus.c,v 1.29.2.3 2009/09/28 01:31:46 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software 14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement: 15 * must display the following acknowledgement:
16 * This product includes software developed by Manuel Bouyer. 16 * This product includes software developed by Manuel Bouyer.
17 * 4. The name of the author may not be used to endorse or promote products 17 * 4. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission. 18 * derived from this software without specific prior written permission.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * 30 *
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 2004 Christian Limpach. 34 * Copyright (c) 2004 Christian Limpach.
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software 45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement: 46 * must display the following acknowledgement:
47 * This product includes software developed by Christian Limpach. 47 * This product includes software developed by Christian Limpach.
48 * 4. The name of the author may not be used to endorse or promote products 48 * 4. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission. 49 * derived from this software without specific prior written permission.
50 * 50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 */ 61 */
62 62
63#include <sys/cdefs.h> 63#include <sys/cdefs.h>
64__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.29.2.2 2009/05/13 01:05:20 snj Exp $"); 64__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.29.2.3 2009/09/28 01:31:46 snj Exp $");
65 65
66#include "opt_xen.h" 66#include "opt_xen.h"
67#include "opt_nfs_boot.h" 67#include "opt_nfs_boot.h"
68#include "rnd.h" 68#include "rnd.h"
69#include "bpfilter.h" 69#include "bpfilter.h"
70 70
71#include <sys/param.h> 71#include <sys/param.h>
72#include <sys/device.h> 72#include <sys/device.h>
73#include <sys/conf.h> 73#include <sys/conf.h>
74#include <sys/kernel.h> 74#include <sys/kernel.h>
75#include <sys/proc.h> 75#include <sys/proc.h>
76#include <sys/systm.h> 76#include <sys/systm.h>
77#include <sys/intr.h> 77#include <sys/intr.h>
78#if NRND > 0 78#if NRND > 0
79#include <sys/rnd.h> 79#include <sys/rnd.h>
80#endif 80#endif
81 81
82#include <net/if.h> 82#include <net/if.h>
83#include <net/if_dl.h> 83#include <net/if_dl.h>
84#include <net/if_ether.h> 84#include <net/if_ether.h>
85#if NBPFILTER > 0 85#if NBPFILTER > 0
86#include <net/bpf.h> 86#include <net/bpf.h>
87#include <net/bpfdesc.h> 87#include <net/bpfdesc.h>
88#endif 88#endif
89 89
90#if defined(NFS_BOOT_BOOTSTATIC) 90#if defined(NFS_BOOT_BOOTSTATIC)
91#include <sys/fstypes.h> 91#include <sys/fstypes.h>
92#include <sys/mount.h> 92#include <sys/mount.h>
93#include <sys/statvfs.h> 93#include <sys/statvfs.h>
94#include <netinet/in.h> 94#include <netinet/in.h>
95#include <nfs/rpcv2.h> 95#include <nfs/rpcv2.h>
96#include <nfs/nfsproto.h> 96#include <nfs/nfsproto.h>
97#include <nfs/nfs.h> 97#include <nfs/nfs.h>
98#include <nfs/nfsmount.h> 98#include <nfs/nfsmount.h>
99#include <nfs/nfsdiskless.h> 99#include <nfs/nfsdiskless.h>
100#include <xen/if_xennetvar.h> 100#include <xen/if_xennetvar.h>
101#endif /* defined(NFS_BOOT_BOOTSTATIC) */ 101#endif /* defined(NFS_BOOT_BOOTSTATIC) */
102 102
103#include <xen/xennet_checksum.h> 103#include <xen/xennet_checksum.h>
104 104
105#include <uvm/uvm.h> 105#include <uvm/uvm.h>
106 106
107#include <xen/hypervisor.h> 107#include <xen/hypervisor.h>
108#include <xen/evtchn.h> 108#include <xen/evtchn.h>
109#include <xen/granttables.h> 109#include <xen/granttables.h>
110#include <xen/xen3-public/io/netif.h> 110#include <xen/xen3-public/io/netif.h>
111#include <xen/xenpmap.h> 111#include <xen/xenpmap.h>
112 112
113#include <xen/xenbus.h> 113#include <xen/xenbus.h>
114#include "locators.h" 114#include "locators.h"
115 115
116#undef XENNET_DEBUG_DUMP 116#undef XENNET_DEBUG_DUMP
117#undef XENNET_DEBUG 117#undef XENNET_DEBUG
118#ifdef XENNET_DEBUG 118#ifdef XENNET_DEBUG
119#define XEDB_FOLLOW 0x01 119#define XEDB_FOLLOW 0x01
120#define XEDB_INIT 0x02  120#define XEDB_INIT 0x02
121#define XEDB_EVENT 0x04 121#define XEDB_EVENT 0x04
122#define XEDB_MBUF 0x08 122#define XEDB_MBUF 0x08
123#define XEDB_MEM 0x10 123#define XEDB_MEM 0x10
124int xennet_debug = 0xff; 124int xennet_debug = 0xff;
125#define DPRINTF(x) if (xennet_debug) printf x; 125#define DPRINTF(x) if (xennet_debug) printf x;
126#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x; 126#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
127#else 127#else
128#define DPRINTF(x) 128#define DPRINTF(x)
129#define DPRINTFN(n,x) 129#define DPRINTFN(n,x)
130#endif 130#endif
131 131
132#define GRANT_INVALID_REF -1 /* entry is free */ 132#define GRANT_INVALID_REF -1 /* entry is free */
133#define GRANT_STACK_REF -2 /* entry owned by the network stack */ 133#define GRANT_STACK_REF -2 /* entry owned by the network stack */
134 134
135#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) 135#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
136#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) 136#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
137 137
138struct xennet_txreq { 138struct xennet_txreq {
139 SLIST_ENTRY(xennet_txreq) txreq_next; 139 SLIST_ENTRY(xennet_txreq) txreq_next;
140 uint16_t txreq_id; /* ID passed to backend */ 140 uint16_t txreq_id; /* ID passed to backend */
141 grant_ref_t txreq_gntref; /* grant ref of this request */ 141 grant_ref_t txreq_gntref; /* grant ref of this request */
142 struct mbuf *txreq_m; /* mbuf being transmitted */ 142 struct mbuf *txreq_m; /* mbuf being transmitted */
143}; 143};
144 144
145struct xennet_rxreq { 145struct xennet_rxreq {
146 SLIST_ENTRY(xennet_rxreq) rxreq_next; 146 SLIST_ENTRY(xennet_rxreq) rxreq_next;
147 uint16_t rxreq_id; /* ID passed to backend */ 147 uint16_t rxreq_id; /* ID passed to backend */
148 grant_ref_t rxreq_gntref; /* grant ref of this request */ 148 grant_ref_t rxreq_gntref; /* grant ref of this request */
149/* va/pa for this receive buf. ma will be provided by backend */ 149/* va/pa for this receive buf. ma will be provided by backend */
150 paddr_t rxreq_pa; 150 paddr_t rxreq_pa;
151 vaddr_t rxreq_va; 151 vaddr_t rxreq_va;
152 struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */ 152 struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */
153}; 153};
154 154
155struct xennet_xenbus_softc { 155struct xennet_xenbus_softc {
156 device_t sc_dev; 156 device_t sc_dev;
157 struct ethercom sc_ethercom; 157 struct ethercom sc_ethercom;
158 uint8_t sc_enaddr[6]; 158 uint8_t sc_enaddr[6];
159 struct xenbus_device *sc_xbusd; 159 struct xenbus_device *sc_xbusd;
160 160
161 netif_tx_front_ring_t sc_tx_ring; 161 netif_tx_front_ring_t sc_tx_ring;
162 netif_rx_front_ring_t sc_rx_ring; 162 netif_rx_front_ring_t sc_rx_ring;
163 163
164 unsigned int sc_evtchn; 164 unsigned int sc_evtchn;
165 void *sc_softintr; 165 void *sc_softintr;
166 166
167 grant_ref_t sc_tx_ring_gntref; 167 grant_ref_t sc_tx_ring_gntref;
168 grant_ref_t sc_rx_ring_gntref; 168 grant_ref_t sc_rx_ring_gntref;
169 169
170 struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE]; 170 struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
171 struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE]; 171 struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
172 SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */ 172 SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
173 SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */ 173 SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
174 int sc_free_rxreql; /* number of free receive request struct */ 174 int sc_free_rxreql; /* number of free receive request struct */
175 175
176 int sc_backend_status; /* our status with backend */ 176 int sc_backend_status; /* our status with backend */
177#define BEST_CLOSED 0 177#define BEST_CLOSED 0
178#define BEST_DISCONNECTED 1 178#define BEST_DISCONNECTED 1
179#define BEST_CONNECTED 2 179#define BEST_CONNECTED 2
180#define BEST_SUSPENDED 3 180#define BEST_SUSPENDED 3
181#if NRND > 0 181#if NRND > 0
182 rndsource_element_t sc_rnd_source; 182 rndsource_element_t sc_rnd_source;
183#endif 183#endif
184}; 184};
185#define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \ 185#define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \
186 (sc)->sc_rx_ring.sring->rsp_prod) 186 (sc)->sc_rx_ring.sring->rsp_prod)
187 187
188/* too big to be on stack */ 188/* too big to be on stack */
189static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; 189static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
190static u_long xennet_pages[NET_RX_RING_SIZE]; 190static u_long xennet_pages[NET_RX_RING_SIZE];
191 191
192static int xennet_xenbus_match(device_t, cfdata_t, void *); 192static int xennet_xenbus_match(device_t, cfdata_t, void *);
193static void xennet_xenbus_attach(device_t, device_t, void *); 193static void xennet_xenbus_attach(device_t, device_t, void *);
194static int xennet_xenbus_detach(device_t, int); 194static int xennet_xenbus_detach(device_t, int);
195static void xennet_backend_changed(void *, XenbusState); 195static void xennet_backend_changed(void *, XenbusState);
196 196
197static int xennet_xenbus_resume(void *); 197static int xennet_xenbus_resume(void *);
198static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *); 198static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
199static void xennet_free_rx_buffer(struct xennet_xenbus_softc *); 199static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
200static void xennet_tx_complete(struct xennet_xenbus_softc *); 200static void xennet_tx_complete(struct xennet_xenbus_softc *);
201static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *); 201static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *);
202static int xennet_handler(void *); 202static int xennet_handler(void *);
203#ifdef XENNET_DEBUG_DUMP 203#ifdef XENNET_DEBUG_DUMP
204static void xennet_hex_dump(const unsigned char *, size_t, const char *, int); 204static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
205#endif 205#endif
206 206
207static int xennet_init(struct ifnet *); 207static int xennet_init(struct ifnet *);
208static void xennet_stop(struct ifnet *, int); 208static void xennet_stop(struct ifnet *, int);
209static void xennet_reset(struct xennet_xenbus_softc *); 209static void xennet_reset(struct xennet_xenbus_softc *);
210static void xennet_softstart(void *); 210static void xennet_softstart(void *);
211static void xennet_start(struct ifnet *); 211static void xennet_start(struct ifnet *);
212static int xennet_ioctl(struct ifnet *, u_long, void *); 212static int xennet_ioctl(struct ifnet *, u_long, void *);
213static void xennet_watchdog(struct ifnet *); 213static void xennet_watchdog(struct ifnet *);
214 214
215CFATTACH_DECL_NEW(xennet_xenbus, sizeof(struct xennet_xenbus_softc), 215CFATTACH_DECL_NEW(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
216 xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL); 216 xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);
217 217
218static int 218static int
219xennet_xenbus_match(device_t parent, cfdata_t match, void *aux) 219xennet_xenbus_match(device_t parent, cfdata_t match, void *aux)
220{ 220{
221 struct xenbusdev_attach_args *xa = aux; 221 struct xenbusdev_attach_args *xa = aux;
222 222
223 if (strcmp(xa->xa_type, "vif") != 0) 223 if (strcmp(xa->xa_type, "vif") != 0)
224 return 0; 224 return 0;
225 225
226 if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT && 226 if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
227 match->cf_loc[XENBUSCF_ID] != xa->xa_id) 227 match->cf_loc[XENBUSCF_ID] != xa->xa_id)
228 return 0; 228 return 0;
229 229
230 return 1; 230 return 1;
231} 231}
232 232
233static void 233static void
234xennet_xenbus_attach(device_t parent, device_t self, void *aux) 234xennet_xenbus_attach(device_t parent, device_t self, void *aux)
235{ 235{
236 struct xennet_xenbus_softc *sc = device_private(self); 236 struct xennet_xenbus_softc *sc = device_private(self);
237 struct xenbusdev_attach_args *xa = aux; 237 struct xenbusdev_attach_args *xa = aux;
238 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 238 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
239 int err; 239 int err;
240 RING_IDX i; 240 RING_IDX i;
241 char *val, *e, *p; 241 char *val, *e, *p;
242 int s; 242 int s;
243 extern int ifqmaxlen; /* XXX */ 243 extern int ifqmaxlen; /* XXX */
244#ifdef XENNET_DEBUG 244#ifdef XENNET_DEBUG
245 char **dir; 245 char **dir;
246 int dir_n = 0; 246 int dir_n = 0;
247 char id_str[20]; 247 char id_str[20];
248#endif 248#endif
249 249
250 aprint_normal(": Xen Virtual Network Interface\n"); 250 aprint_normal(": Xen Virtual Network Interface\n");
251 sc->sc_dev = self; 251 sc->sc_dev = self;
252 252
253#ifdef XENNET_DEBUG 253#ifdef XENNET_DEBUG
254 printf("path: %s\n", xa->xa_xbusd->xbusd_path); 254 printf("path: %s\n", xa->xa_xbusd->xbusd_path);
255 snprintf(id_str, sizeof(id_str), "%d", xa->xa_id); 255 snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
256 err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir); 256 err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
257 if (err) { 257 if (err) {
258 aprint_error_dev(self, "xenbus_directory err %d\n", err); 258 aprint_error_dev(self, "xenbus_directory err %d\n", err);
259 } else { 259 } else {
260 printf("%s/\n", xa->xa_xbusd->xbusd_path); 260 printf("%s/\n", xa->xa_xbusd->xbusd_path);
261 for (i = 0; i < dir_n; i++) { 261 for (i = 0; i < dir_n; i++) {
262 printf("\t/%s", dir[i]); 262 printf("\t/%s", dir[i]);
263 err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i], 263 err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i],
264 NULL, &val); 264 NULL, &val);
265 if (err) { 265 if (err) {
266 aprint_error_dev(self, "xenbus_read err %d\n", err); 266 aprint_error_dev(self, "xenbus_read err %d\n", err);
267 } else { 267 } else {
268 printf(" = %s\n", val); 268 printf(" = %s\n", val);
269 free(val, M_DEVBUF); 269 free(val, M_DEVBUF);
270 } 270 }
271 } 271 }
272 } 272 }
273#endif /* XENNET_DEBUG */ 273#endif /* XENNET_DEBUG */
274 sc->sc_xbusd = xa->xa_xbusd; 274 sc->sc_xbusd = xa->xa_xbusd;
275 sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed; 275 sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
276 276
277 /* initialize free RX and RX request lists */ 277 /* initialize free RX and RX request lists */
278 SLIST_INIT(&sc->sc_txreq_head); 278 SLIST_INIT(&sc->sc_txreq_head);
279 for (i = 0; i < NET_TX_RING_SIZE; i++) { 279 for (i = 0; i < NET_TX_RING_SIZE; i++) {
280 sc->sc_txreqs[i].txreq_id = i; 280 sc->sc_txreqs[i].txreq_id = i;
281 SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i], 281 SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
282 txreq_next); 282 txreq_next);
283 } 283 }
284 SLIST_INIT(&sc->sc_rxreq_head); 284 SLIST_INIT(&sc->sc_rxreq_head);
285 s = splvm(); 285 s = splvm();
286 for (i = 0; i < NET_RX_RING_SIZE; i++) { 286 for (i = 0; i < NET_RX_RING_SIZE; i++) {
287 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 287 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
288 rxreq->rxreq_id = i; 288 rxreq->rxreq_id = i;
289 rxreq->rxreq_sc = sc; 289 rxreq->rxreq_sc = sc;
290 rxreq->rxreq_va = uvm_km_alloc(kernel_map, 290 rxreq->rxreq_va = uvm_km_alloc(kernel_map,
291 PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO); 291 PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
292 if (rxreq->rxreq_va == 0) 292 if (rxreq->rxreq_va == 0)
293 break; 293 break;
294 if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va, 294 if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
295 &rxreq->rxreq_pa)) 295 &rxreq->rxreq_pa))
296 panic("%s: no pa for mapped va ?", device_xname(self)); 296 panic("%s: no pa for mapped va ?", device_xname(self));
297 rxreq->rxreq_gntref = GRANT_INVALID_REF; 297 rxreq->rxreq_gntref = GRANT_INVALID_REF;
298 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next); 298 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
299 } 299 }
300 splx(s); 300 splx(s);
301 sc->sc_free_rxreql = i; 301 sc->sc_free_rxreql = i;
302 if (sc->sc_free_rxreql == 0) { 302 if (sc->sc_free_rxreql == 0) {
303 aprint_error_dev(self, "failed to allocate rx memory\n"); 303 aprint_error_dev(self, "failed to allocate rx memory\n");
304 return; 304 return;
305 } 305 }
306 306
307 /* read mac address */ 307 /* read mac address */
308 err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val); 308 err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
309 if (err) { 309 if (err) {
310 aprint_error_dev(self, "can't read mac address, err %d\n", err); 310 aprint_error_dev(self, "can't read mac address, err %d\n", err);
311 return; 311 return;
312 } 312 }
313 /* read mac address */ 313 /* read mac address */
314 for (i = 0, p = val; i < 6; i++) { 314 for (i = 0, p = val; i < 6; i++) {
315 sc->sc_enaddr[i] = strtoul(p, &e, 16); 315 sc->sc_enaddr[i] = strtoul(p, &e, 16);
316 if ((e[0] == '\0' && i != 5) && e[0] != ':') { 316 if ((e[0] == '\0' && i != 5) && e[0] != ':') {
317 aprint_error_dev(self, "%s is not a valid mac address\n", val); 317 aprint_error_dev(self, "%s is not a valid mac address\n", val);
318 free(val, M_DEVBUF); 318 free(val, M_DEVBUF);
319 return; 319 return;
320 } 320 }
321 p = &e[1]; 321 p = &e[1];
322 } 322 }
323 free(val, M_DEVBUF); 323 free(val, M_DEVBUF);
324 aprint_normal_dev(self, "MAC address %s\n", 324 aprint_normal_dev(self, "MAC address %s\n",
325 ether_sprintf(sc->sc_enaddr)); 325 ether_sprintf(sc->sc_enaddr));
326 /* Initialize ifnet structure and attach interface */ 326 /* Initialize ifnet structure and attach interface */
327 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 327 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
328 ifp->if_softc = sc; 328 ifp->if_softc = sc;
329 ifp->if_start = xennet_start; 329 ifp->if_start = xennet_start;
330 ifp->if_ioctl = xennet_ioctl; 330 ifp->if_ioctl = xennet_ioctl;
331 ifp->if_watchdog = xennet_watchdog; 331 ifp->if_watchdog = xennet_watchdog;
332 ifp->if_init = xennet_init; 332 ifp->if_init = xennet_init;
333 ifp->if_stop = xennet_stop; 333 ifp->if_stop = xennet_stop;
334 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; 334 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
335 ifp->if_timer = 0; 335 ifp->if_timer = 0;
336 ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2); 336 ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
337 ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx; 337 ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
338 IFQ_SET_READY(&ifp->if_snd); 338 IFQ_SET_READY(&ifp->if_snd);
339 if_attach(ifp); 339 if_attach(ifp);
340 ether_ifattach(ifp, sc->sc_enaddr); 340 ether_ifattach(ifp, sc->sc_enaddr);
341 sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc); 341 sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc);
342 if (sc->sc_softintr == NULL) 342 if (sc->sc_softintr == NULL)
343 panic("%s: can't establish soft interrupt", 343 panic("%s: can't establish soft interrupt",
344 device_xname(self)); 344 device_xname(self));
345 345
346#if NRND > 0 346#if NRND > 0
347 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev), 347 rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
348 RND_TYPE_NET, 0); 348 RND_TYPE_NET, 0);
349#endif 349#endif
350 350
351 /* initialise shared structures and tell backend that we are ready */ 351 /* initialise shared structures and tell backend that we are ready */
352 xennet_xenbus_resume(sc); 352 xennet_xenbus_resume(sc);
353} 353}
354 354
355static int 355static int
356xennet_xenbus_detach(device_t self, int flags) 356xennet_xenbus_detach(device_t self, int flags)
357{ 357{
358 struct xennet_xenbus_softc *sc = device_private(self); 358 struct xennet_xenbus_softc *sc = device_private(self);
359 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 359 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
360 int s0, s1; 360 int s0, s1;
361 RING_IDX i; 361 RING_IDX i;
362 362
363 DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self))); 363 DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self)));
364 s0 = splnet(); 364 s0 = splnet();
365 xennet_stop(ifp, 1); 365 xennet_stop(ifp, 1);
366 /* wait for pending TX to complete, and collect pending RX packets */ 366 /* wait for pending TX to complete, and collect pending RX packets */
367 xennet_handler(sc); 367 xennet_handler(sc);
368 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) { 368 while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
369 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2); 369 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
370 xennet_handler(sc); 370 xennet_handler(sc);
371 } 371 }
372 xennet_free_rx_buffer(sc); 372 xennet_free_rx_buffer(sc);
373 373
374 s1 = splvm(); 374 s1 = splvm();
375 for (i = 0; i < NET_RX_RING_SIZE; i++) { 375 for (i = 0; i < NET_RX_RING_SIZE; i++) {
376 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 376 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
377 uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE, 377 uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
378 UVM_KMF_WIRED); 378 UVM_KMF_WIRED);
379 } 379 }
380 splx(s1); 380 splx(s1);
381  381
382 ether_ifdetach(ifp); 382 ether_ifdetach(ifp);
383 if_detach(ifp); 383 if_detach(ifp);
384 384
385#if NRND > 0 385#if NRND > 0
386 /* Unhook the entropy source. */ 386 /* Unhook the entropy source. */
387 rnd_detach_source(&sc->sc_rnd_source); 387 rnd_detach_source(&sc->sc_rnd_source);
388#endif 388#endif
389 389
390 while (xengnt_status(sc->sc_tx_ring_gntref)) { 390 while (xengnt_status(sc->sc_tx_ring_gntref)) {
391 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2); 391 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
392 } 392 }
393 xengnt_revoke_access(sc->sc_tx_ring_gntref); 393 xengnt_revoke_access(sc->sc_tx_ring_gntref);
394 uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE, 394 uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
395 UVM_KMF_WIRED); 395 UVM_KMF_WIRED);
396 while (xengnt_status(sc->sc_rx_ring_gntref)) { 396 while (xengnt_status(sc->sc_rx_ring_gntref)) {
397 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2); 397 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
398 } 398 }
399 xengnt_revoke_access(sc->sc_rx_ring_gntref); 399 xengnt_revoke_access(sc->sc_rx_ring_gntref);
400 uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE, 400 uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
401 UVM_KMF_WIRED); 401 UVM_KMF_WIRED);
402 softint_disestablish(sc->sc_softintr); 402 softint_disestablish(sc->sc_softintr);
403 event_remove_handler(sc->sc_evtchn, &xennet_handler, sc); 403 event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
404 splx(s0); 404 splx(s0);
405 DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self))); 405 DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self)));
406 return 0; 406 return 0;
407} 407}
408 408
409static int 409static int
410xennet_xenbus_resume(void *p) 410xennet_xenbus_resume(void *p)
411{ 411{
412 struct xennet_xenbus_softc *sc = p; 412 struct xennet_xenbus_softc *sc = p;
413 struct xenbus_transaction *xbt; 413 struct xenbus_transaction *xbt;
414 int error; 414 int error;
415 netif_tx_sring_t *tx_ring; 415 netif_tx_sring_t *tx_ring;
416 netif_rx_sring_t *rx_ring; 416 netif_rx_sring_t *rx_ring;
417 paddr_t ma; 417 paddr_t ma;
418 const char *errmsg; 418 const char *errmsg;
419 419
420 sc->sc_tx_ring_gntref = GRANT_INVALID_REF; 420 sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
421 sc->sc_rx_ring_gntref = GRANT_INVALID_REF; 421 sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
422 422
423 423
424 /* setup device: alloc event channel and shared rings */ 424 /* setup device: alloc event channel and shared rings */
425 tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 425 tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
426 UVM_KMF_WIRED | UVM_KMF_ZERO); 426 UVM_KMF_WIRED | UVM_KMF_ZERO);
427 rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 427 rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
428 UVM_KMF_WIRED | UVM_KMF_ZERO); 428 UVM_KMF_WIRED | UVM_KMF_ZERO);
429 if (tx_ring == NULL || rx_ring == NULL) 429 if (tx_ring == NULL || rx_ring == NULL)
430 panic("xennet_xenbus_resume: can't alloc rings"); 430 panic("xennet_xenbus_resume: can't alloc rings");
431 431
432 SHARED_RING_INIT(tx_ring); 432 SHARED_RING_INIT(tx_ring);
433 FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE); 433 FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
434 SHARED_RING_INIT(rx_ring); 434 SHARED_RING_INIT(rx_ring);
435 FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE); 435 FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
436 436
437 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma); 437 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
438 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref); 438 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
439 if (error) 439 if (error)
440 return error; 440 return error;
441 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma); 441 (void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
442 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref); 442 error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
443 if (error) 443 if (error)
444 return error; 444 return error;
445 error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn); 445 error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
446 if (error) 446 if (error)
447 return error; 447 return error;
448 aprint_verbose_dev(sc->sc_dev, "using event channel %d\n", 448 aprint_verbose_dev(sc->sc_dev, "using event channel %d\n",
449 sc->sc_evtchn); 449 sc->sc_evtchn);
450 event_set_handler(sc->sc_evtchn, &xennet_handler, sc, 450 event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
451 IPL_NET, device_xname(sc->sc_dev)); 451 IPL_NET, device_xname(sc->sc_dev));
452 452
453again: 453again:
454 xbt = xenbus_transaction_start(); 454 xbt = xenbus_transaction_start();
455 if (xbt == NULL) 455 if (xbt == NULL)
456 return ENOMEM; 456 return ENOMEM;
457 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 457 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
458 "tx-ring-ref","%u", sc->sc_tx_ring_gntref); 458 "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
459 if (error) { 459 if (error) {
460 errmsg = "writing tx ring-ref"; 460 errmsg = "writing tx ring-ref";
461 goto abort_transaction; 461 goto abort_transaction;
462 } 462 }
463 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 463 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
464 "rx-ring-ref","%u", sc->sc_rx_ring_gntref); 464 "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
465 if (error) { 465 if (error) {
466 errmsg = "writing rx ring-ref"; 466 errmsg = "writing rx ring-ref";
467 goto abort_transaction; 467 goto abort_transaction;
468 } 468 }
469 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 469 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
 470 "feature-rx-notify", "%u", 1);
 471 if (error) {
 472 errmsg = "writing feature-rx-notify";
 473 goto abort_transaction;
 474 }
 475 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
470 "event-channel", "%u", sc->sc_evtchn); 476 "event-channel", "%u", sc->sc_evtchn);
471 if (error) { 477 if (error) {
472 errmsg = "writing event channel"; 478 errmsg = "writing event channel";
473 goto abort_transaction; 479 goto abort_transaction;
474 } 480 }
475 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path, 481 error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
476 "state", "%d", XenbusStateConnected); 482 "state", "%d", XenbusStateConnected);
477 if (error) { 483 if (error) {
478 errmsg = "writing frontend XenbusStateConnected"; 484 errmsg = "writing frontend XenbusStateConnected";
479 goto abort_transaction; 485 goto abort_transaction;
480 } 486 }
481 error = xenbus_transaction_end(xbt, 0); 487 error = xenbus_transaction_end(xbt, 0);
482 if (error == EAGAIN) 488 if (error == EAGAIN)
483 goto again; 489 goto again;
484 if (error) { 490 if (error) {
485 xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction"); 491 xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
486 return -1; 492 return -1;
487 } 493 }
488 xennet_alloc_rx_buffer(sc); 494 xennet_alloc_rx_buffer(sc);
489 sc->sc_backend_status = BEST_CONNECTED; 495 sc->sc_backend_status = BEST_CONNECTED;
490 return 0; 496 return 0;
491 497
492abort_transaction: 498abort_transaction:
493 xenbus_transaction_end(xbt, 1); 499 xenbus_transaction_end(xbt, 1);
494 xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg); 500 xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
495 return error; 501 return error;
496} 502}
497 503
498static void xennet_backend_changed(void *arg, XenbusState new_state) 504static void xennet_backend_changed(void *arg, XenbusState new_state)
499{ 505{
500 struct xennet_xenbus_softc *sc = device_private((device_t)arg); 506 struct xennet_xenbus_softc *sc = device_private((device_t)arg);
501 DPRINTF(("%s: new backend state %d\n", device_xname(sc->sc_dev), new_state)); 507 DPRINTF(("%s: new backend state %d\n", device_xname(sc->sc_dev), new_state));
502 508
503 switch (new_state) { 509 switch (new_state) {
504 case XenbusStateInitialising: 510 case XenbusStateInitialising:
505 case XenbusStateInitWait: 511 case XenbusStateInitWait:
506 case XenbusStateInitialised: 512 case XenbusStateInitialised:
507 break; 513 break;
508 case XenbusStateClosing: 514 case XenbusStateClosing:
509 sc->sc_backend_status = BEST_CLOSED; 515 sc->sc_backend_status = BEST_CLOSED;
510 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed); 516 xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
511 break; 517 break;
512 case XenbusStateConnected: 518 case XenbusStateConnected:
513 break; 519 break;
514 case XenbusStateUnknown: 520 case XenbusStateUnknown:
515 default: 521 default:
516 panic("bad backend state %d", new_state); 522 panic("bad backend state %d", new_state);
517 } 523 }
518} 524}
519 525
520static void 526static void
521xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc) 527xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
522{ 528{
523 RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt; 529 RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
524 RING_IDX i; 530 RING_IDX i;
525 struct xennet_rxreq *req; 531 struct xennet_rxreq *req;
526 struct xen_memory_reservation reservation; 532 struct xen_memory_reservation reservation;
527 int s1, s2; 533 int s1, s2;
528 paddr_t pfn; 534 paddr_t pfn;
529 535
530 s1 = splnet(); 536 s1 = splnet();
531 for (i = 0; sc->sc_free_rxreql != 0; i++) { 537 for (i = 0; sc->sc_free_rxreql != 0; i++) {
532 req = SLIST_FIRST(&sc->sc_rxreq_head); 538 req = SLIST_FIRST(&sc->sc_rxreq_head);
533 KASSERT(req != NULL); 539 KASSERT(req != NULL);
534 KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]); 540 KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
535 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id = 541 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
536 req->rxreq_id; 542 req->rxreq_id;
537 if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id, 543 if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
538 &req->rxreq_gntref) != 0) { 544 &req->rxreq_gntref) != 0) {
539 break; 545 break;
540 } 546 }
541 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref = 547 RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
542 req->rxreq_gntref; 548 req->rxreq_gntref;
543 549
544 SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next); 550 SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
545 sc->sc_free_rxreql--; 551 sc->sc_free_rxreql--;
546 552
547 /* unmap the page */ 553 /* unmap the page */
548 MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0); 554 MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
549 /* 555 /*
550 * Remove this page from pseudo phys map before 556 * Remove this page from pseudo phys map before
551 * passing back to Xen. 557 * passing back to Xen.
552 */ 558 */
553 pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT; 559 pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
554 xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn]; 560 xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
555 xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY; 561 xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
556 } 562 }
557 if (i == 0) { 563 if (i == 0) {
558 splx(s1); 564 splx(s1);
559 return; 565 return;
560 } 566 }
561 /* also make sure to flush all TLB entries */ 567 /* also make sure to flush all TLB entries */
562 rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL; 568 rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
563 /* 569 /*
564 * We may have allocated buffers which have entries 570 * We may have allocated buffers which have entries
565 * outstanding in the page update queue -- make sure we flush 571 * outstanding in the page update queue -- make sure we flush
566 * those first! 572 * those first!
567 */ 573 */
568 s2 = splvm(); 574 s2 = splvm();
569 xpq_flush_queue(); 575 xpq_flush_queue();
570 splx(s2); 576 splx(s2);
571 /* now decrease reservation */ 577 /* now decrease reservation */
572 reservation.extent_start = xennet_pages; 578 reservation.extent_start = xennet_pages;
573 reservation.nr_extents = i; 579 reservation.nr_extents = i;
574 reservation.extent_order = 0; 580 reservation.extent_order = 0;
575 reservation.address_bits = 0; 581 reservation.address_bits = 0;
576 reservation.domid = DOMID_SELF; 582 reservation.domid = DOMID_SELF;
577 rx_mcl[i].op = __HYPERVISOR_memory_op; 583 rx_mcl[i].op = __HYPERVISOR_memory_op;
578 rx_mcl[i].args[0] = XENMEM_decrease_reservation; 584 rx_mcl[i].args[0] = XENMEM_decrease_reservation;
579 rx_mcl[i].args[1] = (unsigned long)&reservation; 585 rx_mcl[i].args[1] = (unsigned long)&reservation;
580 HYPERVISOR_multicall(rx_mcl, i+1); 586 HYPERVISOR_multicall(rx_mcl, i+1);
581 if (__predict_false(rx_mcl[i].result != i)) { 587 if (__predict_false(rx_mcl[i].result != i)) {
582 panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation"); 588 panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
583 } 589 }
584 sc->sc_rx_ring.req_prod_pvt = req_prod + i; 590 sc->sc_rx_ring.req_prod_pvt = req_prod + i;
585 RING_PUSH_REQUESTS(&sc->sc_rx_ring); 591 RING_PUSH_REQUESTS(&sc->sc_rx_ring);
586 592
587 splx(s1); 593 splx(s1);
588 return; 594 return;
589} 595}
590 596
591static void 597static void
592xennet_free_rx_buffer(struct xennet_xenbus_softc *sc) 598xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
593{ 599{
594 paddr_t ma, pa; 600 paddr_t ma, pa;
595 vaddr_t va; 601 vaddr_t va;
596 RING_IDX i; 602 RING_IDX i;
597 mmu_update_t mmu[1]; 603 mmu_update_t mmu[1];
598 multicall_entry_t mcl[2]; 604 multicall_entry_t mcl[2];
599 605
600 int s = splbio(); 606 int s = splbio();
601  607
602 DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev))); 608 DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
603 /* get back memory from RX ring */ 609 /* get back memory from RX ring */
604 for (i = 0; i < NET_RX_RING_SIZE; i++) { 610 for (i = 0; i < NET_RX_RING_SIZE; i++) {
605 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i]; 611 struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
606 612
607 /* 613 /*
608 * if the buffer is in transit in the network stack, wait for 614 * if the buffer is in transit in the network stack, wait for
609 * the network stack to free it. 615 * the network stack to free it.
610 */ 616 */
611 while ((volatile grant_ref_t)rxreq->rxreq_gntref == 617 while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
612 GRANT_STACK_REF) 618 GRANT_STACK_REF)
613 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2); 619 tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
614 620
615 if (rxreq->rxreq_gntref != GRANT_INVALID_REF) { 621 if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
616 /* 622 /*
617 * this req is still granted. Get back the page or 623 * this req is still granted. Get back the page or
618 * allocate a new one, and remap it. 624 * allocate a new one, and remap it.
619 */ 625 */
620 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, 626 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
621 rxreq_next); 627 rxreq_next);
622 sc->sc_free_rxreql++; 628 sc->sc_free_rxreql++;
623 ma = xengnt_revoke_transfer(rxreq->rxreq_gntref); 629 ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
624 rxreq->rxreq_gntref = GRANT_INVALID_REF; 630 rxreq->rxreq_gntref = GRANT_INVALID_REF;
625 if (ma == 0) { 631 if (ma == 0) {
626 u_long pfn; 632 u_long pfn;
627 struct xen_memory_reservation xenres; 633 struct xen_memory_reservation xenres;
628 /* 634 /*
629 * transfer not complete, we lost the page. 635 * transfer not complete, we lost the page.
630 * Get one from hypervisor 636 * Get one from hypervisor
631 */ 637 */
632 xenres.extent_start = &pfn; 638 xenres.extent_start = &pfn;
633 xenres.nr_extents = 1; 639 xenres.nr_extents = 1;
634 xenres.extent_order = 0; 640 xenres.extent_order = 0;
635 xenres.address_bits = 31; 641 xenres.address_bits = 31;
636 xenres.domid = DOMID_SELF; 642 xenres.domid = DOMID_SELF;
637 if (HYPERVISOR_memory_op( 643 if (HYPERVISOR_memory_op(
638 XENMEM_increase_reservation, &xenres) < 0) { 644 XENMEM_increase_reservation, &xenres) < 0) {
639 panic("xennet_free_rx_buffer: " 645 panic("xennet_free_rx_buffer: "
640 "can't get memory back"); 646 "can't get memory back");
641 } 647 }
642 ma = pfn; 648 ma = pfn;
643 KASSERT(ma != 0); 649 KASSERT(ma != 0);
644 } 650 }
645 pa = rxreq->rxreq_pa; 651 pa = rxreq->rxreq_pa;
646 va = rxreq->rxreq_va; 652 va = rxreq->rxreq_va;
647 /* remap the page */ 653 /* remap the page */
648 mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 654 mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
649 mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT); 655 mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
650 MULTI_update_va_mapping(&mcl[0], va,  656 MULTI_update_va_mapping(&mcl[0], va,
651 (ma << PAGE_SHIFT) | PG_V | PG_KW, 657 (ma << PAGE_SHIFT) | PG_V | PG_KW,
652 UVMF_TLB_FLUSH|UVMF_ALL); 658 UVMF_TLB_FLUSH|UVMF_ALL);
653 xpmap_phys_to_machine_mapping[ 659 xpmap_phys_to_machine_mapping[
654 (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma; 660 (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
655 mcl[1].op = __HYPERVISOR_mmu_update; 661 mcl[1].op = __HYPERVISOR_mmu_update;
656 mcl[1].args[0] = (unsigned long)mmu; 662 mcl[1].args[0] = (unsigned long)mmu;
657 mcl[1].args[1] = 1; 663 mcl[1].args[1] = 1;
658 mcl[1].args[2] = 0; 664 mcl[1].args[2] = 0;
659 mcl[1].args[3] = DOMID_SELF; 665 mcl[1].args[3] = DOMID_SELF;
660 HYPERVISOR_multicall(mcl, 2); 666 HYPERVISOR_multicall(mcl, 2);
661 } 667 }
662 668
663 } 669 }
664 splx(s); 670 splx(s);
665 DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev))); 671 DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev)));
666} 672}
667 673
668static void 674static void
669xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg) 675xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg)
670{ 676{
671 struct xennet_rxreq *req = arg; 677 struct xennet_rxreq *req = arg;
672 struct xennet_xenbus_softc *sc = req->rxreq_sc; 678 struct xennet_xenbus_softc *sc = req->rxreq_sc;
673 679
674 int s = splnet(); 680 int s = splnet();
675 681
676 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next); 682 SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
677 sc->sc_free_rxreql++; 683 sc->sc_free_rxreql++;
678 684
679 req->rxreq_gntref = GRANT_INVALID_REF; 685 req->rxreq_gntref = GRANT_INVALID_REF;
680 if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) && 686 if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
681 __predict_true(sc->sc_backend_status == BEST_CONNECTED)) { 687 __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
682 xennet_alloc_rx_buffer(sc); 688 xennet_alloc_rx_buffer(sc);
683 } 689 }
684 690
685 if (m) 691 if (m)
686 pool_cache_put(mb_cache, m); 692 pool_cache_put(mb_cache, m);
687 splx(s); 693 splx(s);
688} 694}
689 695
690 696
691static void 697static void
692xennet_tx_complete(struct xennet_xenbus_softc *sc) 698xennet_tx_complete(struct xennet_xenbus_softc *sc)
693{ 699{
694 struct xennet_txreq *req; 700 struct xennet_txreq *req;
695 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 701 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
696 RING_IDX resp_prod, i; 702 RING_IDX resp_prod, i;
697 703
698 DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n", 704 DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
699 sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons)); 705 sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));
700 706
701again: 707again:
702 resp_prod = sc->sc_tx_ring.sring->rsp_prod; 708 resp_prod = sc->sc_tx_ring.sring->rsp_prod;
703 x86_lfence(); 709 x86_lfence();
704 for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) { 710 for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
705 req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id]; 711 req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
706 KASSERT(req->txreq_id == 712 KASSERT(req->txreq_id ==
707 RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id); 713 RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
708 if (__predict_false(xengnt_status(req->txreq_gntref))) { 714 if (__predict_false(xengnt_status(req->txreq_gntref))) {
709 aprint_verbose_dev(sc->sc_dev, 715 aprint_verbose_dev(sc->sc_dev,
710 "grant still used by backend\n"); 716 "grant still used by backend\n");
711 sc->sc_tx_ring.rsp_cons = i; 717 sc->sc_tx_ring.rsp_cons = i;
712 goto end; 718 goto end;
713 } 719 }
714 if (__predict_false( 720 if (__predict_false(
715 RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status != 721 RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
716 NETIF_RSP_OKAY)) 722 NETIF_RSP_OKAY))
717 ifp->if_oerrors++; 723 ifp->if_oerrors++;
718 else 724 else
719 ifp->if_opackets++; 725 ifp->if_opackets++;
720 xengnt_revoke_access(req->txreq_gntref); 726 xengnt_revoke_access(req->txreq_gntref);
721 m_freem(req->txreq_m); 727 m_freem(req->txreq_m);
722 SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next); 728 SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
723 } 729 }
724 sc->sc_tx_ring.rsp_cons = resp_prod; 730 sc->sc_tx_ring.rsp_cons = resp_prod;
725 /* set new event and check for race with rsp_cons update */ 731 /* set new event and check for race with rsp_cons update */
726 sc->sc_tx_ring.sring->rsp_event =  732 sc->sc_tx_ring.sring->rsp_event =
727 resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1; 733 resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
728 ifp->if_timer = 0; 734 ifp->if_timer = 0;
729 x86_sfence(); 735 x86_sfence();
730 if (resp_prod != sc->sc_tx_ring.sring->rsp_prod) 736 if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
731 goto again; 737 goto again;
732end: 738end:
733 if (ifp->if_flags & IFF_OACTIVE) { 739 if (ifp->if_flags & IFF_OACTIVE) {
734 ifp->if_flags &= ~IFF_OACTIVE; 740 ifp->if_flags &= ~IFF_OACTIVE;
735 xennet_softstart(sc); 741 xennet_softstart(sc);
736 } 742 }
737} 743}
738 744
739static int 745static int
740xennet_handler(void *arg) 746xennet_handler(void *arg)
741{ 747{
742 struct xennet_xenbus_softc *sc = arg; 748 struct xennet_xenbus_softc *sc = arg;
743 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 749 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
744 RING_IDX resp_prod, i; 750 RING_IDX resp_prod, i;
745 struct xennet_rxreq *req; 751 struct xennet_rxreq *req;
746 paddr_t ma, pa; 752 paddr_t ma, pa;
747 vaddr_t va; 753 vaddr_t va;
748 mmu_update_t mmu[1]; 754 mmu_update_t mmu[1];
749 multicall_entry_t mcl[2]; 755 multicall_entry_t mcl[2];
750 struct mbuf *m; 756 struct mbuf *m;
751 void *pktp; 757 void *pktp;
752 int more_to_do; 758 int more_to_do;
753 759
754 if (sc->sc_backend_status != BEST_CONNECTED) 760 if (sc->sc_backend_status != BEST_CONNECTED)
755 return 1; 761 return 1;
756 762
757 xennet_tx_complete(sc); 763 xennet_tx_complete(sc);
758 764
759#if NRND > 0 765#if NRND > 0
760 rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt); 766 rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
761#endif 767#endif
762again: 768again:
763 DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n", 769 DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
764 sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons)); 770 sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));
765 771
766 resp_prod = sc->sc_rx_ring.sring->rsp_prod; 772 resp_prod = sc->sc_rx_ring.sring->rsp_prod;
767 x86_lfence(); /* ensure we see replies up to resp_prod */ 773 x86_lfence(); /* ensure we see replies up to resp_prod */
768 for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) { 774 for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
769 netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i); 775 netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
770 req = &sc->sc_rxreqs[rx->id]; 776 req = &sc->sc_rxreqs[rx->id];
771 KASSERT(req->rxreq_gntref != GRANT_INVALID_REF); 777 KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
772 KASSERT(req->rxreq_id == rx->id); 778 KASSERT(req->rxreq_id == rx->id);
773 ma = xengnt_revoke_transfer(req->rxreq_gntref); 779 ma = xengnt_revoke_transfer(req->rxreq_gntref);
774 if (ma == 0) { 780 if (ma == 0) {
775 DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n")); 781 DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n"));
776 /* 782 /*
777 * the remote could't send us a packet. 783 * the remote could't send us a packet.
778 * we can't free this rxreq as no page will be mapped 784 * we can't free this rxreq as no page will be mapped
779 * here. Instead give it back immediatly to backend. 785 * here. Instead give it back immediatly to backend.
780 */ 786 */
781 ifp->if_ierrors++; 787 ifp->if_ierrors++;
782 RING_GET_REQUEST(&sc->sc_rx_ring, 788 RING_GET_REQUEST(&sc->sc_rx_ring,
783 sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id; 789 sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id;
784 RING_GET_REQUEST(&sc->sc_rx_ring, 790 RING_GET_REQUEST(&sc->sc_rx_ring,
785 sc->sc_rx_ring.req_prod_pvt)->gref = 791 sc->sc_rx_ring.req_prod_pvt)->gref =
786 req->rxreq_gntref; 792 req->rxreq_gntref;
787 sc->sc_rx_ring.req_prod_pvt++; 793 sc->sc_rx_ring.req_prod_pvt++;
788 RING_PUSH_REQUESTS(&sc->sc_rx_ring); 794 RING_PUSH_REQUESTS(&sc->sc_rx_ring);
789 continue; 795 continue;
790 } 796 }
791 req->rxreq_gntref = GRANT_INVALID_REF; 797 req->rxreq_gntref = GRANT_INVALID_REF;
792 798
793 pa = req->rxreq_pa; 799 pa = req->rxreq_pa;
794 va = req->rxreq_va; 800 va = req->rxreq_va;
795 /* remap the page */ 801 /* remap the page */
796 mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; 802 mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
797 mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT); 803 mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
798 MULTI_update_va_mapping(&mcl[0], va,  804 MULTI_update_va_mapping(&mcl[0], va,
799 (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL); 805 (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL);
800 xpmap_phys_to_machine_mapping[ 806 xpmap_phys_to_machine_mapping[
801 (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma; 807 (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
802 mcl[1].op = __HYPERVISOR_mmu_update; 808 mcl[1].op = __HYPERVISOR_mmu_update;
803 mcl[1].args[0] = (unsigned long)mmu; 809 mcl[1].args[0] = (unsigned long)mmu;
804 mcl[1].args[1] = 1; 810 mcl[1].args[1] = 1;
805 mcl[1].args[2] = 0; 811 mcl[1].args[2] = 0;
806 mcl[1].args[3] = DOMID_SELF; 812 mcl[1].args[3] = DOMID_SELF;
807 HYPERVISOR_multicall(mcl, 2); 813 HYPERVISOR_multicall(mcl, 2);
808 pktp = (void *)(va + rx->offset); 814 pktp = (void *)(va + rx->offset);
809#ifdef XENNET_DEBUG_DUMP 815#ifdef XENNET_DEBUG_DUMP
810 xennet_hex_dump(pktp, rx->status, "r", rx->id); 816 xennet_hex_dump(pktp, rx->status, "r", rx->id);
811#endif 817#endif
812 if ((ifp->if_flags & IFF_PROMISC) == 0) { 818 if ((ifp->if_flags & IFF_PROMISC) == 0) {
813 struct ether_header *eh = pktp; 819 struct ether_header *eh = pktp;
814 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && 820 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
815 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, 821 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
816 ETHER_ADDR_LEN) != 0) { 822 ETHER_ADDR_LEN) != 0) {
817 DPRINTFN(XEDB_EVENT, 823 DPRINTFN(XEDB_EVENT,
818 ("xennet_handler bad dest\n")); 824 ("xennet_handler bad dest\n"));
819 /* packet not for us */ 825 /* packet not for us */
820 xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, 826 xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
821 req); 827 req);
822 continue; 828 continue;
823 } 829 }
824 } 830 }
825 MGETHDR(m, M_DONTWAIT, MT_DATA); 831 MGETHDR(m, M_DONTWAIT, MT_DATA);
826 if (__predict_false(m == NULL)) { 832 if (__predict_false(m == NULL)) {
827 printf("xennet: rx no mbuf\n"); 833 printf("xennet: rx no mbuf\n");
828 ifp->if_ierrors++; 834 ifp->if_ierrors++;
829 xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req); 835 xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
830 continue; 836 continue;
831 } 837 }
832 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 838 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
833 839
834 m->m_pkthdr.rcvif = ifp; 840 m->m_pkthdr.rcvif = ifp;
		if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
		    sc->sc_rx_ring.sring->rsp_prod)) {
			m->m_len = m->m_pkthdr.len = rx->status;
			MEXTADD(m, pktp, rx->status,
			    M_DEVBUF, xennet_rx_mbuf_free, req);
			m->m_flags |= M_EXT_RW; /* we own the buffer */
			req->rxreq_gntref = GRANT_STACK_REF;
		} else {
			/*
			 * This was our last receive buffer: allocate
			 * memory, copy the data and push the receive
			 * buffer back to the hypervisor.
			 */
			m->m_len = min(MHLEN, rx->status);
			m->m_pkthdr.len = 0;
			m_copyback(m, 0, rx->status, pktp);
			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
			if (m->m_pkthdr.len < rx->status) {
				/* out of memory, just drop packets */
				ifp->if_ierrors++;
				m_freem(m);
				continue;
			}
		}
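		/*
		 * The backend may hand us packets with the checksum left
		 * blank (NETRXF_csum_blank); compute it in software before
		 * passing the packet to the stack.
		 */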
		if ((rx->flags & NETRXF_csum_blank) != 0) {
			xennet_checksum_fill(&m);
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		}
#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		ifp->if_ipackets++;

		/* Pass the packet up. */
		(*ifp->if_input)(ifp, m);
	}
	x86_lfence();
	sc->sc_rx_ring.rsp_cons = i;
	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
	if (more_to_do)
		goto again;
	return 1;
}

/*
 * Called at splnet.
 */
void
xennet_start(struct ifnet *ifp)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", device_xname(sc->sc_dev)));

#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
#endif

	xennet_tx_complete(sc);

	if (__predict_false(
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING))
		return;

	/*
	 * The Xen communication channel is much more efficient if we can
	 * schedule a batch of packets for domain0. To achieve this, we
	 * schedule a soft interrupt, and just return. This way, the network
	 * stack will enqueue all pending mbufs in the interface's send queue
	 * before it is processed by xennet_softstart().
	 */
	softint_schedule(sc->sc_softintr);
	return;
}

/*
 * called at splsoftnet
 */
void
xennet_softstart(void *arg)
{
	struct xennet_xenbus_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *new_m;
	netif_tx_request_t *txreq;
	RING_IDX req_prod;
	paddr_t pa, pa2;
	struct xennet_txreq *req;
	int notify;
	int do_notify = 0;
	int s;

	s = splnet();
	if (__predict_false(
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
		splx(s);
		return;
	}

	req_prod = sc->sc_tx_ring.req_prod_pvt;
	while (/*CONSTCOND*/1) {
		uint16_t txflags;

		req = SLIST_FIRST(&sc->sc_txreq_head);
		if (__predict_false(req == NULL)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

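		/*
		 * Find the physical address of the frame data: clusters and
		 * plain mbufs carry it in the mbuf itself, anything else
		 * goes through pmap_extract().
		 */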
		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			pa = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			break;
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			pa = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			break;
		default:
			if (__predict_false(
			    !pmap_extract(pmap_kernel(), (vaddr_t)m->m_data,
			    &pa))) {
				panic("xennet_start: no pa");
			}
			break;
		}

		if ((m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
			txflags = NETTXF_csum_blank;
		} else {
			txflags = 0;
		}

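		/*
		 * The transmit request can only describe one contiguous
		 * page.  If the packet spans several mbufs or crosses a
		 * page boundary, copy it into a single freshly allocated
		 * mbuf (or cluster) first, then grant the backend read-only
		 * access to the page holding the data.
		 */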
		if (m->m_pkthdr.len != m->m_len ||
		    (pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) {

			MGETHDR(new_m, M_DONTWAIT, MT_DATA);
			if (__predict_false(new_m == NULL)) {
				printf("xennet: no mbuf\n");
				break;
			}
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(new_m, M_DONTWAIT);
				if (__predict_false(
				    (new_m->m_flags & M_EXT) == 0)) {
					DPRINTF(("xennet: no mbuf cluster\n"));
					m_freem(new_m);
					break;
				}
			}

			m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, void *));
			new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;

			if ((new_m->m_flags & M_EXT) != 0) {
				pa = new_m->m_ext.ext_paddr;
				KASSERT(new_m->m_data == new_m->m_ext.ext_buf);
				KASSERT(pa != M_PADDR_INVALID);
			} else {
				pa = new_m->m_paddr;
				KASSERT(pa != M_PADDR_INVALID);
				KASSERT(new_m->m_data == M_BUFADDR(new_m));
				pa += M_BUFOFFSET(new_m);
			}
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				m_freem(new_m);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send new_m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			m = new_m;
		} else {
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
		}
		MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);

		KASSERT(((pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) == 0);

		SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
		req->txreq_m = m;

		DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
		    "mbuf %p, buf %p/%p/%p, size %d\n",
		    req->txreq_id, m, mtod(m, void *), (void *)pa,
		    (void *)xpmap_ptom_masked(pa), m->m_pkthdr.len));
		pmap_extract_ma(pmap_kernel(), mtod(m, vaddr_t), &pa2);
		DPRINTFN(XEDB_MBUF, ("xennet_start pa %p ma %p/%p\n",
		    (void *)pa, (void *)xpmap_ptom_masked(pa), (void *)pa2));
#ifdef XENNET_DEBUG_DUMP
		xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", req->txreq_id);
#endif

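		/*
		 * Fill in the transmit request; the offset is the position
		 * of the data within its page.
		 */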
		txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
		txreq->id = req->txreq_id;
		txreq->gref = req->txreq_gntref;
		txreq->offset = pa & ~PG_FRAME;
		txreq->size = m->m_pkthdr.len;
		txreq->flags = txflags;

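		/*
		 * Advance the private producer index and let the ring macros
		 * decide whether the backend must be notified; the actual
		 * event-channel notification is sent once, after the loop.
		 */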
		req_prod++;
		sc->sc_tx_ring.req_prod_pvt = req_prod;
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
		if (notify)
			do_notify = 1;

#ifdef XENNET_DEBUG
		DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
		    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
		    (void *)*kvtopte(mtod(m, vaddr_t)),
		    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
		    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
		DPRINTFN(XEDB_MEM, ("id %d gref %d offset %d size %d flags %d"
		    " prod %d\n",
		    txreq->id, txreq->gref, txreq->offset, txreq->size,
		    txreq->flags, req_prod));
#endif

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
		}
#endif
	}

	x86_lfence();
	if (do_notify) {
		hypervisor_notify_via_evtchn(sc->sc_evtchn);
		ifp->if_timer = 5;
	}
	splx(s);

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
	    device_xname(sc->sc_dev)));
}

int
xennet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
#ifdef XENNET_DEBUG
	struct xennet_xenbus_softc *sc = ifp->if_softc;
#endif
	int s, error = 0;

	s = splnet();

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", device_xname(sc->sc_dev)));
	error = ether_ioctl(ifp, cmd, data);
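	/*
	 * ENETRESET means the multicast filter would have to be
	 * reprogrammed; this virtual interface has no filter to update,
	 * so the request is simply accepted.
	 */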
	if (error == ENETRESET)
		error = 0;
	splx(s);

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
	    device_xname(sc->sc_dev), error));

	return error;
}

void
xennet_watchdog(struct ifnet *ifp)
{
	aprint_verbose_ifnet(ifp, "xennet_watchdog\n");
}

int
xennet_init(struct ifnet *ifp)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;
	int s = splnet();

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", device_xname(sc->sc_dev)));

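	/*
	 * If the interface is not running yet, re-arm the response event
	 * index, unmask the event channel and kick it so that any pending
	 * responses are processed.
	 */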
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sc->sc_rx_ring.sring->rsp_event =
		    sc->sc_rx_ring.rsp_cons + 1;
		hypervisor_enable_event(sc->sc_evtchn);
		hypervisor_notify_via_evtchn(sc->sc_evtchn);
		xennet_reset(sc);
	}
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);
	return 0;
}

void
xennet_stop(struct ifnet *ifp, int disable)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;
	int s = splnet();

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	hypervisor_mask_event(sc->sc_evtchn);
	xennet_reset(sc);
	splx(s);
}

void
xennet_reset(struct xennet_xenbus_softc *sc)
{

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", device_xname(sc->sc_dev)));
}

#if defined(NFS_BOOT_BOOTSTATIC)
int
xennet_bootstatic_callback(struct nfs_diskless *nd)
{
#if 0
	struct ifnet *ifp = nd->nd_ifp;
	struct xennet_xenbus_softc *sc =
	    (struct xennet_xenbus_softc *)ifp->if_softc;
#endif
	int flags = 0;
	union xen_cmdline_parseinfo xcp;
	struct sockaddr_in *sin;

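	/*
	 * Fetch the boot-time network configuration from the Xen kernel
	 * command line and use it to fill in the nfs_diskless structure
	 * for a static NFS root configuration.
	 */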
	memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
	xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
	xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
	xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);

	if (xcp.xcp_netinfo.xi_root[0] != '\0') {
		flags |= NFS_BOOT_HAS_SERVER;
		if (strchr(xcp.xcp_netinfo.xi_root, ':') != NULL)
			flags |= NFS_BOOT_HAS_ROOTPATH;
	}

	nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
	nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
	nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);

	sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
	memset((void *)sin, 0, sizeof(*sin));
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);

	if (nd->nd_myip.s_addr)
		flags |= NFS_BOOT_HAS_MYIP;
	if (nd->nd_gwip.s_addr)
		flags |= NFS_BOOT_HAS_GWIP;
	if (nd->nd_mask.s_addr)
		flags |= NFS_BOOT_HAS_MASK;
	if (sin->sin_addr.s_addr)
		flags |= NFS_BOOT_HAS_SERVADDR;

	return flags;
}
#endif /* defined(NFS_BOOT_BOOTSTATIC) */

#ifdef XENNET_DEBUG_DUMP
#define XCHR(x) hexdigits[(x) & 0xf]
static void
xennet_hex_dump(const unsigned char *pkt, size_t len, const char *type, int id)
{
	size_t i, j;

	printf("pkt %p len %zu/%zx type %s id %d\n", pkt, len, len, type, id);
	printf("00000000 ");
	for(i=0; i<len; i++) {
		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
		if ((i+1) % 16 == 8)
			printf(" ");
		if ((i+1) % 16 == 0) {
			printf(" %c", '|');
			for(j=0; j<16; j++)
				printf("%c", pkt[i-15+j]>=32 &&
				    pkt[i-15+j]<127?pkt[i-15+j]:'.');
			printf("%c\n%c%c%c%c%c%c%c%c ", '|',
			    XCHR((i+1)>>28), XCHR((i+1)>>24),
			    XCHR((i+1)>>20), XCHR((i+1)>>16),
			    XCHR((i+1)>>12), XCHR((i+1)>>8),
			    XCHR((i+1)>>4), XCHR(i+1));
		}
	}
	printf("\n");
}
#undef XCHR
#endif