| @@ -1,1480 +1,1502 @@ | | | @@ -1,1480 +1,1502 @@ |
1 | /* $NetBSD: xennetback_xenbus.c,v 1.71 2018/10/26 05:33:21 cherry Exp $ */ | | 1 | /* $NetBSD: xennetback_xenbus.c,v 1.72 2018/12/23 12:09:45 bouyer Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2006 Manuel Bouyer. | | 4 | * Copyright (c) 2006 Manuel Bouyer. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ | | 25 | */ |
26 | | | 26 | |
27 | #include <sys/cdefs.h> | | 27 | #include <sys/cdefs.h> |
28 | __KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.71 2018/10/26 05:33:21 cherry Exp $"); | | 28 | __KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.72 2018/12/23 12:09:45 bouyer Exp $"); |
29 | | | 29 | |
30 | #include "opt_xen.h" | | 30 | #include "opt_xen.h" |
31 | | | 31 | |
32 | #include <sys/types.h> | | 32 | #include <sys/types.h> |
33 | #include <sys/param.h> | | 33 | #include <sys/param.h> |
34 | #include <sys/systm.h> | | 34 | #include <sys/systm.h> |
35 | #include <sys/malloc.h> | | 35 | #include <sys/malloc.h> |
36 | #include <sys/kmem.h> | | 36 | #include <sys/kmem.h> |
37 | #include <sys/queue.h> | | 37 | #include <sys/queue.h> |
38 | #include <sys/kernel.h> | | 38 | #include <sys/kernel.h> |
39 | #include <sys/mbuf.h> | | 39 | #include <sys/mbuf.h> |
40 | #include <sys/protosw.h> | | 40 | #include <sys/protosw.h> |
41 | #include <sys/socket.h> | | 41 | #include <sys/socket.h> |
42 | #include <sys/ioctl.h> | | 42 | #include <sys/ioctl.h> |
43 | #include <sys/errno.h> | | 43 | #include <sys/errno.h> |
44 | #include <sys/device.h> | | 44 | #include <sys/device.h> |
45 | #include <sys/intr.h> | | 45 | #include <sys/intr.h> |
46 | | | 46 | |
47 | #include <net/if.h> | | 47 | #include <net/if.h> |
48 | #include <net/if_types.h> | | 48 | #include <net/if_types.h> |
49 | #include <net/if_dl.h> | | 49 | #include <net/if_dl.h> |
50 | #include <net/route.h> | | 50 | #include <net/route.h> |
51 | #include <net/netisr.h> | | 51 | #include <net/netisr.h> |
52 | #include <net/bpf.h> | | 52 | #include <net/bpf.h> |
53 | | | 53 | |
54 | #include <net/if_ether.h> | | 54 | #include <net/if_ether.h> |
55 | | | 55 | |
56 | #include <xen/xen.h> | | 56 | #include <xen/xen.h> |
57 | #include <xen/xen_shm.h> | | 57 | #include <xen/xen_shm.h> |
58 | #include <xen/evtchn.h> | | 58 | #include <xen/evtchn.h> |
59 | #include <xen/xenbus.h> | | 59 | #include <xen/xenbus.h> |
60 | #include <xen/xennet_checksum.h> | | 60 | #include <xen/xennet_checksum.h> |
61 | | | 61 | |
62 | #include <uvm/uvm.h> | | 62 | #include <uvm/uvm.h> |
63 | | | 63 | |
64 | /* | | 64 | /* |
65 | * Backend network device driver for Xen. | | 65 | * Backend network device driver for Xen. |
66 | */ | | 66 | */ |
67 | | | 67 | |
68 | #ifdef XENDEBUG_NET | | 68 | #ifdef XENDEBUG_NET |
69 | #define XENPRINTF(x) printf x | | 69 | #define XENPRINTF(x) printf x |
70 | #else | | 70 | #else |
71 | #define XENPRINTF(x) | | 71 | #define XENPRINTF(x) |
72 | #endif | | 72 | #endif |
73 | | | 73 | |
74 | extern pt_entry_t xpmap_pg_nx; | | 74 | extern pt_entry_t xpmap_pg_nx; |
75 | | | 75 | |
76 | #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) | | 76 | #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE) |
77 | #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) | | 77 | #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE) |
78 | | | 78 | |
79 | /* linux wants at least 16 bytes free in front of the packet */ | | 79 | /* linux wants at least 16 bytes free in front of the packet */ |
80 | #define LINUX_REQUESTED_OFFSET 16 | | 80 | #define LINUX_REQUESTED_OFFSET 16 |
81 | | | 81 | |
82 | /* hash list for TX requests */ | | 82 | /* hash list for TX requests */ |
83 | /* descriptor of a packet being handled by the kernel */ | | 83 | /* descriptor of a packet being handled by the kernel */ |
84 | struct xni_pkt { | | 84 | struct xni_pkt { |
85 | int pkt_id; /* packet's ID */ | | 85 | int pkt_id; /* packet's ID */ |
86 | grant_handle_t pkt_handle; | | 86 | grant_handle_t pkt_handle; |
87 | struct xnetback_instance *pkt_xneti; /* pointer back to our softc */ | | 87 | struct xnetback_instance *pkt_xneti; /* pointer back to our softc */ |
88 | }; | | 88 | }; |
89 | | | 89 | |
90 | /* pools for xni_pkt */ | | 90 | /* pools for xni_pkt */ |
91 | struct pool xni_pkt_pool; | | 91 | struct pool xni_pkt_pool; |
92 | /* ratecheck(9) for pool allocation failures */ | | 92 | /* ratecheck(9) for pool allocation failures */ |
93 | struct timeval xni_pool_errintvl = { 30, 0 }; /* 30s, each */ | | 93 | struct timeval xni_pool_errintvl = { 30, 0 }; /* 30s, each */ |
94 | | | 94 | |
95 | /* state of a xnetback instance */ | | 95 | /* state of a xnetback instance */ |
96 | typedef enum { | | 96 | typedef enum { |
97 | CONNECTED, | | 97 | CONNECTED, |
98 | DISCONNECTING, | | 98 | DISCONNECTING, |
99 | DISCONNECTED | | 99 | DISCONNECTED |
100 | } xnetback_state_t; | | 100 | } xnetback_state_t; |
101 | | | 101 | |
102 | /* we keep the xnetback instances in a linked list */ | | 102 | /* we keep the xnetback instances in a linked list */ |
103 | struct xnetback_instance { | | 103 | struct xnetback_instance { |
104 | SLIST_ENTRY(xnetback_instance) next; | | 104 | SLIST_ENTRY(xnetback_instance) next; |
105 | struct xenbus_device *xni_xbusd; /* our xenstore entry */ | | 105 | struct xenbus_device *xni_xbusd; /* our xenstore entry */ |
106 | domid_t xni_domid; /* attached to this domain */ | | 106 | domid_t xni_domid; /* attached to this domain */ |
107 | uint32_t xni_handle; /* domain-specific handle */ | | 107 | uint32_t xni_handle; /* domain-specific handle */ |
108 | xnetback_state_t xni_status; | | 108 | xnetback_state_t xni_status; |
109 | void *xni_softintr; | | 109 | void *xni_softintr; |
110 | | | 110 | |
111 | /* network interface stuff */ | | 111 | /* network interface stuff */ |
112 | struct ethercom xni_ec; | | 112 | struct ethercom xni_ec; |
113 | struct callout xni_restart; | | 113 | struct callout xni_restart; |
114 | uint8_t xni_enaddr[ETHER_ADDR_LEN]; | | 114 | uint8_t xni_enaddr[ETHER_ADDR_LEN]; |
115 | | | 115 | |
116 | /* remote domain communication stuff */ | | 116 | /* remote domain communication stuff */ |
117 | unsigned int xni_evtchn; /* our event channel */ | | 117 | unsigned int xni_evtchn; /* our event channel */ |
118 | struct intrhand *xni_ih; | | 118 | struct intrhand *xni_ih; |
119 | netif_tx_back_ring_t xni_txring; | | 119 | netif_tx_back_ring_t xni_txring; |
120 | netif_rx_back_ring_t xni_rxring; | | 120 | netif_rx_back_ring_t xni_rxring; |
121 | grant_handle_t xni_tx_ring_handle; /* to unmap the ring */ | | 121 | grant_handle_t xni_tx_ring_handle; /* to unmap the ring */ |
122 | grant_handle_t xni_rx_ring_handle; | | 122 | grant_handle_t xni_rx_ring_handle; |
123 | vaddr_t xni_tx_ring_va; /* to unmap the ring */ | | 123 | vaddr_t xni_tx_ring_va; /* to unmap the ring */ |
124 | vaddr_t xni_rx_ring_va; | | 124 | vaddr_t xni_rx_ring_va; |
125 | }; | | 125 | }; |
126 | #define xni_if xni_ec.ec_if | | 126 | #define xni_if xni_ec.ec_if |
127 | #define xni_bpf xni_if.if_bpf | | 127 | #define xni_bpf xni_if.if_bpf |
128 | | | 128 | |
129 | void xvifattach(int); | | 129 | void xvifattach(int); |
130 | static int xennetback_ifioctl(struct ifnet *, u_long, void *); | | 130 | static int xennetback_ifioctl(struct ifnet *, u_long, void *); |
131 | static void xennetback_ifstart(struct ifnet *); | | 131 | static void xennetback_ifstart(struct ifnet *); |
132 | static void xennetback_ifsoftstart_transfer(void *); | | 132 | static void xennetback_ifsoftstart_transfer(void *); |
133 | static void xennetback_ifsoftstart_copy(void *); | | 133 | static void xennetback_ifsoftstart_copy(void *); |
134 | static void xennetback_ifwatchdog(struct ifnet *); | | 134 | static void xennetback_ifwatchdog(struct ifnet *); |
135 | static int xennetback_ifinit(struct ifnet *); | | 135 | static int xennetback_ifinit(struct ifnet *); |
136 | static void xennetback_ifstop(struct ifnet *, int); | | 136 | static void xennetback_ifstop(struct ifnet *, int); |
137 | | | 137 | |
138 | static int xennetback_xenbus_create(struct xenbus_device *); | | 138 | static int xennetback_xenbus_create(struct xenbus_device *); |
139 | static int xennetback_xenbus_destroy(void *); | | 139 | static int xennetback_xenbus_destroy(void *); |
140 | static void xennetback_frontend_changed(void *, XenbusState); | | 140 | static void xennetback_frontend_changed(void *, XenbusState); |
141 | | | 141 | |
142 | static inline void xennetback_tx_response(struct xnetback_instance *, | | 142 | static inline void xennetback_tx_response(struct xnetback_instance *, |
143 | int, int); | | 143 | int, int); |
144 | static void xennetback_tx_free(struct mbuf *, void *, size_t, void *); | | 144 | static void xennetback_tx_free(struct mbuf *, void *, size_t, void *); |
145 | | | 145 | |
146 | static SLIST_HEAD(, xnetback_instance) xnetback_instances; | | 146 | static SLIST_HEAD(, xnetback_instance) xnetback_instances; |
147 | static kmutex_t xnetback_lock; | | 147 | static kmutex_t xnetback_lock; |
148 | | | 148 | |
149 | static bool xnetif_lookup(domid_t, uint32_t); | | 149 | static bool xnetif_lookup(domid_t, uint32_t); |
150 | static int xennetback_evthandler(void *); | | 150 | static int xennetback_evthandler(void *); |
151 | | | 151 | |
152 | static struct xenbus_backend_driver xvif_backend_driver = { | | 152 | static struct xenbus_backend_driver xvif_backend_driver = { |
153 | .xbakd_create = xennetback_xenbus_create, | | 153 | .xbakd_create = xennetback_xenbus_create, |
154 | .xbakd_type = "vif" | | 154 | .xbakd_type = "vif" |
155 | }; | | 155 | }; |
156 | | | 156 | |
157 | /* | | 157 | /* |
158 | * Number of packets to transmit in one hypercall (= number of pages to | | 158 | * Number of packets to transmit in one hypercall (= number of pages to |
159 | * transmit at once). | | 159 | * transmit at once). |
160 | */ | | 160 | */ |
161 | #define NB_XMIT_PAGES_BATCH 64 | | 161 | #define NB_XMIT_PAGES_BATCH 64 |
162 | | | 162 | |
163 | /* | | 163 | /* |
164 | * We will transfer a mapped page to the remote domain, and remap another | | 164 | * We will transfer a mapped page to the remote domain, and remap another |
165 | * page in place immediately. For this we keep a list of pages available. | | 165 | * page in place immediately. For this we keep a list of pages available. |
166 | * When the list is empty, we ask the hypervisor to give us | | 166 | * When the list is empty, we ask the hypervisor to give us |
167 | * NB_XMIT_PAGES_BATCH pages back. | | 167 | * NB_XMIT_PAGES_BATCH pages back. |
168 | */ | | 168 | */ |
169 | static unsigned long mcl_pages[NB_XMIT_PAGES_BATCH]; /* our physical pages */ | | 169 | static unsigned long mcl_pages[NB_XMIT_PAGES_BATCH]; /* our physical pages */ |
170 | int mcl_pages_alloc; /* current index in mcl_pages */ | | 170 | int mcl_pages_alloc; /* current index in mcl_pages */ |
171 | static int xennetback_get_mcl_page(paddr_t *); | | 171 | static int xennetback_get_mcl_page(paddr_t *); |
172 | static void xennetback_get_new_mcl_pages(void); | | 172 | static void xennetback_get_new_mcl_pages(void); |
173 | | | 173 | |
174 | /* | | 174 | /* |
175 | * If we can't transfer the mbuf directly, we have to copy it to a page which | | 175 | * If we can't transfer the mbuf directly, we have to copy it to a page which |
176 | * will be transferred to the remote domain. We use a pool_cache for this. | | 176 | * will be transferred to the remote domain. We use a pool_cache for this. |
177 | */ | | 177 | */ |
178 | pool_cache_t xmit_pages_cache; | | 178 | pool_cache_t xmit_pages_cache; |
179 | | | 179 | |
180 | /* arrays used in xennetback_ifstart(), too large to allocate on stack */ | | 180 | /* arrays used in xennetback_ifstart(), too large to allocate on stack */ |
181 | /* XXXSMP */ | | 181 | /* XXXSMP */ |
182 | static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH]; | | 182 | static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH]; |
183 | static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1]; | | 183 | static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1]; |
184 | static gnttab_transfer_t xstart_gop_transfer[NB_XMIT_PAGES_BATCH]; | | 184 | static gnttab_transfer_t xstart_gop_transfer[NB_XMIT_PAGES_BATCH]; |
185 | static gnttab_copy_t xstart_gop_copy[NB_XMIT_PAGES_BATCH]; | | 185 | static gnttab_copy_t xstart_gop_copy[NB_XMIT_PAGES_BATCH]; |
186 | static struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH]; | | 186 | static struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH]; |
187 | static struct _pages_pool_free { | | 187 | static struct _pages_pool_free { |
188 | vaddr_t va; | | 188 | vaddr_t va; |
189 | paddr_t pa; | | 189 | paddr_t pa; |
190 | } pages_pool_free[NB_XMIT_PAGES_BATCH]; | | 190 | } pages_pool_free[NB_XMIT_PAGES_BATCH]; |
191 | | | 191 | |
192 | | | 192 | |
193 | static inline void | | 193 | static inline void |
194 | xni_pkt_unmap(struct xni_pkt *pkt, vaddr_t pkt_va) | | 194 | xni_pkt_unmap(struct xni_pkt *pkt, vaddr_t pkt_va) |
195 | { | | 195 | { |
196 | xen_shm_unmap(pkt_va, 1, &pkt->pkt_handle); | | 196 | xen_shm_unmap(pkt_va, 1, &pkt->pkt_handle); |
197 | pool_put(&xni_pkt_pool, pkt); | | 197 | pool_put(&xni_pkt_pool, pkt); |
198 | } | | 198 | } |
199 | | | 199 | |
200 | void | | 200 | void |
201 | xvifattach(int n) | | 201 | xvifattach(int n) |
202 | { | | 202 | { |
203 | int i; | | 203 | int i; |
204 | struct pglist mlist; | | 204 | struct pglist mlist; |
205 | struct vm_page *pg; | | 205 | struct vm_page *pg; |
206 | | | 206 | |
207 | XENPRINTF(("xennetback_init\n")); | | 207 | XENPRINTF(("xennetback_init\n")); |
208 | | | 208 | |
209 | /* | | 209 | /* |
210 | * steal some non-managed pages from the VM system, to replace | | 210 | * steal some non-managed pages from the VM system, to replace |
211 | * mbuf cluster or xmit_pages_pool pages given to foreign domains. | | 211 | * mbuf cluster or xmit_pages_pool pages given to foreign domains. |
212 | */ | | 212 | */ |
213 | if (uvm_pglistalloc(PAGE_SIZE * NB_XMIT_PAGES_BATCH, 0, 0xffffffff, | | 213 | if (uvm_pglistalloc(PAGE_SIZE * NB_XMIT_PAGES_BATCH, 0, 0xffffffff, |
214 | 0, 0, &mlist, NB_XMIT_PAGES_BATCH, 0) != 0) | | 214 | 0, 0, &mlist, NB_XMIT_PAGES_BATCH, 0) != 0) |
215 | panic("xennetback_init: uvm_pglistalloc"); | | 215 | panic("xennetback_init: uvm_pglistalloc"); |
216 | for (i = 0, pg = mlist.tqh_first; pg != NULL; | | 216 | for (i = 0, pg = mlist.tqh_first; pg != NULL; |
217 | pg = pg->pageq.queue.tqe_next, i++) | | 217 | pg = pg->pageq.queue.tqe_next, i++) |
218 | mcl_pages[i] = xpmap_ptom(VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT; | | 218 | mcl_pages[i] = xpmap_ptom(VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT; |
219 | if (i != NB_XMIT_PAGES_BATCH) | | 219 | if (i != NB_XMIT_PAGES_BATCH) |
220 | panic("xennetback_init: %d mcl pages", i); | | 220 | panic("xennetback_init: %d mcl pages", i); |
221 | mcl_pages_alloc = NB_XMIT_PAGES_BATCH - 1; | | 221 | mcl_pages_alloc = NB_XMIT_PAGES_BATCH - 1; |
222 | | | 222 | |
223 | /* initialise pools */ | | 223 | /* initialise pools */ |
224 | pool_init(&xni_pkt_pool, sizeof(struct xni_pkt), 0, 0, 0, | | 224 | pool_init(&xni_pkt_pool, sizeof(struct xni_pkt), 0, 0, 0, |
225 | "xnbpkt", NULL, IPL_VM); | | 225 | "xnbpkt", NULL, IPL_VM); |
226 | xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL, | | 226 | xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL, |
227 | IPL_VM, NULL, NULL, NULL); | | 227 | IPL_VM, NULL, NULL, NULL); |
228 | | | 228 | |
229 | SLIST_INIT(&xnetback_instances); | | 229 | SLIST_INIT(&xnetback_instances); |
230 | mutex_init(&xnetback_lock, MUTEX_DEFAULT, IPL_NONE); | | 230 | mutex_init(&xnetback_lock, MUTEX_DEFAULT, IPL_NONE); |
231 | | | 231 | |
232 | xenbus_backend_register(&xvif_backend_driver); | | 232 | xenbus_backend_register(&xvif_backend_driver); |
233 | } | | 233 | } |
234 | | | 234 | |
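| /* | | | /* |
|  * Backend creation: read the frontend domain id and handle from | | |  * Backend creation: read the frontend domain id and handle from |
|  * xenstore, attach the xvif<domid>i<handle> pseudo-interface, | | |  * xenstore, attach the xvif<domid>i<handle> pseudo-interface, |
|  * advertise the supported features and switch to InitWait. | | |  * advertise the supported features and switch to InitWait. |
|  */ | | |  */ |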
235 | static int | | 235 | static int |
236 | xennetback_xenbus_create(struct xenbus_device *xbusd) | | 236 | xennetback_xenbus_create(struct xenbus_device *xbusd) |
237 | { | | 237 | { |
238 | struct xnetback_instance *xneti; | | 238 | struct xnetback_instance *xneti; |
239 | long domid, handle; | | 239 | long domid, handle; |
240 | struct ifnet *ifp; | | 240 | struct ifnet *ifp; |
241 | extern int ifqmaxlen; /* XXX */ | | 241 | extern int ifqmaxlen; /* XXX */ |
242 | char *val, *e, *p; | | 242 | char *val, *e, *p; |
243 | int i, err; | | 243 | int i, err; |
244 | struct xenbus_transaction *xbt; | | 244 | struct xenbus_transaction *xbt; |
245 | | | 245 | |
246 | if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path, | | 246 | if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path, |
247 | "frontend-id", &domid, 10)) != 0) { | | 247 | "frontend-id", &domid, 10)) != 0) { |
248 | aprint_error("xvif: can't read %s/frontend-id: %d\n", | | 248 | aprint_error("xvif: can't read %s/frontend-id: %d\n", |
249 | xbusd->xbusd_path, err); | | 249 | xbusd->xbusd_path, err); |
250 | return err; | | 250 | return err; |
251 | } | | 251 | } |
252 | if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path, | | 252 | if ((err = xenbus_read_ul(NULL, xbusd->xbusd_path, |
253 | "handle", &handle, 10)) != 0) { | | 253 | "handle", &handle, 10)) != 0) { |
254 | aprint_error("xvif: can't read %s/handle: %d\n", | | 254 | aprint_error("xvif: can't read %s/handle: %d\n", |
255 | xbusd->xbusd_path, err); | | 255 | xbusd->xbusd_path, err); |
256 | return err; | | 256 | return err; |
257 | } | | 257 | } |
258 | | | 258 | |
259 | if (xnetif_lookup(domid, handle)) { | | 259 | if (xnetif_lookup(domid, handle)) { |
260 | return EEXIST; | | 260 | return EEXIST; |
261 | } | | 261 | } |
262 | xneti = kmem_zalloc(sizeof(*xneti), KM_SLEEP); | | 262 | xneti = kmem_zalloc(sizeof(*xneti), KM_SLEEP); |
263 | xneti->xni_domid = domid; | | 263 | xneti->xni_domid = domid; |
264 | xneti->xni_handle = handle; | | 264 | xneti->xni_handle = handle; |
265 | xneti->xni_status = DISCONNECTED; | | 265 | xneti->xni_status = DISCONNECTED; |
266 | | | 266 | |
267 | xbusd->xbusd_u.b.b_cookie = xneti; | | 267 | xbusd->xbusd_u.b.b_cookie = xneti; |
268 | xbusd->xbusd_u.b.b_detach = xennetback_xenbus_destroy; | | 268 | xbusd->xbusd_u.b.b_detach = xennetback_xenbus_destroy; |
269 | xneti->xni_xbusd = xbusd; | | 269 | xneti->xni_xbusd = xbusd; |
270 | | | 270 | |
271 | ifp = &xneti->xni_if; | | 271 | ifp = &xneti->xni_if; |
272 | ifp->if_softc = xneti; | | 272 | ifp->if_softc = xneti; |
273 | snprintf(ifp->if_xname, IFNAMSIZ, "xvif%di%d", | | 273 | snprintf(ifp->if_xname, IFNAMSIZ, "xvif%di%d", |
274 | (int)domid, (int)handle); | | 274 | (int)domid, (int)handle); |
275 | | | 275 | |
276 | /* read mac address */ | | 276 | /* read mac address */ |
277 | if ((err = xenbus_read(NULL, xbusd->xbusd_path, "mac", NULL, &val))) { | | 277 | if ((err = xenbus_read(NULL, xbusd->xbusd_path, "mac", NULL, &val))) { |
278 | aprint_error_ifnet(ifp, "can't read %s/mac: %d\n", | | 278 | aprint_error_ifnet(ifp, "can't read %s/mac: %d\n", |
279 | xbusd->xbusd_path, err); | | 279 | xbusd->xbusd_path, err); |
280 | goto fail; | | 280 | goto fail; |
281 | } | | 281 | } |
282 | for (i = 0, p = val; i < 6; i++) { | | 282 | for (i = 0, p = val; i < 6; i++) { |
283 | xneti->xni_enaddr[i] = strtoul(p, &e, 16); | | 283 | xneti->xni_enaddr[i] = strtoul(p, &e, 16); |
284 | if ((i != 5 && e[0] != ':') || (i == 5 && e[0] != '\0')) { | | 284 | if ((i != 5 && e[0] != ':') || (i == 5 && e[0] != '\0')) { |
285 | aprint_error_ifnet(ifp, | | 285 | aprint_error_ifnet(ifp, |
286 | "%s is not a valid mac address\n", val); | | 286 | "%s is not a valid mac address\n", val); |
287 | free(val, M_DEVBUF); | | 287 | free(val, M_DEVBUF); |
288 | err = EINVAL; | | 288 | err = EINVAL; |
289 | goto fail; | | 289 | goto fail; |
290 | } | | 290 | } |
291 | p = &e[1]; | | 291 | p = &e[1]; |
292 | } | | 292 | } |
293 | free(val, M_DEVBUF); | | 293 | free(val, M_DEVBUF); |
294 | | | 294 | |
295 | /* we can't use the same MAC addr as our guest */ | | 295 | /* we can't use the same MAC addr as our guest */ |
296 | xneti->xni_enaddr[3]++; | | 296 | xneti->xni_enaddr[3]++; |
297 | /* create pseudo-interface */ | | 297 | /* create pseudo-interface */ |
298 | aprint_verbose_ifnet(ifp, "Ethernet address %s\n", | | 298 | aprint_verbose_ifnet(ifp, "Ethernet address %s\n", |
299 | ether_sprintf(xneti->xni_enaddr)); | | 299 | ether_sprintf(xneti->xni_enaddr)); |
300 | xneti->xni_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; | | 300 | xneti->xni_ec.ec_capabilities |= ETHERCAP_VLAN_MTU; |
301 | ifp->if_flags = | | 301 | ifp->if_flags = |
302 | IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; | | 302 | IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST; |
303 | ifp->if_snd.ifq_maxlen = | | 303 | ifp->if_snd.ifq_maxlen = |
304 | uimax(ifqmaxlen, NET_TX_RING_SIZE * 2); | | 304 | uimax(ifqmaxlen, NET_TX_RING_SIZE * 2); |
305 | ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx; | | 305 | ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx; |
306 | ifp->if_ioctl = xennetback_ifioctl; | | 306 | ifp->if_ioctl = xennetback_ifioctl; |
307 | ifp->if_start = xennetback_ifstart; | | 307 | ifp->if_start = xennetback_ifstart; |
308 | ifp->if_watchdog = xennetback_ifwatchdog; | | 308 | ifp->if_watchdog = xennetback_ifwatchdog; |
309 | ifp->if_init = xennetback_ifinit; | | 309 | ifp->if_init = xennetback_ifinit; |
310 | ifp->if_stop = xennetback_ifstop; | | 310 | ifp->if_stop = xennetback_ifstop; |
311 | ifp->if_timer = 0; | | 311 | ifp->if_timer = 0; |
312 | IFQ_SET_READY(&ifp->if_snd); | | 312 | IFQ_SET_READY(&ifp->if_snd); |
313 | if_attach(ifp); | | 313 | if_attach(ifp); |
314 | ether_ifattach(&xneti->xni_if, xneti->xni_enaddr); | | 314 | ether_ifattach(&xneti->xni_if, xneti->xni_enaddr); |
315 | | | 315 | |
316 | mutex_enter(&xnetback_lock); | | 316 | mutex_enter(&xnetback_lock); |
317 | SLIST_INSERT_HEAD(&xnetback_instances, xneti, next); | | 317 | SLIST_INSERT_HEAD(&xnetback_instances, xneti, next); |
318 | mutex_exit(&xnetback_lock); | | 318 | mutex_exit(&xnetback_lock); |
319 | | | 319 | |
320 | xbusd->xbusd_otherend_changed = xennetback_frontend_changed; | | 320 | xbusd->xbusd_otherend_changed = xennetback_frontend_changed; |
321 | | | 321 | |
322 | do { | | 322 | do { |
323 | xbt = xenbus_transaction_start(); | | 323 | xbt = xenbus_transaction_start(); |
324 | if (xbt == NULL) { | | 324 | if (xbt == NULL) { |
325 | aprint_error_ifnet(ifp, | | 325 | aprint_error_ifnet(ifp, |
326 | "%s: can't start transaction\n", | | 326 | "%s: can't start transaction\n", |
327 | xbusd->xbusd_path); | | 327 | xbusd->xbusd_path); |
328 | goto fail; | | 328 | goto fail; |
329 | } | | 329 | } |
330 | err = xenbus_printf(xbt, xbusd->xbusd_path, | | 330 | err = xenbus_printf(xbt, xbusd->xbusd_path, |
331 | "vifname", "%s", ifp->if_xname); | | 331 | "vifname", "%s", ifp->if_xname); |
332 | if (err) { | | 332 | if (err) { |
333 | aprint_error_ifnet(ifp, | | 333 | aprint_error_ifnet(ifp, |
334 | "failed to write %s/vifname: %d\n", | | 334 | "failed to write %s/vifname: %d\n", |
335 | xbusd->xbusd_path, err); | | 335 | xbusd->xbusd_path, err); |
336 | goto abort_xbt; | | 336 | goto abort_xbt; |
337 | } | | 337 | } |
338 | err = xenbus_printf(xbt, xbusd->xbusd_path, | | 338 | err = xenbus_printf(xbt, xbusd->xbusd_path, |
339 | "feature-rx-copy", "%d", 1); | | 339 | "feature-rx-copy", "%d", 1); |
340 | if (err) { | | 340 | if (err) { |
341 | aprint_error_ifnet(ifp, | | 341 | aprint_error_ifnet(ifp, |
342 | "failed to write %s/feature-rx-copy: %d\n", | | 342 | "failed to write %s/feature-rx-copy: %d\n", |
343 | xbusd->xbusd_path, err); | | 343 | xbusd->xbusd_path, err); |
344 | goto abort_xbt; | | 344 | goto abort_xbt; |
345 | } | | 345 | } |
346 | err = xenbus_printf(xbt, xbusd->xbusd_path, | | 346 | err = xenbus_printf(xbt, xbusd->xbusd_path, |
347 | "feature-rx-flip", "%d", 1); | | 347 | "feature-rx-flip", "%d", 1); |
348 | if (err) { | | 348 | if (err) { |
349 | aprint_error_ifnet(ifp, | | 349 | aprint_error_ifnet(ifp, |
350 | "failed to write %s/feature-rx-flip: %d\n", | | 350 | "failed to write %s/feature-rx-flip: %d\n", |
351 | xbusd->xbusd_path, err); | | 351 | xbusd->xbusd_path, err); |
352 | goto abort_xbt; | | 352 | goto abort_xbt; |
353 | } | | 353 | } |
354 | } while ((err = xenbus_transaction_end(xbt, 0)) == EAGAIN); | | 354 | } while ((err = xenbus_transaction_end(xbt, 0)) == EAGAIN); |
355 | if (err) { | | 355 | if (err) { |
356 | aprint_error_ifnet(ifp, | | 356 | aprint_error_ifnet(ifp, |
357 | "%s: can't end transaction: %d\n", | | 357 | "%s: can't end transaction: %d\n", |
358 | xbusd->xbusd_path, err); | | 358 | xbusd->xbusd_path, err); |
359 | } | | 359 | } |
360 | | | 360 | |
361 | err = xenbus_switch_state(xbusd, NULL, XenbusStateInitWait); | | 361 | err = xenbus_switch_state(xbusd, NULL, XenbusStateInitWait); |
362 | if (err) { | | 362 | if (err) { |
363 | aprint_error_ifnet(ifp, | | 363 | aprint_error_ifnet(ifp, |
364 | "failed to switch state on %s: %d\n", | | 364 | "failed to switch state on %s: %d\n", |
365 | xbusd->xbusd_path, err); | | 365 | xbusd->xbusd_path, err); |
366 | goto fail; | | 366 | goto fail; |
367 | } | | 367 | } |
368 | return 0; | | 368 | return 0; |
369 | | | 369 | |
370 | abort_xbt: | | 370 | abort_xbt: |
371 | xenbus_transaction_end(xbt, 1); | | 371 | xenbus_transaction_end(xbt, 1); |
372 | fail: | | 372 | fail: |
373 | kmem_free(xneti, sizeof(*xneti)); | | 373 | kmem_free(xneti, sizeof(*xneti)); |
374 | return err; | | 374 | return err; |
375 | } | | 375 | } |
376 | | | 376 | |
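| /* | | | /* |
|  * Backend teardown: tear down the event channel and softint, unlink | | |  * Backend teardown: tear down the event channel and softint, unlink |
|  * the instance, detach the interface, then unmap the shared rings | | |  * the instance, detach the interface, then unmap the shared rings |
|  * and release their VA space. | | |  * and release their VA space. |
|  */ | | |  */ |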
377 | int | | 377 | int |
378 | xennetback_xenbus_destroy(void *arg) | | 378 | xennetback_xenbus_destroy(void *arg) |
379 | { | | 379 | { |
380 | struct xnetback_instance *xneti = arg; | | 380 | struct xnetback_instance *xneti = arg; |
381 | struct gnttab_unmap_grant_ref op; | | 381 | struct gnttab_unmap_grant_ref op; |
382 | int err; | | 382 | int err; |
383 | | | 383 | |
384 | aprint_verbose_ifnet(&xneti->xni_if, "disconnecting\n"); | | 384 | aprint_verbose_ifnet(&xneti->xni_if, "disconnecting\n"); |
385 | | | 385 | |
386 | if (xneti->xni_ih != NULL) { | | 386 | if (xneti->xni_ih != NULL) { |
387 | hypervisor_mask_event(xneti->xni_evtchn); | | 387 | hypervisor_mask_event(xneti->xni_evtchn); |
388 | intr_disestablish(xneti->xni_ih); | | 388 | intr_disestablish(xneti->xni_ih); |
389 | xneti->xni_ih = NULL; | | 389 | xneti->xni_ih = NULL; |
390 | | | 390 | |
391 | if (xneti->xni_softintr) { | | 391 | if (xneti->xni_softintr) { |
392 | softint_disestablish(xneti->xni_softintr); | | 392 | softint_disestablish(xneti->xni_softintr); |
393 | xneti->xni_softintr = NULL; | | 393 | xneti->xni_softintr = NULL; |
394 | } | | 394 | } |
395 | } | | 395 | } |
396 | | | 396 | |
397 | mutex_enter(&xnetback_lock); | | 397 | mutex_enter(&xnetback_lock); |
398 | SLIST_REMOVE(&xnetback_instances, | | 398 | SLIST_REMOVE(&xnetback_instances, |
399 | xneti, xnetback_instance, next); | | 399 | xneti, xnetback_instance, next); |
400 | mutex_exit(&xnetback_lock); | | 400 | mutex_exit(&xnetback_lock); |
401 | | | 401 | |
402 | ether_ifdetach(&xneti->xni_if); | | 402 | ether_ifdetach(&xneti->xni_if); |
403 | if_detach(&xneti->xni_if); | | 403 | if_detach(&xneti->xni_if); |
404 | | | 404 | |
405 | if (xneti->xni_txring.sring) { | | 405 | if (xneti->xni_txring.sring) { |
406 | op.host_addr = xneti->xni_tx_ring_va; | | 406 | op.host_addr = xneti->xni_tx_ring_va; |
407 | op.handle = xneti->xni_tx_ring_handle; | | 407 | op.handle = xneti->xni_tx_ring_handle; |
408 | op.dev_bus_addr = 0; | | 408 | op.dev_bus_addr = 0; |
409 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, | | 409 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, |
410 | &op, 1); | | 410 | &op, 1); |
411 | if (err) | | 411 | if (err) |
412 | aprint_error_ifnet(&xneti->xni_if, | | 412 | aprint_error_ifnet(&xneti->xni_if, |
413 | "unmap_grant_ref failed: %d\n", err); | | 413 | "unmap_grant_ref failed: %d\n", err); |
414 | } | | 414 | } |
415 | if (xneti->xni_rxring.sring) { | | 415 | if (xneti->xni_rxring.sring) { |
416 | op.host_addr = xneti->xni_rx_ring_va; | | 416 | op.host_addr = xneti->xni_rx_ring_va; |
417 | op.handle = xneti->xni_rx_ring_handle; | | 417 | op.handle = xneti->xni_rx_ring_handle; |
418 | op.dev_bus_addr = 0; | | 418 | op.dev_bus_addr = 0; |
419 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, | | 419 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, |
420 | &op, 1); | | 420 | &op, 1); |
421 | if (err) | | 421 | if (err) |
422 | aprint_error_ifnet(&xneti->xni_if, | | 422 | aprint_error_ifnet(&xneti->xni_if, |
423 | "unmap_grant_ref failed: %d\n", err); | | 423 | "unmap_grant_ref failed: %d\n", err); |
424 | } | | 424 | } |
425 | if (xneti->xni_tx_ring_va != 0) { | | 425 | if (xneti->xni_tx_ring_va != 0) { |
426 | uvm_km_free(kernel_map, xneti->xni_tx_ring_va, | | 426 | uvm_km_free(kernel_map, xneti->xni_tx_ring_va, |
427 | PAGE_SIZE, UVM_KMF_VAONLY); | | 427 | PAGE_SIZE, UVM_KMF_VAONLY); |
428 | xneti->xni_tx_ring_va = 0; | | 428 | xneti->xni_tx_ring_va = 0; |
429 | } | | 429 | } |
430 | if (xneti->xni_rx_ring_va != 0) { | | 430 | if (xneti->xni_rx_ring_va != 0) { |
431 | uvm_km_free(kernel_map, xneti->xni_rx_ring_va, | | 431 | uvm_km_free(kernel_map, xneti->xni_rx_ring_va, |
432 | PAGE_SIZE, UVM_KMF_VAONLY); | | 432 | PAGE_SIZE, UVM_KMF_VAONLY); |
433 | xneti->xni_rx_ring_va = 0; | | 433 | xneti->xni_rx_ring_va = 0; |
434 | } | | 434 | } |
435 | kmem_free(xneti, sizeof(*xneti)); | | 435 | kmem_free(xneti, sizeof(*xneti)); |
436 | return 0; | | 436 | return 0; |
437 | } | | 437 | } |
438 | | | 438 | |
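| /* | | | /* |
|  * The frontend is connecting: read the ring references and remote | | |  * The frontend is connecting: read the ring references and remote |
|  * event channel from xenstore, map both shared rings, bind the | | |  * event channel from xenstore, map both shared rings, bind the |
|  * interdomain event channel and bring the interface up. The RX | | |  * interdomain event channel and bring the interface up. The RX |
|  * softint is chosen from the frontend's request-rx-copy setting | | |  * softint is chosen from the frontend's request-rx-copy setting |
|  * (grant copy vs. page transfer). | | |  * (grant copy vs. page transfer). |
|  */ | | |  */ |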
439 | static int | | 439 | static int |
440 | xennetback_connect(struct xnetback_instance *xneti) | | 440 | xennetback_connect(struct xnetback_instance *xneti) |
441 | { | | 441 | { |
442 | int err; | | 442 | int err; |
443 | netif_tx_sring_t *tx_ring; | | 443 | netif_tx_sring_t *tx_ring; |
444 | netif_rx_sring_t *rx_ring; | | 444 | netif_rx_sring_t *rx_ring; |
445 | struct gnttab_map_grant_ref op; | | 445 | struct gnttab_map_grant_ref op; |
446 | struct gnttab_unmap_grant_ref uop; | | 446 | struct gnttab_unmap_grant_ref uop; |
447 | evtchn_op_t evop; | | 447 | evtchn_op_t evop; |
448 | u_long tx_ring_ref, rx_ring_ref; | | 448 | u_long tx_ring_ref, rx_ring_ref; |
449 | u_long revtchn, rx_copy; | | 449 | u_long revtchn, rx_copy; |
450 | struct xenbus_device *xbusd = xneti->xni_xbusd; | | 450 | struct xenbus_device *xbusd = xneti->xni_xbusd; |
451 | | | 451 | |
452 | /* read communication information */ | | 452 | /* read communication information */ |
453 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, | | 453 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, |
454 | "tx-ring-ref", &tx_ring_ref, 10); | | 454 | "tx-ring-ref", &tx_ring_ref, 10); |
455 | if (err) { | | 455 | if (err) { |
456 | xenbus_dev_fatal(xbusd, err, "reading %s/tx-ring-ref", | | 456 | xenbus_dev_fatal(xbusd, err, "reading %s/tx-ring-ref", |
457 | xbusd->xbusd_otherend); | | 457 | xbusd->xbusd_otherend); |
458 | return -1; | | 458 | return -1; |
459 | } | | 459 | } |
460 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, | | 460 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, |
461 | "rx-ring-ref", &rx_ring_ref, 10); | | 461 | "rx-ring-ref", &rx_ring_ref, 10); |
462 | if (err) { | | 462 | if (err) { |
463 | xenbus_dev_fatal(xbusd, err, "reading %s/rx-ring-ref", | | 463 | xenbus_dev_fatal(xbusd, err, "reading %s/rx-ring-ref", |
464 | xbusd->xbusd_otherend); | | 464 | xbusd->xbusd_otherend); |
465 | return -1; | | 465 | return -1; |
466 | } | | 466 | } |
467 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, | | 467 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, |
468 | "event-channel", &revtchn, 10); | | 468 | "event-channel", &revtchn, 10); |
469 | if (err) { | | 469 | if (err) { |
470 | xenbus_dev_fatal(xbusd, err, "reading %s/event-channel", | | 470 | xenbus_dev_fatal(xbusd, err, "reading %s/event-channel", |
471 | xbusd->xbusd_otherend); | | 471 | xbusd->xbusd_otherend); |
472 | return -1; | | 472 | return -1; |
473 | } | | 473 | } |
474 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, | | 474 | err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, |
475 | "request-rx-copy", &rx_copy, 10); | | 475 | "request-rx-copy", &rx_copy, 10); |
476 | if (err == ENOENT) | | 476 | if (err == ENOENT) |
477 | rx_copy = 0; | | 477 | rx_copy = 0; |
478 | else if (err) { | | 478 | else if (err) { |
479 | xenbus_dev_fatal(xbusd, err, "reading %s/request-rx-copy", | | 479 | xenbus_dev_fatal(xbusd, err, "reading %s/request-rx-copy", |
480 | xbusd->xbusd_otherend); | | 480 | xbusd->xbusd_otherend); |
481 | return -1; | | 481 | return -1; |
482 | } | | 482 | } |
483 | | | 483 | |
484 | if (rx_copy) | | 484 | if (rx_copy) |
485 | xneti->xni_softintr = softint_establish(SOFTINT_NET, | | 485 | xneti->xni_softintr = softint_establish(SOFTINT_NET, |
486 | xennetback_ifsoftstart_copy, xneti); | | 486 | xennetback_ifsoftstart_copy, xneti); |
487 | else | | 487 | else |
488 | xneti->xni_softintr = softint_establish(SOFTINT_NET, | | 488 | xneti->xni_softintr = softint_establish(SOFTINT_NET, |
489 | xennetback_ifsoftstart_transfer, xneti); | | 489 | xennetback_ifsoftstart_transfer, xneti); |
490 | | | 490 | |
491 | if (xneti->xni_softintr == NULL) { | | 491 | if (xneti->xni_softintr == NULL) { |
492 | err = ENOMEM; | | 492 | err = ENOMEM; |
493 | xenbus_dev_fatal(xbusd, ENOMEM, | | 493 | xenbus_dev_fatal(xbusd, ENOMEM, |
494 | "can't allocate softint", xbusd->xbusd_otherend); | | 494 | "can't allocate softint", xbusd->xbusd_otherend); |
495 | return -1; | | 495 | return -1; |
496 | } | | 496 | } |
497 | | | 497 | |
498 | /* allocate VA space and map rings */ | | 498 | /* allocate VA space and map rings */ |
499 | xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, | | 499 | xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, |
500 | UVM_KMF_VAONLY); | | 500 | UVM_KMF_VAONLY); |
501 | if (xneti->xni_tx_ring_va == 0) { | | 501 | if (xneti->xni_tx_ring_va == 0) { |
502 | xenbus_dev_fatal(xbusd, ENOMEM, | | 502 | xenbus_dev_fatal(xbusd, ENOMEM, |
503 | "can't get VA for TX ring", xbusd->xbusd_otherend); | | 503 | "can't get VA for TX ring", xbusd->xbusd_otherend); |
504 | goto err1; | | 504 | goto err1; |
505 | } | | 505 | } |
506 | tx_ring = (void *)xneti->xni_tx_ring_va; | | 506 | tx_ring = (void *)xneti->xni_tx_ring_va; |
507 | | | 507 | |
508 | xneti->xni_rx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, | | 508 | xneti->xni_rx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, |
509 | UVM_KMF_VAONLY); | | 509 | UVM_KMF_VAONLY); |
510 | if (xneti->xni_rx_ring_va == 0) { | | 510 | if (xneti->xni_rx_ring_va == 0) { |
511 | xenbus_dev_fatal(xbusd, ENOMEM, | | 511 | xenbus_dev_fatal(xbusd, ENOMEM, |
512 | "can't get VA for RX ring", xbusd->xbusd_otherend); | | 512 | "can't get VA for RX ring", xbusd->xbusd_otherend); |
513 | goto err1; | | 513 | goto err1; |
514 | } | | 514 | } |
515 | rx_ring = (void *)xneti->xni_rx_ring_va; | | 515 | rx_ring = (void *)xneti->xni_rx_ring_va; |
516 | | | 516 | |
517 | op.host_addr = xneti->xni_tx_ring_va; | | 517 | op.host_addr = xneti->xni_tx_ring_va; |
518 | op.flags = GNTMAP_host_map; | | 518 | op.flags = GNTMAP_host_map; |
519 | op.ref = tx_ring_ref; | | 519 | op.ref = tx_ring_ref; |
520 | op.dom = xneti->xni_domid; | | 520 | op.dom = xneti->xni_domid; |
521 | err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); | | 521 | err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); |
522 | if (err || op.status) { | | 522 | if (err || op.status) { |
523 | aprint_error_ifnet(&xneti->xni_if, | | 523 | aprint_error_ifnet(&xneti->xni_if, |
524 | "can't map TX grant ref: err %d status %d\n", | | 524 | "can't map TX grant ref: err %d status %d\n", |
525 | err, op.status); | | 525 | err, op.status); |
526 | goto err2; | | 526 | goto err2; |
527 | } | | 527 | } |
528 | xneti->xni_tx_ring_handle = op.handle; | | 528 | xneti->xni_tx_ring_handle = op.handle; |
529 | BACK_RING_INIT(&xneti->xni_txring, tx_ring, PAGE_SIZE); | | 529 | BACK_RING_INIT(&xneti->xni_txring, tx_ring, PAGE_SIZE); |
530 | | | 530 | |
531 | op.host_addr = xneti->xni_rx_ring_va; | | 531 | op.host_addr = xneti->xni_rx_ring_va; |
532 | op.flags = GNTMAP_host_map; | | 532 | op.flags = GNTMAP_host_map; |
533 | op.ref = rx_ring_ref; | | 533 | op.ref = rx_ring_ref; |
534 | op.dom = xneti->xni_domid; | | 534 | op.dom = xneti->xni_domid; |
535 | err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); | | 535 | err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1); |
536 | if (err || op.status) { | | 536 | if (err || op.status) { |
537 | aprint_error_ifnet(&xneti->xni_if, | | 537 | aprint_error_ifnet(&xneti->xni_if, |
538 | "can't map RX grant ref: err %d status %d\n", | | 538 | "can't map RX grant ref: err %d status %d\n", |
539 | err, op.status); | | 539 | err, op.status); |
540 | goto err2; | | 540 | goto err2; |
541 | } | | 541 | } |
542 | xneti->xni_rx_ring_handle = op.handle; | | 542 | xneti->xni_rx_ring_handle = op.handle; |
543 | BACK_RING_INIT(&xneti->xni_rxring, rx_ring, PAGE_SIZE); | | 543 | BACK_RING_INIT(&xneti->xni_rxring, rx_ring, PAGE_SIZE); |
544 | | | 544 | |
545 | evop.cmd = EVTCHNOP_bind_interdomain; | | 545 | evop.cmd = EVTCHNOP_bind_interdomain; |
546 | evop.u.bind_interdomain.remote_dom = xneti->xni_domid; | | 546 | evop.u.bind_interdomain.remote_dom = xneti->xni_domid; |
547 | evop.u.bind_interdomain.remote_port = revtchn; | | 547 | evop.u.bind_interdomain.remote_port = revtchn; |
548 | err = HYPERVISOR_event_channel_op(&evop); | | 548 | err = HYPERVISOR_event_channel_op(&evop); |
549 | if (err) { | | 549 | if (err) { |
550 | aprint_error_ifnet(&xneti->xni_if, | | 550 | aprint_error_ifnet(&xneti->xni_if, |
551 | "can't get event channel: %d\n", err); | | 551 | "can't get event channel: %d\n", err); |
552 | goto err2; | | 552 | goto err2; |
553 | } | | 553 | } |
554 | xneti->xni_evtchn = evop.u.bind_interdomain.local_port; | | 554 | xneti->xni_evtchn = evop.u.bind_interdomain.local_port; |
555 | xen_wmb(); | | 555 | xen_wmb(); |
556 | xneti->xni_status = CONNECTED; | | 556 | xneti->xni_status = CONNECTED; |
557 | xen_wmb(); | | 557 | xen_wmb(); |
558 | | | 558 | |
559 | xneti->xni_ih = intr_establish_xname(-1, &xen_pic, xneti->xni_evtchn, | | 559 | xneti->xni_ih = intr_establish_xname(-1, &xen_pic, xneti->xni_evtchn, |
560 | IST_LEVEL, IPL_NET, xennetback_evthandler, xneti, false, | | 560 | IST_LEVEL, IPL_NET, xennetback_evthandler, xneti, false, |
561 | xneti->xni_if.if_xname); | | 561 | xneti->xni_if.if_xname); |
562 | KASSERT(xneti->xni_ih != NULL); | | 562 | KASSERT(xneti->xni_ih != NULL); |
563 | xennetback_ifinit(&xneti->xni_if); | | 563 | xennetback_ifinit(&xneti->xni_if); |
564 | hypervisor_unmask_event(xneti->xni_evtchn); | | 564 | hypervisor_unmask_event(xneti->xni_evtchn); |
565 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); | | 565 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); |
566 | return 0; | | 566 | return 0; |
567 | | | 567 | |
568 | err2: | | 568 | err2: |
569 | /* unmap rings */ | | 569 | /* unmap rings */ |
570 | if (xneti->xni_tx_ring_handle != 0) { | | 570 | if (xneti->xni_tx_ring_handle != 0) { |
571 | uop.host_addr = xneti->xni_tx_ring_va; | | 571 | uop.host_addr = xneti->xni_tx_ring_va; |
572 | uop.handle = xneti->xni_tx_ring_handle; | | 572 | uop.handle = xneti->xni_tx_ring_handle; |
573 | uop.dev_bus_addr = 0; | | 573 | uop.dev_bus_addr = 0; |
574 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, | | 574 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, |
575 | &uop, 1); | | 575 | &uop, 1); |
576 | if (err) | | 576 | if (err) |
577 | aprint_error_ifnet(&xneti->xni_if, | | 577 | aprint_error_ifnet(&xneti->xni_if, |
578 | "unmap_grant_ref failed: %d\n", err); | | 578 | "unmap_grant_ref failed: %d\n", err); |
579 | } | | 579 | } |
580 | | | 580 | |
581 | if (xneti->xni_rx_ring_handle != 0) { | | 581 | if (xneti->xni_rx_ring_handle != 0) { |
582 | uop.host_addr = xneti->xni_rx_ring_va; | | 582 | uop.host_addr = xneti->xni_rx_ring_va; |
583 | uop.handle = xneti->xni_rx_ring_handle; | | 583 | uop.handle = xneti->xni_rx_ring_handle; |
584 | uop.dev_bus_addr = 0; | | 584 | uop.dev_bus_addr = 0; |
585 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, | | 585 | err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, |
586 | &uop, 1); | | 586 | &uop, 1); |
587 | if (err) | | 587 | if (err) |
588 | aprint_error_ifnet(&xneti->xni_if, | | 588 | aprint_error_ifnet(&xneti->xni_if, |
589 | "unmap_grant_ref failed: %d\n", err); | | 589 | "unmap_grant_ref failed: %d\n", err); |
590 | } | | 590 | } |
591 | | | 591 | |
592 | err1: | | 592 | err1: |
593 | /* free rings VA space */ | | 593 | /* free rings VA space */ |
594 | if (xneti->xni_rx_ring_va != 0) | | 594 | if (xneti->xni_rx_ring_va != 0) |
595 | uvm_km_free(kernel_map, xneti->xni_rx_ring_va, | | 595 | uvm_km_free(kernel_map, xneti->xni_rx_ring_va, |
596 | PAGE_SIZE, UVM_KMF_VAONLY); | | 596 | PAGE_SIZE, UVM_KMF_VAONLY); |
597 | | | 597 | |
598 | if (xneti->xni_tx_ring_va != 0) | | 598 | if (xneti->xni_tx_ring_va != 0) |
599 | uvm_km_free(kernel_map, xneti->xni_tx_ring_va, | | 599 | uvm_km_free(kernel_map, xneti->xni_tx_ring_va, |
600 | PAGE_SIZE, UVM_KMF_VAONLY); | | 600 | PAGE_SIZE, UVM_KMF_VAONLY); |
601 | | | 601 | |
602 | softint_disestablish(xneti->xni_softintr); | | 602 | softint_disestablish(xneti->xni_softintr); |
603 | return -1; | | 603 | return -1; |
604 | | | 604 | |
605 | } | | 605 | } |
606 | | | 606 | |
607 | static void | | 607 | static void |
608 | xennetback_frontend_changed(void *arg, XenbusState new_state) | | 608 | xennetback_frontend_changed(void *arg, XenbusState new_state) |
609 | { | | 609 | { |
610 | struct xnetback_instance *xneti = arg; | | 610 | struct xnetback_instance *xneti = arg; |
611 | struct xenbus_device *xbusd = xneti->xni_xbusd; | | 611 | struct xenbus_device *xbusd = xneti->xni_xbusd; |
612 | | | 612 | |
613 | XENPRINTF(("%s: new state %d\n", xneti->xni_if.if_xname, new_state)); | | 613 | XENPRINTF(("%s: new state %d\n", xneti->xni_if.if_xname, new_state)); |
614 | switch(new_state) { | | 614 | switch(new_state) { |
615 | case XenbusStateInitialising: | | 615 | case XenbusStateInitialising: |
616 | case XenbusStateInitialised: | | 616 | case XenbusStateInitialised: |
617 | break; | | 617 | break; |
618 | | | 618 | |
619 | case XenbusStateConnected: | | 619 | case XenbusStateConnected: |
620 | if (xneti->xni_status == CONNECTED) | | 620 | if (xneti->xni_status == CONNECTED) |
621 | break; | | 621 | break; |
622 | if (xennetback_connect(xneti) == 0) | | 622 | if (xennetback_connect(xneti) == 0) |
623 | xenbus_switch_state(xbusd, NULL, XenbusStateConnected); | | 623 | xenbus_switch_state(xbusd, NULL, XenbusStateConnected); |
624 | break; | | 624 | break; |
625 | | | 625 | |
626 | case XenbusStateClosing: | | 626 | case XenbusStateClosing: |
627 | xneti->xni_status = DISCONNECTING; | | 627 | xneti->xni_status = DISCONNECTING; |
628 | xneti->xni_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | | 628 | xneti->xni_if.if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
629 | xneti->xni_if.if_timer = 0; | | 629 | xneti->xni_if.if_timer = 0; |
630 | xenbus_switch_state(xbusd, NULL, XenbusStateClosing); | | 630 | xenbus_switch_state(xbusd, NULL, XenbusStateClosing); |
631 | break; | | 631 | break; |
632 | | | 632 | |
633 | case XenbusStateClosed: | | 633 | case XenbusStateClosed: |
634 | /* otherend_changed() should handle it for us */ | | 634 | /* otherend_changed() should handle it for us */ |
635 | panic("xennetback_frontend_changed: closed\n"); | | 635 | panic("xennetback_frontend_changed: closed\n"); |
636 | case XenbusStateUnknown: | | 636 | case XenbusStateUnknown: |
637 | case XenbusStateInitWait: | | 637 | case XenbusStateInitWait: |
638 | default: | | 638 | default: |
639 | aprint_error("%s: invalid frontend state %d\n", | | 639 | aprint_error("%s: invalid frontend state %d\n", |
640 | xneti->xni_if.if_xname, new_state); | | 640 | xneti->xni_if.if_xname, new_state); |
641 | break; | | 641 | break; |
642 | } | | 642 | } |
643 | return; | | 643 | return; |
644 | | | 644 | |
645 | } | | 645 | } |
646 | | | 646 | |
647 | /* lookup a xneti based on domain id and interface handle */ | | 647 | /* lookup a xneti based on domain id and interface handle */ |
648 | static bool | | 648 | static bool |
649 | xnetif_lookup(domid_t dom, uint32_t handle) | | 649 | xnetif_lookup(domid_t dom, uint32_t handle) |
650 | { | | 650 | { |
651 | struct xnetback_instance *xneti; | | 651 | struct xnetback_instance *xneti; |
652 | bool found = false; | | 652 | bool found = false; |
653 | | | 653 | |
654 | mutex_enter(&xnetback_lock); | | 654 | mutex_enter(&xnetback_lock); |
655 | SLIST_FOREACH(xneti, &xnetback_instances, next) { | | 655 | SLIST_FOREACH(xneti, &xnetback_instances, next) { |
656 | if (xneti->xni_domid == dom && xneti->xni_handle == handle) { | | 656 | if (xneti->xni_domid == dom && xneti->xni_handle == handle) { |
657 | found = true; | | 657 | found = true; |
658 | break; | | 658 | break; |
659 | } | | 659 | } |
660 | } | | 660 | } |
661 | mutex_exit(&xnetback_lock); | | 661 | mutex_exit(&xnetback_lock); |
662 | | | 662 | |
663 | return found; | | 663 | return found; |
664 | } | | 664 | } |
665 | | | 665 | |
666 | /* get a page to replace a mbuf cluster page given to a domain */ | | 666 | /* get a page to replace a mbuf cluster page given to a domain */ |
667 | static int | | 667 | static int |
668 | xennetback_get_mcl_page(paddr_t *map) | | 668 | xennetback_get_mcl_page(paddr_t *map) |
669 | { | | 669 | { |
670 | if (mcl_pages_alloc < 0) { | | 670 | if (mcl_pages_alloc < 0) { |
671 | /* | | 671 | /* |
672 | * we exhausted our allocation. We can't allocate new ones yet | | 672 | * we exhausted our allocation. We can't allocate new ones yet |
673 | * because the current pages may not have been loaned to | | 673 | * because the current pages may not have been loaned to |
674 | * the remote domain yet. We have to let the caller do this. | | 674 | * the remote domain yet. We have to let the caller do this. |
675 | */ | | 675 | */ |
676 | return -1; | | 676 | return -1; |
677 | } | | 677 | } |
678 | | | 678 | |
679 | *map = ((paddr_t)mcl_pages[mcl_pages_alloc]) << PAGE_SHIFT; | | 679 | *map = ((paddr_t)mcl_pages[mcl_pages_alloc]) << PAGE_SHIFT; |
680 | mcl_pages_alloc--; | | 680 | mcl_pages_alloc--; |
681 | return 0; | | 681 | return 0; |
682 | } | | 682 | } |
683 | | | 683 | |
684 | static void | | 684 | static void |
685 | xennetback_get_new_mcl_pages(void) | | 685 | xennetback_get_new_mcl_pages(void) |
686 | { | | 686 | { |
687 | int nb_pages; | | 687 | int nb_pages; |
688 | struct xen_memory_reservation res; | | 688 | struct xen_memory_reservation res; |
689 | | | 689 | |
690 | /* get some new pages. */ | | 690 | /* get some new pages. */ |
691 | set_xen_guest_handle(res.extent_start, mcl_pages); | | 691 | set_xen_guest_handle(res.extent_start, mcl_pages); |
692 | res.nr_extents = NB_XMIT_PAGES_BATCH; | | 692 | res.nr_extents = NB_XMIT_PAGES_BATCH; |
693 | res.extent_order = 0; | | 693 | res.extent_order = 0; |
694 | res.address_bits = 0; | | 694 | res.address_bits = 0; |
695 | res.domid = DOMID_SELF; | | 695 | res.domid = DOMID_SELF; |
696 | | | 696 | |
697 | nb_pages = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res); | | 697 | nb_pages = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res); |
698 | if (nb_pages <= 0) { | | 698 | if (nb_pages <= 0) { |
699 | printf("xennetback: can't get new mcl pages (%d)\n", nb_pages); | | 699 | printf("xennetback: can't get new mcl pages (%d)\n", nb_pages); |
700 | return; | | 700 | return; |
701 | } | | 701 | } |
702 | if (nb_pages != NB_XMIT_PAGES_BATCH) | | 702 | if (nb_pages != NB_XMIT_PAGES_BATCH) |
703 | printf("xennetback: got only %d new mcl pages\n", nb_pages); | | 703 | printf("xennetback: got only %d new mcl pages\n", nb_pages); |
704 | | | 704 | |
705 | mcl_pages_alloc = nb_pages - 1; | | 705 | mcl_pages_alloc = nb_pages - 1; |
706 | } | | 706 | } |
707 | | | 707 | |
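| /* | | | /* |
|  * Queue a response with the given id and status on the TX ring, | | |  * Queue a response with the given id and status on the TX ring, |
|  * notifying the frontend through the event channel if requested. | | |  * notifying the frontend through the event channel if requested. |
|  */ | | |  */ |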
708 | static inline void | | 708 | static inline void |
709 | xennetback_tx_response(struct xnetback_instance *xneti, int id, int status) | | 709 | xennetback_tx_response(struct xnetback_instance *xneti, int id, int status) |
710 | { | | 710 | { |
711 | RING_IDX resp_prod; | | 711 | RING_IDX resp_prod; |
712 | netif_tx_response_t *txresp; | | 712 | netif_tx_response_t *txresp; |
713 | int do_event; | | 713 | int do_event; |
714 | | | 714 | |
715 | resp_prod = xneti->xni_txring.rsp_prod_pvt; | | 715 | resp_prod = xneti->xni_txring.rsp_prod_pvt; |
716 | txresp = RING_GET_RESPONSE(&xneti->xni_txring, resp_prod); | | 716 | txresp = RING_GET_RESPONSE(&xneti->xni_txring, resp_prod); |
717 | | | 717 | |
718 | txresp->id = id; | | 718 | txresp->id = id; |
719 | txresp->status = status; | | 719 | txresp->status = status; |
720 | xneti->xni_txring.rsp_prod_pvt++; | | 720 | xneti->xni_txring.rsp_prod_pvt++; |
721 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event); | | 721 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event); |
722 | if (do_event) { | | 722 | if (do_event) { |
723 | XENPRINTF(("%s send event\n", xneti->xni_if.if_xname)); | | 723 | XENPRINTF(("%s send event\n", xneti->xni_if.if_xname)); |
724 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); | | 724 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); |
725 | } | | 725 | } |
726 | } | | 726 | } |
727 | | | 727 | |
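| /* | | | /* |
|  * Validate a TX request: it must hold at least an Ethernet header, | | |  * Validate a TX request: it must hold at least an Ethernet header, |
|  * must not cross a page boundary, and must not exceed the maximum | | |  * must not cross a page boundary, and must not exceed the maximum |
|  * frame length (including the VLAN tag if ETHERCAP_VLAN_MTU is | | |  * frame length (including the VLAN tag if ETHERCAP_VLAN_MTU is |
|  * enabled). Returns an error string, or NULL if the packet is sane. | | |  * enabled). Returns an error string, or NULL if the packet is sane. |
|  */ | | |  */ |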
728 | static inline const char * | | 728 | static inline const char * |
729 | xennetback_tx_check_packet(const netif_tx_request_t *txreq, int vlan) | | 729 | xennetback_tx_check_packet(const netif_tx_request_t *txreq, int vlan) |
730 | { | | 730 | { |
731 | if (__predict_false(txreq->size < ETHER_HDR_LEN)) | | 731 | if (__predict_false(txreq->size < ETHER_HDR_LEN)) |
732 | return "too small"; | | 732 | return "too small"; |
733 | | | 733 | |
734 | if (__predict_false(txreq->offset + txreq->size > PAGE_SIZE)) | | 734 | if (__predict_false(txreq->offset + txreq->size > PAGE_SIZE)) |
735 | return "crossing a page boundary"; | | 735 | return "crossing a page boundary"; |
736 | | | 736 | |
737 | int maxlen = ETHER_MAX_LEN - ETHER_CRC_LEN; | | 737 | int maxlen = ETHER_MAX_LEN - ETHER_CRC_LEN; |
738 | if (vlan) | | 738 | if (vlan) |
739 | maxlen += ETHER_VLAN_ENCAP_LEN; | | 739 | maxlen += ETHER_VLAN_ENCAP_LEN; |
740 | if (__predict_false(txreq->size > maxlen)) | | 740 | if (__predict_false(txreq->size > maxlen)) |
741 | return "too big"; | | 741 | return "too big"; |
742 | | | 742 | |
743 | return NULL; | | 743 | return NULL; |
744 | } | | 744 | } |
745 | | | 745 | |
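| /* | | | /* |
|  * TX interrupt handler: consume requests from the shared TX ring, | | |  * TX interrupt handler: consume requests from the shared TX ring, |
|  * validate each packet, map the granted page read-only and wrap it | | |  * validate each packet, map the granted page read-only and wrap it |
|  * in an mbuf; failures are reported back to the frontend with | | |  * in an mbuf; failures are reported back to the frontend with |
|  * NETIF_RSP_ERROR or NETIF_RSP_DROPPED responses. | | |  * NETIF_RSP_ERROR or NETIF_RSP_DROPPED responses. |
|  */ | | |  */ |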
746 | static int | | 746 | static int |
747 | xennetback_evthandler(void *arg) | | 747 | xennetback_evthandler(void *arg) |
748 | { | | 748 | { |
749 | struct xnetback_instance *xneti = arg; | | 749 | struct xnetback_instance *xneti = arg; |
750 | struct ifnet *ifp = &xneti->xni_if; | | 750 | struct ifnet *ifp = &xneti->xni_if; |
751 | netif_tx_request_t txreq; | | 751 | netif_tx_request_t txreq; |
752 | struct xni_pkt *pkt; | | 752 | struct xni_pkt *pkt; |
753 | vaddr_t pkt_va; | | 753 | vaddr_t pkt_va; |
754 | struct mbuf *m; | | 754 | struct mbuf *m; |
755 | int receive_pending, err; | | 755 | int receive_pending, err; |
756 | RING_IDX req_cons; | | 756 | RING_IDX req_cons; |
757 | | | 757 | |
758 | XENPRINTF(("xennetback_evthandler ")); | | 758 | XENPRINTF(("xennetback_evthandler ")); |
759 | req_cons = xneti->xni_txring.req_cons; | | 759 | req_cons = xneti->xni_txring.req_cons; |
760 | xen_rmb(); | | 760 | xen_rmb(); |
761 | while (1) { | | 761 | while (1) { |
762 | xen_rmb(); /* be sure to read the request before updating */ | | 762 | xen_rmb(); /* be sure to read the request before updating */ |
763 | xneti->xni_txring.req_cons = req_cons; | | 763 | xneti->xni_txring.req_cons = req_cons; |
764 | xen_wmb(); | | 764 | xen_wmb(); |
765 | RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, | | 765 | RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, |
766 | receive_pending); | | 766 | receive_pending); |
767 | if (receive_pending == 0) | | 767 | if (receive_pending == 0) |
768 | break; | | 768 | break; |
769 | RING_COPY_REQUEST(&xneti->xni_txring, req_cons, &txreq); | | 769 | RING_COPY_REQUEST(&xneti->xni_txring, req_cons, &txreq); |
770 | xen_rmb(); | | 770 | xen_rmb(); |
771 | XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname, | | 771 | XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname, |
772 | txreq.size)); | | 772 | txreq.size)); |
773 | req_cons++; | | 773 | req_cons++; |
774 | if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != | | 774 | if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != |
775 | (IFF_UP | IFF_RUNNING))) { | | 775 | (IFF_UP | IFF_RUNNING))) { |
776 | /* interface not up, drop */ | | 776 | /* interface not up, drop */ |
777 | xennetback_tx_response(xneti, txreq.id, | | 777 | xennetback_tx_response(xneti, txreq.id, |
778 | NETIF_RSP_DROPPED); | | 778 | NETIF_RSP_DROPPED); |
779 | continue; | | 779 | continue; |
780 | } | | 780 | } |
781 | | | 781 | |
782 | /* | | 782 | /* |
783 | * Do some sanity checks, and map the packet's page. | | 783 | * Do some sanity checks, and map the packet's page. |
784 | */ | | 784 | */ |
785 | const char *msg = xennetback_tx_check_packet(&txreq, | | 785 | const char *msg = xennetback_tx_check_packet(&txreq, |
786 | xneti->xni_ec.ec_capenable & ETHERCAP_VLAN_MTU); | | 786 | xneti->xni_ec.ec_capenable & ETHERCAP_VLAN_MTU); |
787 | if (msg) { | | 787 | if (msg) { |
788 | printf("%s: packet with size %d is %s\n", | | 788 | printf("%s: packet with size %d is %s\n", |
789 | ifp->if_xname, txreq.size, msg); | | 789 | ifp->if_xname, txreq.size, msg); |
790 | xennetback_tx_response(xneti, txreq.id, | | 790 | xennetback_tx_response(xneti, txreq.id, |
791 | NETIF_RSP_ERROR); | | 791 | NETIF_RSP_ERROR); |
792 | ifp->if_ierrors++; | | 792 | ifp->if_ierrors++; |
793 | continue; | | 793 | continue; |
794 | } | | 794 | } |
795 | | | 795 | |
796 | /* get a mbuf for this packet */ | | 796 | /* get a mbuf for this packet */ |
797 | MGETHDR(m, M_DONTWAIT, MT_DATA); | | 797 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
798 | if (__predict_false(m == NULL)) { | | 798 | if (__predict_false(m == NULL)) { |
799 | static struct timeval lasttime; | | 799 | static struct timeval lasttime; |
800 | if (ratecheck(&lasttime, &xni_pool_errintvl)) | | 800 | if (ratecheck(&lasttime, &xni_pool_errintvl)) |
801 | printf("%s: mbuf alloc failed\n", | | 801 | printf("%s: mbuf alloc failed\n", |
802 | ifp->if_xname); | | 802 | ifp->if_xname); |
803 | xennetback_tx_response(xneti, txreq.id, | | 803 | xennetback_tx_response(xneti, txreq.id, |
804 | NETIF_RSP_DROPPED); | | 804 | NETIF_RSP_DROPPED); |
805 | ifp->if_ierrors++; | | 805 | ifp->if_ierrors++; |
806 | continue; | | 806 | continue; |
807 | } | | 807 | } |
808 | | | 808 | |
809 | XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n", | | 809 | XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n", |
810 | xneti->xni_if.if_xname, txreq.offset, | | 810 | xneti->xni_if.if_xname, txreq.offset, |
811 | txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons))); | | 811 | txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons))); |
812 | | | 812 | |
813 | pkt = pool_get(&xni_pkt_pool, PR_NOWAIT); | | 813 | pkt = pool_get(&xni_pkt_pool, PR_NOWAIT); |
814 | if (__predict_false(pkt == NULL)) { | | 814 | if (__predict_false(pkt == NULL)) { |
815 | static struct timeval lasttime; | | 815 | static struct timeval lasttime; |
816 | if (ratecheck(&lasttime, &xni_pool_errintvl)) | | 816 | if (ratecheck(&lasttime, &xni_pool_errintvl)) |
817 | printf("%s: xnbpkt alloc failed\n", | | 817 | printf("%s: xnbpkt alloc failed\n", |
818 | ifp->if_xname); | | 818 | ifp->if_xname); |
819 | xennetback_tx_response(xneti, txreq.id, | | 819 | xennetback_tx_response(xneti, txreq.id, |
820 | NETIF_RSP_DROPPED); | | 820 | NETIF_RSP_DROPPED); |
821 | ifp->if_ierrors++; | | 821 | ifp->if_ierrors++; |
822 | m_freem(m); | | 822 | m_freem(m); |
823 | continue; | | 823 | continue; |
824 | } | | 824 | } |
825 | err = xen_shm_map(1, xneti->xni_domid, &txreq.gref, &pkt_va, | | 825 | err = xen_shm_map(1, xneti->xni_domid, &txreq.gref, &pkt_va, |
826 | &pkt->pkt_handle, XSHM_RO); | | 826 | &pkt->pkt_handle, XSHM_RO); |
827 | if (__predict_false(err == ENOMEM)) { | | 827 | if (__predict_false(err == ENOMEM)) { |
828 | xennetback_tx_response(xneti, txreq.id, | | 828 | xennetback_tx_response(xneti, txreq.id, |
829 | NETIF_RSP_DROPPED); | | 829 | NETIF_RSP_DROPPED); |
830 | ifp->if_ierrors++; | | 830 | ifp->if_ierrors++; |
831 | pool_put(&xni_pkt_pool, pkt); | | 831 | pool_put(&xni_pkt_pool, pkt); |
832 | m_freem(m); | | 832 | m_freem(m); |
833 | continue; | | 833 | continue; |
834 | } | | 834 | } |
835 | | | 835 | |
836 | if (__predict_false(err)) { | | 836 | if (__predict_false(err)) { |
837 | printf("%s: mapping foreing page failed: %d\n", | | 837 | printf("%s: mapping foreing page failed: %d\n", |
838 | xneti->xni_if.if_xname, err); | | 838 | xneti->xni_if.if_xname, err); |
839 | xennetback_tx_response(xneti, txreq.id, | | 839 | xennetback_tx_response(xneti, txreq.id, |
840 | NETIF_RSP_ERROR); | | 840 | NETIF_RSP_ERROR); |
841 | ifp->if_ierrors++; | | 841 | ifp->if_ierrors++; |
842 | pool_put(&xni_pkt_pool, pkt); | | 842 | pool_put(&xni_pkt_pool, pkt); |
843 | m_freem(m); | | 843 | m_freem(m); |
844 | continue; | | 844 | continue; |
845 | } | | 845 | } |
846 | | | 846 | |
847 | if ((ifp->if_flags & IFF_PROMISC) == 0) { | | 847 | if ((ifp->if_flags & IFF_PROMISC) == 0) { |
848 | struct ether_header *eh = | | 848 | struct ether_header *eh = |
849 | (void*)(pkt_va + txreq.offset); | | 849 | (void*)(pkt_va + txreq.offset); |
850 | if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && | | 850 | if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && |
851 | memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, | | 851 | memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, |
852 | ETHER_ADDR_LEN) != 0) { | | 852 | ETHER_ADDR_LEN) != 0) { |
853 | xni_pkt_unmap(pkt, pkt_va); | | 853 | xni_pkt_unmap(pkt, pkt_va); |
854 | m_freem(m); | | 854 | m_freem(m); |
855 | xennetback_tx_response(xneti, txreq.id, | | 855 | xennetback_tx_response(xneti, txreq.id, |
856 | NETIF_RSP_OKAY); | | 856 | NETIF_RSP_OKAY); |
857 | continue; /* packet is not for us */ | | 857 | continue; /* packet is not for us */ |
858 | } | | 858 | } |
859 | } | | 859 | } |
860 | | | 860 | |
861 | #ifdef notyet | | 861 | #ifdef notyet |
862 | /* | | 862 | /* |
863 | * A lot of work is needed in the TCP stack to handle read-only | | 863 | * A lot of work is needed in the TCP stack to handle read-only |
864 | * ext storage, so always copy for now. | | 864 | * ext storage, so always copy for now. |
865 | */ | | 865 | */ |
866 | if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) == | | 866 | if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) == |
867 | (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) | | 867 | (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) |
868 | #else | | 868 | #else |
869 | if (1) | | 869 | if (1) |
870 | #endif /* notyet */ | | 870 | #endif /* notyet */ |
871 | { | | 871 | { |
872 | /* | | 872 | /* |
873 | * This is the last TX buffer. Copy the data and | | 873 | * This is the last TX buffer. Copy the data and |
874 | * ack it. Delaying it until the mbuf is | | 874 | * ack it. Delaying it until the mbuf is |
875 | * freed will stall transmit. | | 875 | * freed will stall transmit. |
876 | */ | | 876 | */ |
877 | m->m_len = uimin(MHLEN, txreq.size); | | 877 | m->m_len = uimin(MHLEN, txreq.size); |
878 | m->m_pkthdr.len = 0; | | 878 | m->m_pkthdr.len = 0; |
879 | m_copyback(m, 0, txreq.size, | | 879 | m_copyback(m, 0, txreq.size, |
880 | (void *)(pkt_va + txreq.offset)); | | 880 | (void *)(pkt_va + txreq.offset)); |
881 | xni_pkt_unmap(pkt, pkt_va); | | 881 | xni_pkt_unmap(pkt, pkt_va); |
882 | if (m->m_pkthdr.len < txreq.size) { | | 882 | if (m->m_pkthdr.len < txreq.size) { |
883 | ifp->if_ierrors++; | | 883 | ifp->if_ierrors++; |
884 | m_freem(m); | | 884 | m_freem(m); |
885 | xennetback_tx_response(xneti, txreq.id, | | 885 | xennetback_tx_response(xneti, txreq.id, |
886 | NETIF_RSP_DROPPED); | | 886 | NETIF_RSP_DROPPED); |
887 | continue; | | 887 | continue; |
888 | } | | 888 | } |
889 | xennetback_tx_response(xneti, txreq.id, | | 889 | xennetback_tx_response(xneti, txreq.id, |
890 | NETIF_RSP_OKAY); | | 890 | NETIF_RSP_OKAY); |
891 | } else { | | 891 | } else { |
892 | | | 892 | |
893 | pkt->pkt_id = txreq.id; | | 893 | pkt->pkt_id = txreq.id; |
894 | pkt->pkt_xneti = xneti; | | 894 | pkt->pkt_xneti = xneti; |
895 | | | 895 | |
896 | MEXTADD(m, pkt_va + txreq.offset, | | 896 | MEXTADD(m, pkt_va + txreq.offset, |
897 | txreq.size, M_DEVBUF, xennetback_tx_free, pkt); | | 897 | txreq.size, M_DEVBUF, xennetback_tx_free, pkt); |
898 | m->m_pkthdr.len = m->m_len = txreq.size; | | 898 | m->m_pkthdr.len = m->m_len = txreq.size; |
899 | m->m_flags |= M_EXT_ROMAP; | | 899 | m->m_flags |= M_EXT_ROMAP; |
900 | } | | 900 | } |
901 | if ((txreq.flags & NETTXF_csum_blank) != 0) { | | 901 | if ((txreq.flags & NETTXF_csum_blank) != 0) { |
902 | xennet_checksum_fill(&m); | | 902 | xennet_checksum_fill(&m); |
903 | if (m == NULL) { | | 903 | if (m == NULL) { |
904 | ifp->if_ierrors++; | | 904 | ifp->if_ierrors++; |
905 | continue; | | 905 | continue; |
906 | } | | 906 | } |
907 | } | | 907 | } |
908 | m_set_rcvif(m, ifp); | | 908 | m_set_rcvif(m, ifp); |
909 | | | 909 | |
910 | if_percpuq_enqueue(ifp->if_percpuq, m); | | 910 | if_percpuq_enqueue(ifp->if_percpuq, m); |
911 | } | | 911 | } |
912 | xen_rmb(); /* be sure to read the request before updating pointer */ | | 912 | xen_rmb(); /* be sure to read the request before updating pointer */ |
913 | xneti->xni_txring.req_cons = req_cons; | | 913 | xneti->xni_txring.req_cons = req_cons; |
914 | xen_wmb(); | | 914 | xen_wmb(); |
915 | /* check to see if we can transmit more packets */ | | 915 | /* check to see if we can transmit more packets */ |
916 | softint_schedule(xneti->xni_softintr); | | 916 | softint_schedule(xneti->xni_softintr); |
917 | | | 917 | |
918 | return 1; | | 918 | return 1; |
919 | } | | 919 | } |
920 | | | 920 | |
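/*
 * Editor's sketch: the loop above is the generic Xen shared-ring consumer
 * idiom from Xen's public ring.h.  A minimal, self-contained form of it,
 * with "my_ring" as a hypothetical back ring (not part of this driver):
 */
static void
my_consume_requests(netif_tx_back_ring_t *my_ring)
{
	netif_tx_request_t req;
	int work_to_do;

	for (;;) {
		RING_FINAL_CHECK_FOR_REQUESTS(my_ring, work_to_do);
		if (work_to_do == 0)
			break;
		/* copy the request: the frontend may rewrite its slot */
		RING_COPY_REQUEST(my_ring, my_ring->req_cons, &req);
		xen_rmb();	/* read the request before advancing */
		my_ring->req_cons++;
		/* ... validate and process req, then post a response ... */
	}
}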
921 | static void | | 921 | static void |
922 | xennetback_tx_free(struct mbuf *m, void *va, size_t size, void *arg) | | 922 | xennetback_tx_free(struct mbuf *m, void *va, size_t size, void *arg) |
923 | { | | 923 | { |
924 | int s = splnet(); | | 924 | int s = splnet(); |
925 | struct xni_pkt *pkt = arg; | | 925 | struct xni_pkt *pkt = arg; |
926 | struct xnetback_instance *xneti = pkt->pkt_xneti; | | 926 | struct xnetback_instance *xneti = pkt->pkt_xneti; |
927 | | | 927 | |
928 | XENPRINTF(("xennetback_tx_free\n")); | | 928 | XENPRINTF(("xennetback_tx_free\n")); |
929 | | | 929 | |
930 | xennetback_tx_response(xneti, pkt->pkt_id, NETIF_RSP_OKAY); | | 930 | xennetback_tx_response(xneti, pkt->pkt_id, NETIF_RSP_OKAY); |
931 | | | 931 | |
932 | xni_pkt_unmap(pkt, (vaddr_t)va & ~PAGE_MASK); | | 932 | xni_pkt_unmap(pkt, (vaddr_t)va & ~PAGE_MASK); |
933 | | | 933 | |
934 | if (m) | | 934 | if (m) |
935 | pool_cache_put(mb_cache, m); | | 935 | pool_cache_put(mb_cache, m); |
936 | splx(s); | | 936 | splx(s); |
937 | } | | 937 | } |
938 | | | 938 | |
939 | static int | | 939 | static int |
940 | xennetback_ifioctl(struct ifnet *ifp, u_long cmd, void *data) | | 940 | xennetback_ifioctl(struct ifnet *ifp, u_long cmd, void *data) |
941 | { | | 941 | { |
942 | //struct xnetback_instance *xneti = ifp->if_softc; | | 942 | //struct xnetback_instance *xneti = ifp->if_softc; |
943 | //struct ifreq *ifr = (struct ifreq *)data; | | 943 | //struct ifreq *ifr = (struct ifreq *)data; |
944 | int s, error; | | 944 | int s, error; |
945 | | | 945 | |
946 | s = splnet(); | | 946 | s = splnet(); |
947 | error = ether_ioctl(ifp, cmd, data); | | 947 | error = ether_ioctl(ifp, cmd, data); |
948 | if (error == ENETRESET) | | 948 | if (error == ENETRESET) |
949 | error = 0; | | 949 | error = 0; |
950 | splx(s); | | 950 | splx(s); |
951 | return error; | | 951 | return error; |
952 | } | | 952 | } |
953 | | | 953 | |
954 | static void | | 954 | static void |
955 | xennetback_ifstart(struct ifnet *ifp) | | 955 | xennetback_ifstart(struct ifnet *ifp) |
956 | { | | 956 | { |
957 | struct xnetback_instance *xneti = ifp->if_softc; | | 957 | struct xnetback_instance *xneti = ifp->if_softc; |
958 | | | 958 | |
959 | /* | | 959 | /* |
960 | * The Xen communication channel is much more efficient if we can | | 960 | * The Xen communication channel is much more efficient if we can |
961 | * schedule a batch of packets for the domain. To achieve this, we | | 961 | * schedule a batch of packets for the domain. To achieve this, we |
962 | * schedule a soft interrupt, and just return. This way, the network | | 962 | * schedule a soft interrupt, and just return. This way, the network |
963 | * stack will enqueue all pending mbufs in the interface's send queue | | 963 | * stack will enqueue all pending mbufs in the interface's send queue |
964 | * before it is processed by the soft interrupt handler. | | 964 | * before it is processed by the soft interrupt handler. |
965 | */ | | 965 | */ |
966 | softint_schedule(xneti->xni_softintr); | | 966 | softint_schedule(xneti->xni_softintr); |
967 | } | | 967 | } |
968 | | | 968 | |
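/*
 * Editor's sketch: the deferred-start pattern above assumes the softint
 * was established at attach time, roughly as below (assumption: the
 * attach code is outside this excerpt; error handling is illustrative).
 */
xneti->xni_softintr = softint_establish(SOFTINT_NET,
    xennetback_ifsoftstart_copy, xneti);
if (xneti->xni_softintr == NULL) {
	aprint_error("%s: can't establish soft interrupt\n", ifp->if_xname);
	return ENOMEM;
}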
969 | static void | | 969 | static void |
970 | xennetback_ifsoftstart_transfer(void *arg) | | 970 | xennetback_ifsoftstart_transfer(void *arg) |
971 | { | | 971 | { |
972 | struct xnetback_instance *xneti = arg; | | 972 | struct xnetback_instance *xneti = arg; |
973 | struct ifnet *ifp = &xneti->xni_if; | | 973 | struct ifnet *ifp = &xneti->xni_if; |
974 | struct mbuf *m; | | 974 | struct mbuf *m; |
975 | vaddr_t xmit_va; | | 975 | vaddr_t xmit_va; |
976 | paddr_t xmit_pa; | | 976 | paddr_t xmit_pa; |
977 | paddr_t xmit_ma; | | 977 | paddr_t xmit_ma; |
978 | paddr_t newp_ma = 0; /* XXX gcc */ | | 978 | paddr_t newp_ma = 0; /* XXX gcc */ |
979 | int i, j, nppitems; | | 979 | int i, j, nppitems; |
980 | mmu_update_t *mmup; | | 980 | mmu_update_t *mmup; |
981 | multicall_entry_t *mclp; | | 981 | multicall_entry_t *mclp; |
982 | netif_rx_response_t *rxresp; | | 982 | netif_rx_response_t *rxresp; |
983 | netif_rx_request_t rxreq; | | 983 | netif_rx_request_t rxreq; |
984 | RING_IDX req_prod, resp_prod; | | 984 | RING_IDX req_prod, resp_prod; |
985 | int do_event = 0; | | 985 | int do_event = 0; |
986 | gnttab_transfer_t *gop; | | 986 | gnttab_transfer_t *gop; |
987 | int id, offset; | | 987 | int id, offset; |
988 | | | 988 | |
989 | XENPRINTF(("xennetback_ifsoftstart_transfer ")); | | 989 | XENPRINTF(("xennetback_ifsoftstart_transfer ")); |
990 | int s = splnet(); | | 990 | int s = splnet(); |
991 | if (__predict_false( | | 991 | if (__predict_false( |
992 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { | | 992 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { |
993 | splx(s); | | 993 | splx(s); |
994 | return; | | 994 | return; |
995 | } | | 995 | } |
996 | | | 996 | |
997 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { | | 997 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { |
998 | XENPRINTF(("pkt\n")); | | 998 | XENPRINTF(("pkt\n")); |
999 | req_prod = xneti->xni_rxring.sring->req_prod; | | 999 | req_prod = xneti->xni_rxring.sring->req_prod; |
1000 | resp_prod = xneti->xni_rxring.rsp_prod_pvt; | | 1000 | resp_prod = xneti->xni_rxring.rsp_prod_pvt; |
1001 | xen_rmb(); | | 1001 | xen_rmb(); |
1002 | | | 1002 | |
1003 | mmup = xstart_mmu; | | 1003 | mmup = xstart_mmu; |
1004 | mclp = xstart_mcl; | | 1004 | mclp = xstart_mcl; |
1005 | gop = xstart_gop_transfer; | | 1005 | gop = xstart_gop_transfer; |
1006 | for (nppitems = 0, i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) { | | 1006 | for (nppitems = 0, i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) { |
1007 | XENPRINTF(("have a packet\n")); | | 1007 | XENPRINTF(("have a packet\n")); |
1008 | IFQ_POLL(&ifp->if_snd, m); | | 1008 | IFQ_POLL(&ifp->if_snd, m); |
1009 | if (__predict_false(m == NULL)) | | 1009 | if (__predict_false(m == NULL)) |
1010 | panic("xennetback_ifstart: IFQ_POLL"); | | 1010 | panic("xennetback_ifstart: IFQ_POLL"); |
1011 | if (__predict_false( | | 1011 | if (__predict_false( |
1012 | req_prod == xneti->xni_rxring.req_cons || | | 1012 | req_prod == xneti->xni_rxring.req_cons || |
1013 | xneti->xni_rxring.req_cons - resp_prod == | | 1013 | xneti->xni_rxring.req_cons - resp_prod == |
1014 | NET_RX_RING_SIZE)) { | | 1014 | NET_RX_RING_SIZE)) { |
1015 | /* out of ring space */ | | 1015 | /* out of ring space */ |
1016 | XENPRINTF(("xennetback_ifstart: ring full " | | 1016 | XENPRINTF(("xennetback_ifstart: ring full " |
1017 | "req_prod 0x%x req_cons 0x%x resp_prod " | | 1017 | "req_prod 0x%x req_cons 0x%x resp_prod " |
1018 | "0x%x\n", | | 1018 | "0x%x\n", |
1019 | req_prod, xneti->xni_rxring.req_cons, | | 1019 | req_prod, xneti->xni_rxring.req_cons, |
1020 | resp_prod)); | | 1020 | resp_prod)); |
1021 | ifp->if_timer = 1; | | 1021 | ifp->if_timer = 1; |
1022 | break; | | 1022 | break; |
1023 | } | | 1023 | } |
1024 | if (__predict_false(i == NB_XMIT_PAGES_BATCH)) | | 1024 | if (__predict_false(i == NB_XMIT_PAGES_BATCH)) |
1025 | break; /* we filled the array */ | | 1025 | break; /* we filled the array */ |
1026 | if (__predict_false( | | 1026 | if (__predict_false( |
1027 | xennetback_get_mcl_page(&newp_ma) != 0)) | | 1027 | xennetback_get_mcl_page(&newp_ma) != 0)) |
1028 | break; /* out of memory */ | | 1028 | break; /* out of memory */ |
1029 | if ((m->m_flags & M_EXT_CLUSTER) != 0 && | | 1029 | if ((m->m_flags & M_EXT_CLUSTER) != 0 && |
1030 | !M_READONLY(m) && MCLBYTES == PAGE_SIZE) { | | 1030 | !M_READONLY(m) && MCLBYTES == PAGE_SIZE) { |
1031 | /* we can give this page away */ | | 1031 | /* we can give this page away */ |
1032 | xmit_pa = m->m_ext.ext_paddr; | | 1032 | xmit_pa = m->m_ext.ext_paddr; |
1033 | xmit_ma = xpmap_ptom(xmit_pa); | | 1033 | xmit_ma = xpmap_ptom(xmit_pa); |
1034 | xmit_va = (vaddr_t)m->m_ext.ext_buf; | | 1034 | xmit_va = (vaddr_t)m->m_ext.ext_buf; |
1035 | KASSERT(xmit_pa != M_PADDR_INVALID); | | 1035 | KASSERT(xmit_pa != M_PADDR_INVALID); |
1036 | KASSERT((xmit_va & PAGE_MASK) == 0); | | 1036 | KASSERT((xmit_va & PAGE_MASK) == 0); |
1037 | offset = m->m_data - m->m_ext.ext_buf; | | 1037 | offset = m->m_data - m->m_ext.ext_buf; |
1038 | } else { | | 1038 | } else { |
1039 | /* we have to copy the packet */ | | 1039 | /* we have to copy the packet */ |
1040 | xmit_va = (vaddr_t)pool_cache_get_paddr( | | 1040 | xmit_va = (vaddr_t)pool_cache_get_paddr( |
1041 | xmit_pages_cache, PR_NOWAIT, &xmit_pa); | | 1041 | xmit_pages_cache, PR_NOWAIT, &xmit_pa); |
1042 | if (__predict_false(xmit_va == 0)) | | 1042 | if (__predict_false(xmit_va == 0)) |
1043 | break; /* out of memory */ | | 1043 | break; /* out of memory */ |
1044 | | | 1044 | |
1045 | KASSERT(xmit_pa != POOL_PADDR_INVALID); | | 1045 | KASSERT(xmit_pa != POOL_PADDR_INVALID); |
1046 | xmit_ma = xpmap_ptom(xmit_pa); | | 1046 | xmit_ma = xpmap_ptom(xmit_pa); |
1047 | XENPRINTF(("xennetback_get_xmit_page: got va " | | 1047 | XENPRINTF(("xennetback_get_xmit_page: got va " |
1048 | "0x%x ma 0x%x\n", (u_int)xmit_va, | | 1048 | "0x%x ma 0x%x\n", (u_int)xmit_va, |
1049 | (u_int)xmit_ma)); | | 1049 | (u_int)xmit_ma)); |
1050 | m_copydata(m, 0, m->m_pkthdr.len, | | 1050 | m_copydata(m, 0, m->m_pkthdr.len, |
1051 | (char *)xmit_va + LINUX_REQUESTED_OFFSET); | | 1051 | (char *)xmit_va + LINUX_REQUESTED_OFFSET); |
1052 | offset = LINUX_REQUESTED_OFFSET; | | 1052 | offset = LINUX_REQUESTED_OFFSET; |
1053 | pages_pool_free[nppitems].va = xmit_va; | | 1053 | pages_pool_free[nppitems].va = xmit_va; |
1054 | pages_pool_free[nppitems].pa = xmit_pa; | | 1054 | pages_pool_free[nppitems].pa = xmit_pa; |
1055 | nppitems++; | | 1055 | nppitems++; |
1056 | } | | 1056 | } |
1057 | /* start filling ring */ | | 1057 | /* start filling ring */ |
1058 | RING_COPY_REQUEST(&xneti->xni_rxring, | | 1058 | RING_COPY_REQUEST(&xneti->xni_rxring, |
1059 | xneti->xni_rxring.req_cons, &rxreq); | | 1059 | xneti->xni_rxring.req_cons, &rxreq); |
1060 | gop->ref = rxreq.gref; | | 1060 | gop->ref = rxreq.gref; |
1061 | id = rxreq.id; | | 1061 | id = rxreq.id; |
1062 | xen_rmb(); | | 1062 | xen_rmb(); |
1063 | xneti->xni_rxring.req_cons++; | | 1063 | xneti->xni_rxring.req_cons++; |
1064 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, | | 1064 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, |
1065 | resp_prod); | | 1065 | resp_prod); |
1066 | rxresp->id = id; | | 1066 | rxresp->id = id; |
1067 | rxresp->offset = offset; | | 1067 | rxresp->offset = offset; |
1068 | rxresp->status = m->m_pkthdr.len; | | 1068 | rxresp->status = m->m_pkthdr.len; |
1069 | if ((m->m_pkthdr.csum_flags & | | 1069 | if ((m->m_pkthdr.csum_flags & |
1070 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { | | 1070 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { |
1071 | rxresp->flags = NETRXF_csum_blank; | | 1071 | rxresp->flags = NETRXF_csum_blank; |
1072 | } else { | | 1072 | } else { |
1073 | rxresp->flags = 0; | | 1073 | rxresp->flags = 0; |
1074 | } | | 1074 | } |
1075 | /* | | 1075 | /* |
1076 | * Transfer the page containing the packet to the | | 1076 | * Transfer the page containing the packet to the |
1077 | * remote domain, and map newp in its place. | | 1077 | * remote domain, and map newp in its place. |
1078 | */ | | 1078 | */ |
1079 | xpmap_ptom_map(xmit_pa, newp_ma); | | 1079 | xpmap_ptom_map(xmit_pa, newp_ma); |
1080 | MULTI_update_va_mapping(mclp, xmit_va, | | 1080 | MULTI_update_va_mapping(mclp, xmit_va, |
1081 | newp_ma | PG_V | PG_RW | PG_U | PG_M | xpmap_pg_nx, 0); | | 1081 | newp_ma | PG_V | PG_RW | PG_U | PG_M | xpmap_pg_nx, 0); |
1082 | mclp++; | | 1082 | mclp++; |
1083 | gop->mfn = xmit_ma >> PAGE_SHIFT; | | 1083 | gop->mfn = xmit_ma >> PAGE_SHIFT; |
1084 | gop->domid = xneti->xni_domid; | | 1084 | gop->domid = xneti->xni_domid; |
1085 | gop++; | | 1085 | gop++; |
1086 | | | 1086 | |
1087 | mmup->ptr = newp_ma | MMU_MACHPHYS_UPDATE; | | 1087 | mmup->ptr = newp_ma | MMU_MACHPHYS_UPDATE; |
1088 | mmup->val = xmit_pa >> PAGE_SHIFT; | | 1088 | mmup->val = xmit_pa >> PAGE_SHIFT; |
1089 | mmup++; | | 1089 | mmup++; |
1090 | | | 1090 | |
1091 | /* done with this packet */ | | 1091 | /* done with this packet */ |
1092 | IFQ_DEQUEUE(&ifp->if_snd, m); | | 1092 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1093 | mbufs_sent[i] = m; | | 1093 | mbufs_sent[i] = m; |
1094 | resp_prod++; | | 1094 | resp_prod++; |
1095 | i++; /* this packet has been queued */ | | 1095 | i++; /* this packet has been queued */ |
1096 | ifp->if_opackets++; | | 1096 | ifp->if_opackets++; |
1097 | bpf_mtap(ifp, m, BPF_D_OUT); | | 1097 | bpf_mtap(ifp, m, BPF_D_OUT); |
1098 | } | | 1098 | } |
1099 | if (i != 0) { | | 1099 | if (i != 0) { |
1100 | /* | | 1100 | /* |
1101 | * We may have allocated buffers which have entries | | 1101 | * We may have allocated buffers which have entries |
1102 | * outstanding in the page update queue -- make sure | | 1102 | * outstanding in the page update queue -- make sure |
1103 | * we flush those first! | | 1103 | * we flush those first! |
1104 | */ | | 1104 | */ |
1105 | int svm = splvm(); | | 1105 | int svm = splvm(); |
1106 | xpq_flush_queue(); | | 1106 | xpq_flush_queue(); |
1107 | splx(svm); | | 1107 | splx(svm); |
1108 | mclp[-1].args[MULTI_UVMFLAGS_INDEX] = | | 1108 | mclp[-1].args[MULTI_UVMFLAGS_INDEX] = |
1109 | UVMF_TLB_FLUSH|UVMF_ALL; | | 1109 | UVMF_TLB_FLUSH|UVMF_ALL; |
1110 | mclp->op = __HYPERVISOR_mmu_update; | | 1110 | mclp->op = __HYPERVISOR_mmu_update; |
1111 | mclp->args[0] = (unsigned long)xstart_mmu; | | 1111 | mclp->args[0] = (unsigned long)xstart_mmu; |
1112 | mclp->args[1] = i; | | 1112 | mclp->args[1] = i; |
1113 | mclp->args[2] = 0; | | 1113 | mclp->args[2] = 0; |
1114 | mclp->args[3] = DOMID_SELF; | | 1114 | mclp->args[3] = DOMID_SELF; |
1115 | mclp++; | | 1115 | mclp++; |
1116 | /* update the MMU */ | | 1116 | /* update the MMU */ |
1117 | if (HYPERVISOR_multicall(xstart_mcl, i + 1) != 0) { | | 1117 | if (HYPERVISOR_multicall(xstart_mcl, i + 1) != 0) { |
1118 | panic("%s: HYPERVISOR_multicall failed", | | 1118 | panic("%s: HYPERVISOR_multicall failed", |
1119 | ifp->if_xname); | | 1119 | ifp->if_xname); |
1120 | } | | 1120 | } |
1121 | for (j = 0; j < i + 1; j++) { | | 1121 | for (j = 0; j < i + 1; j++) { |
1122 | if (xstart_mcl[j].result != 0) { | | 1122 | if (xstart_mcl[j].result != 0) { |
1123 | printf("%s: xstart_mcl[%d] " | | 1123 | printf("%s: xstart_mcl[%d] " |
1124 | "failed (%lu)\n", ifp->if_xname, | | 1124 | "failed (%lu)\n", ifp->if_xname, |
1125 | j, xstart_mcl[j].result); | | 1125 | j, xstart_mcl[j].result); |
1126 | printf("%s: req_prod %u req_cons " | | 1126 | printf("%s: req_prod %u req_cons " |
1127 | "%u rsp_prod %u rsp_prod_pvt %u " | | 1127 | "%u rsp_prod %u rsp_prod_pvt %u " |
1128 | "i %u\n", | | 1128 | "i %u\n", |
1129 | ifp->if_xname, | | 1129 | ifp->if_xname, |
1130 | xneti->xni_rxring.sring->req_prod, | | 1130 | xneti->xni_rxring.sring->req_prod, |
1131 | xneti->xni_rxring.req_cons, | | 1131 | xneti->xni_rxring.req_cons, |
1132 | xneti->xni_rxring.sring->rsp_prod, | | 1132 | xneti->xni_rxring.sring->rsp_prod, |
1133 | xneti->xni_rxring.rsp_prod_pvt, | | 1133 | xneti->xni_rxring.rsp_prod_pvt, |
1134 | i); | | 1134 | i); |
1135 | } | | 1135 | } |
1136 | } | | 1136 | } |
1137 | if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, | | 1137 | if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, |
1138 | xstart_gop_transfer, i) != 0) { | | 1138 | xstart_gop_transfer, i) != 0) { |
1139 | panic("%s: GNTTABOP_transfer failed", | | 1139 | panic("%s: GNTTABOP_transfer failed", |
1140 | ifp->if_xname); | | 1140 | ifp->if_xname); |
1141 | } | | 1141 | } |
1142 | | | 1142 | |
1143 | for (j = 0; j < i; j++) { | | 1143 | for (j = 0; j < i; j++) { |
1144 | if (xstart_gop_transfer[j].status != GNTST_okay) { | | 1144 | if (xstart_gop_transfer[j].status != GNTST_okay) { |
1145 | printf("%s GNTTABOP_transfer[%d] %d\n", | | 1145 | printf("%s GNTTABOP_transfer[%d] %d\n", |
1146 | ifp->if_xname, | | 1146 | ifp->if_xname, |
1147 | j, xstart_gop_transfer[j].status); | | 1147 | j, xstart_gop_transfer[j].status); |
1148 | printf("%s: req_prod %u req_cons " | | 1148 | printf("%s: req_prod %u req_cons " |
1149 | "%u rsp_prod %u rsp_prod_pvt %u " | | 1149 | "%u rsp_prod %u rsp_prod_pvt %u " |
1150 | "i %d\n", | | 1150 | "i %d\n", |
1151 | ifp->if_xname, | | 1151 | ifp->if_xname, |
1152 | xneti->xni_rxring.sring->req_prod, | | 1152 | xneti->xni_rxring.sring->req_prod, |
1153 | xneti->xni_rxring.req_cons, | | 1153 | xneti->xni_rxring.req_cons, |
1154 | xneti->xni_rxring.sring->rsp_prod, | | 1154 | xneti->xni_rxring.sring->rsp_prod, |
1155 | xneti->xni_rxring.rsp_prod_pvt, | | 1155 | xneti->xni_rxring.rsp_prod_pvt, |
1156 | i); | | 1156 | i); |
1157 | rxresp = RING_GET_RESPONSE( | | 1157 | rxresp = RING_GET_RESPONSE( |
1158 | &xneti->xni_rxring, | | 1158 | &xneti->xni_rxring, |
1159 | xneti->xni_rxring.rsp_prod_pvt + j); | | 1159 | xneti->xni_rxring.rsp_prod_pvt + j); |
1160 | rxresp->status = NETIF_RSP_ERROR; | | 1160 | rxresp->status = NETIF_RSP_ERROR; |
1161 | } | | 1161 | } |
1162 | } | | 1162 | } |
1163 | | | 1163 | |
1164 | /* update pointer */ | | 1164 | /* update pointer */ |
1165 | KASSERT( | | 1165 | KASSERT( |
1166 | xneti->xni_rxring.rsp_prod_pvt + i == resp_prod); | | 1166 | xneti->xni_rxring.rsp_prod_pvt + i == resp_prod); |
1167 | xneti->xni_rxring.rsp_prod_pvt = resp_prod; | | 1167 | xneti->xni_rxring.rsp_prod_pvt = resp_prod; |
1168 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY( | | 1168 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY( |
1169 | &xneti->xni_rxring, j); | | 1169 | &xneti->xni_rxring, j); |
1170 | if (j) | | 1170 | if (j) |
1171 | do_event = 1; | | 1171 | do_event = 1; |
1172 | /* now we can free the mbufs */ | | 1172 | /* now we can free the mbufs */ |
1173 | for (j = 0; j < i; j++) { | | 1173 | for (j = 0; j < i; j++) { |
1174 | m_freem(mbufs_sent[j]); | | 1174 | m_freem(mbufs_sent[j]); |
1175 | } | | 1175 | } |
1176 | for (j = 0; j < nppitems; j++) { | | 1176 | for (j = 0; j < nppitems; j++) { |
1177 | pool_cache_put_paddr(xmit_pages_cache, | | 1177 | pool_cache_put_paddr(xmit_pages_cache, |
1178 | (void *)pages_pool_free[j].va, | | 1178 | (void *)pages_pool_free[j].va, |
1179 | pages_pool_free[j].pa); | | 1179 | pages_pool_free[j].pa); |
1180 | } | | 1180 | } |
1181 | } | | 1181 | } |
1182 | /* send event */ | | 1182 | /* send event */ |
1183 | if (do_event) { | | 1183 | if (do_event) { |
1184 | xen_rmb(); | | 1184 | xen_rmb(); |
1185 | XENPRINTF(("%s receive event\n", | | 1185 | XENPRINTF(("%s receive event\n", |
1186 | xneti->xni_if.if_xname)); | | 1186 | xneti->xni_if.if_xname)); |
1187 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); | | 1187 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); |
1188 | do_event = 0; | | 1188 | do_event = 0; |
1189 | } | | 1189 | } |
1190 | /* check if we need to get back some pages */ | | 1190 | /* check if we need to get back some pages */ |
1191 | if (mcl_pages_alloc < 0) { | | 1191 | if (mcl_pages_alloc < 0) { |
1192 | xennetback_get_new_mcl_pages(); | | 1192 | xennetback_get_new_mcl_pages(); |
1193 | if (mcl_pages_alloc < 0) { | | 1193 | if (mcl_pages_alloc < 0) { |
1194 | /* | | 1194 | /* |
1195 | * set up the watchdog to try again, because | | 1195 | * set up the watchdog to try again, because |
1196 | * xennetback_ifstart() will never be called | | 1196 | * xennetback_ifstart() will never be called |
1197 | * again if the queue is full. | | 1197 | * again if the queue is full. |
1198 | */ | | 1198 | */ |
1199 | printf("xennetback_ifstart: no mcl_pages\n"); | | 1199 | printf("xennetback_ifstart: no mcl_pages\n"); |
1200 | ifp->if_timer = 1; | | 1200 | ifp->if_timer = 1; |
1201 | break; | | 1201 | break; |
1202 | } | | 1202 | } |
1203 | } | | 1203 | } |
1204 | /* | | 1204 | /* |
1205 | * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() | | 1205 | * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() |
1206 | * here, as the frontend doesn't notify when adding | | 1206 | * here, as the frontend doesn't notify when adding |
1207 | * requests anyway | | 1207 | * requests anyway |
1208 | */ | | 1208 | */ |
1209 | if (__predict_false( | | 1209 | if (__predict_false( |
1210 | !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { | | 1210 | !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { |
1211 | /* ring full */ | | 1211 | /* ring full */ |
1212 | break; | | 1212 | break; |
1213 | } | | 1213 | } |
1214 | } | | 1214 | } |
1215 | splx(s); | | 1215 | splx(s); |
1216 | } | | 1216 | } |
1217 | | | 1217 | |
| | | 1218 | /* |
| | | 1219 | * Slightly different from m_dup(): for some reason m_dup() can return |
| | | 1220 | * a chain where the data area can cross a page boundary. |
| | | 1221 | * This doesn't happen with the function below. |
| | | 1222 | */ |
| | | 1223 | static struct mbuf * |
| | | 1224 | xennetback_copymbuf(struct mbuf *m) |
| | | 1225 | { |
| | | 1226 | struct mbuf *new_m; |
| | | 1227 | |
| | | 1228 | MGETHDR(new_m, M_DONTWAIT, MT_DATA); |
| | | 1229 | if (__predict_false(new_m == NULL)) { |
| | | 1230 | return NULL; |
| | | 1231 | } |
| | | 1232 | if (m->m_pkthdr.len > MHLEN) { |
| | | 1233 | MCLGET(new_m, M_DONTWAIT); |
| | | 1234 | if (__predict_false( |
| | | 1235 | (new_m->m_flags & M_EXT) == 0)) { |
| | | 1236 | m_freem(new_m); |
| | | 1237 | return NULL; |
| | | 1238 | } |
| | | 1239 | } |
| | | 1240 | m_copydata(m, 0, m->m_pkthdr.len, |
| | | 1241 | mtod(new_m, void *)); |
| | | 1242 | new_m->m_len = new_m->m_pkthdr.len = |
| | | 1243 | m->m_pkthdr.len; |
| | | 1244 | return new_m; |
| | | 1245 | } |
| | | 1246 | |
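/*
 * Editor's sketch of the invariant xennetback_copymbuf() provides for the
 * copy path below: the whole packet ends up in a single page (data in a
 * plain mbuf never crosses a page boundary, and with MCLBYTES == PAGE_SIZE
 * a cluster is exactly one aligned page), so one grant copy per packet is
 * enough.  Illustrative assertions only, not part of the driver:
 */
KASSERT(new_m->m_len == new_m->m_pkthdr.len);
KASSERT(((uintptr_t)mtod(new_m, char *) & PAGE_MASK) +
    (u_int)new_m->m_pkthdr.len <= PAGE_SIZE);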
| | | 1247 | /* return physical page address and offset of data area of an mbuf */ |
| | | 1248 | static void |
| | | 1249 | xennetback_mbuf_addr(struct mbuf *m, paddr_t *xmit_pa, int *offset) |
| | | 1250 | { |
| | | 1251 | switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) { |
| | | 1252 | case M_EXT|M_EXT_CLUSTER: |
| | | 1253 | KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID); |
| | | 1254 | *xmit_pa = m->m_ext.ext_paddr; |
| | | 1255 | *offset = m->m_data - m->m_ext.ext_buf; |
| | | 1256 | break; |
| | | 1257 | case 0: |
| | | 1258 | KASSERT(m->m_paddr != M_PADDR_INVALID); |
| | | 1259 | *xmit_pa = m->m_paddr; |
| | | 1260 | *offset = M_BUFOFFSET(m) + |
| | | 1261 | (m->m_data - M_BUFADDR(m)); |
| | | 1262 | break; |
| | | 1263 | default: |
| | | 1264 | if (__predict_false( |
| | | 1265 | !pmap_extract(pmap_kernel(), |
| | | 1266 | (vaddr_t)m->m_data, xmit_pa))) { |
| | | 1267 | panic("xennetback_mbuf_addr: no pa"); |
| | | 1268 | } |
| | | 1269 | *offset = 0; |
| | | 1270 | break; |
| | | 1271 | } |
| | | 1272 | *offset += (*xmit_pa & ~PG_FRAME); |
| | | 1273 | *xmit_pa = (*xmit_pa & PG_FRAME); |
| | | 1274 | } |
| | | 1275 | |
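/*
 * Editor's sketch: typical use of xennetback_mbuf_addr(), mirroring the
 * copy path below; "gop" stands for a hypothetical gnttab_copy_t being
 * filled in.
 */
paddr_t xmit_pa;
int offset;

xennetback_mbuf_addr(m, &xmit_pa, &offset);
/* xmit_pa is page-aligned (masked with PG_FRAME); offset is in-page */
gop->source.u.gmfn = xpmap_ptom(xmit_pa) >> PAGE_SHIFT;
gop->source.offset = offset;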
1218 | static void | | 1276 | static void |
1219 | xennetback_ifsoftstart_copy(void *arg) | | 1277 | xennetback_ifsoftstart_copy(void *arg) |
1220 | { | | 1278 | { |
1221 | struct xnetback_instance *xneti = arg; | | 1279 | struct xnetback_instance *xneti = arg; |
1222 | struct ifnet *ifp = &xneti->xni_if; | | 1280 | struct ifnet *ifp = &xneti->xni_if; |
1223 | struct mbuf *m, *new_m; | | 1281 | struct mbuf *m, *new_m; |
1224 | paddr_t xmit_pa; | | 1282 | paddr_t xmit_pa; |
1225 | paddr_t xmit_ma; | | 1283 | paddr_t xmit_ma; |
1226 | int i, j; | | 1284 | int i, j; |
1227 | netif_rx_response_t *rxresp; | | 1285 | netif_rx_response_t *rxresp; |
1228 | netif_rx_request_t rxreq; | | 1286 | netif_rx_request_t rxreq; |
1229 | RING_IDX req_prod, resp_prod; | | 1287 | RING_IDX req_prod, resp_prod; |
1230 | int do_event = 0; | | 1288 | int do_event = 0; |
1231 | gnttab_copy_t *gop; | | 1289 | gnttab_copy_t *gop; |
1232 | int id, offset; | | 1290 | int id, offset; |
| | | 1291 | bool abort; |
1233 | | | 1292 | |
1234 | XENPRINTF(("xennetback_ifsoftstart_copy ")); | | 1293 | XENPRINTF(("xennetback_ifsoftstart_copy ")); |
1235 | int s = splnet(); | | 1294 | int s = splnet(); |
1236 | if (__predict_false( | | 1295 | if (__predict_false( |
1237 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { | | 1296 | (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { |
1238 | splx(s); | | 1297 | splx(s); |
1239 | return; | | 1298 | return; |
1240 | } | | 1299 | } |
1241 | | | 1300 | |
1242 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { | | 1301 | while (!IFQ_IS_EMPTY(&ifp->if_snd)) { |
1243 | XENPRINTF(("pkt\n")); | | 1302 | XENPRINTF(("pkt\n")); |
1244 | req_prod = xneti->xni_rxring.sring->req_prod; | | 1303 | req_prod = xneti->xni_rxring.sring->req_prod; |
1245 | resp_prod = xneti->xni_rxring.rsp_prod_pvt; | | 1304 | resp_prod = xneti->xni_rxring.rsp_prod_pvt; |
1246 | xen_rmb(); | | 1305 | xen_rmb(); |
1247 | | | 1306 | |
1248 | gop = xstart_gop_copy; | | 1307 | gop = xstart_gop_copy; |
| | | 1308 | abort = false; |
1249 | for (i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) { | | 1309 | for (i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) { |
1250 | XENPRINTF(("have a packet\n")); | | 1310 | XENPRINTF(("have a packet\n")); |
1251 | IFQ_POLL(&ifp->if_snd, m); | | 1311 | IFQ_POLL(&ifp->if_snd, m); |
1252 | if (__predict_false(m == NULL)) | | 1312 | if (__predict_false(m == NULL)) |
1253 | panic("xennetback_ifstart: IFQ_POLL"); | | 1313 | panic("xennetback_ifstart: IFQ_POLL"); |
1254 | if (__predict_false( | | 1314 | if (__predict_false( |
1255 | req_prod == xneti->xni_rxring.req_cons || | | 1315 | req_prod == xneti->xni_rxring.req_cons || |
1256 | xneti->xni_rxring.req_cons - resp_prod == | | 1316 | xneti->xni_rxring.req_cons - resp_prod == |
1257 | NET_RX_RING_SIZE)) { | | 1317 | NET_RX_RING_SIZE)) { |
1258 | /* out of ring space */ | | 1318 | /* out of ring space */ |
1259 | XENPRINTF(("xennetback_ifstart: ring full " | | 1319 | XENPRINTF(("xennetback_ifstart: ring full " |
1260 | "req_prod 0x%x req_cons 0x%x resp_prod " | | 1320 | "req_prod 0x%x req_cons 0x%x resp_prod " |
1261 | "0x%x\n", | | 1321 | "0x%x\n", |
1262 | req_prod, xneti->xni_rxring.req_cons, | | 1322 | req_prod, xneti->xni_rxring.req_cons, |
1263 | resp_prod)); | | 1323 | resp_prod)); |
1264 | ifp->if_timer = 1; | | 1324 | abort = true; |
1265 | break; | | 1325 | break; |
1266 | } | | 1326 | } |
1267 | if (__predict_false(i == NB_XMIT_PAGES_BATCH)) | | 1327 | if (__predict_false(i == NB_XMIT_PAGES_BATCH)) |
1268 | break; /* we filled the array */ | | 1328 | break; /* we filled the array */ |
1269 | switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) { | | 1329 | |
1270 | case M_EXT|M_EXT_CLUSTER: | | 1330 | xennetback_mbuf_addr(m, &xmit_pa, &offset); |
1271 | KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID); | | | |
1272 | xmit_pa = m->m_ext.ext_paddr; | | | |
1273 | offset = m->m_data - m->m_ext.ext_buf; | | | |
1274 | break; | | | |
1275 | case 0: | | | |
1276 | KASSERT(m->m_paddr != M_PADDR_INVALID); | | | |
1277 | xmit_pa = m->m_paddr; | | | |
1278 | offset = M_BUFOFFSET(m) + | | | |
1279 | (m->m_data - M_BUFADDR(m)); | | | |
1280 | break; | | | |
1281 | default: | | | |
1282 | if (__predict_false( | | | |
1283 | !pmap_extract(pmap_kernel(), | | | |
1284 | (vaddr_t)m->m_data, &xmit_pa))) { | | | |
1285 | panic("xennet_start: no pa"); | | | |
1286 | } | | | |
1287 | offset = 0; | | | |
1288 | break; | | | |
1289 | } | | | |
1290 | offset += (xmit_pa & ~PG_FRAME); | | | |
1291 | xmit_pa = (xmit_pa & PG_FRAME); | | | |
1292 | if (m->m_pkthdr.len != m->m_len || | | 1331 | if (m->m_pkthdr.len != m->m_len || |
1293 | (offset + m->m_pkthdr.len) > PAGE_SIZE) { | | 1332 | (offset + m->m_pkthdr.len) > PAGE_SIZE) { |
1294 | MGETHDR(new_m, M_DONTWAIT, MT_DATA); | | 1333 | new_m = xennetback_copymbuf(m); |
1295 | if (__predict_false(new_m == NULL)) { | | 1334 | if (__predict_false(new_m == NULL)) { |
1296 | printf("%s: cannot allocate new mbuf\n", | | 1335 | static struct timeval lasttime; |
1297 | ifp->if_xname); | | 1336 | if (ratecheck(&lasttime, &xni_pool_errintvl)) |
| | | 1337 | printf("%s: cannot allocate new mbuf\n", |
| | | 1338 | ifp->if_xname); |
| | | 1339 | abort = true; |
1298 | break; | | 1340 | break; |
1299 | } | | | |
1300 | if (m->m_pkthdr.len > MHLEN) { | | | |
1301 | MCLGET(new_m, M_DONTWAIT); | | | |
1302 | if (__predict_false( | | | |
1303 | (new_m->m_flags & M_EXT) == 0)) { | | | |
1304 | XENPRINTF(( | | | |
1305 | "%s: no mbuf cluster\n", | | | |
1306 | ifp->if_xname)); | | | |
1307 | m_freem(new_m); | | | |
1308 | break; | | | |
1309 | } | | | |
1310 | xmit_pa = new_m->m_ext.ext_paddr; | | | |
1311 | offset = new_m->m_data - | | | |
1312 | new_m->m_ext.ext_buf; | | | |
1313 | } else { | | 1341 | } else { |
1314 | xmit_pa = new_m->m_paddr; | | 1342 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1315 | offset = M_BUFOFFSET(new_m) + | | 1343 | m_freem(m); |
1316 | (new_m->m_data - M_BUFADDR(new_m)); | | 1344 | m = new_m; |
| | | 1345 | xennetback_mbuf_addr(m, |
| | | 1346 | &xmit_pa, &offset); |
1317 | } | | 1347 | } |
1318 | offset += (xmit_pa & ~PG_FRAME); | | | |
1319 | xmit_pa = (xmit_pa & PG_FRAME); | | | |
1320 | m_copydata(m, 0, m->m_pkthdr.len, | | | |
1321 | mtod(new_m, void *)); | | | |
1322 | new_m->m_len = new_m->m_pkthdr.len = | | | |
1323 | m->m_pkthdr.len; | | | |
1324 | IFQ_DEQUEUE(&ifp->if_snd, m); | | | |
1325 | m_freem(m); | | | |
1326 | m = new_m; | | | |
1327 | } else { | | 1348 | } else { |
1328 | IFQ_DEQUEUE(&ifp->if_snd, m); | | 1349 | IFQ_DEQUEUE(&ifp->if_snd, m); |
1329 | } | | 1350 | } |
1330 | | | 1351 | |
1331 | KASSERT(xmit_pa != POOL_PADDR_INVALID); | | 1352 | KASSERT(xmit_pa != POOL_PADDR_INVALID); |
1332 | KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE); | | 1353 | KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE); |
1333 | xmit_ma = xpmap_ptom(xmit_pa); | | 1354 | xmit_ma = xpmap_ptom(xmit_pa); |
1334 | /* start filling ring */ | | 1355 | /* start filling ring */ |
1335 | gop->flags = GNTCOPY_dest_gref; | | 1356 | gop->flags = GNTCOPY_dest_gref; |
1336 | gop->source.offset = offset; | | 1357 | gop->source.offset = offset; |
1337 | gop->source.domid = DOMID_SELF; | | 1358 | gop->source.domid = DOMID_SELF; |
1338 | gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT; | | 1359 | gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT; |
1339 | | | 1360 | |
1340 | RING_COPY_REQUEST(&xneti->xni_rxring, | | 1361 | RING_COPY_REQUEST(&xneti->xni_rxring, |
1341 | xneti->xni_rxring.req_cons, &rxreq); | | 1362 | xneti->xni_rxring.req_cons, &rxreq); |
1342 | gop->dest.u.ref = rxreq.gref; | | 1363 | gop->dest.u.ref = rxreq.gref; |
1343 | gop->dest.offset = 0; | | 1364 | gop->dest.offset = 0; |
1344 | gop->dest.domid = xneti->xni_domid; | | 1365 | gop->dest.domid = xneti->xni_domid; |
1345 | | | 1366 | |
1346 | gop->len = m->m_pkthdr.len; | | 1367 | gop->len = m->m_pkthdr.len; |
1347 | gop++; | | 1368 | gop++; |
1348 | | | 1369 | |
1349 | id = rxreq.id; | | 1370 | id = rxreq.id; |
1350 | xen_rmb(); | | 1371 | xen_rmb(); |
1351 | xneti->xni_rxring.req_cons++; | | 1372 | xneti->xni_rxring.req_cons++; |
1352 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, | | 1373 | rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, |
1353 | resp_prod); | | 1374 | resp_prod); |
1354 | rxresp->id = id; | | 1375 | rxresp->id = id; |
1355 | rxresp->offset = 0; | | 1376 | rxresp->offset = 0; |
1356 | rxresp->status = m->m_pkthdr.len; | | 1377 | rxresp->status = m->m_pkthdr.len; |
1357 | if ((m->m_pkthdr.csum_flags & | | 1378 | if ((m->m_pkthdr.csum_flags & |
1358 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { | | 1379 | (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { |
1359 | rxresp->flags = NETRXF_csum_blank; | | 1380 | rxresp->flags = NETRXF_csum_blank; |
1360 | } else { | | 1381 | } else { |
1361 | rxresp->flags = 0; | | 1382 | rxresp->flags = 0; |
1362 | } | | 1383 | } |
1363 | | | 1384 | |
1364 | mbufs_sent[i] = m; | | 1385 | mbufs_sent[i] = m; |
1365 | resp_prod++; | | 1386 | resp_prod++; |
1366 | i++; /* this packet has been queued */ | | 1387 | i++; /* this packet has been queued */ |
1367 | ifp->if_opackets++; | | 1388 | ifp->if_opackets++; |
1368 | bpf_mtap(ifp, m, BPF_D_OUT); | | 1389 | bpf_mtap(ifp, m, BPF_D_OUT); |
1369 | } | | 1390 | } |
1370 | if (i != 0) { | | 1391 | if (i != 0) { |
1371 | if (HYPERVISOR_grant_table_op(GNTTABOP_copy, | | 1392 | if (HYPERVISOR_grant_table_op(GNTTABOP_copy, |
1372 | xstart_gop_copy, i) != 0) { | | 1393 | xstart_gop_copy, i) != 0) { |
1373 | panic("%s: GNTTABOP_copy failed", | | 1394 | panic("%s: GNTTABOP_copy failed", |
1374 | ifp->if_xname); | | 1395 | ifp->if_xname); |
1375 | } | | 1396 | } |
1376 | | | 1397 | |
1377 | for (j = 0; j < i; j++) { | | 1398 | for (j = 0; j < i; j++) { |
1378 | if (xstart_gop_copy[j].status != GNTST_okay) { | | 1399 | if (xstart_gop_copy[j].status != GNTST_okay) { |
1379 | printf("%s GNTTABOP_copy[%d] %d\n", | | 1400 | printf("%s GNTTABOP_copy[%d] %d\n", |
1380 | ifp->if_xname, | | 1401 | ifp->if_xname, |
1381 | j, xstart_gop_copy[j].status); | | 1402 | j, xstart_gop_copy[j].status); |
1382 | printf("%s: req_prod %u req_cons " | | 1403 | printf("%s: req_prod %u req_cons " |
1383 | "%u rsp_prod %u rsp_prod_pvt %u " | | 1404 | "%u rsp_prod %u rsp_prod_pvt %u " |
1384 | "i %d\n", | | 1405 | "i %d\n", |
1385 | ifp->if_xname, | | 1406 | ifp->if_xname, |
1386 | xneti->xni_rxring.sring->req_prod, | | 1407 | xneti->xni_rxring.sring->req_prod, |
1387 | xneti->xni_rxring.req_cons, | | 1408 | xneti->xni_rxring.req_cons, |
1388 | xneti->xni_rxring.sring->rsp_prod, | | 1409 | xneti->xni_rxring.sring->rsp_prod, |
1389 | xneti->xni_rxring.rsp_prod_pvt, | | 1410 | xneti->xni_rxring.rsp_prod_pvt, |
1390 | i); | | 1411 | i); |
1391 | rxresp = RING_GET_RESPONSE( | | 1412 | rxresp = RING_GET_RESPONSE( |
1392 | &xneti->xni_rxring, | | 1413 | &xneti->xni_rxring, |
1393 | xneti->xni_rxring.rsp_prod_pvt + j); | | 1414 | xneti->xni_rxring.rsp_prod_pvt + j); |
1394 | rxresp->status = NETIF_RSP_ERROR; | | 1415 | rxresp->status = NETIF_RSP_ERROR; |
1395 | } | | 1416 | } |
1396 | } | | 1417 | } |
1397 | | | 1418 | |
1398 | /* update pointer */ | | 1419 | /* update pointer */ |
1399 | KASSERT( | | 1420 | KASSERT( |
1400 | xneti->xni_rxring.rsp_prod_pvt + i == resp_prod); | | 1421 | xneti->xni_rxring.rsp_prod_pvt + i == resp_prod); |
1401 | xneti->xni_rxring.rsp_prod_pvt = resp_prod; | | 1422 | xneti->xni_rxring.rsp_prod_pvt = resp_prod; |
1402 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY( | | 1423 | RING_PUSH_RESPONSES_AND_CHECK_NOTIFY( |
1403 | &xneti->xni_rxring, j); | | 1424 | &xneti->xni_rxring, j); |
1404 | if (j) | | 1425 | if (j) |
1405 | do_event = 1; | | 1426 | do_event = 1; |
1406 | /* now we can free the mbufs */ | | 1427 | /* now we can free the mbufs */ |
1407 | for (j = 0; j < i; j++) { | | 1428 | for (j = 0; j < i; j++) { |
1408 | m_freem(mbufs_sent[j]); | | 1429 | m_freem(mbufs_sent[j]); |
1409 | } | | 1430 | } |
1410 | } | | 1431 | } |
1411 | /* send event */ | | 1432 | /* send event */ |
1412 | if (do_event) { | | 1433 | if (do_event) { |
1413 | xen_rmb(); | | 1434 | xen_rmb(); |
1414 | XENPRINTF(("%s receive event\n", | | 1435 | XENPRINTF(("%s receive event\n", |
1415 | xneti->xni_if.if_xname)); | | 1436 | xneti->xni_if.if_xname)); |
1416 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); | | 1437 | hypervisor_notify_via_evtchn(xneti->xni_evtchn); |
1417 | do_event = 0; | | 1438 | do_event = 0; |
1418 | } | | 1439 | } |
1419 | /* | | 1440 | /* |
1420 | * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() | | 1441 | * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() |
1421 | * here, as the frontend doesn't notify when adding | | 1442 | * here, as the frontend doesn't notify when adding |
1422 | * requests anyway | | 1443 | * requests anyway |
1423 | */ | | 1444 | */ |
1424 | if (__predict_false( | | 1445 | if (__predict_false(abort || |
1425 | !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { | | 1446 | !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { |
1426 | /* ring full */ | | 1447 | /* ring full */ |
| | | 1448 | ifp->if_timer = 1; |
1427 | break; | | 1449 | break; |
1428 | } | | 1450 | } |
1429 | } | | 1451 | } |
1430 | splx(s); | | 1452 | splx(s); |
1431 | } | | 1453 | } |
1432 | | | 1454 | |
1433 | static void | | 1455 | static void |
1434 | xennetback_ifwatchdog(struct ifnet *ifp) | | 1456 | xennetback_ifwatchdog(struct ifnet *ifp) |
1435 | { | | 1457 | { |
1436 | /* | | 1458 | /* |
1437 | * We can get into the following situation: transmit stalls because | | 1459 | * We can get into the following situation: transmit stalls because |
1438 | * the ring is full while the ifq is also full. | | 1460 | * the ring is full while the ifq is also full. |
1439 | * | | 1461 | * |
1440 | * In this case (as, unfortunately, we don't get an interrupt from xen | | 1462 | * In this case (as, unfortunately, we don't get an interrupt from xen |
1441 | * on transmit) nothing will ever call xennetback_ifstart() again. | | 1463 | * on transmit) nothing will ever call xennetback_ifstart() again. |
1442 | * Here we abuse the watchdog to get out of this condition. | | 1464 | * Here we abuse the watchdog to get out of this condition. |
1443 | */ | | 1465 | */ |
1444 | XENPRINTF(("xennetback_ifwatchdog\n")); | | 1466 | XENPRINTF(("xennetback_ifwatchdog\n")); |
1445 | xennetback_ifstart(ifp); | | 1467 | xennetback_ifstart(ifp); |
1446 | } | | 1468 | } |
1447 | | | 1469 | |
1448 | static int | | 1470 | static int |
1449 | xennetback_ifinit(struct ifnet *ifp) | | 1471 | xennetback_ifinit(struct ifnet *ifp) |
1450 | { | | 1472 | { |
1451 | struct xnetback_instance *xneti = ifp->if_softc; | | 1473 | struct xnetback_instance *xneti = ifp->if_softc; |
1452 | int s = splnet(); | | 1474 | int s = splnet(); |
1453 | | | 1475 | |
1454 | if ((ifp->if_flags & IFF_UP) == 0) { | | 1476 | if ((ifp->if_flags & IFF_UP) == 0) { |
1455 | splx(s); | | 1477 | splx(s); |
1456 | return 0; | | 1478 | return 0; |
1457 | } | | 1479 | } |
1458 | if (xneti->xni_status == CONNECTED) | | 1480 | if (xneti->xni_status == CONNECTED) |
1459 | ifp->if_flags |= IFF_RUNNING; | | 1481 | ifp->if_flags |= IFF_RUNNING; |
1460 | splx(s); | | 1482 | splx(s); |
1461 | return 0; | | 1483 | return 0; |
1462 | } | | 1484 | } |
1463 | | | 1485 | |
1464 | static void | | 1486 | static void |
1465 | xennetback_ifstop(struct ifnet *ifp, int disable) | | 1487 | xennetback_ifstop(struct ifnet *ifp, int disable) |
1466 | { | | 1488 | { |
1467 | struct xnetback_instance *xneti = ifp->if_softc; | | 1489 | struct xnetback_instance *xneti = ifp->if_softc; |
1468 | int s = splnet(); | | 1490 | int s = splnet(); |
1469 | | | 1491 | |
1470 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); | | 1492 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
1471 | ifp->if_timer = 0; | | 1493 | ifp->if_timer = 0; |
1472 | if (xneti->xni_status == CONNECTED) { | | 1494 | if (xneti->xni_status == CONNECTED) { |
1473 | XENPRINTF(("%s: req_prod 0x%x resp_prod 0x%x req_cons 0x%x " | | 1495 | XENPRINTF(("%s: req_prod 0x%x resp_prod 0x%x req_cons 0x%x " |
1474 | "event 0x%x\n", ifp->if_xname, xneti->xni_txring->req_prod, | | 1496 | "event 0x%x\n", ifp->if_xname, xneti->xni_txring->req_prod, |
1475 | xneti->xni_txring->resp_prod, xneti->xni_txring->req_cons, | | 1497 | xneti->xni_txring->resp_prod, xneti->xni_txring->req_cons, |
1476 | xneti->xni_txring->event)); | | 1498 | xneti->xni_txring->event)); |
1477 | xennetback_evthandler(ifp->if_softc); /* flush pending RX requests */ | | 1499 | xennetback_evthandler(ifp->if_softc); /* flush pending RX requests */ |
1478 | } | | 1500 | } |
1479 | splx(s); | | 1501 | splx(s); |
1480 | } | | 1502 | } |