Fri Jan 8 21:05:14 2016 UTC ()
Pull up following revision(s) (requested by bouyer in ticket #1071):
	sys/arch/xen/include/xen-public/io/ring.h: revision 1.3 via patch
	sys/arch/xen/xen/pciback.c: revision 1.10 via patch
	sys/arch/xen/xen/xbdback_xenbus.c: revision 1.62 via patch
	sys/arch/xen/xen/xennetback_xenbus.c: revision 1.54 via patch
Apply patch from XSA-155: make sure that the backend won't read parts of the
request twice (possibly because of compiler optimisations), by using
local copies and barriers.
From XSA155:
The compiler can emit optimizations in the PV backend drivers which
can lead to double fetch vulnerabilities. Specifically the shared
memory between the frontend and backend can be fetched twice (during
which time the frontend can alter the contents) possibly leading to
arbitrary code execution in backend.


(snj)
diff -r1.2 -r1.2.24.1 src/sys/arch/xen/include/xen-public/io/ring.h
diff -r1.9 -r1.9.4.1 src/sys/arch/xen/xen/pciback.c
diff -r1.59.4.2 -r1.59.4.3 src/sys/arch/xen/xen/xbdback_xenbus.c
diff -r1.52 -r1.52.4.1 src/sys/arch/xen/xen/xennetback_xenbus.c

cvs diff -r1.2 -r1.2.24.1 src/sys/arch/xen/include/xen-public/io/Attic/ring.h (expand / switch to unified diff)

--- src/sys/arch/xen/include/xen-public/io/Attic/ring.h 2011/12/07 15:40:15 1.2
+++ src/sys/arch/xen/include/xen-public/io/Attic/ring.h 2016/01/08 21:05:14 1.2.24.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ring.h,v 1.2 2011/12/07 15:40:15 cegger Exp $ */ 1/* $NetBSD: ring.h,v 1.2.24.1 2016/01/08 21:05:14 snj Exp $ */
2/****************************************************************************** 2/******************************************************************************
3 * ring.h 3 * ring.h
4 *  4 *
5 * Shared producer-consumer ring macros. 5 * Shared producer-consumer ring macros.
6 * 6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to 8 * of this software and associated documentation files (the "Software"), to
9 * deal in the Software without restriction, including without limitation the 9 * deal in the Software without restriction, including without limitation the
10 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 10 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
11 * sell copies of the Software, and to permit persons to whom the Software is 11 * sell copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions: 12 * furnished to do so, subject to the following conditions:
13 * 13 *
14 * The above copyright notice and this permission notice shall be included in 14 * The above copyright notice and this permission notice shall be included in
@@ -226,26 +226,40 @@ typedef struct __name##_back_ring __name @@ -226,26 +226,40 @@ typedef struct __name##_back_ring __name
226#else 226#else
227/* Same as above, but without the nice GCC ({ ... }) syntax. */ 227/* Same as above, but without the nice GCC ({ ... }) syntax. */
228#define RING_HAS_UNCONSUMED_REQUESTS(_r) \ 228#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
229 ((((_r)->sring->req_prod - (_r)->req_cons) < \ 229 ((((_r)->sring->req_prod - (_r)->req_cons) < \
230 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \ 230 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ? \
231 ((_r)->sring->req_prod - (_r)->req_cons) : \ 231 ((_r)->sring->req_prod - (_r)->req_cons) : \
232 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) 232 (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
233#endif 233#endif
234 234
235/* Direct access to individual ring elements, by index. */ 235/* Direct access to individual ring elements, by index. */
236#define RING_GET_REQUEST(_r, _idx) \ 236#define RING_GET_REQUEST(_r, _idx) \
237 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) 237 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
238 238
 239/*
 240 * Get a local copy of a request.
 241 *
 242 * Use this in preference to RING_GET_REQUEST() so all processing is
 243 * done on a local copy that cannot be modified by the other end.
 244 *
 245 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 246 * to be ineffective where _req is a struct which consists of only bitfields.
 247 */
 248#define RING_COPY_REQUEST(_r, _idx, _req) do { \
 249 /* Use volatile to force the copy into _req. */ \
 250 *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
 251} while (0)
 252
239#define RING_GET_RESPONSE(_r, _idx) \ 253#define RING_GET_RESPONSE(_r, _idx) \
240 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) 254 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
241 255
242/* Loop termination condition: Would the specified index overflow the ring? */ 256/* Loop termination condition: Would the specified index overflow the ring? */
243#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \ 257#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
244 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r)) 258 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
245 259
246#define RING_PUSH_REQUESTS(_r) do { \ 260#define RING_PUSH_REQUESTS(_r) do { \
247 xen_wmb(); /* back sees requests /before/ updated producer index */ \ 261 xen_wmb(); /* back sees requests /before/ updated producer index */ \
248 (_r)->sring->req_prod = (_r)->req_prod_pvt; \ 262 (_r)->sring->req_prod = (_r)->req_prod_pvt; \
249} while (0) 263} while (0)
250 264
251#define RING_PUSH_RESPONSES(_r) do { \ 265#define RING_PUSH_RESPONSES(_r) do { \

cvs diff -r1.9 -r1.9.4.1 src/sys/arch/xen/xen/pciback.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/pciback.c 2014/03/29 19:28:30 1.9
+++ src/sys/arch/xen/xen/pciback.c 2016/01/08 21:05:14 1.9.4.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pciback.c,v 1.9 2014/03/29 19:28:30 christos Exp $ */ 1/* $NetBSD: pciback.c,v 1.9.4.1 2016/01/08 21:05:14 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2009 Manuel Bouyer. 4 * Copyright (c) 2009 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -16,27 +16,27 @@ @@ -16,27 +16,27 @@
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: pciback.c,v 1.9 2014/03/29 19:28:30 christos Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: pciback.c,v 1.9.4.1 2016/01/08 21:05:14 snj Exp $");
30 30
31#include "opt_xen.h" 31#include "opt_xen.h"
32 32
33 33
34#include <sys/types.h> 34#include <sys/types.h>
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/errno.h> 37#include <sys/errno.h>
38#include <sys/malloc.h> 38#include <sys/malloc.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/bus.h> 40#include <sys/bus.h>
41#include <sys/queue.h> 41#include <sys/queue.h>
42 42
@@ -178,26 +178,27 @@ CFATTACH_DECL_NEW(pciback, sizeof(struct @@ -178,26 +178,27 @@ CFATTACH_DECL_NEW(pciback, sizeof(struct
178 pciback_pci_match, pciback_pci_attach, NULL, NULL); 178 pciback_pci_match, pciback_pci_attach, NULL, NULL);
179 179
180static int pciback_pci_inited = 0; 180static int pciback_pci_inited = 0;
181 181
182/* a xenbus PCI backend instance */ 182/* a xenbus PCI backend instance */
183struct pb_xenbus_instance { 183struct pb_xenbus_instance {
184 SLIST_ENTRY(pb_xenbus_instance) pbx_next; /* list of backend instances*/ 184 SLIST_ENTRY(pb_xenbus_instance) pbx_next; /* list of backend instances*/
185 struct xenbus_device *pbx_xbusd; 185 struct xenbus_device *pbx_xbusd;
186 domid_t pbx_domid; 186 domid_t pbx_domid;
187 struct pciback_pci_devlist pbx_pb_pci_dev; /* list of exported PCI devices */ 187 struct pciback_pci_devlist pbx_pb_pci_dev; /* list of exported PCI devices */
188 /* communication with the domU */ 188 /* communication with the domU */
189 unsigned int pbx_evtchn; /* our even channel */ 189 unsigned int pbx_evtchn; /* our even channel */
190 struct xen_pci_sharedinfo *pbx_sh_info; 190 struct xen_pci_sharedinfo *pbx_sh_info;
 191 struct xen_pci_op op;
191 grant_handle_t pbx_shinfo_handle; /* to unmap shared page */ 192 grant_handle_t pbx_shinfo_handle; /* to unmap shared page */
192}; 193};
193 194
194SLIST_HEAD(, pb_xenbus_instance) pb_xenbus_instances; 195SLIST_HEAD(, pb_xenbus_instance) pb_xenbus_instances;
195 196
196static struct xenbus_backend_driver pci_backend_driver = { 197static struct xenbus_backend_driver pci_backend_driver = {
197 .xbakd_create = pciback_xenbus_create, 198 .xbakd_create = pciback_xenbus_create,
198 .xbakd_type = "pci" 199 .xbakd_type = "pci"
199}; 200};
200 201
201int 202int
202pciback_pci_match(device_t parent, cfdata_t match, void *aux) 203pciback_pci_match(device_t parent, cfdata_t match, void *aux)
203{ 204{
@@ -711,33 +712,36 @@ pciback_xenbus_export_roots(struct pb_xe @@ -711,33 +712,36 @@ pciback_xenbus_export_roots(struct pb_xe
711 err = xenbus_printf(NULL, pbxi->pbx_xbusd->xbusd_path, "root_num", 712 err = xenbus_printf(NULL, pbxi->pbx_xbusd->xbusd_path, "root_num",
712 "%d", num_roots); 713 "%d", num_roots);
713 if (err) { 714 if (err) {
714 aprint_error("pciback: can't write to %s/root_num: " 715 aprint_error("pciback: can't write to %s/root_num: "
715 "%d\n", pbxi->pbx_xbusd->xbusd_path, err); 716 "%d\n", pbxi->pbx_xbusd->xbusd_path, err);
716 } 717 }
717} 718}
718 719
719static int 720static int
720pciback_xenbus_evthandler(void * arg) 721pciback_xenbus_evthandler(void * arg)
721{ 722{
722 struct pb_xenbus_instance *pbxi = arg; 723 struct pb_xenbus_instance *pbxi = arg;
723 struct pciback_pci_dev *pbd; 724 struct pciback_pci_dev *pbd;
724 struct xen_pci_op *op = &pbxi->pbx_sh_info->op; 725 struct xen_pci_op *op = &pbxi->op;
725 u_int bus, dev, func; 726 u_int bus, dev, func;
726 727
727 hypervisor_clear_event(pbxi->pbx_evtchn); 728 hypervisor_clear_event(pbxi->pbx_evtchn);
728 if (xen_atomic_test_bit(&pbxi->pbx_sh_info->flags, 729 if (xen_atomic_test_bit(&pbxi->pbx_sh_info->flags,
729 _XEN_PCIF_active) == 0) 730 _XEN_PCIF_active) == 0)
730 return 0; 731 return 0;
 732
 733 memcpy(op, &pbxi->pbx_sh_info->op, sizeof (struct xen_pci_op));
 734 __insn_barrier();
731 if (op->domain != 0) { 735 if (op->domain != 0) {
732 aprint_error("pciback: domain %d != 0", op->domain); 736 aprint_error("pciback: domain %d != 0", op->domain);
733 op->err = XEN_PCI_ERR_dev_not_found; 737 op->err = XEN_PCI_ERR_dev_not_found;
734 goto end; 738 goto end;
735 } 739 }
736 bus = op->bus; 740 bus = op->bus;
737 dev = (op->devfn >> 3) & 0xff; 741 dev = (op->devfn >> 3) & 0xff;
738 func = (op->devfn) & 0x7; 742 func = (op->devfn) & 0x7;
739 SLIST_FOREACH(pbd, &pbxi->pbx_pb_pci_dev, pb_guest_next) { 743 SLIST_FOREACH(pbd, &pbxi->pbx_pb_pci_dev, pb_guest_next) {
740 if (pbd->pb_bus == bus && 744 if (pbd->pb_bus == bus &&
741 pbd->pb_device == dev && 745 pbd->pb_device == dev &&
742 pbd->pb_function == func) 746 pbd->pb_function == func)
743 break; 747 break;
@@ -784,18 +788,20 @@ pciback_xenbus_evthandler(void * arg) @@ -784,18 +788,20 @@ pciback_xenbus_evthandler(void * arg)
784 pci_conf_write(pbd->pb_pc, pbd->pb_tag, 788 pci_conf_write(pbd->pb_pc, pbd->pb_tag,
785 op->offset, op->value); 789 op->offset, op->value);
786 break; 790 break;
787 default: 791 default:
788 aprint_error("pciback: bad size %d\n", op->size); 792 aprint_error("pciback: bad size %d\n", op->size);
789 op->err = XEN_PCI_ERR_invalid_offset; 793 op->err = XEN_PCI_ERR_invalid_offset;
790 break; 794 break;
791 } 795 }
792 break; 796 break;
793 default: 797 default:
794 aprint_error("pciback: unknown cmd %d\n", op->cmd); 798 aprint_error("pciback: unknown cmd %d\n", op->cmd);
795 op->err = XEN_PCI_ERR_not_implemented; 799 op->err = XEN_PCI_ERR_not_implemented;
796 } 800 }
 801 pbxi->pbx_sh_info->op.value = op->value;
 802 pbxi->pbx_sh_info->op.err = op->err;
797end: 803end:
798 xen_atomic_clear_bit(&pbxi->pbx_sh_info->flags, _XEN_PCIF_active); 804 xen_atomic_clear_bit(&pbxi->pbx_sh_info->flags, _XEN_PCIF_active);
799 hypervisor_notify_via_evtchn(pbxi->pbx_evtchn); 805 hypervisor_notify_via_evtchn(pbxi->pbx_evtchn);
800 return 1; 806 return 1;
801} 807}

cvs diff -r1.59.4.2 -r1.59.4.3 src/sys/arch/xen/xen/xbdback_xenbus.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/xbdback_xenbus.c 2015/11/16 07:34:08 1.59.4.2
+++ src/sys/arch/xen/xen/xbdback_xenbus.c 2016/01/08 21:05:14 1.59.4.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xbdback_xenbus.c,v 1.59.4.2 2015/11/16 07:34:08 msaitoh Exp $ */ 1/* $NetBSD: xbdback_xenbus.c,v 1.59.4.3 2016/01/08 21:05:14 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -16,27 +16,27 @@ @@ -16,27 +16,27 @@
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.59.4.2 2015/11/16 07:34:08 msaitoh Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.59.4.3 2016/01/08 21:05:14 snj Exp $");
30 30
31#include <sys/atomic.h> 31#include <sys/atomic.h>
32#include <sys/buf.h> 32#include <sys/buf.h>
33#include <sys/condvar.h> 33#include <sys/condvar.h>
34#include <sys/conf.h> 34#include <sys/conf.h>
35#include <sys/disk.h> 35#include <sys/disk.h>
36#include <sys/device.h> 36#include <sys/device.h>
37#include <sys/fcntl.h> 37#include <sys/fcntl.h>
38#include <sys/kauth.h> 38#include <sys/kauth.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/kmem.h> 40#include <sys/kmem.h>
41#include <sys/kthread.h> 41#include <sys/kthread.h>
42#include <sys/malloc.h> 42#include <sys/malloc.h>
@@ -1012,26 +1012,27 @@ xbdback_co_main_loop(struct xbdback_inst @@ -1012,26 +1012,27 @@ xbdback_co_main_loop(struct xbdback_inst
1012 req->sector_number = req32->sector_number; 1012 req->sector_number = req32->sector_number;
1013 break; 1013 break;
1014  1014
1015 case XBDIP_64: 1015 case XBDIP_64:
1016 req64 = RING_GET_REQUEST(&xbdi->xbdi_ring.ring_64, 1016 req64 = RING_GET_REQUEST(&xbdi->xbdi_ring.ring_64,
1017 xbdi->xbdi_ring.ring_n.req_cons); 1017 xbdi->xbdi_ring.ring_n.req_cons);
1018 req->operation = req64->operation; 1018 req->operation = req64->operation;
1019 req->nr_segments = req64->nr_segments; 1019 req->nr_segments = req64->nr_segments;
1020 req->handle = req64->handle; 1020 req->handle = req64->handle;
1021 req->id = req64->id; 1021 req->id = req64->id;
1022 req->sector_number = req64->sector_number; 1022 req->sector_number = req64->sector_number;
1023 break; 1023 break;
1024 } 1024 }
 1025 __insn_barrier();
1025 XENPRINTF(("xbdback op %d req_cons 0x%x req_prod 0x%x " 1026 XENPRINTF(("xbdback op %d req_cons 0x%x req_prod 0x%x "
1026 "resp_prod 0x%x id %" PRIu64 "\n", req->operation, 1027 "resp_prod 0x%x id %" PRIu64 "\n", req->operation,
1027 xbdi->xbdi_ring.ring_n.req_cons, 1028 xbdi->xbdi_ring.ring_n.req_cons,
1028 xbdi->xbdi_req_prod, 1029 xbdi->xbdi_req_prod,
1029 xbdi->xbdi_ring.ring_n.rsp_prod_pvt, 1030 xbdi->xbdi_ring.ring_n.rsp_prod_pvt,
1030 req->id)); 1031 req->id));
1031 switch(req->operation) { 1032 switch(req->operation) {
1032 case BLKIF_OP_READ: 1033 case BLKIF_OP_READ:
1033 case BLKIF_OP_WRITE: 1034 case BLKIF_OP_WRITE:
1034 xbdi->xbdi_cont = xbdback_co_io; 1035 xbdi->xbdi_cont = xbdback_co_io;
1035 break; 1036 break;
1036 case BLKIF_OP_FLUSH_DISKCACHE: 1037 case BLKIF_OP_FLUSH_DISKCACHE:
1037 xbdi_get(xbdi); 1038 xbdi_get(xbdi);

cvs diff -r1.52 -r1.52.4.1 src/sys/arch/xen/xen/xennetback_xenbus.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/xennetback_xenbus.c 2013/10/20 11:37:53 1.52
+++ src/sys/arch/xen/xen/xennetback_xenbus.c 2016/01/08 21:05:14 1.52.4.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xennetback_xenbus.c,v 1.52 2013/10/20 11:37:53 bouyer Exp $ */ 1/* $NetBSD: xennetback_xenbus.c,v 1.52.4.1 2016/01/08 21:05:14 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -16,27 +16,27 @@ @@ -16,27 +16,27 @@
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.52 2013/10/20 11:37:53 bouyer Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.52.4.1 2016/01/08 21:05:14 snj Exp $");
30 30
31#include "opt_xen.h" 31#include "opt_xen.h"
32 32
33#include <sys/types.h> 33#include <sys/types.h>
34#include <sys/param.h> 34#include <sys/param.h>
35#include <sys/systm.h> 35#include <sys/systm.h>
36#include <sys/malloc.h> 36#include <sys/malloc.h>
37#include <sys/queue.h> 37#include <sys/queue.h>
38#include <sys/kernel.h> 38#include <sys/kernel.h>
39#include <sys/mbuf.h> 39#include <sys/mbuf.h>
40#include <sys/protosw.h> 40#include <sys/protosw.h>
41#include <sys/socket.h> 41#include <sys/socket.h>
42#include <sys/ioctl.h> 42#include <sys/ioctl.h>
@@ -705,181 +705,181 @@ xennetback_tx_response(struct xnetback_i @@ -705,181 +705,181 @@ xennetback_tx_response(struct xnetback_i
705 xneti->xni_txring.rsp_prod_pvt++; 705 xneti->xni_txring.rsp_prod_pvt++;
706 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event); 706 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xneti->xni_txring, do_event);
707 if (do_event) { 707 if (do_event) {
708 XENPRINTF(("%s send event\n", xneti->xni_if.if_xname)); 708 XENPRINTF(("%s send event\n", xneti->xni_if.if_xname));
709 hypervisor_notify_via_evtchn(xneti->xni_evtchn); 709 hypervisor_notify_via_evtchn(xneti->xni_evtchn);
710 } 710 }
711} 711}
712 712
713static int 713static int
714xennetback_evthandler(void *arg) 714xennetback_evthandler(void *arg)
715{ 715{
716 struct xnetback_instance *xneti = arg; 716 struct xnetback_instance *xneti = arg;
717 struct ifnet *ifp = &xneti->xni_if; 717 struct ifnet *ifp = &xneti->xni_if;
718 netif_tx_request_t *txreq; 718 netif_tx_request_t txreq;
719 struct xni_pkt *pkt; 719 struct xni_pkt *pkt;
720 vaddr_t pkt_va; 720 vaddr_t pkt_va;
721 struct mbuf *m; 721 struct mbuf *m;
722 int receive_pending, err; 722 int receive_pending, err;
723 RING_IDX req_cons; 723 RING_IDX req_cons;
724 724
725 XENPRINTF(("xennetback_evthandler ")); 725 XENPRINTF(("xennetback_evthandler "));
726 req_cons = xneti->xni_txring.req_cons; 726 req_cons = xneti->xni_txring.req_cons;
727 xen_rmb(); 727 xen_rmb();
728 while (1) { 728 while (1) {
729 xen_rmb(); /* be sure to read the request before updating */ 729 xen_rmb(); /* be sure to read the request before updating */
730 xneti->xni_txring.req_cons = req_cons; 730 xneti->xni_txring.req_cons = req_cons;
731 xen_wmb(); 731 xen_wmb();
732 RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring, 732 RING_FINAL_CHECK_FOR_REQUESTS(&xneti->xni_txring,
733 receive_pending); 733 receive_pending);
734 if (receive_pending == 0) 734 if (receive_pending == 0)
735 break; 735 break;
736 txreq = RING_GET_REQUEST(&xneti->xni_txring, req_cons); 736 RING_COPY_REQUEST(&xneti->xni_txring, req_cons, &txreq);
737 xen_rmb(); 737 xen_rmb();
738 XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname, 738 XENPRINTF(("%s pkt size %d\n", xneti->xni_if.if_xname,
739 txreq->size)); 739 txreq.size));
740 req_cons++; 740 req_cons++;
741 if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != 741 if (__predict_false((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
742 (IFF_UP | IFF_RUNNING))) { 742 (IFF_UP | IFF_RUNNING))) {
743 /* interface not up, drop */ 743 /* interface not up, drop */
744 xennetback_tx_response(xneti, txreq->id, 744 xennetback_tx_response(xneti, txreq.id,
745 NETIF_RSP_DROPPED); 745 NETIF_RSP_DROPPED);
746 continue; 746 continue;
747 } 747 }
748 /* 748 /*
749 * Do some sanity checks, and map the packet's page. 749 * Do some sanity checks, and map the packet's page.
750 */ 750 */
751 if (__predict_false(txreq->size < ETHER_HDR_LEN || 751 if (__predict_false(txreq.size < ETHER_HDR_LEN ||
752 txreq->size > (ETHER_MAX_LEN - ETHER_CRC_LEN))) { 752 txreq.size > (ETHER_MAX_LEN - ETHER_CRC_LEN))) {
753 printf("%s: packet size %d too big\n", 753 printf("%s: packet size %d too big\n",
754 ifp->if_xname, txreq->size); 754 ifp->if_xname, txreq.size);
755 xennetback_tx_response(xneti, txreq->id, 755 xennetback_tx_response(xneti, txreq.id,
756 NETIF_RSP_ERROR); 756 NETIF_RSP_ERROR);
757 ifp->if_ierrors++; 757 ifp->if_ierrors++;
758 continue; 758 continue;
759 } 759 }
760 /* don't cross page boundaries */ 760 /* don't cross page boundaries */
761 if (__predict_false( 761 if (__predict_false(
762 txreq->offset + txreq->size > PAGE_SIZE)) { 762 txreq.offset + txreq.size > PAGE_SIZE)) {
763 printf("%s: packet cross page boundary\n", 763 printf("%s: packet cross page boundary\n",
764 ifp->if_xname); 764 ifp->if_xname);
765 xennetback_tx_response(xneti, txreq->id, 765 xennetback_tx_response(xneti, txreq.id,
766 NETIF_RSP_ERROR); 766 NETIF_RSP_ERROR);
767 ifp->if_ierrors++; 767 ifp->if_ierrors++;
768 continue; 768 continue;
769 } 769 }
770 /* get a mbuf for this packet */ 770 /* get a mbuf for this packet */
771 MGETHDR(m, M_DONTWAIT, MT_DATA); 771 MGETHDR(m, M_DONTWAIT, MT_DATA);
772 if (__predict_false(m == NULL)) { 772 if (__predict_false(m == NULL)) {
773 static struct timeval lasttime; 773 static struct timeval lasttime;
774 if (ratecheck(&lasttime, &xni_pool_errintvl)) 774 if (ratecheck(&lasttime, &xni_pool_errintvl))
775 printf("%s: mbuf alloc failed\n", 775 printf("%s: mbuf alloc failed\n",
776 ifp->if_xname); 776 ifp->if_xname);
777 xennetback_tx_response(xneti, txreq->id, 777 xennetback_tx_response(xneti, txreq.id,
778 NETIF_RSP_DROPPED); 778 NETIF_RSP_DROPPED);
779 ifp->if_ierrors++; 779 ifp->if_ierrors++;
780 continue; 780 continue;
781 } 781 }
782 782
783 XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n", 783 XENPRINTF(("%s pkt offset %d size %d id %d req_cons %d\n",
784 xneti->xni_if.if_xname, txreq->offset, 784 xneti->xni_if.if_xname, txreq.offset,
785 txreq->size, txreq->id, MASK_NETIF_TX_IDX(req_cons))); 785 txreq.size, txreq.id, MASK_NETIF_TX_IDX(req_cons)));
786  786
787 pkt = pool_get(&xni_pkt_pool, PR_NOWAIT); 787 pkt = pool_get(&xni_pkt_pool, PR_NOWAIT);
788 if (__predict_false(pkt == NULL)) { 788 if (__predict_false(pkt == NULL)) {
789 static struct timeval lasttime; 789 static struct timeval lasttime;
790 if (ratecheck(&lasttime, &xni_pool_errintvl)) 790 if (ratecheck(&lasttime, &xni_pool_errintvl))
791 printf("%s: xnbpkt alloc failed\n", 791 printf("%s: xnbpkt alloc failed\n",
792 ifp->if_xname); 792 ifp->if_xname);
793 xennetback_tx_response(xneti, txreq->id, 793 xennetback_tx_response(xneti, txreq.id,
794 NETIF_RSP_DROPPED); 794 NETIF_RSP_DROPPED);
795 ifp->if_ierrors++; 795 ifp->if_ierrors++;
796 m_freem(m); 796 m_freem(m);
797 continue; 797 continue;
798 } 798 }
799 err = xen_shm_map(1, xneti->xni_domid, &txreq->gref, &pkt_va, 799 err = xen_shm_map(1, xneti->xni_domid, &txreq.gref, &pkt_va,
800 &pkt->pkt_handle, XSHM_RO); 800 &pkt->pkt_handle, XSHM_RO);
801 if (__predict_false(err == ENOMEM)) { 801 if (__predict_false(err == ENOMEM)) {
802 xennetback_tx_response(xneti, txreq->id, 802 xennetback_tx_response(xneti, txreq.id,
803 NETIF_RSP_DROPPED); 803 NETIF_RSP_DROPPED);
804 ifp->if_ierrors++; 804 ifp->if_ierrors++;
805 pool_put(&xni_pkt_pool, pkt); 805 pool_put(&xni_pkt_pool, pkt);
806 m_freem(m); 806 m_freem(m);
807 continue; 807 continue;
808 } 808 }
809  809
810 if (__predict_false(err)) { 810 if (__predict_false(err)) {
811 printf("%s: mapping foreing page failed: %d\n", 811 printf("%s: mapping foreing page failed: %d\n",
812 xneti->xni_if.if_xname, err); 812 xneti->xni_if.if_xname, err);
813 xennetback_tx_response(xneti, txreq->id, 813 xennetback_tx_response(xneti, txreq.id,
814 NETIF_RSP_ERROR); 814 NETIF_RSP_ERROR);
815 ifp->if_ierrors++; 815 ifp->if_ierrors++;
816 pool_put(&xni_pkt_pool, pkt); 816 pool_put(&xni_pkt_pool, pkt);
817 m_freem(m); 817 m_freem(m);
818 continue; 818 continue;
819 } 819 }
820 820
821 if ((ifp->if_flags & IFF_PROMISC) == 0) { 821 if ((ifp->if_flags & IFF_PROMISC) == 0) {
822 struct ether_header *eh = 822 struct ether_header *eh =
823 (void*)(pkt_va + txreq->offset); 823 (void*)(pkt_va + txreq.offset);
824 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 && 824 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
825 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost, 825 memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
826 ETHER_ADDR_LEN) != 0) { 826 ETHER_ADDR_LEN) != 0) {
827 xni_pkt_unmap(pkt, pkt_va); 827 xni_pkt_unmap(pkt, pkt_va);
828 m_freem(m); 828 m_freem(m);
829 xennetback_tx_response(xneti, txreq->id, 829 xennetback_tx_response(xneti, txreq.id,
830 NETIF_RSP_OKAY); 830 NETIF_RSP_OKAY);
831 continue; /* packet is not for us */ 831 continue; /* packet is not for us */
832 } 832 }
833 } 833 }
834#ifdef notyet 834#ifdef notyet
835a lot of work is needed in the tcp stack to handle read-only ext storage 835a lot of work is needed in the tcp stack to handle read-only ext storage
836so always copy for now. 836so always copy for now.
837 if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) == 837 if (((req_cons + 1) & (NET_TX_RING_SIZE - 1)) ==
838 (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1))) 838 (xneti->xni_txring.rsp_prod_pvt & (NET_TX_RING_SIZE - 1)))
839#else 839#else
840 if (1) 840 if (1)
841#endif /* notyet */ 841#endif /* notyet */
842 { 842 {
843 /* 843 /*
844 * This is the last TX buffer. Copy the data and 844 * This is the last TX buffer. Copy the data and
845 * ack it. Delaying it until the mbuf is 845 * ack it. Delaying it until the mbuf is
846 * freed will stall transmit. 846 * freed will stall transmit.
847 */ 847 */
848 m->m_len = min(MHLEN, txreq->size); 848 m->m_len = min(MHLEN, txreq.size);
849 m->m_pkthdr.len = 0; 849 m->m_pkthdr.len = 0;
850 m_copyback(m, 0, txreq->size, 850 m_copyback(m, 0, txreq.size,
851 (void *)(pkt_va + txreq->offset)); 851 (void *)(pkt_va + txreq.offset));
852 xni_pkt_unmap(pkt, pkt_va); 852 xni_pkt_unmap(pkt, pkt_va);
853 if (m->m_pkthdr.len < txreq->size) { 853 if (m->m_pkthdr.len < txreq.size) {
854 ifp->if_ierrors++; 854 ifp->if_ierrors++;
855 m_freem(m); 855 m_freem(m);
856 xennetback_tx_response(xneti, txreq->id, 856 xennetback_tx_response(xneti, txreq.id,
857 NETIF_RSP_DROPPED); 857 NETIF_RSP_DROPPED);
858 continue; 858 continue;
859 } 859 }
860 xennetback_tx_response(xneti, txreq->id, 860 xennetback_tx_response(xneti, txreq.id,
861 NETIF_RSP_OKAY); 861 NETIF_RSP_OKAY);
862 } else { 862 } else {
863 863
864 pkt->pkt_id = txreq->id; 864 pkt->pkt_id = txreq.id;
865 pkt->pkt_xneti = xneti; 865 pkt->pkt_xneti = xneti;
866 866
867 MEXTADD(m, pkt_va + txreq->offset, 867 MEXTADD(m, pkt_va + txreq.offset,
868 txreq->size, M_DEVBUF, xennetback_tx_free, pkt); 868 txreq.size, M_DEVBUF, xennetback_tx_free, pkt);
869 m->m_pkthdr.len = m->m_len = txreq->size; 869 m->m_pkthdr.len = m->m_len = txreq.size;
870 m->m_flags |= M_EXT_ROMAP; 870 m->m_flags |= M_EXT_ROMAP;
871 } 871 }
872 if ((txreq->flags & NETTXF_csum_blank) != 0) { 872 if ((txreq.flags & NETTXF_csum_blank) != 0) {
873 xennet_checksum_fill(&m); 873 xennet_checksum_fill(&m);
874 if (m == NULL) { 874 if (m == NULL) {
875 ifp->if_ierrors++; 875 ifp->if_ierrors++;
876 continue; 876 continue;
877 } 877 }
878 } 878 }
879 m->m_pkthdr.rcvif = ifp; 879 m->m_pkthdr.rcvif = ifp;
880 ifp->if_ipackets++; 880 ifp->if_ipackets++;
881  881
882 bpf_mtap(ifp, m); 882 bpf_mtap(ifp, m);
883 (*ifp->if_input)(ifp, m); 883 (*ifp->if_input)(ifp, m);
884 } 884 }
885 xen_rmb(); /* be sure to read the request before updating pointer */ 885 xen_rmb(); /* be sure to read the request before updating pointer */
@@ -943,26 +943,27 @@ static void @@ -943,26 +943,27 @@ static void
943xennetback_ifsoftstart_transfer(void *arg) 943xennetback_ifsoftstart_transfer(void *arg)
944{ 944{
945 struct xnetback_instance *xneti = arg; 945 struct xnetback_instance *xneti = arg;
946 struct ifnet *ifp = &xneti->xni_if; 946 struct ifnet *ifp = &xneti->xni_if;
947 struct mbuf *m; 947 struct mbuf *m;
948 vaddr_t xmit_va; 948 vaddr_t xmit_va;
949 paddr_t xmit_pa; 949 paddr_t xmit_pa;
950 paddr_t xmit_ma; 950 paddr_t xmit_ma;
951 paddr_t newp_ma = 0; /* XXX gcc */ 951 paddr_t newp_ma = 0; /* XXX gcc */
952 int i, j, nppitems; 952 int i, j, nppitems;
953 mmu_update_t *mmup; 953 mmu_update_t *mmup;
954 multicall_entry_t *mclp; 954 multicall_entry_t *mclp;
955 netif_rx_response_t *rxresp; 955 netif_rx_response_t *rxresp;
 956 netif_rx_request_t rxreq;
956 RING_IDX req_prod, resp_prod; 957 RING_IDX req_prod, resp_prod;
957 int do_event = 0; 958 int do_event = 0;
958 gnttab_transfer_t *gop; 959 gnttab_transfer_t *gop;
959 int id, offset; 960 int id, offset;
960 961
961 XENPRINTF(("xennetback_ifsoftstart_transfer ")); 962 XENPRINTF(("xennetback_ifsoftstart_transfer "));
962 int s = splnet(); 963 int s = splnet();
963 if (__predict_false( 964 if (__predict_false(
964 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { 965 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
965 splx(s); 966 splx(s);
966 return; 967 return;
967 } 968 }
968 969
@@ -1018,30 +1019,30 @@ xennetback_ifsoftstart_transfer(void *ar @@ -1018,30 +1019,30 @@ xennetback_ifsoftstart_transfer(void *ar
1018 KASSERT(xmit_pa != POOL_PADDR_INVALID); 1019 KASSERT(xmit_pa != POOL_PADDR_INVALID);
1019 xmit_ma = xpmap_ptom(xmit_pa); 1020 xmit_ma = xpmap_ptom(xmit_pa);
1020 XENPRINTF(("xennetback_get_xmit_page: got va " 1021 XENPRINTF(("xennetback_get_xmit_page: got va "
1021 "0x%x ma 0x%x\n", (u_int)xmit_va, 1022 "0x%x ma 0x%x\n", (u_int)xmit_va,
1022 (u_int)xmit_ma)); 1023 (u_int)xmit_ma));
1023 m_copydata(m, 0, m->m_pkthdr.len, 1024 m_copydata(m, 0, m->m_pkthdr.len,
1024 (char *)xmit_va + LINUX_REQUESTED_OFFSET); 1025 (char *)xmit_va + LINUX_REQUESTED_OFFSET);
1025 offset = LINUX_REQUESTED_OFFSET; 1026 offset = LINUX_REQUESTED_OFFSET;
1026 pages_pool_free[nppitems].va = xmit_va; 1027 pages_pool_free[nppitems].va = xmit_va;
1027 pages_pool_free[nppitems].pa = xmit_pa; 1028 pages_pool_free[nppitems].pa = xmit_pa;
1028 nppitems++; 1029 nppitems++;
1029 } 1030 }
1030 /* start filling ring */ 1031 /* start filling ring */
1031 gop->ref = RING_GET_REQUEST(&xneti->xni_rxring, 1032 RING_COPY_REQUEST(&xneti->xni_rxring,
1032 xneti->xni_rxring.req_cons)->gref; 1033 xneti->xni_rxring.req_cons, &rxreq);
1033 id = RING_GET_REQUEST(&xneti->xni_rxring, 1034 gop->ref = rxreq.gref;
1034 xneti->xni_rxring.req_cons)->id; 1035 id = rxreq.id;
1035 xen_rmb(); 1036 xen_rmb();
1036 xneti->xni_rxring.req_cons++; 1037 xneti->xni_rxring.req_cons++;
1037 rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, 1038 rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
1038 resp_prod); 1039 resp_prod);
1039 rxresp->id = id; 1040 rxresp->id = id;
1040 rxresp->offset = offset; 1041 rxresp->offset = offset;
1041 rxresp->status = m->m_pkthdr.len; 1042 rxresp->status = m->m_pkthdr.len;
1042 if ((m->m_pkthdr.csum_flags & 1043 if ((m->m_pkthdr.csum_flags &
1043 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { 1044 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
1044 rxresp->flags = NETRXF_csum_blank; 1045 rxresp->flags = NETRXF_csum_blank;
1045 } else { 1046 } else {
1046 rxresp->flags = 0; 1047 rxresp->flags = 0;
1047 } 1048 }
@@ -1188,26 +1189,27 @@ xennetback_ifsoftstart_transfer(void *ar @@ -1188,26 +1189,27 @@ xennetback_ifsoftstart_transfer(void *ar
1188 splx(s); 1189 splx(s);
1189} 1190}
1190 1191
1191static void 1192static void
1192xennetback_ifsoftstart_copy(void *arg) 1193xennetback_ifsoftstart_copy(void *arg)
1193{ 1194{
1194 struct xnetback_instance *xneti = arg; 1195 struct xnetback_instance *xneti = arg;
1195 struct ifnet *ifp = &xneti->xni_if; 1196 struct ifnet *ifp = &xneti->xni_if;
1196 struct mbuf *m, *new_m; 1197 struct mbuf *m, *new_m;
1197 paddr_t xmit_pa; 1198 paddr_t xmit_pa;
1198 paddr_t xmit_ma; 1199 paddr_t xmit_ma;
1199 int i, j; 1200 int i, j;
1200 netif_rx_response_t *rxresp; 1201 netif_rx_response_t *rxresp;
 1202 netif_rx_request_t rxreq;
1201 RING_IDX req_prod, resp_prod; 1203 RING_IDX req_prod, resp_prod;
1202 int do_event = 0; 1204 int do_event = 0;
1203 gnttab_copy_t *gop; 1205 gnttab_copy_t *gop;
1204 int id, offset; 1206 int id, offset;
1205 1207
1206 XENPRINTF(("xennetback_ifsoftstart_copy ")); 1208 XENPRINTF(("xennetback_ifsoftstart_copy "));
1207 int s = splnet(); 1209 int s = splnet();
1208 if (__predict_false( 1210 if (__predict_false(
1209 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) { 1211 (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)) {
1210 splx(s); 1212 splx(s);
1211 return; 1213 return;
1212 } 1214 }
1213 1215
@@ -1299,36 +1301,36 @@ xennetback_ifsoftstart_copy(void *arg) @@ -1299,36 +1301,36 @@ xennetback_ifsoftstart_copy(void *arg)
1299 } else { 1301 } else {
1300 IFQ_DEQUEUE(&ifp->if_snd, m); 1302 IFQ_DEQUEUE(&ifp->if_snd, m);
1301 } 1303 }
1302 1304
1303 KASSERT(xmit_pa != POOL_PADDR_INVALID); 1305 KASSERT(xmit_pa != POOL_PADDR_INVALID);
1304 KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE); 1306 KASSERT((offset + m->m_pkthdr.len) <= PAGE_SIZE);
1305 xmit_ma = xpmap_ptom(xmit_pa); 1307 xmit_ma = xpmap_ptom(xmit_pa);
1306 /* start filling ring */ 1308 /* start filling ring */
1307 gop->flags = GNTCOPY_dest_gref; 1309 gop->flags = GNTCOPY_dest_gref;
1308 gop->source.offset = offset; 1310 gop->source.offset = offset;
1309 gop->source.domid = DOMID_SELF; 1311 gop->source.domid = DOMID_SELF;
1310 gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT; 1312 gop->source.u.gmfn = xmit_ma >> PAGE_SHIFT;
1311 1313
1312 gop->dest.u.ref = RING_GET_REQUEST(&xneti->xni_rxring, 1314 RING_COPY_REQUEST(&xneti->xni_rxring,
1313 xneti->xni_rxring.req_cons)->gref; 1315 xneti->xni_rxring.req_cons, &rxreq);
 1316 gop->dest.u.ref = rxreq.gref;
1314 gop->dest.offset = 0; 1317 gop->dest.offset = 0;
1315 gop->dest.domid = xneti->xni_domid; 1318 gop->dest.domid = xneti->xni_domid;
1316 1319
1317 gop->len = m->m_pkthdr.len; 1320 gop->len = m->m_pkthdr.len;
1318 gop++; 1321 gop++;
1319 1322
1320 id = RING_GET_REQUEST(&xneti->xni_rxring, 1323 id = rxreq.id;
1321 xneti->xni_rxring.req_cons)->id; 
1322 xen_rmb(); 1324 xen_rmb();
1323 xneti->xni_rxring.req_cons++; 1325 xneti->xni_rxring.req_cons++;
1324 rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, 1326 rxresp = RING_GET_RESPONSE(&xneti->xni_rxring,
1325 resp_prod); 1327 resp_prod);
1326 rxresp->id = id; 1328 rxresp->id = id;
1327 rxresp->offset = 0; 1329 rxresp->offset = 0;
1328 rxresp->status = m->m_pkthdr.len; 1330 rxresp->status = m->m_pkthdr.len;
1329 if ((m->m_pkthdr.csum_flags & 1331 if ((m->m_pkthdr.csum_flags &
1330 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) { 1332 (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
1331 rxresp->flags = NETRXF_csum_blank; 1333 rxresp->flags = NETRXF_csum_blank;
1332 } else { 1334 } else {
1333 rxresp->flags = 0; 1335 rxresp->flags = 0;
1334 } 1336 }