Thu Apr 23 07:24:40 2020 UTC
g/c the no-longer-needed xbdi_io structure member; just pass the I/O object as the
continuation parameter instead.


(jdolecek)
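
For context: xbdback's request path is written in continuation-passing style. Each
xbdback_co_* step stores the next step in xbdi->xbdi_cont and returns the object that
the next step receives as its obj argument. The change below drops the xbdi_io member
and lets the in-flight struct xbdback_io travel through that return-value/obj channel
instead. The following is a minimal user-space sketch of the pattern only; the
pool-less allocation, the toy fields and the main() driver loop are illustrative
stand-ins, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

struct xbdback_instance;
typedef void *(*xbdback_cont_t)(struct xbdback_instance *, void *);

/* Toy stand-ins for the real structures. */
struct xbdback_io {
	int xio_operation;
};

struct xbdback_instance {
	xbdback_cont_t xbdi_cont; /* next step of the state machine */
	/* no xbdi_io member: the I/O travels as the obj argument */
};

static void *co_gotio(struct xbdback_instance *, void *);
static void *co_do_io(struct xbdback_instance *, void *);

/* Prepare an I/O and hand it to the next step by returning it. */
static void *
co_gotio(struct xbdback_instance *xbdi, void *obj)
{
	struct xbdback_io *xbd_io = malloc(sizeof(*xbd_io));

	(void)obj;
	xbd_io->xio_operation = 42;
	xbdi->xbdi_cont = co_do_io;
	return xbd_io;	/* becomes the next step's obj */
}

/* Consume the I/O received as obj instead of reading a xbdi_io member. */
static void *
co_do_io(struct xbdback_instance *xbdi, void *obj)
{
	struct xbdback_io *xbd_io = obj;

	printf("processing operation %d\n", xbd_io->xio_operation);
	free(xbd_io);
	xbdi->xbdi_cont = NULL;	/* state machine done */
	return xbdi;
}

/* Driver loop: feed each continuation the object the previous one returned. */
int
main(void)
{
	struct xbdback_instance xbdi = { .xbdi_cont = co_gotio };
	void *obj = &xbdi;

	while (xbdi.xbdi_cont != NULL)
		obj = xbdi.xbdi_cont(&xbdi, obj);
	return 0;
}

Because the I/O object now always arrives as obj, the KASSERT(xbdi->xbdi_io == NULL)
checks and the xbdi->xbdi_io = NULL resets removed in the diff below are no longer
needed.
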
diff -r1.86 -r1.87 src/sys/arch/xen/xen/xbdback_xenbus.c

cvs diff -r1.86 -r1.87 src/sys/arch/xen/xen/xbdback_xenbus.c

--- src/sys/arch/xen/xen/xbdback_xenbus.c 2020/04/21 13:56:18 1.86
+++ src/sys/arch/xen/xen/xbdback_xenbus.c 2020/04/23 07:24:40 1.87
@@ -1,14 +1,14 @@
-/* $NetBSD: xbdback_xenbus.c,v 1.86 2020/04/21 13:56:18 jdolecek Exp $ */
+/* $NetBSD: xbdback_xenbus.c,v 1.87 2020/04/23 07:24:40 jdolecek Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -16,27 +16,27 @@
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.86 2020/04/21 13:56:18 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.87 2020/04/23 07:24:40 jdolecek Exp $");
 
 #include <sys/atomic.h>
 #include <sys/buf.h>
 #include <sys/condvar.h>
 #include <sys/conf.h>
 #include <sys/disk.h>
 #include <sys/device.h>
 #include <sys/fcntl.h>
 #include <sys/kauth.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/kthread.h>
 #include <sys/mutex.h>
@@ -67,27 +67,26 @@ __KERNEL_RCSID(0, "$NetBSD: xbdback_xenb
  * Backend block device driver for Xen
  */
 
 /* Values are expressed in 512-byte sectors */
 #define VBD_BSIZE 512
 #define VBD_MAXSECT ((PAGE_SIZE / VBD_BSIZE) - 1)
 
 /* Need to alloc one extra page to account for possible mapping offset */
 #define VBD_VA_SIZE (MAXPHYS + PAGE_SIZE)
 #define VBD_MAX_INDIRECT_SEGMENTS VBD_VA_SIZE >> PAGE_SHIFT
 
 CTASSERT(XENSHM_MAX_PAGES_PER_REQUEST >= VBD_MAX_INDIRECT_SEGMENTS);
 
-struct xbdback_io;
 struct xbdback_instance;
 
 /*
  * status of a xbdback instance:
  * WAITING: xbdback instance is connected, waiting for requests
  * RUN: xbdi thread must be woken up, I/Os have to be processed
  * DISCONNECTING: the instance is closing, no more I/Os can be scheduled
  * DISCONNECTED: no I/Os, no ring, the thread should terminate.
  */
 typedef enum {WAITING, RUN, DISCONNECTING, DISCONNECTED} xbdback_state_t;
 
 /*
  * Each xbdback instance is managed by a single thread that handles all
@@ -176,28 +175,26 @@ struct xbdback_instance {
 	int xbdi_refcnt;
 	/*
 	 * State for I/O processing/coalescing follows; this has to
 	 * live here instead of on the stack because of the
 	 * continuation-ness (see above).
 	 */
 	RING_IDX xbdi_req_prod; /* limit on request indices */
 	xbdback_cont_t xbdi_cont;
 	/* _request state: track requests fetched from ring */
 	blkif_request_t xbdi_xen_req;
 	struct blkif_request_segment xbdi_seg[VBD_MAX_INDIRECT_SEGMENTS];
 	bus_dmamap_t xbdi_seg_dmamap;
 	grant_ref_t xbdi_in_gntref;
-	/* _io state: I/O associated to this instance */
-	struct xbdback_io *xbdi_io;
 	/* other state */
 	int xbdi_same_page; /* are we merging two segments on the same page? */
 	uint xbdi_pendingreqs; /* number of I/O in fly */
 	struct timeval xbdi_lasterr_time;    /* error time tracking */
 #ifdef DEBUG
 	struct timeval xbdi_lastfragio_time; /* fragmented I/O tracking */
 #endif
 };
 /* Manipulation of the above reference count. */
 #define xbdi_get(xbdip) atomic_inc_uint(&(xbdip)->xbdi_refcnt)
 #define xbdi_put(xbdip) \
 do { \
 	if (atomic_dec_uint_nv(&(xbdip)->xbdi_refcnt) == 0) \
@@ -1033,27 +1030,26 @@ xbdback_co_main_loop(struct xbdback_inst
 		default:
 			if (ratecheck(&xbdi->xbdi_lasterr_time,
 			    &xbdback_err_intvl)) {
 				printf("%s: unknown operation %d\n",
 				    xbdi->xbdi_name, req->operation);
 			}
 fail:
 			xbdback_send_reply(xbdi, req->id, req->operation,
 			    BLKIF_RSP_ERROR);
 			xbdi->xbdi_cont = xbdback_co_main_incr;
 			break;
 		}
 	} else {
-		KASSERT(xbdi->xbdi_io == NULL);
 		xbdi->xbdi_cont = xbdback_co_main_done2;
 	}
 	return xbdi;
 }
 
 /*
  * Increment consumer index and move on to the next request. In case
  * we want to disconnect, leave continuation now.
  */
 static void *
 xbdback_co_main_incr(struct xbdback_instance *xbdi, void *obj)
 {
 	(void)obj;
@@ -1080,27 +1076,26 @@ xbdback_co_main_incr(struct xbdback_inst
 
 	return xbdi;
 }
 
 /*
  * Check for requests in the instance's ring. In case there are, start again
  * from the beginning. If not, stall.
  */
 static void *
 xbdback_co_main_done2(struct xbdback_instance *xbdi, void *obj)
 {
 	int work_to_do;
 
-	KASSERT(xbdi->xbdi_io == NULL);
 	RING_FINAL_CHECK_FOR_REQUESTS(&xbdi->xbdi_ring.ring_n, work_to_do);
 	if (work_to_do)
 		xbdi->xbdi_cont = xbdback_co_main;
 	else
 		xbdi->xbdi_cont = NULL;
 
 	return xbdi;
 }
 
 /*
  * Frontend requested a cache flush operation.
  */
 static void *
@@ -1116,32 +1111,32 @@ xbdback_co_cache_flush(struct xbdback_in
 		return NULL;
 	}
 	xbdi->xbdi_cont = xbdback_co_cache_doflush;
 	return xbdback_pool_get(&xbdback_io_pool, xbdi);
 }
 
 /* Start the flush work */
 static void *
 xbdback_co_cache_doflush(struct xbdback_instance *xbdi, void *obj)
 {
 	struct xbdback_io *xbd_io;
 
 	XENPRINTF(("xbdback_co_cache_doflush %p %p\n", xbdi, obj));
-	xbd_io = xbdi->xbdi_io = obj;
+	xbd_io = obj;
 	xbd_io->xio_xbdi = xbdi;
 	xbd_io->xio_operation = xbdi->xbdi_xen_req.operation;
 	xbd_io->xio_id = xbdi->xbdi_xen_req.id;
 	xbdi->xbdi_cont = xbdback_co_do_io;
-	return xbdi;
+	return xbd_io;
 }
 
 /*
  * A read or write I/O request must be processed. Do some checks first,
  * then get the segment information directly from the ring request.
  */
 static void *
 xbdback_co_io(struct xbdback_instance *xbdi, void *obj __unused)
 {
 	int i, error;
 	blkif_request_t *req, *reqn;
 	blkif_x86_32_request_t *req32;
 	blkif_x86_64_request_t *req64;
@@ -1225,27 +1220,26 @@ xbdback_co_io(struct xbdback_instance *x
 			if (req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST)
 				goto bad_nr_segments;
 			for (i = 0; i < req->nr_segments; i++)
 				xbdi->xbdi_seg[i] = req64->seg[i];
 			xbdi->xbdi_in_gntref = 0;
 		}
 		break;
 	}
 
 	/* Max value checked already earlier */
 	if (req->nr_segments < 1)
 		goto bad_nr_segments;
 
-	KASSERT(xbdi->xbdi_io == NULL);
 	xbdi->xbdi_cont = xbdback_co_io_gotio;
 	return xbdback_pool_get(&xbdback_io_pool, xbdi);
 
  bad_nr_segments:
 	if (ratecheck(&xbdi->xbdi_lasterr_time, &xbdback_err_intvl)) {
 		printf("%s: invalid number of segments: %d\n",
 		    xbdi->xbdi_name, req->nr_segments);
 	}
 	error = EINVAL;
 	/* FALLTHROUGH */
 
  end:
 	xbdback_send_reply(xbdi, xbdi->xbdi_xen_req.id,
@@ -1258,27 +1252,27 @@ xbdback_co_io(struct xbdback_instance *x
 /* Prepare an I/O buffer for a xbdback instance */
 static void *
 xbdback_co_io_gotio(struct xbdback_instance *xbdi, void *obj)
 {
 	struct xbdback_io *xbd_io;
 	int buf_flags;
 	size_t bcount;
 	blkif_request_t *req;
 
 	xbdi_get(xbdi);
 	atomic_inc_uint(&xbdi->xbdi_pendingreqs);
 
 	req = &xbdi->xbdi_xen_req;
-	xbd_io = xbdi->xbdi_io = obj;
+	xbd_io = obj;
 	memset(xbd_io, 0, sizeof(*xbd_io));
 	buf_init(&xbd_io->xio_buf);
 	xbd_io->xio_xbdi = xbdi;
 	xbd_io->xio_operation = req->operation;
 	xbd_io->xio_id = req->id;
 
 	/* If segments are on an indirect page, copy them now */
 	if (xbdi->xbdi_in_gntref) {
 		gnttab_copy_t gop;
 		paddr_t ma;
 
 		gop.flags = GNTCOPY_source_gref;
 		gop.len = req->nr_segments
@@ -1329,83 +1323,81 @@ xbdback_co_io_gotio(struct xbdback_insta
 	xbd_io->xio_buf.b_cflags = 0;
 	xbd_io->xio_buf.b_oflags = 0;
 	xbd_io->xio_buf.b_iodone = xbdback_iodone;
 	xbd_io->xio_buf.b_proc = NULL;
 	xbd_io->xio_buf.b_vp = xbdi->xbdi_vp;
 	xbd_io->xio_buf.b_objlock = xbdi->xbdi_vp->v_interlock;
 	xbd_io->xio_buf.b_dev = xbdi->xbdi_dev;
 	xbd_io->xio_buf.b_blkno = req->sector_number;
 	xbd_io->xio_buf.b_bcount = bcount;
 	xbd_io->xio_buf.b_data = NULL;
 	xbd_io->xio_buf.b_private = xbd_io;
 
 	xbdi->xbdi_cont = xbdback_co_do_io;
-	return xbdback_map_shm(xbdi->xbdi_io);
+	return xbdback_map_shm(xbd_io);
 }
 
 static void
 xbdback_io_error(struct xbdback_io *xbd_io, int error)
 {
 	xbd_io->xio_buf.b_error = error;
 	xbdback_iodone(&xbd_io->xio_buf);
 }
 
 /*
  * Main xbdback I/O routine. It can either perform a flush operation or
  * schedule a read/write operation.
  */
 static void *
 xbdback_co_do_io(struct xbdback_instance *xbdi, void *obj)
 {
-	struct xbdback_io *xbd_io = xbdi->xbdi_io;
+	struct xbdback_io *xbd_io = obj;
 
 	switch (xbd_io->xio_operation) {
 	case BLKIF_OP_FLUSH_DISKCACHE:
 	{
 		int error;
 		int force = 1;
 
 		error = VOP_IOCTL(xbdi->xbdi_vp, DIOCCACHESYNC, &force, FWRITE,
 		    kauth_cred_get());
 		if (error) {
 			aprint_error("xbdback %s: DIOCCACHESYNC returned %d\n",
 			    xbdi->xbdi_xbusd->xbusd_path, error);
 			if (error == EOPNOTSUPP || error == ENOTTY)
 				error = BLKIF_RSP_EOPNOTSUPP;
 			else
 				error = BLKIF_RSP_ERROR;
 		} else
 			error = BLKIF_RSP_OKAY;
 		xbdback_send_reply(xbdi, xbd_io->xio_id,
 		    xbd_io->xio_operation, error);
 		xbdback_pool_put(&xbdback_io_pool, xbd_io);
 		xbdi_put(xbdi);
-		xbdi->xbdi_io = NULL;
 		xbdi->xbdi_cont = xbdback_co_main_incr;
 		return xbdi;
 	}
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 		xbd_io->xio_buf.b_data = (void *)
 		    (xbd_io->xio_vaddr + xbd_io->xio_start_offset);
 
 		if ((xbd_io->xio_buf.b_flags & B_READ) == 0) {
 			mutex_enter(xbd_io->xio_buf.b_vp->v_interlock);
 			xbd_io->xio_buf.b_vp->v_numoutput++;
 			mutex_exit(xbd_io->xio_buf.b_vp->v_interlock);
 		}
 		/* will call xbdback_iodone() asynchronously when done */
 		bdev_strategy(&xbd_io->xio_buf);
-		xbdi->xbdi_io = NULL;
 		xbdi->xbdi_cont = xbdback_co_main_incr;
 		return xbdi;
 	default:
 		/* Should never happen */
 		panic("xbdback_co_do_io: unsupported operation %d",
 		    xbd_io->xio_operation);
 	}
 }
 
 /*
  * Called from softint(9) context when an I/O is done: for each request, send
  * back the associated reply to the domain.
  *
@@ -1543,36 +1535,36 @@ xbdback_map_shm(struct xbdback_io *xbd_i
 	error = xen_shm_map(xbd_io->xio_nrma, xbdi->xbdi_domid,
 	    xbd_io->xio_gref, xbd_io->xio_vaddr, xbd_io->xio_gh,
 	    (xbd_io->xio_operation == BLKIF_OP_WRITE) ? XSHM_RO : 0);
 
 	switch(error) {
 	case 0:
 #ifdef XENDEBUG_VBD
 		printf("handle ");
 		for (i = 0; i < xbd_io->xio_nrma; i++) {
 			printf("%u ", (u_int)xbd_io->xio_gh[i]);
 		}
 		printf("\n");
 #endif
-		return xbdi;
+		return xbd_io;
 	default:
 		if (ratecheck(&xbdi->xbdi_lasterr_time, &xbdback_err_intvl)) {
 			printf("xbdback_map_shm: xen_shm error %d ", error);
 		}
-		xbdback_io_error(xbdi->xbdi_io, error);
+		/* this will also free xbd_io via xbdback_iodone() */
+		xbdback_io_error(xbd_io, error);
 		SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, xbd_io->xio_xv, xv_next);
 		xbd_io->xio_xv = NULL;
-		xbdi->xbdi_io = NULL;
-		// do not retry
+		/* do not retry */
 		xbdi->xbdi_cont = xbdback_co_main_incr;
 		return xbdi;
 	}
 }
 
 /* unmap a request from our virtual address space (request is done) */
 static void
 xbdback_unmap_shm(struct xbdback_io *xbd_io)
 {
 	struct xbdback_instance *xbdi = xbd_io->xio_xbdi;
 
 #ifdef XENDEBUG_VBD
 	int i;