Fri May 25 15:03:38 2012 UTC, by elric
Update xbd_xenbus.c to new usage of routines in dksubr.c.


cvs diff -r1.56 -r1.57 src/sys/arch/xen/xen/xbd_xenbus.c
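
The heart of this revision is a layout change: struct dk_softc is now embedded as the first member of the driver softc, so the dk_softc pointer that the dksubr.c helpers hand back to driver callbacks (see the new xbdstart() below) can simply be cast back to the xbd softc, and the old sc_dev and sc_di back-pointer fields go away. Here is a minimal userland sketch of that idiom; the structures and fields are illustrative stand-ins, not the real kernel definitions:

	#include <assert.h>
	#include <stdio.h>

	/* Stand-ins for the kernel structures; fields are illustrative. */
	struct dk_softc {
		const char *sc_xname;		/* disk name, e.g. "xbd0" */
	};

	struct xbd_xenbus_softc {
		struct dk_softc sc_dksc;	/* must be first: enables the cast */
		int sc_backend_status;		/* driver-private state */
	};

	/* A dksubr.c-style callback sees only the generic dk_softc... */
	static void
	start_cb(struct dk_softc *dksc)
	{
		/*
		 * ...and recovers the driver softc by casting: a pointer to
		 * a struct and a pointer to its first member are
		 * interchangeable, so no back-pointer needs to be stored.
		 */
		struct xbd_xenbus_softc *sc = (struct xbd_xenbus_softc *)dksc;

		printf("%s: backend status %d\n", dksc->sc_xname,
		    sc->sc_backend_status);
	}

	int
	main(void)
	{
		struct xbd_xenbus_softc sc = {
			.sc_dksc.sc_xname = "xbd0",
			.sc_backend_status = 1,
		};

		/* The generic layer is only ever handed &sc.sc_dksc. */
		assert((void *)&sc == (void *)&sc.sc_dksc);
		start_cb(&sc.sc_dksc);
		return 0;
	}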

--- src/sys/arch/xen/xen/xbd_xenbus.c 2012/02/22 16:53:46 1.56
+++ src/sys/arch/xen/xen/xbd_xenbus.c 2012/05/25 15:03:38 1.57
@@ -1,14 +1,14 @@
-/*	$NetBSD: xbd_xenbus.c,v 1.56 2012/02/22 16:53:46 jakllsch Exp $	*/
+/*	$NetBSD: xbd_xenbus.c,v 1.57 2012/05/25 15:03:38 elric Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -40,27 +40,27 @@
  * - initiate request: xbdread/write/open/ioctl/..
  * - depending on operation, it is handled directly by disk(9) subsystem or
  *   goes through physio(9) first.
  * - the request is ultimately processed by xbdstart() that prepares the
  *   xbd requests, post them in the ring I/O queue, then signal the backend.
  *
  * When a response is available in the queue, the backend signals the frontend
  * via its event channel. This triggers xbd_handler(), which will link back
  * the response to its request through the request ID, and mark the I/O as
  * completed.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.56 2012/02/22 16:53:46 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.57 2012/05/25 15:03:38 elric Exp $");
 
 #include "opt_xen.h"
 
 
 #include <sys/param.h>
 #include <sys/buf.h>
 #include <sys/bufq.h>
 #include <sys/device.h>
 #include <sys/disk.h>
 #include <sys/disklabel.h>
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/kernel.h>
@@ -112,29 +112,27 @@ struct xbd_req {
 	struct {
 		int s_error;
 		volatile int s_done;
 	} req_sync;
 	} u;
 };
 #define req_gntref u.req_rw.req_gntref
 #define req_nr_segments u.req_rw.req_nr_segments
 #define req_bp u.req_rw.req_bp
 #define req_data u.req_rw.req_data
 #define req_sync u.req_sync
 
 struct xbd_xenbus_softc {
-	device_t sc_dev;
-	struct dk_softc sc_dksc;
-	struct dk_intf *sc_di;
+	struct dk_softc sc_dksc;	/* Must be first in this struct */
 	struct xenbus_device *sc_xbusd;
 
 	blkif_front_ring_t sc_ring;
 
 	unsigned int sc_evtchn;
 
 	grant_ref_t sc_ring_gntref;
 
 	struct xbd_req sc_reqs[XBD_RING_SIZE];
 	SLIST_HEAD(,xbd_req) sc_xbdreq_head; /* list of free requests */
 	bool sc_xbdreq_wait; /* special waiting on xbd_req */
 
 	int sc_backend_status; /* our status with backend */
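
With sc_dksc first, the device_t now lives inside the generic dk_softc state, which is why every device_xname() and aprint_*_dev() call site in the hunks below moves from sc->sc_dev to sc->sc_dksc.sc_dev; the sketch after the commit message shows why first-member placement is what makes the pointer cast in xbdstart() safe.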
@@ -202,27 +200,27 @@ const struct cdevsw xbd_cdevsw = {
 	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
 };
 
 extern struct cfdriver xbd_cd;
 
 /* Pseudo-disk Interface */
 static struct dk_intf dkintf_esdi = {
 	DTYPE_ESDI,
 	"Xen Virtual ESDI",
 	xbdopen,
 	xbdclose,
 	xbdstrategy,
 	xbdstart,
-};
+}, *di = &dkintf_esdi;
 
 static struct dkdriver xbddkdriver = {
 	.d_strategy = xbdstrategy,
 	.d_minphys = xbdminphys,
};
 
 static int
 xbd_xenbus_match(device_t parent, cfdata_t match, void *aux)
 {
 	struct xenbusdev_attach_args *xa = aux;
 
 	if (strcmp(xa->xa_type, "vbd") != 0)
 		return 0;
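
Hanging *di off the dkintf_esdi definition gives the file a single shared pointer to the pseudo-disk interface. Since every xbd instance uses the same dk_intf, this replaces the per-softc sc_di field deleted above, and the dk_*() call sites below shrink from dk_foo(sc->sc_di, ...) to dk_foo(di, ...).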
@@ -241,56 +239,55 @@ xbd_xenbus_attach(device_t parent, devic
 	struct xenbusdev_attach_args *xa = aux;
 	blkif_sring_t *ring;
 	RING_IDX i;
 #ifdef XBD_DEBUG
 	char **dir, *val;
 	int dir_n = 0;
 	char id_str[20];
 	int err;
 #endif
 
 	config_pending_incr();
 	aprint_normal(": Xen Virtual Block Device Interface\n");
 
-	sc->sc_dev = self;
+	dk_sc_init(&sc->sc_dksc, device_xname(self));
+	sc->sc_dksc.sc_dev = self;
 
 #ifdef XBD_DEBUG
 	printf("path: %s\n", xa->xa_xbusd->xbusd_path);
 	snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
 	err = xenbus_directory(NULL, "device/vbd", id_str, &dir_n, &dir);
 	if (err) {
 		aprint_error_dev(self, "xenbus_directory err %d\n", err);
 	} else {
 		printf("%s/\n", xa->xa_xbusd->xbusd_path);
 		for (i = 0; i < dir_n; i++) {
 			printf("\t/%s", dir[i]);
 			err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path,
 			    dir[i], NULL, &val);
 			if (err) {
 				aprint_error_dev(self, "xenbus_read err %d\n",
 				    err);
 			} else {
 				printf(" = %s\n", val);
 				free(val, M_DEVBUF);
 			}
 		}
 	}
 #endif /* XBD_DEBUG */
 	sc->sc_xbusd = xa->xa_xbusd;
 	sc->sc_xbusd->xbusd_otherend_changed = xbd_backend_changed;
 
-	dk_sc_init(&sc->sc_dksc, sc, device_xname(self));
 	disk_init(&sc->sc_dksc.sc_dkdev, device_xname(self), &xbddkdriver);
-	sc->sc_di = &dkintf_esdi;
 	/* initialize free requests list */
 	SLIST_INIT(&sc->sc_xbdreq_head);
 	for (i = 0; i < XBD_RING_SIZE; i++) {
 		sc->sc_reqs[i].req_id = i;
 		SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, &sc->sc_reqs[i],
 		    req_next);
 	}
 
 	sc->sc_backend_status = BLKIF_STATE_DISCONNECTED;
 	sc->sc_shutdown = BLKIF_SHUTDOWN_REMOTE;
 
 	ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
 	if (ring == NULL)
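
Taken together, the attach-path edits reduce to the before/after shown here (extracted from the hunk above; the call sites show that dk_sc_init() no longer takes the driver-private back-pointer argument, and the device_t is stored in the embedded dk_softc instead):

	/* rev 1.56: generic layer kept back-pointers to the driver */
	sc->sc_dev = self;
	...
	dk_sc_init(&sc->sc_dksc, sc, device_xname(self));
	disk_init(&sc->sc_dksc.sc_dkdev, device_xname(self), &xbddkdriver);
	sc->sc_di = &dkintf_esdi;

	/* rev 1.57: the struct layout replaces the back-pointers */
	dk_sc_init(&sc->sc_dksc, device_xname(self));
	sc->sc_dksc.sc_dev = self;
	...
	disk_init(&sc->sc_dksc.sc_dkdev, device_xname(self), &xbddkdriver);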
@@ -511,27 +508,27 @@ abort_transaction:
 	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
 	return false;
 }
 
 static void xbd_backend_changed(void *arg, XenbusState new_state)
 {
 	struct xbd_xenbus_softc *sc = device_private((device_t)arg);
 	struct dk_geom *pdg;
 	prop_dictionary_t disk_info, odisk_info, geom;
 
 	char buf[9];
 	int s;
 	DPRINTF(("%s: new backend state %d\n",
-	    device_xname(sc->sc_dev), new_state));
+	    device_xname(sc->sc_dksc.sc_dev), new_state));
 
 	switch (new_state) {
 	case XenbusStateUnknown:
 	case XenbusStateInitialising:
 	case XenbusStateInitWait:
 	case XenbusStateInitialised:
 		break;
 	case XenbusStateClosing:
 		s = splbio();
 		if (sc->sc_shutdown == BLKIF_SHUTDOWN_RUN)
 			sc->sc_shutdown = BLKIF_SHUTDOWN_REMOTE;
 		/* wait for requests to complete */
 		while (sc->sc_backend_status == BLKIF_STATE_CONNECTED &&
@@ -561,49 +558,49 @@ static void xbd_backend_changed(void *ar
 		pdg = &sc->sc_dksc.sc_geom;
 		pdg->pdg_secsize = DEV_BSIZE;
 		pdg->pdg_ntracks = 1;
 		pdg->pdg_nsectors = 1024 * (1024 / pdg->pdg_secsize);
 		pdg->pdg_ncylinders = sc->sc_dksc.sc_size / pdg->pdg_nsectors;
 
 		bufq_alloc(&sc->sc_dksc.sc_bufq, "fcfs", 0);
 		sc->sc_dksc.sc_flags |= DKF_INITED;
 		disk_attach(&sc->sc_dksc.sc_dkdev);
 
 		sc->sc_backend_status = BLKIF_STATE_CONNECTED;
 
 		/* try to read the disklabel */
-		dk_getdisklabel(sc->sc_di, &sc->sc_dksc, 0 /* XXX ? */);
+		dk_getdisklabel(di, &sc->sc_dksc, 0 /* XXX ? */);
 		format_bytes(buf, sizeof(buf), sc->sc_sectors * sc->sc_secsize);
-		aprint_verbose_dev(sc->sc_dev,
+		aprint_verbose_dev(sc->sc_dksc.sc_dev,
 		    "%s, %d bytes/sect x %" PRIu64 " sectors\n",
 		    buf, (int)pdg->pdg_secsize, sc->sc_xbdsize);
 		/* Discover wedges on this disk. */
 		dkwedge_discover(&sc->sc_dksc.sc_dkdev);
 
 		disk_info = prop_dictionary_create();
 		geom = prop_dictionary_create();
 		prop_dictionary_set_uint64(geom, "sectors-per-unit",
 		    sc->sc_dksc.sc_size);
 		prop_dictionary_set_uint32(geom, "sector-size",
 		    pdg->pdg_secsize);
 		prop_dictionary_set_uint16(geom, "sectors-per-track",
 		    pdg->pdg_nsectors);
 		prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
 		    pdg->pdg_ntracks);
 		prop_dictionary_set_uint64(geom, "cylinders-per-unit",
 		    pdg->pdg_ncylinders);
 		prop_dictionary_set(disk_info, "geometry", geom);
 		prop_object_release(geom);
-		prop_dictionary_set(device_properties(sc->sc_dev),
+		prop_dictionary_set(device_properties(sc->sc_dksc.sc_dev),
 		    "disk-info", disk_info);
 		/*
 		 * Don't release disk_info here; we keep a reference to it.
 		 * disk_detach() will release it when we go away.
 		 */
 		odisk_info = sc->sc_dksc.sc_dkdev.dk_info;
 		sc->sc_dksc.sc_dkdev.dk_info = disk_info;
 		if (odisk_info)
 			prop_object_release(odisk_info);
 
 		/* the disk should be working now */
 		config_pending_decr();
 		break;
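
A quick check on the synthetic geometry set up in this hunk: pdg_secsize is DEV_BSIZE (512 bytes), so pdg_nsectors = 1024 * (1024 / 512) = 2048 sectors per cylinder, i.e. each fake cylinder spans exactly 1 MiB, and pdg_ncylinders is simply the device size counted in 2048-sector units. Only the total size is real; the track/cylinder figures are placeholders for disklabel consumers.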
@@ -613,101 +610,105 @@ static void xbd_backend_changed(void *ar
 }
 
 static void
 xbd_connect(struct xbd_xenbus_softc *sc)
 {
 	int err;
 	unsigned long long sectors;
 	u_long cache_flush;
 
 	err = xenbus_read_ul(NULL,
 	    sc->sc_xbusd->xbusd_path, "virtual-device", &sc->sc_handle, 10);
 	if (err)
-		panic("%s: can't read number from %s/virtual-device\n", 
-		    device_xname(sc->sc_dev), sc->sc_xbusd->xbusd_otherend);
+		panic("%s: can't read number from %s/virtual-device\n",
+		    device_xname(sc->sc_dksc.sc_dev),
+		    sc->sc_xbusd->xbusd_otherend);
 	err = xenbus_read_ull(NULL,
 	    sc->sc_xbusd->xbusd_otherend, "sectors", &sectors, 10);
 	if (err)
-		panic("%s: can't read number from %s/sectors\n", 
-		    device_xname(sc->sc_dev), sc->sc_xbusd->xbusd_otherend);
+		panic("%s: can't read number from %s/sectors\n",
+		    device_xname(sc->sc_dksc.sc_dev),
+		    sc->sc_xbusd->xbusd_otherend);
 	sc->sc_sectors = sectors;
 
 	err = xenbus_read_ul(NULL,
 	    sc->sc_xbusd->xbusd_otherend, "info", &sc->sc_info, 10);
 	if (err)
-		panic("%s: can't read number from %s/info\n", 
-		    device_xname(sc->sc_dev), sc->sc_xbusd->xbusd_otherend);
+		panic("%s: can't read number from %s/info\n",
+		    device_xname(sc->sc_dksc.sc_dev),
+		    sc->sc_xbusd->xbusd_otherend);
 	err = xenbus_read_ul(NULL,
 	    sc->sc_xbusd->xbusd_otherend, "sector-size", &sc->sc_secsize, 10);
 	if (err)
-		panic("%s: can't read number from %s/sector-size\n", 
-		    device_xname(sc->sc_dev), sc->sc_xbusd->xbusd_otherend);
+		panic("%s: can't read number from %s/sector-size\n",
+		    device_xname(sc->sc_dksc.sc_dev),
+		    sc->sc_xbusd->xbusd_otherend);
 
 	err = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend,
 	    "feature-flush-cache", &cache_flush, 10);
 	if (err)
 		cache_flush = 0;
 	if (cache_flush > 0)
 		sc->sc_cache_flush = 1;
 	else
 		sc->sc_cache_flush = 0;
 
 	xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateConnected);
 }
 
 static int
 xbd_handler(void *arg)
 {
 	struct xbd_xenbus_softc *sc = arg;
 	struct buf *bp;
 	RING_IDX resp_prod, i;
 	int more_to_do;
 	int seg;
 
-	DPRINTF(("xbd_handler(%s)\n", device_xname(sc->sc_dev)));
+	DPRINTF(("xbd_handler(%s)\n", device_xname(sc->sc_dksc.sc_dev)));
 
 	if (__predict_false(sc->sc_backend_status != BLKIF_STATE_CONNECTED))
 		return 0;
again:
 	resp_prod = sc->sc_ring.sring->rsp_prod;
 	xen_rmb(); /* ensure we see replies up to resp_prod */
 	for (i = sc->sc_ring.rsp_cons; i != resp_prod; i++) {
 		blkif_response_t *rep = RING_GET_RESPONSE(&sc->sc_ring, i);
 		struct xbd_req *xbdreq = &sc->sc_reqs[rep->id];
 		DPRINTF(("xbd_handler(%p): b_bcount = %ld\n",
 		    xbdreq->req_bp, (long)bp->b_bcount));
 		bp = xbdreq->req_bp;
 		if (rep->operation == BLKIF_OP_FLUSH_DISKCACHE) {
 			xbdreq->req_sync.s_error = rep->status;
 			xbdreq->req_sync.s_done = 1;
 			wakeup(xbdreq);
 			/* caller will free the req */
 			continue;
 		}
 		for (seg = xbdreq->req_nr_segments - 1; seg >= 0; seg--) {
 			if (__predict_false(
 			    xengnt_status(xbdreq->req_gntref[seg]))) {
-				aprint_verbose_dev(sc->sc_dev,
+				aprint_verbose_dev(sc->sc_dksc.sc_dev,
 				    "grant still used by backend\n");
 				sc->sc_ring.rsp_cons = i;
 				xbdreq->req_nr_segments = seg + 1;
 				goto done;
 			}
 			xengnt_revoke_access(xbdreq->req_gntref[seg]);
 			xbdreq->req_nr_segments--;
 		}
 		if (rep->operation != BLKIF_OP_READ &&
 		    rep->operation != BLKIF_OP_WRITE) {
-			aprint_error_dev(sc->sc_dev,
+			aprint_error_dev(sc->sc_dksc.sc_dev,
 			    "bad operation %d from backend\n", rep->operation);
 			bp->b_error = EIO;
 			bp->b_resid = bp->b_bcount;
 			goto next;
 		}
 		if (rep->status != BLKIF_RSP_OKAY) {
 			bp->b_error = EIO;
 			bp->b_resid = bp->b_bcount;
 			goto next;
 		}
 		/* b_resid was set in xbdstart */
next:
 		if (bp->b_data != xbdreq->req_data)
@@ -718,104 +719,104 @@ next:
 		rnd_add_uint32(&sc->sc_rnd_source,
 		    bp->b_blkno);
 		biodone(bp);
 		SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq, req_next);
 	}
done:
 	xen_rmb();
 	sc->sc_ring.rsp_cons = i;
 
 	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_ring, more_to_do);
 	if (more_to_do)
 		goto again;
 
-	dk_iodone(sc->sc_di, &sc->sc_dksc);
+	dk_iodone(di, &sc->sc_dksc);
 	if (sc->sc_xbdreq_wait)
 		wakeup(&sc->sc_xbdreq_wait);
 	return 1;
 }
 
 static void
 xbdminphys(struct buf *bp)
 {
 	if (bp->b_bcount > XBD_MAX_XFER) {
 		bp->b_bcount = XBD_MAX_XFER;
 	}
 	minphys(bp);
 }
 
 int
 xbdopen(dev_t dev, int flags, int fmt, struct lwp *l)
 {
 	struct xbd_xenbus_softc *sc;
 
 	sc = device_lookup_private(&xbd_cd, DISKUNIT(dev));
 	if (sc == NULL)
 		return (ENXIO);
 	if ((flags & FWRITE) && (sc->sc_info & VDISK_READONLY))
 		return EROFS;
 
 	DPRINTF(("xbdopen(0x%04x, %d)\n", dev, flags));
-	return dk_open(sc->sc_di, &sc->sc_dksc, dev, flags, fmt, l);
+	return dk_open(di, &sc->sc_dksc, dev, flags, fmt, l);
 }
 
 int
 xbdclose(dev_t dev, int flags, int fmt, struct lwp *l)
 {
 	struct xbd_xenbus_softc *sc;
 
 	sc = device_lookup_private(&xbd_cd, DISKUNIT(dev));
 
 	DPRINTF(("xbdclose(%d, %d)\n", dev, flags));
-	return dk_close(sc->sc_di, &sc->sc_dksc, dev, flags, fmt, l);
+	return dk_close(di, &sc->sc_dksc, dev, flags, fmt, l);
 }
 
 void
 xbdstrategy(struct buf *bp)
 {
 	struct xbd_xenbus_softc *sc;
 
 	sc = device_lookup_private(&xbd_cd, DISKUNIT(bp->b_dev));
 
 	DPRINTF(("xbdstrategy(%p): b_bcount = %ld\n", bp,
 	    (long)bp->b_bcount));
 
 	if (sc == NULL || sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
 		bp->b_error = EIO;
 		biodone(bp);
 		return;
 	}
 	if (__predict_false((sc->sc_info & VDISK_READONLY) &&
 	    (bp->b_flags & B_READ) == 0)) {
 		bp->b_error = EROFS;
 		biodone(bp);
 		return;
 	}
 
-	dk_strategy(sc->sc_di, &sc->sc_dksc, bp);
+	dk_strategy(di, &sc->sc_dksc, bp);
 	return;
 }
 
 int
 xbdsize(dev_t dev)
 {
 	struct xbd_xenbus_softc *sc;
 
 	DPRINTF(("xbdsize(%d)\n", dev));
 
 	sc = device_lookup_private(&xbd_cd, DISKUNIT(dev));
 	if (sc == NULL || sc->sc_shutdown != BLKIF_SHUTDOWN_RUN)
 		return -1;
-	return dk_size(sc->sc_di, &sc->sc_dksc, dev);
+	return dk_size(di, &sc->sc_dksc, dev);
 }
 
 int
 xbdread(dev_t dev, struct uio *uio, int flags)
 {
 	struct xbd_xenbus_softc *sc =
 	    device_lookup_private(&xbd_cd, DISKUNIT(dev));
 	struct dk_softc *dksc = &sc->sc_dksc;
 
 	if ((dksc->sc_flags & DKF_INITED) == 0)
 		return ENXIO;
 	return physio(xbdstrategy, NULL, dev, B_READ, xbdminphys, uio);
 }
@@ -853,27 +854,27 @@ xbdioctl(dev_t dev, u_long cmd, void *da
 	dk = &dksc->sc_dkdev;
 
 	error = disk_ioctl(&sc->sc_dksc.sc_dkdev, cmd, data, flag, l);
 	if (error != EPASSTHROUGH)
 		return (error);
 
 	switch (cmd) {
 	case DIOCSSTRATEGY:
 		error = EOPNOTSUPP;
 		break;
 	case DIOCCACHESYNC:
 		if (sc->sc_cache_flush <= 0) {
 			if (sc->sc_cache_flush == 0) {
-				aprint_error_dev(sc->sc_dev,
+				aprint_error_dev(sc->sc_dksc.sc_dev,
 				    "WARNING: cache flush not supported "
 				    "by backend\n");
 				sc->sc_cache_flush = -1;
 			}
 			return EOPNOTSUPP;
 		}
 
 		s = splbio();
 
 		while (RING_FULL(&sc->sc_ring)) {
 			sc->sc_xbdreq_wait = 1;
 			tsleep(&sc->sc_xbdreq_wait, PRIBIO, "xbdreq", 0);
 		}
@@ -903,97 +904,94 @@ xbdioctl(dev_t dev, u_long cmd, void *da
 			if (xbdreq->req_sync.s_error == BLKIF_RSP_EOPNOTSUPP)
 				error = EOPNOTSUPP;
 			else if (xbdreq->req_sync.s_error == BLKIF_RSP_OKAY)
 				error = 0;
 			else
 				error = EIO;
 			SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq,
 			    req_next);
 		}
 		splx(s);
 		break;
 
 	default:
-		error = dk_ioctl(sc->sc_di, dksc, dev, cmd, data, flag, l);
+		error = dk_ioctl(di, dksc, dev, cmd, data, flag, l);
 		break;
 	}
 
 	return error;
 }
 
 int
 xbddump(dev_t dev, daddr_t blkno, void *va, size_t size)
 {
 	struct xbd_xenbus_softc *sc;
 
 	sc = device_lookup_private(&xbd_cd, DISKUNIT(dev));
 	if (sc == NULL)
 		return (ENXIO);
 
 	DPRINTF(("xbddump(%d, %" PRId64 ", %p, %lu)\n", dev, blkno, va,
 	    (unsigned long)size));
-	return dk_dump(sc->sc_di, &sc->sc_dksc, dev, blkno, va, size);
+	return dk_dump(di, &sc->sc_dksc, dev, blkno, va, size);
 }
 
 static int
 xbdstart(struct dk_softc *dksc, struct buf *bp)
 {
-	struct xbd_xenbus_softc *sc;
+	struct xbd_xenbus_softc *sc = (struct xbd_xenbus_softc *)dksc;
 	struct xbd_req *xbdreq;
 	blkif_request_t *req;
 	int ret = 0, runqueue = 1;
 	size_t bcount, off;
 	paddr_t ma;
 	vaddr_t va;
 	int nsects, nbytes, seg;
 	int notify;
 
 	DPRINTF(("xbdstart(%p): b_bcount = %ld\n", bp, (long)bp->b_bcount));
 
-	sc = device_lookup_private(&xbd_cd, DISKUNIT(bp->b_dev));
-	if (sc == NULL || sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
+	if (sc->sc_shutdown != BLKIF_SHUTDOWN_RUN) {
 		bp->b_error = EIO;
 		goto err;
 	}
 
 	if (bp->b_rawblkno < 0 || bp->b_rawblkno > sc->sc_xbdsize) {
 		/* invalid block number */
 		bp->b_error = EINVAL;
 		goto err;
 	}
 
 	if (bp->b_rawblkno == sc->sc_xbdsize) {
 		/* at end of disk; return short read */
 		bp->b_resid = bp->b_bcount;
 		biodone(bp);
 		return 0;
 	}
 
 	if (__predict_false(sc->sc_backend_status == BLKIF_STATE_SUSPENDED)) {
 		/* device is suspended, do not consume buffer */
 		DPRINTF(("%s: (xbdstart) device suspended\n",
-		    device_xname(sc->sc_dev)));
+		    device_xname(sc->sc_dksc.sc_dev)));
 		ret = -1;
 		goto out;
 	}
 
 	if (RING_FULL(&sc->sc_ring) || sc->sc_xbdreq_wait) {
 		DPRINTF(("xbdstart: ring_full\n"));
 		ret = -1;
 		goto out;
 	}
 
-	dksc = &sc->sc_dksc;
-
 	xbdreq = SLIST_FIRST(&sc->sc_xbdreq_head);
 	if (__predict_false(xbdreq == NULL)) {
 		DPRINTF(("xbdstart: no req\n"));
 		ret = -1; /* dk_start should not remove bp from queue */
 		goto out;
 	}
 
 	xbdreq->req_bp = bp;
 	xbdreq->req_data = bp->b_data;
 	if ((vaddr_t)bp->b_data & (XEN_BSIZE - 1)) {
 		if (__predict_false(xbd_map_align(xbdreq) != 0)) {
 			ret = -1;
 			goto out;
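
This last hunk is where the first-member layout pays off: dk_start() hands xbdstart() the struct dk_softc *, and the cast at the top of the function recovers the driver softc directly (the idiom sketched after the commit message). That makes the device_lookup_private() round trip through the buffer's device number, its NULL check, and the dksc = &sc->sc_dksc reassignment redundant, which is exactly what the hunk deletes.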