Tue Aug 21 18:45:16 2018 UTC (jdolecek)

cvs diff -r1.84 -r1.85 src/sys/arch/xen/xen/xbd_xenbus.c

--- src/sys/arch/xen/xen/xbd_xenbus.c 2018/08/21 18:31:55 1.84
+++ src/sys/arch/xen/xen/xbd_xenbus.c 2018/08/21 18:45:16 1.85
@@ -1,14 +1,14 @@
-/*	$NetBSD: xbd_xenbus.c,v 1.84 2018/08/21 18:31:55 jdolecek Exp $	*/
+/*	$NetBSD: xbd_xenbus.c,v 1.85 2018/08/21 18:45:16 jdolecek Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -40,27 +40,27 @@
  * - initiate request: xbdread/write/open/ioctl/..
  * - depending on operation, it is handled directly by disk(9) subsystem or
  *   goes through physio(9) first.
  * - the request is ultimately processed by xbd_diskstart() that prepares the
  *   xbd requests, post them in the ring I/O queue, then signal the backend.
  *
  * When a response is available in the queue, the backend signals the frontend
  * via its event channel. This triggers xbd_handler(), which will link back
  * the response to its request through the request ID, and mark the I/O as
  * completed.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.84 2018/08/21 18:31:55 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.85 2018/08/21 18:45:16 jdolecek Exp $");
 
 #include "opt_xen.h"
 
 
 #include <sys/param.h>
 #include <sys/buf.h>
 #include <sys/bufq.h>
 #include <sys/device.h>
 #include <sys/disk.h>
 #include <sys/disklabel.h>
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/kernel.h>
@@ -651,56 +651,62 @@ xbd_handler(void *arg)
 	int more_to_do;
 	int seg;
 
 	DPRINTF(("xbd_handler(%s)\n", device_xname(sc->sc_dksc.sc_dev)));
 
 	if (__predict_false(sc->sc_backend_status != BLKIF_STATE_CONNECTED))
 		return 0;
 again:
 	resp_prod = sc->sc_ring.sring->rsp_prod;
 	xen_rmb(); /* ensure we see replies up to resp_prod */
 	for (i = sc->sc_ring.rsp_cons; i != resp_prod; i++) {
 		blkif_response_t *rep = RING_GET_RESPONSE(&sc->sc_ring, i);
 		struct xbd_req *xbdreq = &sc->sc_reqs[rep->id];
-		bp = xbdreq->req_bp;
-		DPRINTF(("xbd_handler(%p): b_bcount = %ld\n",
-		    xbdreq->req_bp, (long)bp->b_bcount));
+
 		if (rep->operation == BLKIF_OP_FLUSH_DISKCACHE) {
+			KASSERT(xbdreq->req_bp == NULL);
 			xbdreq->req_sync.s_error = rep->status;
 			xbdreq->req_sync.s_done = 1;
 			wakeup(xbdreq); /* XXXSMP */
 			/* caller will free the req */
 			continue;
 		}
+
+		if (rep->operation != BLKIF_OP_READ &&
+		    rep->operation != BLKIF_OP_WRITE) {
+			aprint_error_dev(sc->sc_dksc.sc_dev,
+			    "bad operation %d from backend\n", rep->operation);
+			continue;
+		}
+
 		for (seg = xbdreq->req_nr_segments - 1; seg >= 0; seg--) {
 			if (__predict_false(
 			    xengnt_status(xbdreq->req_gntref[seg]))) {
 				aprint_verbose_dev(sc->sc_dksc.sc_dev,
 				    "grant still used by backend\n");
 				sc->sc_ring.rsp_cons = i;
 				xbdreq->req_nr_segments = seg + 1;
 				goto done;
 			}
 			xengnt_revoke_access(xbdreq->req_gntref[seg]);
 			xbdreq->req_nr_segments--;
 		}
-		if (rep->operation != BLKIF_OP_READ &&
-		    rep->operation != BLKIF_OP_WRITE) {
-			aprint_error_dev(sc->sc_dksc.sc_dev,
-			    "bad operation %d from backend\n", rep->operation);
-			bp->b_error = EIO;
-			bp->b_resid = bp->b_bcount;
-			goto next;
-		}
+		KASSERT(xbdreq->req_nr_segments == 0);
+
+		bp = xbdreq->req_bp;
+		xbdreq->req_bp = NULL;
+		DPRINTF(("%s(%p): b_bcount = %ld\n", __func__,
+		    bp, (long)bp->b_bcount));
+
 		if (rep->status != BLKIF_RSP_OKAY) {
 			bp->b_error = EIO;
 			bp->b_resid = bp->b_bcount;
 			goto next;
 		}
 		/* b_resid was set in dk_start */
 next:
 		dk_done(&sc->sc_dksc, bp);
 
 		SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq, req_next);
 	}
 done:
 	xen_rmb();
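
As context for the hunk above: xbd_handler() is a single-consumer loop over a Xen
shared ring, reading the producer index (rsp_prod), issuing a read barrier, and
draining responses up to that index. Below is a minimal, self-contained user-space
sketch of that rsp_prod/rsp_cons handshake. All names here (demo_ring, demo_consume,
RING_SLOT) are hypothetical, and C11 acquire/release atomics stand in for the
driver's xen_rmb() and the Xen RING_* macros; this illustrates only the re-check
pattern behind the "again:" label, not the real ring implementation.

/*
 * Minimal sketch of the consumer side of a shared ring, modelled
 * loosely on the rsp_prod/rsp_cons handshake in xbd_handler() above.
 * Hypothetical names throughout; the real driver uses the Xen RING_*
 * macros and xen_rmb(), not C11 atomics.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	16		/* must be a power of two */
#define RING_SLOT(i)	((i) & (RING_SIZE - 1))

struct demo_ring {
	_Atomic uint32_t rsp_prod;	/* advanced by the producer (backend) */
	uint32_t rsp_cons;		/* private to the consumer (frontend) */
	int resp[RING_SIZE];		/* response status slots */
};

/* Drain every response currently visible; return how many were handled. */
static unsigned
demo_consume(struct demo_ring *r)
{
	unsigned handled = 0;
	uint32_t prod;

again:
	/*
	 * The acquire load plays the role of the xen_rmb() that follows
	 * the read of rsp_prod: every slot the producer filled before
	 * publishing 'prod' is guaranteed to be visible below.
	 */
	prod = atomic_load_explicit(&r->rsp_prod, memory_order_acquire);
	while (r->rsp_cons != prod) {
		int status = r->resp[RING_SLOT(r->rsp_cons)];

		printf("response %u: status %d\n",
		    (unsigned)r->rsp_cons, status);
		r->rsp_cons++;
		handled++;
	}
	/* The producer may have posted more while we were draining. */
	if (atomic_load_explicit(&r->rsp_prod, memory_order_acquire) != prod)
		goto again;
	return handled;
}

int
main(void)
{
	struct demo_ring r = { .rsp_cons = 0 };

	/* Pretend the backend posted three successful responses. */
	for (uint32_t i = 0; i < 3; i++)
		r.resp[RING_SLOT(i)] = 0;	/* 0 ~ BLKIF_RSP_OKAY */
	atomic_store_explicit(&r.rsp_prod, 3, memory_order_release);

	printf("handled %u responses\n", demo_consume(&r));
	return 0;
}

The final re-check of rsp_prod before returning mirrors the driver's
more_to_do/goto again logic: a response the backend posts after the first read
would otherwise sit unhandled until the next event-channel notification.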