Tue Apr 14 14:06:24 2020 UTC
rearrange slightly to do proper b_resid accounting, to prepare for partial
transfers


(jdolecek)
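For context: under NetBSD's buf(9), b_resid counts the bytes of a transfer
that have not been completed. A minimal sketch of the accounting pattern this
change moves to, with hypothetical names (bytes_done, error) standing in for
the driver's actual state:

	/* when the request is queued: the whole transfer is outstanding */
	bp->b_resid = bp->b_bcount;

	/* on completion: subtract what was actually moved; a partial
	 * transfer simply leaves b_resid > 0 */
	if (error) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;	/* nothing transferred */
	} else {
		bp->b_resid -= bytes_done;	/* bytes_done is hypothetical */
	}

Previously the driver pre-set b_resid = 0 in xbd_diskstart() and only
overrode it on error, which cannot express a partially completed request.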
diff -r1.111 -r1.112 src/sys/arch/xen/xen/xbd_xenbus.c


--- src/sys/arch/xen/xen/xbd_xenbus.c 2020/04/14 13:10:43 1.111
+++ src/sys/arch/xen/xen/xbd_xenbus.c 2020/04/14 14:06:24 1.112
@@ -1,14 +1,14 @@
-/* $NetBSD: xbd_xenbus.c,v 1.111 2020/04/14 13:10:43 jdolecek Exp $ */
+/* $NetBSD: xbd_xenbus.c,v 1.112 2020/04/14 14:06:24 jdolecek Exp $ */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
@@ -40,27 +40,27 @@
  * - initiate request: xbdread/write/open/ioctl/..
  * - depending on operation, it is handled directly by disk(9) subsystem or
  *   goes through physio(9) first.
  * - the request is ultimately processed by xbd_diskstart() that prepares the
  *   xbd requests, post them in the ring I/O queue, then signal the backend.
  *
  * When a response is available in the queue, the backend signals the frontend
  * via its event channel. This triggers xbd_handler(), which will link back
  * the response to its request through the request ID, and mark the I/O as
  * completed.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.111 2020/04/14 13:10:43 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbd_xenbus.c,v 1.112 2020/04/14 14:06:24 jdolecek Exp $");
 
 #include "opt_xen.h"
 
 
 #include <sys/param.h>
 #include <sys/buf.h>
 #include <sys/bufq.h>
 #include <sys/device.h>
 #include <sys/disk.h>
 #include <sys/disklabel.h>
 #include <sys/conf.h>
 #include <sys/fcntl.h>
 #include <sys/kernel.h>
@@ -735,53 +735,59 @@ again:
 			xbdreq->req_sync.s_done = 1;
 			cv_broadcast(&sc->sc_cache_flush_cv);
 			/* caller will free the req */
 			continue;
 		}
 
 		if (rep->operation != BLKIF_OP_READ &&
 		    rep->operation != BLKIF_OP_WRITE) {
 			aprint_error_dev(sc->sc_dksc.sc_dev,
 			    "bad operation %d from backend\n", rep->operation);
 			continue;
 		}
 
+		bp = xbdreq->req_bp;
+		KASSERT(bp != NULL && bp->b_data != NULL);
+		DPRINTF(("%s(%p): b_bcount = %ld\n", __func__,
+		    bp, (long)bp->b_bcount));
+
+		if (rep->status != BLKIF_RSP_OKAY) {
+			bp->b_error = EIO;
+			bp->b_resid = bp->b_bcount;
+		} else {
+			KASSERTMSG(xbdreq->req_dmamap->dm_mapsize <=
+			    bp->b_resid, "mapsize %d > b_resid %d",
+			    (int)xbdreq->req_dmamap->dm_mapsize,
+			    (int)bp->b_resid);
+			bp->b_resid -= xbdreq->req_dmamap->dm_mapsize;
+			KASSERT(bp->b_resid == 0);
+		}
+
 		for (seg = 0; seg < xbdreq->req_dmamap->dm_nsegs; seg++) {
 			/*
 			 * We are not allowing persistent mappings, so
 			 * expect the backend to release the grant
 			 * immediately.
 			 */
 			KASSERT(xengnt_status(xbdreq->req_gntref[seg]) == 0);
 			xengnt_revoke_access(xbdreq->req_gntref[seg]);
 		}
 
 		bus_dmamap_unload(sc->sc_xbusd->xbusd_dmat, xbdreq->req_dmamap);
 
-		bp = xbdreq->req_bp;
-		KASSERT(bp != NULL && bp->b_data != NULL);
-		DPRINTF(("%s(%p): b_bcount = %ld\n", __func__,
-		    bp, (long)bp->b_bcount));
-
 		if (__predict_false(bp->b_data != xbdreq->req_data))
 			xbd_unmap_align(sc, xbdreq, true);
 		xbdreq->req_bp = xbdreq->req_data = NULL;
 
-		/* b_resid was set in dk_start, only override on error */
-		if (rep->status != BLKIF_RSP_OKAY) {
-			bp->b_error = EIO;
-			bp->b_resid = bp->b_bcount;
-		}
-
 		dk_done(&sc->sc_dksc, bp);
 
 		SLIST_INSERT_HEAD(&sc->sc_xbdreq_head, xbdreq, req_next);
 	}
 
 	xen_rmb();
 	sc->sc_ring.rsp_cons = i;
 
 	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_ring, more_to_do);
 	if (more_to_do)
 		goto again;
 
 	cv_signal(&sc->sc_req_cv);
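Distilled from the hunk above: the completion handler now derives the
completed byte count from the DMA map and updates b_resid before handing the
buffer back, instead of relying on the value pre-set at submission time. A
sketch of the resulting completion path (the driver's own field names, with
grant/DMA teardown omitted):

	bp = xbdreq->req_bp;
	if (rep->status != BLKIF_RSP_OKAY) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;	/* whole transfer failed */
	} else {
		/* dm_mapsize bytes completed; for now every success is a
		 * full transfer, hence the assertion */
		bp->b_resid -= xbdreq->req_dmamap->dm_mapsize;
		KASSERT(bp->b_resid == 0);
	}
	dk_done(&sc->sc_dksc, bp);

Once partial transfers are actually supported, the final KASSERT would
presumably be relaxed and a nonzero b_resid reported through dk_done().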
@@ -1057,27 +1063,27 @@ xbd_diskstart(device_t self, struct buf
 		error = EINVAL;
 		goto out;
 	}
 
 	/* We are now committed to the transfer */
 	SLIST_REMOVE_HEAD(&sc->sc_xbdreq_head, req_next);
 	req = RING_GET_REQUEST(&sc->sc_ring, sc->sc_ring.req_prod_pvt);
 	req->id = xbdreq->req_id;
 	req->operation =
 	    bp->b_flags & B_READ ? BLKIF_OP_READ : BLKIF_OP_WRITE;
 	req->sector_number = bp->b_rawblkno;
 	req->handle = sc->sc_handle;
 
-	bp->b_resid = 0;
+	bp->b_resid = bp->b_bcount;
 	for (seg = 0; seg < xbdreq->req_dmamap->dm_nsegs; seg++) {
 		bus_dma_segment_t *dmaseg = &xbdreq->req_dmamap->dm_segs[seg];
 
 		ma = dmaseg->ds_addr;
 		off = ma & PAGE_MASK;
 		nbytes = dmaseg->ds_len;
 		nsects = nbytes >> XEN_BSHIFT;
 
 		req->seg[seg].first_sect = off >> XEN_BSHIFT;
 		req->seg[seg].last_sect = (off >> XEN_BSHIFT) + nsects - 1;
 		KASSERT(req->seg[seg].first_sect <= req->seg[seg].last_sect);
 		KASSERT(req->seg[seg].last_sect < (PAGE_SIZE / XEN_BSIZE));
 
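The xbd_diskstart() side is the one-line change above: rather than claiming
completion up front, the request now starts fully outstanding and the handler
subtracts what actually completed. Side by side:

	/* before: b_resid pre-claimed a complete transfer */
	bp->b_resid = 0;

	/* after: everything outstanding until xbd_handler() accounts
	 * for the completed bytes */
	bp->b_resid = bp->b_bcount;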