Sun Jun 3 19:50:20 2018 UTC
unload payload dma map upon command completion


(jakllsch)
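
The change pairs the bus_dmamap_load() done for each payload in ld_virtio_start() with a matching bus_dmamap_unload() once the device has completed the request, so the per-slot DMA map is released before it is reused for the next buffer. A condensed sketch of the resulting load/unload pairing, excerpted from the two functions in the diff below (unrelated code elided):

	/* ld_virtio_start(): map the buffer before enqueueing the request */
	r = bus_dmamap_load(virtio_dmat(vsc), vr->vr_payload,
			    bp->b_data, bp->b_bcount, NULL,
			    (isread ? BUS_DMA_READ : BUS_DMA_WRITE) | BUS_DMA_NOWAIT);

	/* ld_virtio_vq_done1(): sync, then drop the mapping (added in 1.19) */
	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload, 0, bp->b_bcount,
			(bp->b_flags & B_READ) ? BUS_DMASYNC_POSTREAD
					       : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
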
diff -r1.18 -r1.19 src/sys/dev/pci/ld_virtio.c

cvs diff -r1.18 -r1.19 src/sys/dev/pci/ld_virtio.c (unified diff)

--- src/sys/dev/pci/ld_virtio.c 2018/06/03 19:47:35 1.18
+++ src/sys/dev/pci/ld_virtio.c 2018/06/03 19:50:20 1.19
@@ -1,4 +1,4 @@
-/*	$NetBSD: ld_virtio.c,v 1.18 2018/06/03 19:47:35 jakllsch Exp $	*/
+/*	$NetBSD: ld_virtio.c,v 1.19 2018/06/03 19:50:20 jakllsch Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.18 2018/06/03 19:47:35 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.19 2018/06/03 19:50:20 jakllsch Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -446,7 +446,8 @@
 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_payload,
 			0, bp->b_bcount,
 			(bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD
 					      :BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(virtio_dmat(vsc), vr->vr_payload);
 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t),
 			BUS_DMASYNC_POSTREAD);