Tue Oct 4 18:23:24 2016 UTC
react to ADAPTER_REQ_SET_XFER_MODE so that tagged queuing is enabled

pass the tag type and set the request id


(jdolecek)
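
The gist of the change, as a condensed sketch (the helper name
vioscsi_set_xfer_mode is hypothetical; the driver does this inline in
vioscsi_scsipi_request(), as the diff below shows):

	/*
	 * Advertise tagged queuing for the channel.  Reporting
	 * PERIPH_CAP_TQING via ASYNC_EVENT_XFER_MODE is what makes the
	 * scsipi midlayer start issuing tagged commands; virtio-scsi has
	 * no sync/wide transfer parameters, so period and offset stay 0.
	 */
	static void
	vioscsi_set_xfer_mode(struct scsipi_channel *chan, void *arg)
	{
		struct scsipi_xfer_mode *xm = arg;

		xm->xm_mode = PERIPH_CAP_TQING;
		xm->xm_period = 0;
		xm->xm_offset = 0;
		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
	}
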
diff -r1.7 -r1.8 src/sys/dev/pci/vioscsi.c


--- src/sys/dev/pci/vioscsi.c 2016/10/04 18:20:49 1.7
+++ src/sys/dev/pci/vioscsi.c 2016/10/04 18:23:24 1.8
@@ -1,524 +1,559 @@
-/* $NetBSD: vioscsi.c,v 1.7 2016/10/04 18:20:49 jdolecek Exp $ */
+/* $NetBSD: vioscsi.c,v 1.8 2016/10/04 18:23:24 jdolecek Exp $ */
 /* $OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $ */
 
 /*
  * Copyright (c) 2013 Google Inc.
  *
  * Permission to use, copy, modify, and distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  * copyright notice and this permission notice appear in all copies.
  *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.7 2016/10/04 18:20:49 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.8 2016/10/04 18:23:24 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/bus.h>
 #include <sys/buf.h>
 
 #include <dev/pci/pcidevs.h>
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
 #include <dev/pci/vioscsireg.h>
 #include <dev/pci/virtiovar.h>
 
 #include <dev/scsipi/scsi_all.h>
 #include <dev/scsipi/scsiconf.h>
 
 #ifdef VIOSCSI_DEBUG
 static int vioscsi_debug = 1;
 #define DPRINTF(f) do { if (vioscsi_debug) printf f; } while (/*CONSTCOND*/0)
 #else
 #define DPRINTF(f) ((void)0)
 #endif
 
 struct vioscsi_req {
 	struct virtio_scsi_req_hdr vr_req;
 	struct virtio_scsi_res_hdr vr_res;
 	struct scsipi_xfer *vr_xs;
 	bus_dmamap_t vr_control;
 	bus_dmamap_t vr_data;
 };
 
 struct vioscsi_softc {
 	device_t sc_dev;
 	struct scsipi_adapter sc_adapter;
 	struct scsipi_channel sc_channel;
 
 	struct virtqueue sc_vqs[3];
 	struct vioscsi_req *sc_reqs;
 	bus_dma_segment_t sc_reqs_segs[1];
 
 	u_int32_t sc_seg_max;
 };
 
 /*
  * Each block request uses at least two segments - one for the header
  * and one for the status.
 */
 #define VIRTIO_SCSI_MIN_SEGMENTS 2
 
 static int vioscsi_match(device_t, cfdata_t, void *);
 static void vioscsi_attach(device_t, device_t, void *);
 
 static int vioscsi_alloc_reqs(struct vioscsi_softc *,
     struct virtio_softc *, int, uint32_t);
 static void vioscsi_scsipi_request(struct scsipi_channel *,
     scsipi_adapter_req_t, void *);
 static int vioscsi_vq_done(struct virtqueue *);
 static void vioscsi_req_done(struct vioscsi_softc *, struct virtio_softc *,
     struct vioscsi_req *);
 static struct vioscsi_req *vioscsi_req_get(struct vioscsi_softc *);
 static void vioscsi_req_put(struct vioscsi_softc *, struct vioscsi_req *);
 
 static const char *const vioscsi_vq_names[] = {
 	"control",
 	"event",
 	"request",
 };
 
 CFATTACH_DECL_NEW(vioscsi, sizeof(struct vioscsi_softc),
     vioscsi_match, vioscsi_attach, NULL, NULL);
 
 static int
 vioscsi_match(device_t parent, cfdata_t match, void *aux)
 {
 	struct virtio_softc *va = aux;
 
 	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
 		return 1;
 	return 0;
 }
 
 static void
 vioscsi_attach(device_t parent, device_t self, void *aux)
 {
 	struct vioscsi_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
 	struct scsipi_adapter *adapt = &sc->sc_adapter;
 	struct scsipi_channel *chan = &sc->sc_channel;
 	uint32_t features;
 	char buf[256];
 	int rv;
 
 	if (vsc->sc_child != NULL) {
 		aprint_error(": parent %s already has a child\n",
 		    device_xname(parent));
 		return;
 	}
 
 	sc->sc_dev = self;
 
 	vsc->sc_child = self;
 	vsc->sc_ipl = IPL_BIO;
 	vsc->sc_vqs = sc->sc_vqs;
 	vsc->sc_nvqs = __arraycount(sc->sc_vqs);
 	vsc->sc_config_change = NULL;
 	vsc->sc_intrhand = virtio_vq_intr;
 	vsc->sc_flags = 0;
 
 	features = virtio_negotiate_features(vsc, 0);
 	snprintb(buf, sizeof(buf), VIRTIO_COMMON_FLAG_BITS, features);
 	aprint_normal(": Features: %s\n", buf);
 	aprint_naive("\n");
 
 	uint32_t cmd_per_lun = virtio_read_device_config_4(vsc,
 	    VIRTIO_SCSI_CONFIG_CMD_PER_LUN);
 
 	uint32_t seg_max = virtio_read_device_config_4(vsc,
 	    VIRTIO_SCSI_CONFIG_SEG_MAX);
 
 	uint16_t max_target = virtio_read_device_config_2(vsc,
 	    VIRTIO_SCSI_CONFIG_MAX_TARGET);
 
 	uint16_t max_channel = virtio_read_device_config_2(vsc,
 	    VIRTIO_SCSI_CONFIG_MAX_CHANNEL);
 
 	uint32_t max_lun = virtio_read_device_config_4(vsc,
 	    VIRTIO_SCSI_CONFIG_MAX_LUN);
 
 	sc->sc_seg_max = seg_max;
 
 	for (size_t i = 0; i < __arraycount(sc->sc_vqs); i++) {
 		rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS,
 		    1 + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]);
 		if (rv) {
 			aprint_error_dev(sc->sc_dev,
 			    "failed to allocate virtqueue %zu\n", i);
 			return;
 		}
 		sc->sc_vqs[i].vq_done = vioscsi_vq_done;
 	}
 
 	int qsize = sc->sc_vqs[2].vq_num;
 	aprint_normal_dev(sc->sc_dev, "qsize %d\n", qsize);
 	if (vioscsi_alloc_reqs(sc, vsc, qsize, seg_max))
 		return;
 
 	/*
 	 * Fill in the scsipi_adapter.
 	 */
 	memset(adapt, 0, sizeof(*adapt));
 	adapt->adapt_dev = sc->sc_dev;
 	adapt->adapt_nchannels = max_channel;
 	adapt->adapt_openings = cmd_per_lun;
 	adapt->adapt_max_periph = adapt->adapt_openings;
 	adapt->adapt_request = vioscsi_scsipi_request;
 	adapt->adapt_minphys = minphys;
 
 	/*
 	 * Fill in the scsipi_channel.
 	 */
 	memset(chan, 0, sizeof(*chan));
 	chan->chan_adapter = adapt;
 	chan->chan_bustype = &scsi_bustype;
 	chan->chan_channel = 0;
 	chan->chan_ntargets = max_target;
 	chan->chan_nluns = max_lun;
 	chan->chan_id = 0;
 	chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
 
 	config_found(sc->sc_dev, &sc->sc_channel, scsiprint);
 }
 
 #define XS2DMA(xs) \
     ((((xs)->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE) | \
     (((xs)->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | \
     BUS_DMA_STREAMING)
 
 #define XS2DMAPRE(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
     BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)
 
 #define XS2DMAPOST(xs) (((xs)->xs_control & XS_CTL_DATA_IN) ? \
     BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)
 
 static void
 vioscsi_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t
     request, void *arg)
 {
 	struct vioscsi_softc *sc =
 	    device_private(chan->chan_adapter->adapt_dev);
 	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
 	struct scsipi_xfer *xs;
 	struct scsipi_periph *periph;
 	struct vioscsi_req *vr;
 	struct virtio_scsi_req_hdr *req;
 	struct virtqueue *vq = &sc->sc_vqs[2];
 	int slot, error;
 
 	DPRINTF(("%s: enter\n", __func__));
 
-	if (request != ADAPTER_REQ_RUN_XFER) {
+	switch (request) {
+	case ADAPTER_REQ_RUN_XFER:
+		break;
+	case ADAPTER_REQ_SET_XFER_MODE:
+	{
+		struct scsipi_xfer_mode *xm = arg;
+		xm->xm_mode = PERIPH_CAP_TQING;
+		xm->xm_period = 0;
+		xm->xm_offset = 0;
+		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
+		return;
+	}
+	default:
 		DPRINTF(("%s: unhandled %d\n", __func__, request));
 		return;
 	}
 
 	xs = arg;
 	periph = xs->xs_periph;
 
 	vr = vioscsi_req_get(sc);
 	/*
 	 * This can happen when we run out of queue slots.
 	 */
 	if (vr == NULL) {
 		xs->error = XS_RESOURCE_SHORTAGE;
 		scsipi_done(xs);
 		return;
 	}
 
 	req = &vr->vr_req;
 	slot = vr - sc->sc_reqs;
 
 	vr->vr_xs = xs;
 
 	/*
 	 * "The only supported format for the LUN field is: first byte set to
 	 * 1, second byte set to target, third and fourth byte representing a
 	 * single level LUN structure, followed by four zero bytes."
 	 */
 	if (periph->periph_target >= 256 || periph->periph_lun >= 16384) {
 		DPRINTF(("%s: bad target %u or lun %u\n", __func__,
 		    periph->periph_target, periph->periph_lun));
 		goto stuffup;
 	}
 	req->lun[0] = 1;
 	req->lun[1] = periph->periph_target - 1;
 	req->lun[2] = 0x40 | (periph->periph_lun >> 8);
 	req->lun[3] = periph->periph_lun;
 	memset(req->lun + 4, 0, 4);
 	DPRINTF(("%s: command for %u:%u at slot %d\n", __func__,
 	    periph->periph_target - 1, periph->periph_lun, slot));
 
+	/* tag */
+	switch (XS_CTL_TAGTYPE(xs)) {
+	case XS_CTL_HEAD_TAG:
+		req->task_attr = VIRTIO_SCSI_S_HEAD;
+		break;
+
+#if 0 /* XXX */
+	case XS_CTL_ACA_TAG:
+		req->task_attr = VIRTIO_SCSI_S_ACA;
+		break;
+#endif
+
+	case XS_CTL_ORDERED_TAG:
+		req->task_attr = VIRTIO_SCSI_S_ORDERED;
+		break;
+
+	case XS_CTL_SIMPLE_TAG:
+	default:
+		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
+		break;
+	}
+	req->id = (intptr_t)vr;
+
 	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
 		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
 		    (size_t)xs->cmdlen, sizeof(req->cdb)));
 		goto stuffup;
 	}
 
 	memset(req->cdb, 0, sizeof(req->cdb));
 	memcpy(req->cdb, xs->cmd, xs->cmdlen);
 
 	error = bus_dmamap_load(vsc->sc_dmat, vr->vr_data,
 	    xs->data, xs->datalen, NULL, XS2DMA(xs));
 	switch (error) {
 	case 0:
 		break;
 	case ENOMEM:
 	case EAGAIN:
 		xs->error = XS_RESOURCE_SHORTAGE;
 		goto nomore;
 	default:
 		aprint_error_dev(sc->sc_dev, "error %d loading DMA map\n",
 		    error);
 	stuffup:
 		xs->error = XS_DRIVER_STUFFUP;
 nomore:
 		vioscsi_req_put(sc, vr);
 		scsipi_done(xs);
 		return;
 	}
 
 	int nsegs = VIRTIO_SCSI_MIN_SEGMENTS;
 	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
 		nsegs += vr->vr_data->dm_nsegs;
 
 	error = virtio_enqueue_reserve(vsc, vq, slot, nsegs);
 	if (error) {
 		DPRINTF(("%s: error reserving %d\n", __func__, error));
 		goto stuffup;
 	}
 
 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_req),
 	    sizeof(struct virtio_scsi_req_hdr),
 	    BUS_DMASYNC_PREWRITE);
 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_res),
 	    sizeof(struct virtio_scsi_res_hdr),
 	    BUS_DMASYNC_PREREAD);
 	if ((xs->xs_control & (XS_CTL_DATA_IN|XS_CTL_DATA_OUT)) != 0)
 		bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
 		    XS2DMAPRE(xs));
 
 	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_req),
 	    sizeof(struct virtio_scsi_req_hdr), 1);
 	if (xs->xs_control & XS_CTL_DATA_OUT)
 		virtio_enqueue(vsc, vq, slot, vr->vr_data, 1);
 	virtio_enqueue_p(vsc, vq, slot, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_res),
 	    sizeof(struct virtio_scsi_res_hdr), 0);
 	if (xs->xs_control & XS_CTL_DATA_IN)
 		virtio_enqueue(vsc, vq, slot, vr->vr_data, 0);
 	virtio_enqueue_commit(vsc, vq, slot, 1);
 
 	if ((xs->xs_control & XS_CTL_POLL) == 0)
 		return;
 
 	DPRINTF(("%s: polling...\n", __func__));
 	// XXX: do this better.
 	int timeout = 1000;
 	do {
 		(*vsc->sc_intrhand)(vsc);
 		if (vr->vr_xs != xs)
 			break;
 		delay(1000);
 	} while (--timeout > 0);
 
 	if (vr->vr_xs == xs) {
 		// XXX: Abort!
 		xs->error = XS_TIMEOUT;
 		xs->resid = xs->datalen;
 		DPRINTF(("%s: polling timeout\n", __func__));
 		scsipi_done(xs);
 	}
 	DPRINTF(("%s: done (timeout=%d)\n", __func__, timeout));
 }
 
 static void
 vioscsi_req_done(struct vioscsi_softc *sc, struct virtio_softc *vsc,
     struct vioscsi_req *vr)
 {
 	struct scsipi_xfer *xs = vr->vr_xs;
 	struct scsi_sense_data *sense = &xs->sense.scsi_sense;
 	size_t sense_len;
 
 	DPRINTF(("%s: enter\n", __func__));
 
 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_req),
 	    sizeof(struct virtio_scsi_req_hdr),
 	    BUS_DMASYNC_POSTWRITE);
 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_control,
 	    offsetof(struct vioscsi_req, vr_res),
 	    sizeof(struct virtio_scsi_res_hdr),
 	    BUS_DMASYNC_POSTREAD);
 	bus_dmamap_sync(vsc->sc_dmat, vr->vr_data, 0, xs->datalen,
 	    XS2DMAPOST(xs));
 
 	switch (vr->vr_res.response) {
 	case VIRTIO_SCSI_S_OK:
 		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
 		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
 		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
 		break;
 	case VIRTIO_SCSI_S_BAD_TARGET:
 		DPRINTF(("%s: bad target\n", __func__));
 		memset(sense, 0, sizeof(*sense));
 		sense->response_code = 0x70;
 		sense->flags = SKEY_ILLEGAL_REQUEST;
 		xs->error = XS_SENSE;
 		xs->status = 0;
 		xs->resid = 0;
 		break;
 	default:
 		DPRINTF(("%s: stuffup: %d\n", __func__, vr->vr_res.response));
 		xs->error = XS_DRIVER_STUFFUP;
 		xs->resid = xs->datalen;
 		break;
 	}
 
 	xs->status = vr->vr_res.status;
 	xs->resid = vr->vr_res.residual;
 
 	DPRINTF(("%s: done %d, %d, %d\n", __func__,
 	    xs->error, xs->status, xs->resid));
 
 	vr->vr_xs = NULL;
 	vioscsi_req_put(sc, vr);
 	scsipi_done(xs);
 }
 
 static int
 vioscsi_vq_done(struct virtqueue *vq)
 {
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioscsi_softc *sc = device_private(vsc->sc_child);
 	int ret = 0;
 
 	DPRINTF(("%s: enter\n", __func__));
 
 	for (;;) {
 		int r, slot;
 		r = virtio_dequeue(vsc, vq, &slot, NULL);
 		if (r != 0)
 			break;
 
 		DPRINTF(("%s: slot=%d\n", __func__, slot));
 		vioscsi_req_done(sc, vsc, &sc->sc_reqs[slot]);
 		ret = 1;
 	}
 
 	DPRINTF(("%s: exit %d\n", __func__, ret));
 
 	return ret;
 }
 
 static struct vioscsi_req *
 vioscsi_req_get(struct vioscsi_softc *sc)
 {
 	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
 	struct virtqueue *vq = &sc->sc_vqs[2];
 	struct vioscsi_req *vr;
 	int r, slot;
 
 	if ((r = virtio_enqueue_prep(vsc, vq, &slot)) != 0) {
 		DPRINTF(("%s: virtio_enqueue_get error %d\n", __func__, r));
 		goto err1;
 	}
 	vr = &sc->sc_reqs[slot];
 
 	vr->vr_req.id = slot;
 	vr->vr_req.task_attr = VIRTIO_SCSI_S_SIMPLE;
 
 	r = bus_dmamap_create(vsc->sc_dmat,
 	    offsetof(struct vioscsi_req, vr_xs), 1,
 	    offsetof(struct vioscsi_req, vr_xs), 0,
 	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_control);
 	if (r != 0) {
 		DPRINTF(("%s: bus_dmamap_create xs error %d\n", __func__, r));
 		goto err2;
 	}
 	r = bus_dmamap_create(vsc->sc_dmat, MAXPHYS, sc->sc_seg_max,
 	    MAXPHYS, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, &vr->vr_data);
 	if (r != 0) {
 		DPRINTF(("%s: bus_dmamap_create data error %d\n", __func__, r));
 		goto err3;
 	}
 	r = bus_dmamap_load(vsc->sc_dmat, vr->vr_control,
 	    vr, offsetof(struct vioscsi_req, vr_xs), NULL,
 	    BUS_DMA_NOWAIT);
 	if (r != 0) {
 		DPRINTF(("%s: bus_dmamap_create ctrl error %d\n", __func__, r));
 		goto err4;
 	}
 
 	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));
 
 	return vr;
 
 err4:
 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
 err3:
 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
 err2:
 	virtio_enqueue_abort(vsc, vq, slot);
 err1:
 	return NULL;
 }
 
 static void
 vioscsi_req_put(struct vioscsi_softc *sc, struct vioscsi_req *vr)
 {
 	struct virtio_softc *vsc = device_private(device_parent(sc->sc_dev));
 	struct virtqueue *vq = &sc->sc_vqs[2];
 	int slot = vr - sc->sc_reqs;
 
 	DPRINTF(("%s: %p, %d\n", __func__, vr, slot));
 
 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_control);
 	bus_dmamap_destroy(vsc->sc_dmat, vr->vr_data);
 
 	virtio_dequeue_commit(vsc, vq, slot);
 }
 
 int
 vioscsi_alloc_reqs(struct vioscsi_softc *sc, struct virtio_softc *vsc,
     int qsize, uint32_t seg_max)
 {
 	size_t allocsize;
 	int r, rsegs;
 	void *vaddr;
 
 	allocsize = qsize * sizeof(struct vioscsi_req);
 	r = bus_dmamem_alloc(vsc->sc_dmat, allocsize, 0, 0,
 	    &sc->sc_reqs_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
 		    "%s: bus_dmamem_alloc, size %zu, error %d\n", __func__,
 		    allocsize, r);
 		return 1;
 	}
 	r = bus_dmamem_map(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1,
 	    allocsize, &vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
 		    "%s: bus_dmamem_map failed, error %d\n", __func__, r);
 		bus_dmamem_free(vsc->sc_dmat, &sc->sc_reqs_segs[0], 1);
 		return 1;
 	}
 	sc->sc_reqs = vaddr;
 	memset(vaddr, 0, allocsize);
 	return 0;
 }
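
For reference, the new tag handling above reduces to this mapping from
scsipi tag types to virtio-scsi task attributes (a condensed sketch;
vioscsi_task_attr is a hypothetical helper name, the driver switches
inline, and the ACA case is deliberately left under #if 0 in the commit):

	/*
	 * Map the midlayer's tag type onto a virtio-scsi task attribute;
	 * untagged and simple-tagged commands both go out as SIMPLE.
	 */
	static uint8_t
	vioscsi_task_attr(const struct scsipi_xfer *xs)
	{
		switch (XS_CTL_TAGTYPE(xs)) {
		case XS_CTL_HEAD_TAG:
			return VIRTIO_SCSI_S_HEAD;
		case XS_CTL_ORDERED_TAG:
			return VIRTIO_SCSI_S_ORDERED;
		case XS_CTL_SIMPLE_TAG:
		default:
			return VIRTIO_SCSI_S_SIMPLE;
		}
	}

Note also that req->id = (intptr_t)vr in vioscsi_scsipi_request()
overwrites the slot number that vioscsi_req_get() had stored in vr_req.id
just before; the id is an opaque tag as far as the device is concerned.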