Sun Sep 28 11:27:01 2014 UTC
for BIOCDISK_NOVOL, when setting bd_disknovol=false, also set bd_volid


(jmcneill)
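The hunk implementing this change sits past the portion of the diff reproduced below, in the BIOCDISK_NOVOL handler mpt_bio_ioctl_disk_novol(). As a rough sketch only, assuming the struct bioc_disk fields bd_disknovol and bd_volid from <dev/biovar.h> (the variable name `vol' here is hypothetical, standing in for whatever volume id the handler's RAID-page lookup produced), the fix presumably amounts to:

	/*
	 * Hypothetical sketch, not the committed hunk: once the lookup
	 * finds that the physical disk is a member of a RAID volume,
	 * report both facts to the bio(4) layer.
	 */
	bd->bd_disknovol = false;	/* disk does belong to a volume */
	bd->bd_volid = vol;		/* new: say which volume it is */

With bd_volid filled in, bioctl(8) can associate a member disk reported through BIOCDISK_NOVOL with its owning volume instead of seeing a stale id.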
diff -r1.30 -r1.31 src/sys/dev/ic/mpt_netbsd.c

cvs diff -r1.30 -r1.31 src/sys/dev/ic/mpt_netbsd.c

--- src/sys/dev/ic/mpt_netbsd.c 2014/09/28 11:20:22 1.30
+++ src/sys/dev/ic/mpt_netbsd.c 2014/09/28 11:27:00 1.31
@@ -1,2064 +1,2065 @@
-/* $NetBSD: mpt_netbsd.c,v 1.30 2014/09/28 11:20:22 jmcneill Exp $ */
+/* $NetBSD: mpt_netbsd.c,v 1.31 2014/09/28 11:27:00 jmcneill Exp $ */

 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *	This product includes software developed for the NetBSD Project by
  *	Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 /*
  * Copyright (c) 2000, 2001 by Greg Ansley
  * Partially derived from Matt Jacob's ISP driver.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice immediately at the beginning of the file, without modification,
  *    this list of conditions, and the following disclaimer.
  * 2. The name of the author may not be used to endorse or promote products
  *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 /*
  * Additional Copyright (c) 2002 by Matthew Jacob under same license.
  */

 /*
  * mpt_netbsd.c:
  *
  * NetBSD-specific routines for LSI Fusion adapters.  Includes some
  * bus_dma glue, and SCSIPI glue.
  *
  * Adapted from the FreeBSD "mpt" driver by Jason R. Thorpe for
  * Wasabi Systems, Inc.
  *
  * Additional contributions by Garrett D'Amore on behalf of TELES AG.
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.30 2014/09/28 11:20:22 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mpt_netbsd.c,v 1.31 2014/09/28 11:27:00 jmcneill Exp $");

 #include "bio.h"

 #include <dev/ic/mpt.h>			/* pulls in all headers */
 #include <sys/scsiio.h>

 #if NBIO > 0
 #include <dev/biovar.h>
 #endif

 static int	mpt_poll(mpt_softc_t *, struct scsipi_xfer *, int);
 static void	mpt_timeout(void *);
 static void	mpt_restart(mpt_softc_t *, request_t *);
 static void	mpt_done(mpt_softc_t *, uint32_t);
 static int	mpt_drain_queue(mpt_softc_t *);
 static void	mpt_run_xfer(mpt_softc_t *, struct scsipi_xfer *);
 static void	mpt_set_xfer_mode(mpt_softc_t *, struct scsipi_xfer_mode *);
 static void	mpt_get_xfer_mode(mpt_softc_t *, struct scsipi_periph *);
 static void	mpt_ctlop(mpt_softc_t *, void *vmsg, uint32_t);
 static void	mpt_event_notify_reply(mpt_softc_t *, MSG_EVENT_NOTIFY_REPLY *);
 static void	mpt_bus_reset(mpt_softc_t *);

 static void	mpt_scsipi_request(struct scsipi_channel *,
 		    scsipi_adapter_req_t, void *);
 static void	mpt_minphys(struct buf *);
 static int	mpt_ioctl(struct scsipi_channel *, u_long, void *, int,
 		    struct proc *);

 #if NBIO > 0
 static bool	mpt_is_raid(mpt_softc_t *);
 static int	mpt_bio_ioctl(device_t, u_long, void *);
 static int	mpt_bio_ioctl_inq(mpt_softc_t *, struct bioc_inq *);
 static int	mpt_bio_ioctl_vol(mpt_softc_t *, struct bioc_vol *);
 static int	mpt_bio_ioctl_disk(mpt_softc_t *, struct bioc_disk *);
 static int	mpt_bio_ioctl_disk_novol(mpt_softc_t *, struct bioc_disk *);
 static int	mpt_bio_ioctl_setstate(mpt_softc_t *, struct bioc_setstate *);
 #endif

 void
 mpt_scsipi_attach(mpt_softc_t *mpt)
 {
 	struct scsipi_adapter *adapt = &mpt->sc_adapter;
 	struct scsipi_channel *chan = &mpt->sc_channel;
 	int maxq;

 	mpt->bus = 0;		/* XXX ?? */

 	maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt)) ?
 	    mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt);

 	/* Fill in the scsipi_adapter. */
 	memset(adapt, 0, sizeof(*adapt));
 	adapt->adapt_dev = mpt->sc_dev;
 	adapt->adapt_nchannels = 1;
 	adapt->adapt_openings = maxq - 2;	/* Reserve 2 for driver use*/
 	adapt->adapt_max_periph = maxq - 2;
 	adapt->adapt_request = mpt_scsipi_request;
 	adapt->adapt_minphys = mpt_minphys;
 	adapt->adapt_ioctl = mpt_ioctl;

 	/* Fill in the scsipi_channel. */
 	memset(chan, 0, sizeof(*chan));
 	chan->chan_adapter = adapt;
 	if (mpt->is_sas) {
 		chan->chan_bustype = &scsi_sas_bustype;
 	} else if (mpt->is_fc) {
 		chan->chan_bustype = &scsi_fc_bustype;
 	} else {
 		chan->chan_bustype = &scsi_bustype;
 	}
 	chan->chan_channel = 0;
 	chan->chan_flags = 0;
 	chan->chan_nluns = 8;
 	chan->chan_ntargets = mpt->mpt_max_devices;
 	chan->chan_id = mpt->mpt_ini_id;

 	/*
 	 * Save the output of the config so we can rescan the bus in case of
 	 * errors
 	 */
 	mpt->sc_scsibus_dv = config_found(mpt->sc_dev, &mpt->sc_channel,
 	    scsiprint);

 #if NBIO > 0
 	if (mpt_is_raid(mpt)) {
 		if (bio_register(mpt->sc_dev, mpt_bio_ioctl) != 0)
 			panic("%s: controller registration failed",
 			    device_xname(mpt->sc_dev));
 	}
 #endif
 }

 int
 mpt_dma_mem_alloc(mpt_softc_t *mpt)
 {
 	bus_dma_segment_t reply_seg, request_seg;
 	int reply_rseg, request_rseg;
 	bus_addr_t pptr, end;
 	char *vptr;
 	size_t len;
 	int error, i;

 	/* Check if we have already allocated the reply memory. */
 	if (mpt->reply != NULL)
 		return (0);

 	/*
 	 * Allocate the request pool.  This isn't really DMA'd memory,
 	 * but it's a convenient place to do it.
 	 */
 	len = sizeof(request_t) * MPT_MAX_REQUESTS(mpt);
 	mpt->request_pool = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
 	if (mpt->request_pool == NULL) {
 		aprint_error_dev(mpt->sc_dev, "unable to allocate request pool\n");
 		return (ENOMEM);
 	}

 	/*
 	 * Allocate DMA resources for reply buffers.
 	 */
 	error = bus_dmamem_alloc(mpt->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
 	    &reply_seg, 1, &reply_rseg, 0);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to allocate reply area, error = %d\n",
 		    error);
 		goto fail_0;
 	}

 	error = bus_dmamem_map(mpt->sc_dmat, &reply_seg, reply_rseg, PAGE_SIZE,
 	    (void **) &mpt->reply, BUS_DMA_COHERENT/*XXX*/);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to map reply area, error = %d\n",
 		    error);
 		goto fail_1;
 	}

 	error = bus_dmamap_create(mpt->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
 	    0, 0, &mpt->reply_dmap);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to create reply DMA map, error = %d\n",
 		    error);
 		goto fail_2;
 	}

 	error = bus_dmamap_load(mpt->sc_dmat, mpt->reply_dmap, mpt->reply,
 	    PAGE_SIZE, NULL, 0);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to load reply DMA map, error = %d\n",
 		    error);
 		goto fail_3;
 	}
 	mpt->reply_phys = mpt->reply_dmap->dm_segs[0].ds_addr;

 	/*
 	 * Allocate DMA resources for request buffers.
 	 */
 	error = bus_dmamem_alloc(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt),
 	    PAGE_SIZE, 0, &request_seg, 1, &request_rseg, 0);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to allocate request area, "
 		    "error = %d\n", error);
 		goto fail_4;
 	}

 	error = bus_dmamem_map(mpt->sc_dmat, &request_seg, request_rseg,
 	    MPT_REQ_MEM_SIZE(mpt), (void **) &mpt->request, 0);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to map request area, error = %d\n",
 		    error);
 		goto fail_5;
 	}

 	error = bus_dmamap_create(mpt->sc_dmat, MPT_REQ_MEM_SIZE(mpt), 1,
 	    MPT_REQ_MEM_SIZE(mpt), 0, 0, &mpt->request_dmap);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to create request DMA map, "
 		    "error = %d\n", error);
 		goto fail_6;
 	}

 	error = bus_dmamap_load(mpt->sc_dmat, mpt->request_dmap, mpt->request,
 	    MPT_REQ_MEM_SIZE(mpt), NULL, 0);
 	if (error) {
 		aprint_error_dev(mpt->sc_dev, "unable to load request DMA map, error = %d\n",
 		    error);
 		goto fail_7;
 	}
 	mpt->request_phys = mpt->request_dmap->dm_segs[0].ds_addr;

 	pptr = mpt->request_phys;
 	vptr = (void *) mpt->request;
 	end = pptr + MPT_REQ_MEM_SIZE(mpt);

 	for (i = 0; pptr < end; i++) {
 		request_t *req = &mpt->request_pool[i];
 		req->index = i;

 		/* Store location of Request Data */
 		req->req_pbuf = pptr;
 		req->req_vbuf = vptr;

 		pptr += MPT_REQUEST_AREA;
 		vptr += MPT_REQUEST_AREA;

 		req->sense_pbuf = (pptr - MPT_SENSE_SIZE);
 		req->sense_vbuf = (vptr - MPT_SENSE_SIZE);

 		error = bus_dmamap_create(mpt->sc_dmat, MAXPHYS,
 		    MPT_SGL_MAX, MAXPHYS, 0, 0, &req->dmap);
 		if (error) {
 			aprint_error_dev(mpt->sc_dev, "unable to create req %d DMA map, "
 			    "error = %d\n", i, error);
 			goto fail_8;
 		}
 	}

 	return (0);

  fail_8:
 	for (--i; i >= 0; i--) {
 		request_t *req = &mpt->request_pool[i];
 		if (req->dmap != NULL)
 			bus_dmamap_destroy(mpt->sc_dmat, req->dmap);
 	}
 	bus_dmamap_unload(mpt->sc_dmat, mpt->request_dmap);
  fail_7:
 	bus_dmamap_destroy(mpt->sc_dmat, mpt->request_dmap);
  fail_6:
 	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->request, PAGE_SIZE);
  fail_5:
 	bus_dmamem_free(mpt->sc_dmat, &request_seg, request_rseg);
  fail_4:
 	bus_dmamap_unload(mpt->sc_dmat, mpt->reply_dmap);
  fail_3:
 	bus_dmamap_destroy(mpt->sc_dmat, mpt->reply_dmap);
  fail_2:
 	bus_dmamem_unmap(mpt->sc_dmat, (void *)mpt->reply, PAGE_SIZE);
  fail_1:
 	bus_dmamem_free(mpt->sc_dmat, &reply_seg, reply_rseg);
  fail_0:
 	free(mpt->request_pool, M_DEVBUF);

 	mpt->reply = NULL;
 	mpt->request = NULL;
 	mpt->request_pool = NULL;

 	return (error);
 }

 int
 mpt_intr(void *arg)
 {
 	mpt_softc_t *mpt = arg;
 	int nrepl = 0;

 	if ((mpt_read(mpt, MPT_OFFSET_INTR_STATUS) & MPT_INTR_REPLY_READY) == 0)
 		return (0);

 	nrepl = mpt_drain_queue(mpt);
 	return (nrepl != 0);
 }

 void
 mpt_prt(mpt_softc_t *mpt, const char *fmt, ...)
 {
 	va_list ap;

 	printf("%s: ", device_xname(mpt->sc_dev));
 	va_start(ap, fmt);
 	vprintf(fmt, ap);
 	va_end(ap);
 	printf("\n");
 }

 static int
 mpt_poll(mpt_softc_t *mpt, struct scsipi_xfer *xs, int count)
 {

 	/* Timeouts are in msec, so we loop in 1000usec cycles */
 	while (count) {
 		mpt_intr(mpt);
 		if (xs->xs_status & XS_STS_DONE)
 			return (0);
 		delay(1000);		/* only happens in boot, so ok */
 		count--;
 	}
 	return (1);
 }

 static void
 mpt_timeout(void *arg)
 {
 	request_t *req = arg;
 	struct scsipi_xfer *xs;
 	struct scsipi_periph *periph;
 	mpt_softc_t *mpt;
 	uint32_t oseq;
 	int s, nrepl = 0;

 	if (req->xfer == NULL) {
 		printf("mpt_timeout: NULL xfer for request index 0x%x, sequenc 0x%x\n",
 		    req->index, req->sequence);
 		return;
 	}
 	xs = req->xfer;
 	periph = xs->xs_periph;
 	mpt = device_private(periph->periph_channel->chan_adapter->adapt_dev);
 	scsipi_printaddr(periph);
 	printf("command timeout\n");

 	s = splbio();

 	oseq = req->sequence;
 	mpt->timeouts++;
 	if (mpt_intr(mpt)) {
 		if (req->sequence != oseq) {
 			mpt->success++;
 			mpt_prt(mpt, "recovered from command timeout");
 			splx(s);
 			return;
 		}
 	}

 	/*
 	 * Ensure the IOC is really done giving us data since it appears it can
 	 * sometimes fail to give us interrupts under heavy load.
 	 */
 	nrepl = mpt_drain_queue(mpt);
 	if (nrepl ) {
 		mpt_prt(mpt, "mpt_timeout: recovered %d commands",nrepl);
 	}

 	if (req->sequence != oseq) {
 		mpt->success++;
 		splx(s);
 		return;
 	}

 	mpt_prt(mpt,
 	    "timeout on request index = 0x%x, seq = 0x%08x",
 	    req->index, req->sequence);
 	mpt_check_doorbell(mpt);
 	mpt_prt(mpt, "Status 0x%08x, Mask 0x%08x, Doorbell 0x%08x",
 	    mpt_read(mpt, MPT_OFFSET_INTR_STATUS),
 	    mpt_read(mpt, MPT_OFFSET_INTR_MASK),
 	    mpt_read(mpt, MPT_OFFSET_DOORBELL));
 	mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
 	if (mpt->verbose > 1)
 		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

 	xs->error = XS_TIMEOUT;
 	splx(s);
 	mpt_restart(mpt, req);
 }

 static void
 mpt_restart(mpt_softc_t *mpt, request_t *req0)
 {
 	int i, s, nreq;
 	request_t *req;
 	struct scsipi_xfer *xs;

 	/* first, reset the IOC, leaving stopped so all requests are idle */
 	if (mpt_soft_reset(mpt) != MPT_OK) {
 		mpt_prt(mpt, "soft reset failed");
 		/*
 		 * Don't try a hard reset since this mangles the PCI
 		 * configuration registers.
 		 */
 		return;
 	}

 	/* Freeze the channel so scsipi doesn't queue more commands. */
 	scsipi_channel_freeze(&mpt->sc_channel, 1);

 	/* Return all pending requests to scsipi and de-allocate them. */
 	s = splbio();
 	nreq = 0;
 	for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
 		req = &mpt->request_pool[i];
 		xs = req->xfer;
 		if (xs != NULL) {
 			if (xs->datalen != 0)
 				bus_dmamap_unload(mpt->sc_dmat, req->dmap);
 			req->xfer = NULL;
 			callout_stop(&xs->xs_callout);
 			if (req != req0) {
 				nreq++;
 				xs->error = XS_REQUEUE;
 			}
 			scsipi_done(xs);
 			/*
 			 * Don't need to mpt_free_request() since mpt_init()
 			 * below will free all requests anyway.
 			 */
 			mpt_free_request(mpt, req);
 		}
 	}
 	splx(s);
 	if (nreq > 0)
 		mpt_prt(mpt, "re-queued %d requests", nreq);

 	/* Re-initialize the IOC (which restarts it). */
 	if (mpt_init(mpt, MPT_DB_INIT_HOST) == 0)
 		mpt_prt(mpt, "restart succeeded");
 	/* else error message already printed */

 	/* Thaw the channel, causing scsipi to re-queue the commands. */
 	scsipi_channel_thaw(&mpt->sc_channel, 1);
 }

 static int
 mpt_drain_queue(mpt_softc_t *mpt)
 {
 	int nrepl = 0;
 	uint32_t reply;

 	reply = mpt_pop_reply_queue(mpt);
 	while (reply != MPT_REPLY_EMPTY) {
 		nrepl++;
 		if (mpt->verbose > 1) {
 			if ((reply & MPT_CONTEXT_REPLY) != 0) {
 				/* Address reply; IOC has something to say */
 				mpt_print_reply(MPT_REPLY_PTOV(mpt, reply));
 			} else {
 				/* Context reply; all went well */
 				mpt_prt(mpt, "context %u reply OK", reply);
 			}
 		}
 		mpt_done(mpt, reply);
 		reply = mpt_pop_reply_queue(mpt);
 	}
 	return (nrepl);
 }

 static void
 mpt_done(mpt_softc_t *mpt, uint32_t reply)
 {
 	struct scsipi_xfer *xs = NULL;
 	struct scsipi_periph *periph;
 	int index;
 	request_t *req;
 	MSG_REQUEST_HEADER *mpt_req;
 	MSG_SCSI_IO_REPLY *mpt_reply;
 	int restart = 0; /* nonzero if we need to restart the IOC*/

 	if (__predict_true((reply & MPT_CONTEXT_REPLY) == 0)) {
 		/* context reply (ok) */
 		mpt_reply = NULL;
 		index = reply & MPT_CONTEXT_MASK;
 	} else {
 		/* address reply (error) */

 		/* XXX BUS_DMASYNC_POSTREAD XXX */
 		mpt_reply = MPT_REPLY_PTOV(mpt, reply);
 		if (mpt_reply != NULL) {
 			if (mpt->verbose > 1) {
 				uint32_t *pReply = (uint32_t *) mpt_reply;

 				mpt_prt(mpt, "Address Reply (index %u):",
 				    le32toh(mpt_reply->MsgContext) & 0xffff);
 				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[0],
 				    pReply[1], pReply[2], pReply[3]);
 				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[4],
 				    pReply[5], pReply[6], pReply[7]);
 				mpt_prt(mpt, "%08x %08x %08x %08x", pReply[8],
 				    pReply[9], pReply[10], pReply[11]);
 			}
 			index = le32toh(mpt_reply->MsgContext);
 		} else
 			index = reply & MPT_CONTEXT_MASK;
 	}

 	/*
 	 * Address reply with MessageContext high bit set.
 	 * This is most likely a notify message, so we try
 	 * to process it, then free it.
 	 */
 	if (__predict_false((index & 0x80000000) != 0)) {
 		if (mpt_reply != NULL)
 			mpt_ctlop(mpt, mpt_reply, reply);
 		else
 			mpt_prt(mpt, "%s: index 0x%x, NULL reply", __func__,
 			    index);
 		return;
 	}

 	/* Did we end up with a valid index into the table? */
 	if (__predict_false(index < 0 || index >= MPT_MAX_REQUESTS(mpt))) {
 		mpt_prt(mpt, "%s: invalid index (0x%x) in reply", __func__,
 		    index);
 		return;
 	}

 	req = &mpt->request_pool[index];

 	/* Make sure memory hasn't been trashed. */
 	if (__predict_false(req->index != index)) {
 		mpt_prt(mpt, "%s: corrupted request_t (0x%x)", __func__,
 		    index);
 		return;
 	}

 	MPT_SYNC_REQ(mpt, req, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
 	mpt_req = req->req_vbuf;

 	/* Short cut for task management replies; nothing more for us to do. */
 	if (__predict_false(mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT)) {
 		if (mpt->verbose > 1)
 			mpt_prt(mpt, "%s: TASK MGMT", __func__);
 		KASSERT(req == mpt->mngt_req);
 		mpt->mngt_req = NULL;
 		goto done;
 	}

 	if (__predict_false(mpt_req->Function == MPI_FUNCTION_PORT_ENABLE))
 		goto done;

 	/*
 	 * At this point, it had better be a SCSI I/O command, but don't
 	 * crash if it isn't.
 	 */
 	if (__predict_false(mpt_req->Function !=
 			    MPI_FUNCTION_SCSI_IO_REQUEST)) {
 		if (mpt->verbose > 1)
 			mpt_prt(mpt, "%s: unknown Function 0x%x (0x%x)",
 			    __func__, mpt_req->Function, index);
 		goto done;
 	}

 	/* Recover scsipi_xfer from the request structure. */
 	xs = req->xfer;

 	/* Can't have a SCSI command without a scsipi_xfer. */
 	if (__predict_false(xs == NULL)) {
 		mpt_prt(mpt,
 		    "%s: no scsipi_xfer, index = 0x%x, seq = 0x%08x", __func__,
 		    req->index, req->sequence);
 		mpt_prt(mpt, "request state: %s", mpt_req_state(req->debug));
 		mpt_prt(mpt, "mpt_request:");
 		mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf);

 		if (mpt_reply != NULL) {
 			mpt_prt(mpt, "mpt_reply:");
 			mpt_print_reply(mpt_reply);
 		} else {
 			mpt_prt(mpt, "context reply: 0x%08x", reply);
 		}
 		goto done;
 	}

 	callout_stop(&xs->xs_callout);

 	periph = xs->xs_periph;

 	/*
 	 * If we were a data transfer, unload the map that described
 	 * the data buffer.
 	 */
 	if (__predict_true(xs->datalen != 0)) {
 		bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
 		    req->dmap->dm_mapsize,
 		    (xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMASYNC_POSTREAD
 		    : BUS_DMASYNC_POSTWRITE);
 		bus_dmamap_unload(mpt->sc_dmat, req->dmap);
 	}

 	if (__predict_true(mpt_reply == NULL)) {
 		/*
 		 * Context reply; report that the command was
 		 * successful!
 		 *
 		 * Also report the xfer mode, if necessary.
 		 */
 		if (__predict_false(mpt->mpt_report_xfer_mode != 0)) {
 			if ((mpt->mpt_report_xfer_mode &
 			    (1 << periph->periph_target)) != 0)
 				mpt_get_xfer_mode(mpt, periph);
 		}
 		xs->error = XS_NOERROR;
 		xs->status = SCSI_OK;
 		xs->resid = 0;
 		mpt_free_request(mpt, req);
 		scsipi_done(xs);
 		return;
 	}

 	xs->status = mpt_reply->SCSIStatus;
 	switch (le16toh(mpt_reply->IOCStatus) & MPI_IOCSTATUS_MASK) {
 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC overrun!", __func__);
 		break;

 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
 		/*
 		 * Yikes!  Tagged queue full comes through this path!
 		 *
 		 * So we'll change it to a status error and anything
 		 * that returns status should probably be a status
 		 * error as well.
 		 */
 		xs->resid = xs->datalen - le32toh(mpt_reply->TransferCount);
 		if (mpt_reply->SCSIState &
 		    MPI_SCSI_STATE_NO_SCSI_STATUS) {
 			xs->error = XS_DRIVER_STUFFUP;
 			break;
 		}
 		/* FALLTHROUGH */
 	case MPI_IOCSTATUS_SUCCESS:
 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
 		switch (xs->status) {
 		case SCSI_OK:
 			/* Report the xfer mode, if necessary. */
 			if ((mpt->mpt_report_xfer_mode &
 			    (1 << periph->periph_target)) != 0)
 				mpt_get_xfer_mode(mpt, periph);
 			xs->resid = 0;
 			break;

 		case SCSI_CHECK:
 			xs->error = XS_SENSE;
 			break;

 		case SCSI_BUSY:
 		case SCSI_QUEUE_FULL:
 			xs->error = XS_BUSY;
 			break;

 		default:
 			scsipi_printaddr(periph);
 			printf("invalid status code %d\n", xs->status);
 			xs->error = XS_DRIVER_STUFFUP;
 			break;
 		}
 		break;

 	case MPI_IOCSTATUS_BUSY:
 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
 		xs->error = XS_RESOURCE_SHORTAGE;
 		break;

 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
 		xs->error = XS_SELTIMEOUT;
 		break;

 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC SCSI residual mismatch!", __func__);
 		restart = 1;
 		break;

 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
 		/* XXX What should we do here? */
 		mpt_prt(mpt, "%s: IOC SCSI task terminated!", __func__);
 		restart = 1;
 		break;

 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
 		/* XXX */
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC SCSI task failed!", __func__);
 		restart = 1;
 		break;

 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
 		/* XXX */
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC task terminated!", __func__);
 		restart = 1;
 		break;

 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
 		/* XXX This is a bus-reset */
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC SCSI bus reset!", __func__);
 		restart = 1;
 		break;

 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
 		/*
 		 * FreeBSD and Linux indicate this is a phase error between
 		 * the IOC and the drive itself. When this happens, the IOC
 		 * becomes unhappy and stops processing all transactions.
 		 * Call mpt_timeout which knows how to get the IOC back
 		 * on its feet.
 		 */
 		mpt_prt(mpt, "%s: IOC indicates protocol error -- "
 		    "recovering...", __func__);
 		xs->error = XS_TIMEOUT;
 		restart = 1;

 		break;

 	default:
 		/* XXX unrecognized HBA error */
 		xs->error = XS_DRIVER_STUFFUP;
 		mpt_prt(mpt, "%s: IOC returned unknown code: 0x%x", __func__,
 		    le16toh(mpt_reply->IOCStatus));
 		restart = 1;
 		break;
 	}

 	if (mpt_reply != NULL) {
 		if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
 			memcpy(&xs->sense.scsi_sense, req->sense_vbuf,
 			    sizeof(xs->sense.scsi_sense));
 		} else if (mpt_reply->SCSIState &
 		    MPI_SCSI_STATE_AUTOSENSE_FAILED) {
 			/*
 			 * This will cause the scsipi layer to issue
 			 * a REQUEST SENSE.
 			 */
 			if (xs->status == SCSI_CHECK)
 				xs->error = XS_BUSY;
 		}
 	}

  done:
 	if (mpt_reply != NULL && le16toh(mpt_reply->IOCStatus) &
 	    MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
 		mpt_prt(mpt, "%s: IOC has error - logging...\n", __func__);
 		mpt_ctlop(mpt, mpt_reply, reply);
 	}

 	/* If IOC done with this request, free it up. */
 	if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0)
 		mpt_free_request(mpt, req);

 	/* If address reply, give the buffer back to the IOC. */
 	if (mpt_reply != NULL)
 		mpt_free_reply(mpt, (reply << 1));

 	if (xs != NULL)
 		scsipi_done(xs);

 	if (restart) {
 		mpt_prt(mpt, "%s: IOC fatal error: restarting...", __func__);
 		mpt_restart(mpt, NULL);
 	}
 }

 static void
 mpt_run_xfer(mpt_softc_t *mpt, struct scsipi_xfer *xs)
 {
 	struct scsipi_periph *periph = xs->xs_periph;
 	request_t *req;
 	MSG_SCSI_IO_REQUEST *mpt_req;
 	int error, s;

 	s = splbio();
 	req = mpt_get_request(mpt);
 	if (__predict_false(req == NULL)) {
 		/* This should happen very infrequently. */
 		xs->error = XS_RESOURCE_SHORTAGE;
 		scsipi_done(xs);
 		splx(s);
 		return;
 	}
 	splx(s);

 	/* Link the req and the scsipi_xfer. */
 	req->xfer = xs;

 	/* Now we build the command for the IOC */
 	mpt_req = req->req_vbuf;
 	memset(mpt_req, 0, sizeof(*mpt_req));

 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
 	mpt_req->Bus = mpt->bus;

 	mpt_req->SenseBufferLength =
 	    (sizeof(xs->sense.scsi_sense) < MPT_SENSE_SIZE) ?
 	    sizeof(xs->sense.scsi_sense) : MPT_SENSE_SIZE;

 	/*
 	 * We use the message context to find the request structure when
 	 * we get the command completion interrupt from the IOC.
 	 */
 	mpt_req->MsgContext = htole32(req->index);

 	/* Which physical device to do the I/O on. */
 	mpt_req->TargetID = periph->periph_target;
 	mpt_req->LUN[1] = periph->periph_lun;

 	/* Set the direction of the transfer. */
 	if (xs->xs_control & XS_CTL_DATA_IN)
 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
 	else if (xs->xs_control & XS_CTL_DATA_OUT)
 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
 	else
 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;

 	/* Set the queue behavior. */
 	if (__predict_true((!mpt->is_scsi) ||
 			   (mpt->mpt_tag_enable &
 			    (1 << periph->periph_target)))) {
 		switch (XS_CTL_TAGTYPE(xs)) {
 		case XS_CTL_HEAD_TAG:
 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
 			break;

 #if 0	/* XXX */
 		case XS_CTL_ACA_TAG:
 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
 			break;
 #endif

 		case XS_CTL_ORDERED_TAG:
 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
 			break;

 		case XS_CTL_SIMPLE_TAG:
 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
 			break;

 		default:
 			if (mpt->is_scsi)
 				mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
 			else
 				mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
 			break;
 		}
 	} else
 		mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;

 	if (__predict_false(mpt->is_scsi &&
 			    (mpt->mpt_disc_enable &
 			     (1 << periph->periph_target)) == 0))
 		mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;

 	mpt_req->Control = htole32(mpt_req->Control);

 	/* Copy the SCSI command block into place. */
 	memcpy(mpt_req->CDB, xs->cmd, xs->cmdlen);

 	mpt_req->CDBLength = xs->cmdlen;
 	mpt_req->DataLength = htole32(xs->datalen);
 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);

 	/*
 	 * Map the DMA transfer.
 	 */
 	if (xs->datalen) {
 		SGE_SIMPLE32 *se;

 		error = bus_dmamap_load(mpt->sc_dmat, req->dmap, xs->data,
 		    xs->datalen, NULL,
 		    ((xs->xs_control & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT
 		    : BUS_DMA_WAITOK) |
 		    BUS_DMA_STREAMING |
 		    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ
 		    : BUS_DMA_WRITE));
 		switch (error) {
 		case 0:
 			break;

 		case ENOMEM:
 		case EAGAIN:
 			xs->error = XS_RESOURCE_SHORTAGE;
 			goto out_bad;

 		default:
 			xs->error = XS_DRIVER_STUFFUP;
 			mpt_prt(mpt, "error %d loading DMA map", error);
  out_bad:
 			s = splbio();
 			mpt_free_request(mpt, req);
 			scsipi_done(xs);
 			splx(s);
 			return;
 		}

 		if (req->dmap->dm_nsegs > MPT_NSGL_FIRST(mpt)) {
 			int seg, i, nleft = req->dmap->dm_nsegs;
 			uint32_t flags;
 			SGE_CHAIN32 *ce;

 			seg = 0;
 			flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
 			if (xs->xs_control & XS_CTL_DATA_OUT)
 				flags |= MPI_SGE_FLAGS_HOST_TO_IOC;

 			se = (SGE_SIMPLE32 *) &mpt_req->SGL;
 			for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1;
 			     i++, se++, seg++) {
 				uint32_t tf;

 				memset(se, 0, sizeof(*se));
 				se->Address =
 				    htole32(req->dmap->dm_segs[seg].ds_addr);
 				MPI_pSGE_SET_LENGTH(se,
 				    req->dmap->dm_segs[seg].ds_len);
 				tf = flags;
 				if (i == MPT_NSGL_FIRST(mpt) - 2)
 					tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
 				MPI_pSGE_SET_FLAGS(se, tf);
 				se->FlagsLength = htole32(se->FlagsLength);
 				nleft--;
 			}

 			/*
 			 * Tell the IOC where to find the first chain element.
 			 */
 			mpt_req->ChainOffset =
 			    ((char *)se - (char *)mpt_req) >> 2;

 			/*
 			 * Until we're finished with all segments...
 			 */
 			while (nleft) {
 				int ntodo;

 				/*
 				 * Construct the chain element that points to
 				 * the next segment.
 				 */
 				ce = (SGE_CHAIN32 *) se++;
 				if (nleft > MPT_NSGL(mpt)) {
 					ntodo = MPT_NSGL(mpt) - 1;
 					ce->NextChainOffset = (MPT_RQSL(mpt) -
 					    sizeof(SGE_SIMPLE32)) >> 2;
 					ce->Length = htole16(MPT_NSGL(mpt)
 					    * sizeof(SGE_SIMPLE32));
 				} else {
 					ntodo = nleft;
 					ce->NextChainOffset = 0;
 					ce->Length = htole16(ntodo
 					    * sizeof(SGE_SIMPLE32));
 				}
 				ce->Address = htole32(req->req_pbuf +
 				    ((char *)se - (char *)mpt_req));
1015 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1015 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1016 for (i = 0; i < ntodo; i++, se++, seg++) { 1016 for (i = 0; i < ntodo; i++, se++, seg++) {
1017 uint32_t tf; 1017 uint32_t tf;
1018 1018
1019 memset(se, 0, sizeof(*se)); 1019 memset(se, 0, sizeof(*se));
1020 se->Address = htole32( 1020 se->Address = htole32(
1021 req->dmap->dm_segs[seg].ds_addr); 1021 req->dmap->dm_segs[seg].ds_addr);
1022 MPI_pSGE_SET_LENGTH(se, 1022 MPI_pSGE_SET_LENGTH(se,
1023 req->dmap->dm_segs[seg].ds_len); 1023 req->dmap->dm_segs[seg].ds_len);
1024 tf = flags; 1024 tf = flags;
1025 if (i == ntodo - 1) { 1025 if (i == ntodo - 1) {
1026 tf |= 1026 tf |=
1027 MPI_SGE_FLAGS_LAST_ELEMENT; 1027 MPI_SGE_FLAGS_LAST_ELEMENT;
1028 if (ce->NextChainOffset == 0) { 1028 if (ce->NextChainOffset == 0) {
1029 tf |= 1029 tf |=
1030 MPI_SGE_FLAGS_END_OF_LIST | 1030 MPI_SGE_FLAGS_END_OF_LIST |
1031 MPI_SGE_FLAGS_END_OF_BUFFER; 1031 MPI_SGE_FLAGS_END_OF_BUFFER;
1032 } 1032 }
1033 } 1033 }
1034 MPI_pSGE_SET_FLAGS(se, tf); 1034 MPI_pSGE_SET_FLAGS(se, tf);
1035 se->FlagsLength = 1035 se->FlagsLength =
1036 htole32(se->FlagsLength); 1036 htole32(se->FlagsLength);
1037 nleft--; 1037 nleft--;
1038 } 1038 }
1039 } 1039 }
1040 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0, 1040 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1041 req->dmap->dm_mapsize, 1041 req->dmap->dm_mapsize,
1042 (xs->xs_control & XS_CTL_DATA_IN) ? 1042 (xs->xs_control & XS_CTL_DATA_IN) ?
1043 BUS_DMASYNC_PREREAD 1043 BUS_DMASYNC_PREREAD
1044 : BUS_DMASYNC_PREWRITE); 1044 : BUS_DMASYNC_PREWRITE);
1045 } else { 1045 } else {
1046 int i; 1046 int i;
1047 uint32_t flags; 1047 uint32_t flags;
1048 1048
1049 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1049 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1050 if (xs->xs_control & XS_CTL_DATA_OUT) 1050 if (xs->xs_control & XS_CTL_DATA_OUT)
1051 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1051 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1052 1052
1053 /* Copy the segments into our SG list. */ 1053 /* Copy the segments into our SG list. */
1054 se = (SGE_SIMPLE32 *) &mpt_req->SGL; 1054 se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1055 for (i = 0; i < req->dmap->dm_nsegs; 1055 for (i = 0; i < req->dmap->dm_nsegs;
1056 i++, se++) { 1056 i++, se++) {
1057 uint32_t tf; 1057 uint32_t tf;
1058 1058
1059 memset(se, 0, sizeof(*se)); 1059 memset(se, 0, sizeof(*se));
1060 se->Address = 1060 se->Address =
1061 htole32(req->dmap->dm_segs[i].ds_addr); 1061 htole32(req->dmap->dm_segs[i].ds_addr);
1062 MPI_pSGE_SET_LENGTH(se, 1062 MPI_pSGE_SET_LENGTH(se,
1063 req->dmap->dm_segs[i].ds_len); 1063 req->dmap->dm_segs[i].ds_len);
1064 tf = flags; 1064 tf = flags;
1065 if (i == req->dmap->dm_nsegs - 1) { 1065 if (i == req->dmap->dm_nsegs - 1) {
1066 tf |= 1066 tf |=
1067 MPI_SGE_FLAGS_LAST_ELEMENT | 1067 MPI_SGE_FLAGS_LAST_ELEMENT |
1068 MPI_SGE_FLAGS_END_OF_BUFFER | 1068 MPI_SGE_FLAGS_END_OF_BUFFER |
1069 MPI_SGE_FLAGS_END_OF_LIST; 1069 MPI_SGE_FLAGS_END_OF_LIST;
1070 } 1070 }
1071 MPI_pSGE_SET_FLAGS(se, tf); 1071 MPI_pSGE_SET_FLAGS(se, tf);
1072 se->FlagsLength = htole32(se->FlagsLength); 1072 se->FlagsLength = htole32(se->FlagsLength);
1073 } 1073 }
1074 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0, 1074 bus_dmamap_sync(mpt->sc_dmat, req->dmap, 0,
1075 req->dmap->dm_mapsize, 1075 req->dmap->dm_mapsize,
1076 (xs->xs_control & XS_CTL_DATA_IN) ? 1076 (xs->xs_control & XS_CTL_DATA_IN) ?
1077 BUS_DMASYNC_PREREAD 1077 BUS_DMASYNC_PREREAD
1078 : BUS_DMASYNC_PREWRITE); 1078 : BUS_DMASYNC_PREWRITE);
1079 } 1079 }
1080 } else { 1080 } else {
1081 /* 1081 /*
1082 * No data to transfer; just make a single simple SGL 1082 * No data to transfer; just make a single simple SGL
1083 * with zero length. 1083 * with zero length.
1084 */ 1084 */
1085 SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL; 1085 SGE_SIMPLE32 *se = (SGE_SIMPLE32 *) &mpt_req->SGL;
1086 memset(se, 0, sizeof(*se)); 1086 memset(se, 0, sizeof(*se));
1087 MPI_pSGE_SET_FLAGS(se, 1087 MPI_pSGE_SET_FLAGS(se,
1088 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1088 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1089 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1089 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1090 se->FlagsLength = htole32(se->FlagsLength); 1090 se->FlagsLength = htole32(se->FlagsLength);
1091 } 1091 }
1092 1092
1093 if (mpt->verbose > 1) 1093 if (mpt->verbose > 1)
1094 mpt_print_scsi_io_request(mpt_req); 1094 mpt_print_scsi_io_request(mpt_req);
1095 1095
1096 if (xs->timeout == 0) { 1096 if (xs->timeout == 0) {
1097 mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x", 1097 mpt_prt(mpt, "mpt_run_xfer: no timeout specified for request: 0x%x",
1098 req->index); 1098 req->index);
1099 xs->timeout = 500; 1099 xs->timeout = 500;
1100 } 1100 }
1101 1101
1102 s = splbio(); 1102 s = splbio();
1103 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0)) 1103 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1104 callout_reset(&xs->xs_callout, 1104 callout_reset(&xs->xs_callout,
1105 mstohz(xs->timeout), mpt_timeout, req); 1105 mstohz(xs->timeout), mpt_timeout, req);
1106 mpt_send_cmd(mpt, req); 1106 mpt_send_cmd(mpt, req);
1107 splx(s); 1107 splx(s);
1108 1108
1109 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0)) 1109 if (__predict_true((xs->xs_control & XS_CTL_POLL) == 0))
1110 return; 1110 return;
1111 1111
1112 /* 1112 /*
1113 * If we can't use interrupts, poll on completion. 1113 * If we can't use interrupts, poll on completion.
1114 */ 1114 */
1115 if (mpt_poll(mpt, xs, xs->timeout)) 1115 if (mpt_poll(mpt, xs, xs->timeout))
1116 mpt_timeout(req); 1116 mpt_timeout(req);
1117} 1117}
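
The chain-offset computation above works because MPI expresses ChainOffset in 32-bit words, so a byte offset within the request frame is converted with ">> 2". A minimal stand-alone sketch of the same arithmetic, using mock types (fake_request, fake_sge) rather than the real MSG_SCSI_IO_REQUEST and SGE_SIMPLE32:

    #include <stdint.h>
    #include <stdio.h>

    struct fake_sge {
    	uint32_t flags_length;
    	uint32_t address;
    };

    struct fake_request {
    	uint32_t header[12];		/* stand-in for the fixed message header */
    	struct fake_sge sgl[8];		/* inline SGL area */
    };

    int
    main(void)
    {
    	struct fake_request req;
    	struct fake_sge *se = &req.sgl[5];	/* where the chain element lands */
    	unsigned off = (unsigned)((char *)se - (char *)&req) >> 2;

    	/* 48 header bytes + 5 * 8 SGE bytes = 88 bytes = 22 words */
    	printf("ChainOffset = %u (32-bit words)\n", off);
    	return 0;
    }
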
1118 1118
1119static void 1119static void
1120mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm) 1120mpt_set_xfer_mode(mpt_softc_t *mpt, struct scsipi_xfer_mode *xm)
1121{ 1121{
1122 fCONFIG_PAGE_SCSI_DEVICE_1 tmp; 1122 fCONFIG_PAGE_SCSI_DEVICE_1 tmp;
1123 1123
1124 /* 1124 /*
1125 * Always allow disconnect; we don't have a way to disable 1125 * Always allow disconnect; we don't have a way to disable
1126 * it right now, in any case. 1126 * it right now, in any case.
1127 */ 1127 */
1128 mpt->mpt_disc_enable |= (1 << xm->xm_target); 1128 mpt->mpt_disc_enable |= (1 << xm->xm_target);
1129 1129
1130 if (xm->xm_mode & PERIPH_CAP_TQING) 1130 if (xm->xm_mode & PERIPH_CAP_TQING)
1131 mpt->mpt_tag_enable |= (1 << xm->xm_target); 1131 mpt->mpt_tag_enable |= (1 << xm->xm_target);
1132 else 1132 else
1133 mpt->mpt_tag_enable &= ~(1 << xm->xm_target); 1133 mpt->mpt_tag_enable &= ~(1 << xm->xm_target);
1134 1134
1135 if (mpt->is_scsi) { 1135 if (mpt->is_scsi) {
1136 /* 1136 /*
1137 * SCSI transport settings only make any sense for 1137 * SCSI transport settings only make any sense for
1138 * SCSI 1138 * SCSI
1139 */ 1139 */
1140 1140
1141 tmp = mpt->mpt_dev_page1[xm->xm_target]; 1141 tmp = mpt->mpt_dev_page1[xm->xm_target];
1142 1142
1143 /* 1143 /*
1144 * Set the wide/narrow parameter for the target. 1144 * Set the wide/narrow parameter for the target.
1145 */ 1145 */
1146 if (xm->xm_mode & PERIPH_CAP_WIDE16) 1146 if (xm->xm_mode & PERIPH_CAP_WIDE16)
1147 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 1147 tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
1148 else 1148 else
1149 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 1149 tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
1150 1150
1151 /* 1151 /*
1152 * Set the synchronous parameters for the target. 1152 * Set the synchronous parameters for the target.
1153 * 1153 *
1154 * XXX If we request sync transfers, we just go ahead and 1154 * XXX If we request sync transfers, we just go ahead and
1155 * XXX request the maximum available. We need finer control 1155 * XXX request the maximum available. We need finer control
1156 * XXX in order to implement Domain Validation. 1156 * XXX in order to implement Domain Validation.
1157 */ 1157 */
1158 tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK | 1158 tmp.RequestedParameters &= ~(MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK |
1159 MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK | 1159 MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK |
1160 MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS | 1160 MPI_SCSIDEVPAGE1_RP_DT | MPI_SCSIDEVPAGE1_RP_QAS |
1161 MPI_SCSIDEVPAGE1_RP_IU); 1161 MPI_SCSIDEVPAGE1_RP_IU);
1162 if (xm->xm_mode & PERIPH_CAP_SYNC) { 1162 if (xm->xm_mode & PERIPH_CAP_SYNC) {
1163 int factor, offset, np; 1163 int factor, offset, np;
1164 1164
1165 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff; 1165 factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff;
1166 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff; 1166 offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff;
1167 np = 0; 1167 np = 0;
1168 if (factor < 0x9) { 1168 if (factor < 0x9) {
1169 /* Ultra320 */ 1169 /* Ultra320 */
1170 np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU; 1170 np |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
1171 } 1171 }
1172 if (factor < 0xa) { 1172 if (factor < 0xa) {
1173 /* at least Ultra160 */ 1173 /* at least Ultra160 */
1174 np |= MPI_SCSIDEVPAGE1_RP_DT; 1174 np |= MPI_SCSIDEVPAGE1_RP_DT;
1175 } 1175 }
1176 np |= (factor << 8) | (offset << 16); 1176 np |= (factor << 8) | (offset << 16);
1177 tmp.RequestedParameters |= np; 1177 tmp.RequestedParameters |= np;
1178 } 1178 }
1179 1179
1180 host2mpt_config_page_scsi_device_1(&tmp); 1180 host2mpt_config_page_scsi_device_1(&tmp);
1181 if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) { 1181 if (mpt_write_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1182 mpt_prt(mpt, "unable to write Device Page 1"); 1182 mpt_prt(mpt, "unable to write Device Page 1");
1183 return; 1183 return;
1184 } 1184 }
1185 1185
1186 if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) { 1186 if (mpt_read_cfg_page(mpt, xm->xm_target, &tmp.Header)) {
1187 mpt_prt(mpt, "unable to read back Device Page 1"); 1187 mpt_prt(mpt, "unable to read back Device Page 1");
1188 return; 1188 return;
1189 } 1189 }
1190 1190
1191 mpt2host_config_page_scsi_device_1(&tmp); 1191 mpt2host_config_page_scsi_device_1(&tmp);
1192 mpt->mpt_dev_page1[xm->xm_target] = tmp; 1192 mpt->mpt_dev_page1[xm->xm_target] = tmp;
1193 if (mpt->verbose > 1) { 1193 if (mpt->verbose > 1) {
1194 mpt_prt(mpt, 1194 mpt_prt(mpt,
1195 "SPI Target %d Page 1: RequestedParameters %x Config %x", 1195 "SPI Target %d Page 1: RequestedParameters %x Config %x",
1196 xm->xm_target, 1196 xm->xm_target,
1197 mpt->mpt_dev_page1[xm->xm_target].RequestedParameters, 1197 mpt->mpt_dev_page1[xm->xm_target].RequestedParameters,
1198 mpt->mpt_dev_page1[xm->xm_target].Configuration); 1198 mpt->mpt_dev_page1[xm->xm_target].Configuration);
1199 } 1199 }
1200 } 1200 }
1201 1201
1202 /* 1202 /*
1203 * Make a note that we should perform an async callback at the 1203 * Make a note that we should perform an async callback at the
1204 * end of the next successful command completion to report the 1204 * end of the next successful command completion to report the
1205 * negotiated transfer mode. 1205 * negotiated transfer mode.
1206 */ 1206 */
1207 mpt->mpt_report_xfer_mode |= (1 << xm->xm_target); 1207 mpt->mpt_report_xfer_mode |= (1 << xm->xm_target);
1208} 1208}
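
The synchronous-parameter packing above places the period factor in bits 8-15 and the offset in bits 16-23 of RequestedParameters; mpt_get_xfer_mode() below unpacks with the same shifts. A small sketch of the round trip; pack_sync() is an illustrative helper, not a driver function:

    #include <stdint.h>
    #include <stdio.h>

    /* pack_sync() is an illustrative helper, not part of the driver. */
    static uint32_t
    pack_sync(uint32_t params, uint8_t factor, uint8_t offset)
    {
    	return params | ((uint32_t)factor << 8) | ((uint32_t)offset << 16);
    }

    int
    main(void)
    {
    	uint32_t rp = pack_sync(0, 0x08, 0x3f);	/* Ultra320-class factor */

    	/* the same shifts mpt_get_xfer_mode() uses to unpack */
    	printf("factor 0x%02x offset 0x%02x\n",
    	    (unsigned)((rp >> 8) & 0xff), (unsigned)((rp >> 16) & 0xff));
    	return 0;
    }
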
1209 1209
1210static void 1210static void
1211mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph) 1211mpt_get_xfer_mode(mpt_softc_t *mpt, struct scsipi_periph *periph)
1212{ 1212{
1213 fCONFIG_PAGE_SCSI_DEVICE_0 tmp; 1213 fCONFIG_PAGE_SCSI_DEVICE_0 tmp;
1214 struct scsipi_xfer_mode xm; 1214 struct scsipi_xfer_mode xm;
1215 int period, offset; 1215 int period, offset;
1216 1216
1217 tmp = mpt->mpt_dev_page0[periph->periph_target]; 1217 tmp = mpt->mpt_dev_page0[periph->periph_target];
1218 host2mpt_config_page_scsi_device_0(&tmp); 1218 host2mpt_config_page_scsi_device_0(&tmp);
1219 if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) { 1219 if (mpt_read_cfg_page(mpt, periph->periph_target, &tmp.Header)) {
1220 mpt_prt(mpt, "unable to read Device Page 0"); 1220 mpt_prt(mpt, "unable to read Device Page 0");
1221 return; 1221 return;
1222 } 1222 }
1223 mpt2host_config_page_scsi_device_0(&tmp); 1223 mpt2host_config_page_scsi_device_0(&tmp);
1224 1224
1225 if (mpt->verbose > 1) { 1225 if (mpt->verbose > 1) {
1226 mpt_prt(mpt, 1226 mpt_prt(mpt,
1227 "SPI Tgt %d Page 0: NParms %x Information %x", 1227 "SPI Tgt %d Page 0: NParms %x Information %x",
1228 periph->periph_target, 1228 periph->periph_target,
1229 tmp.NegotiatedParameters, tmp.Information); 1229 tmp.NegotiatedParameters, tmp.Information);
1230 } 1230 }
1231 1231
1232 xm.xm_target = periph->periph_target; 1232 xm.xm_target = periph->periph_target;
1233 xm.xm_mode = 0; 1233 xm.xm_mode = 0;
1234 1234
1235 if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) 1235 if (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE)
1236 xm.xm_mode |= PERIPH_CAP_WIDE16; 1236 xm.xm_mode |= PERIPH_CAP_WIDE16;
1237 1237
1238 period = (tmp.NegotiatedParameters >> 8) & 0xff; 1238 period = (tmp.NegotiatedParameters >> 8) & 0xff;
1239 offset = (tmp.NegotiatedParameters >> 16) & 0xff; 1239 offset = (tmp.NegotiatedParameters >> 16) & 0xff;
1240 if (offset) { 1240 if (offset) {
1241 xm.xm_period = period; 1241 xm.xm_period = period;
1242 xm.xm_offset = offset; 1242 xm.xm_offset = offset;
1243 xm.xm_mode |= PERIPH_CAP_SYNC; 1243 xm.xm_mode |= PERIPH_CAP_SYNC;
1244 } 1244 }
1245 1245
1246 /* 1246 /*
1247 * Tagged queueing is all controlled by us; there is no 1247 * Tagged queueing is all controlled by us; there is no
1248 * other setting to query. 1248 * other setting to query.
1249 */ 1249 */
1250 if (mpt->mpt_tag_enable & (1 << periph->periph_target)) 1250 if (mpt->mpt_tag_enable & (1 << periph->periph_target))
1251 xm.xm_mode |= PERIPH_CAP_TQING; 1251 xm.xm_mode |= PERIPH_CAP_TQING;
1252 1252
1253 /* 1253 /*
1254 * We're going to deliver the async event, so clear the marker. 1254 * We're going to deliver the async event, so clear the marker.
1255 */ 1255 */
1256 mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target); 1256 mpt->mpt_report_xfer_mode &= ~(1 << periph->periph_target);
1257 1257
1258 scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm); 1258 scsipi_async_event(&mpt->sc_channel, ASYNC_EVENT_XFER_MODE, &xm);
1259} 1259}
1260 1260
1261static void 1261static void
1262mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply) 1262mpt_ctlop(mpt_softc_t *mpt, void *vmsg, uint32_t reply)
1263{ 1263{
1264 MSG_DEFAULT_REPLY *dmsg = vmsg; 1264 MSG_DEFAULT_REPLY *dmsg = vmsg;
1265 1265
1266 switch (dmsg->Function) { 1266 switch (dmsg->Function) {
1267 case MPI_FUNCTION_EVENT_NOTIFICATION: 1267 case MPI_FUNCTION_EVENT_NOTIFICATION:
1268 mpt_event_notify_reply(mpt, vmsg); 1268 mpt_event_notify_reply(mpt, vmsg);
1269 mpt_free_reply(mpt, (reply << 1)); 1269 mpt_free_reply(mpt, (reply << 1));
1270 break; 1270 break;
1271 1271
1272 case MPI_FUNCTION_EVENT_ACK: 1272 case MPI_FUNCTION_EVENT_ACK:
1273 mpt_free_reply(mpt, (reply << 1)); 1273 mpt_free_reply(mpt, (reply << 1));
1274 break; 1274 break;
1275 1275
1276 case MPI_FUNCTION_PORT_ENABLE: 1276 case MPI_FUNCTION_PORT_ENABLE:
1277 { 1277 {
1278 MSG_PORT_ENABLE_REPLY *msg = vmsg; 1278 MSG_PORT_ENABLE_REPLY *msg = vmsg;
1279 int index = le32toh(msg->MsgContext) & ~0x80000000; 1279 int index = le32toh(msg->MsgContext) & ~0x80000000;
1280 if (mpt->verbose > 1) 1280 if (mpt->verbose > 1)
1281 mpt_prt(mpt, "enable port reply index %d", index); 1281 mpt_prt(mpt, "enable port reply index %d", index);
1282 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { 1282 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1283 request_t *req = &mpt->request_pool[index]; 1283 request_t *req = &mpt->request_pool[index];
1284 req->debug = REQ_DONE; 1284 req->debug = REQ_DONE;
1285 } 1285 }
1286 mpt_free_reply(mpt, (reply << 1)); 1286 mpt_free_reply(mpt, (reply << 1));
1287 break; 1287 break;
1288 } 1288 }
1289 1289
1290 case MPI_FUNCTION_CONFIG: 1290 case MPI_FUNCTION_CONFIG:
1291 { 1291 {
1292 MSG_CONFIG_REPLY *msg = vmsg; 1292 MSG_CONFIG_REPLY *msg = vmsg;
1293 int index = le32toh(msg->MsgContext) & ~0x80000000; 1293 int index = le32toh(msg->MsgContext) & ~0x80000000;
1294 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { 1294 if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) {
1295 request_t *req = &mpt->request_pool[index]; 1295 request_t *req = &mpt->request_pool[index];
1296 req->debug = REQ_DONE; 1296 req->debug = REQ_DONE;
1297 req->sequence = reply; 1297 req->sequence = reply;
1298 } else 1298 } else
1299 mpt_free_reply(mpt, (reply << 1)); 1299 mpt_free_reply(mpt, (reply << 1));
1300 break; 1300 break;
1301 } 1301 }
1302 1302
1303 default: 1303 default:
1304 mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function); 1304 mpt_prt(mpt, "unknown ctlop: 0x%x", dmsg->Function);
1305 } 1305 }
1306} 1306}
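
Internally generated requests are stamped with their pool index plus the high bit of MsgContext (see the event-ack path below, which sets req->index | 0x80000000), and the reply handlers above recover the index by clearing that bit. A sketch of the round trip; the CTLOP_FLAG name is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define CTLOP_FLAG	0x80000000u	/* illustrative name for the high bit */

    int
    main(void)
    {
    	uint32_t index = 42;
    	uint32_t msgctx = index | CTLOP_FLAG;	/* as stamped on the request */

    	/* the reply path recovers the request-pool index */
    	printf("index = %u\n", (unsigned)(msgctx & ~CTLOP_FLAG));
    	return 0;
    }
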
1307 1307
1308static void 1308static void
1309mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg) 1309mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg)
1310{ 1310{
1311 1311
1312 switch (le32toh(msg->Event)) { 1312 switch (le32toh(msg->Event)) {
1313 case MPI_EVENT_LOG_DATA: 1313 case MPI_EVENT_LOG_DATA:
1314 { 1314 {
1315 int i; 1315 int i;
1316 1316
1317 /* Some error occurred that the Fusion wants logged. */ 1317 /* Some error occurred that the Fusion wants logged. */
1318 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo); 1318 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x", msg->IOCLogInfo);
1319 mpt_prt(mpt, "EvtLogData: Event Data:"); 1319 mpt_prt(mpt, "EvtLogData: Event Data:");
1320 for (i = 0; i < msg->EventDataLength; i++) { 1320 for (i = 0; i < msg->EventDataLength; i++) {
1321 if ((i % 4) == 0) 1321 if ((i % 4) == 0)
1322 printf("%s:\t", device_xname(mpt->sc_dev)); 1322 printf("%s:\t", device_xname(mpt->sc_dev));
1323 printf("0x%08x%c", msg->Data[i], 1323 printf("0x%08x%c", msg->Data[i],
1324 ((i % 4) == 3) ? '\n' : ' '); 1324 ((i % 4) == 3) ? '\n' : ' ');
1325 } 1325 }
1326 if ((i % 4) != 0) 1326 if ((i % 4) != 0)
1327 printf("\n"); 1327 printf("\n");
1328 break; 1328 break;
1329 } 1329 }
1330 1330
1331 case MPI_EVENT_UNIT_ATTENTION: 1331 case MPI_EVENT_UNIT_ATTENTION:
1332 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x", 1332 mpt_prt(mpt, "Unit Attn: Bus 0x%02x Target 0x%02x",
1333 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff); 1333 (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff);
1334 break; 1334 break;
1335 1335
1336 case MPI_EVENT_IOC_BUS_RESET: 1336 case MPI_EVENT_IOC_BUS_RESET:
1337 /* We generated a bus reset. */ 1337 /* We generated a bus reset. */
1338 mpt_prt(mpt, "IOC Bus Reset Port %d", 1338 mpt_prt(mpt, "IOC Bus Reset Port %d",
1339 (msg->Data[0] >> 8) & 0xff); 1339 (msg->Data[0] >> 8) & 0xff);
1340 break; 1340 break;
1341 1341
1342 case MPI_EVENT_EXT_BUS_RESET: 1342 case MPI_EVENT_EXT_BUS_RESET:
1343 /* Someone else generated a bus reset. */ 1343 /* Someone else generated a bus reset. */
1344 mpt_prt(mpt, "External Bus Reset"); 1344 mpt_prt(mpt, "External Bus Reset");
1345 /* 1345 /*
1346 * These replies don't return EventData like the MPI 1346 * These replies don't return EventData like the MPI
1347 * spec says they do. 1347 * spec says they do.
1348 */ 1348 */
1349 /* XXX Send an async event? */ 1349 /* XXX Send an async event? */
1350 break; 1350 break;
1351 1351
1352 case MPI_EVENT_RESCAN: 1352 case MPI_EVENT_RESCAN:
1353 /* 1353 /*
1354 * In general, this means a device has been added 1354 * In general, this means a device has been added
1355 * to the loop. 1355 * to the loop.
1356 */ 1356 */
1357 mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff); 1357 mpt_prt(mpt, "Rescan Port %d", (msg->Data[0] >> 8) & 0xff);
1358 /* XXX Send an async event? */ 1358 /* XXX Send an async event? */
1359 break; 1359 break;
1360 1360
1361 case MPI_EVENT_LINK_STATUS_CHANGE: 1361 case MPI_EVENT_LINK_STATUS_CHANGE:
1362 mpt_prt(mpt, "Port %d: Link state %s", 1362 mpt_prt(mpt, "Port %d: Link state %s",
1363 (msg->Data[1] >> 8) & 0xff, 1363 (msg->Data[1] >> 8) & 0xff,
1364 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active"); 1364 (msg->Data[0] & 0xff) == 0 ? "Failed" : "Active");
1365 break; 1365 break;
1366 1366
1367 case MPI_EVENT_LOOP_STATE_CHANGE: 1367 case MPI_EVENT_LOOP_STATE_CHANGE:
1368 switch ((msg->Data[0] >> 16) & 0xff) { 1368 switch ((msg->Data[0] >> 16) & 0xff) {
1369 case 0x01: 1369 case 0x01:
1370 mpt_prt(mpt, 1370 mpt_prt(mpt,
1371 "Port %d: FC Link Event: LIP(%02x,%02x) " 1371 "Port %d: FC Link Event: LIP(%02x,%02x) "
1372 "(Loop Initialization)", 1372 "(Loop Initialization)",
1373 (msg->Data[1] >> 8) & 0xff, 1373 (msg->Data[1] >> 8) & 0xff,
1374 (msg->Data[0] >> 8) & 0xff, 1374 (msg->Data[0] >> 8) & 0xff,
1375 (msg->Data[0] ) & 0xff); 1375 (msg->Data[0] ) & 0xff);
1376 switch ((msg->Data[0] >> 8) & 0xff) { 1376 switch ((msg->Data[0] >> 8) & 0xff) {
1377 case 0xf7: 1377 case 0xf7:
1378 if ((msg->Data[0] & 0xff) == 0xf7) 1378 if ((msg->Data[0] & 0xff) == 0xf7)
1379 mpt_prt(mpt, "\tDevice needs AL_PA"); 1379 mpt_prt(mpt, "\tDevice needs AL_PA");
1380 else 1380 else
1381 mpt_prt(mpt, "\tDevice %02x doesn't " 1381 mpt_prt(mpt, "\tDevice %02x doesn't "
1382 "like FC performance", 1382 "like FC performance",
1383 msg->Data[0] & 0xff); 1383 msg->Data[0] & 0xff);
1384 break; 1384 break;
1385 1385
1386 case 0xf8: 1386 case 0xf8:
1387 if ((msg->Data[0] & 0xff) == 0xf7) 1387 if ((msg->Data[0] & 0xff) == 0xf7)
1388 mpt_prt(mpt, "\tDevice detected loop " 1388 mpt_prt(mpt, "\tDevice detected loop "
1389 "failure before acquiring AL_PA"); 1389 "failure before acquiring AL_PA");
1390 else 1390 else
1391 mpt_prt(mpt, "\tDevice %02x detected " 1391 mpt_prt(mpt, "\tDevice %02x detected "
1392 "loop failure", 1392 "loop failure",
1393 msg->Data[0] & 0xff); 1393 msg->Data[0] & 0xff);
1394 break; 1394 break;
1395 1395
1396 default: 1396 default:
1397 mpt_prt(mpt, "\tDevice %02x requests that " 1397 mpt_prt(mpt, "\tDevice %02x requests that "
1398 "device %02x reset itself", 1398 "device %02x reset itself",
1399 msg->Data[0] & 0xff, 1399 msg->Data[0] & 0xff,
1400 (msg->Data[0] >> 8) & 0xff); 1400 (msg->Data[0] >> 8) & 0xff);
1401 break; 1401 break;
1402 } 1402 }
1403 break; 1403 break;
1404 1404
1405 case 0x02: 1405 case 0x02:
1406 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) " 1406 mpt_prt(mpt, "Port %d: FC Link Event: LPE(%02x,%02x) "
1407 "(Loop Port Enable)", 1407 "(Loop Port Enable)",
1408 (msg->Data[1] >> 8) & 0xff, 1408 (msg->Data[1] >> 8) & 0xff,
1409 (msg->Data[0] >> 8) & 0xff, 1409 (msg->Data[0] >> 8) & 0xff,
1410 (msg->Data[0] ) & 0xff); 1410 (msg->Data[0] ) & 0xff);
1411 break; 1411 break;
1412 1412
1413 case 0x03: 1413 case 0x03:
1414 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) " 1414 mpt_prt(mpt, "Port %d: FC Link Event: LPB(%02x,%02x) "
1415 "(Loop Port Bypass)", 1415 "(Loop Port Bypass)",
1416 (msg->Data[1] >> 8) & 0xff, 1416 (msg->Data[1] >> 8) & 0xff,
1417 (msg->Data[0] >> 8) & 0xff, 1417 (msg->Data[0] >> 8) & 0xff,
1418 (msg->Data[0] ) & 0xff); 1418 (msg->Data[0] ) & 0xff);
1419 break; 1419 break;
1420 1420
1421 default: 1421 default:
1422 mpt_prt(mpt, "Port %d: FC Link Event: " 1422 mpt_prt(mpt, "Port %d: FC Link Event: "
1423 "Unknown event (%02x %02x %02x)", 1423 "Unknown event (%02x %02x %02x)",
1424 (msg->Data[1] >> 8) & 0xff, 1424 (msg->Data[1] >> 8) & 0xff,
1425 (msg->Data[0] >> 16) & 0xff, 1425 (msg->Data[0] >> 16) & 0xff,
1426 (msg->Data[0] >> 8) & 0xff, 1426 (msg->Data[0] >> 8) & 0xff,
1427 (msg->Data[0] ) & 0xff); 1427 (msg->Data[0] ) & 0xff);
1428 break; 1428 break;
1429 } 1429 }
1430 break; 1430 break;
1431 1431
1432 case MPI_EVENT_LOGOUT: 1432 case MPI_EVENT_LOGOUT:
1433 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x", 1433 mpt_prt(mpt, "Port %d: FC Logout: N_PortID: %02x",
1434 (msg->Data[1] >> 8) & 0xff, msg->Data[0]); 1434 (msg->Data[1] >> 8) & 0xff, msg->Data[0]);
1435 break; 1435 break;
1436 1436
1437 case MPI_EVENT_EVENT_CHANGE: 1437 case MPI_EVENT_EVENT_CHANGE:
1438 /* 1438 /*
1439 * This is just an acknowledgement of our 1439 * This is just an acknowledgement of our
1440 * mpt_send_event_request(). 1440 * mpt_send_event_request().
1441 */ 1441 */
1442 break; 1442 break;
1443 1443
1444 case MPI_EVENT_SAS_PHY_LINK_STATUS: 1444 case MPI_EVENT_SAS_PHY_LINK_STATUS:
1445 switch ((msg->Data[0] >> 12) & 0x0f) { 1445 switch ((msg->Data[0] >> 12) & 0x0f) {
1446 case 0x00: 1446 case 0x00:
1447 mpt_prt(mpt, "Phy %d: Link Status Unknown", 1447 mpt_prt(mpt, "Phy %d: Link Status Unknown",
1448 msg->Data[0] & 0xff); 1448 msg->Data[0] & 0xff);
1449 break; 1449 break;
1450 case 0x01: 1450 case 0x01:
1451 mpt_prt(mpt, "Phy %d: Link Disabled", 1451 mpt_prt(mpt, "Phy %d: Link Disabled",
1452 msg->Data[0] & 0xff); 1452 msg->Data[0] & 0xff);
1453 break; 1453 break;
1454 case 0x02: 1454 case 0x02:
1455 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation", 1455 mpt_prt(mpt, "Phy %d: Failed Speed Negotiation",
1456 msg->Data[0] & 0xff); 1456 msg->Data[0] & 0xff);
1457 break; 1457 break;
1458 case 0x03: 1458 case 0x03:
1459 mpt_prt(mpt, "Phy %d: SATA OOB Complete", 1459 mpt_prt(mpt, "Phy %d: SATA OOB Complete",
1460 msg->Data[0] & 0xff); 1460 msg->Data[0] & 0xff);
1461 break; 1461 break;
1462 case 0x08: 1462 case 0x08:
1463 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps", 1463 mpt_prt(mpt, "Phy %d: Link Rate 1.5 Gbps",
1464 msg->Data[0] & 0xff); 1464 msg->Data[0] & 0xff);
1465 break; 1465 break;
1466 case 0x09: 1466 case 0x09:
1467 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps", 1467 mpt_prt(mpt, "Phy %d: Link Rate 3.0 Gbps",
1468 msg->Data[0] & 0xff); 1468 msg->Data[0] & 0xff);
1469 break; 1469 break;
1470 default: 1470 default:
1471 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: " 1471 mpt_prt(mpt, "Phy %d: SAS Phy Link Status Event: "
1472 "Unknown event (%0x)", 1472 "Unknown event (%0x)",
1473 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff); 1473 msg->Data[0] & 0xff, (msg->Data[0] >> 8) & 0xff);
1474 } 1474 }
1475 break; 1475 break;
1476 1476
1477 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 1477 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
1478 case MPI_EVENT_SAS_DISCOVERY: 1478 case MPI_EVENT_SAS_DISCOVERY:
1479 /* ignore these events for now */ 1479 /* ignore these events for now */
1480 break; 1480 break;
1481 1481
1482 case MPI_EVENT_QUEUE_FULL: 1482 case MPI_EVENT_QUEUE_FULL:
1483 /* This can get a little chatty */ 1483 /* This can get a little chatty */
1484 if (mpt->verbose > 0) 1484 if (mpt->verbose > 0)
1485 mpt_prt(mpt, "Queue Full Event"); 1485 mpt_prt(mpt, "Queue Full Event");
1486 break; 1486 break;
1487 1487
1488 default: 1488 default:
1489 mpt_prt(mpt, "Unknown async event: 0x%x", le32toh(msg->Event)); 1489 mpt_prt(mpt, "Unknown async event: 0x%x", le32toh(msg->Event));
1490 break; 1490 break;
1491 } 1491 }
1492 1492
1493 if (msg->AckRequired) { 1493 if (msg->AckRequired) {
1494 MSG_EVENT_ACK *ackp; 1494 MSG_EVENT_ACK *ackp;
1495 request_t *req; 1495 request_t *req;
1496 1496
1497 if ((req = mpt_get_request(mpt)) == NULL) { 1497 if ((req = mpt_get_request(mpt)) == NULL) {
1498 /* XXX XXX XXX XXXJRT */ 1498 /* XXX XXX XXX XXXJRT */
1499 panic("mpt_event_notify_reply: unable to allocate " 1499 panic("mpt_event_notify_reply: unable to allocate "
1500 "request structure"); 1500 "request structure");
1501 } 1501 }
1502 1502
1503 ackp = (MSG_EVENT_ACK *) req->req_vbuf; 1503 ackp = (MSG_EVENT_ACK *) req->req_vbuf;
1504 memset(ackp, 0, sizeof(*ackp)); 1504 memset(ackp, 0, sizeof(*ackp));
1505 ackp->Function = MPI_FUNCTION_EVENT_ACK; 1505 ackp->Function = MPI_FUNCTION_EVENT_ACK;
1506 ackp->Event = msg->Event; 1506 ackp->Event = msg->Event;
1507 ackp->EventContext = msg->EventContext; 1507 ackp->EventContext = msg->EventContext;
1508 ackp->MsgContext = htole32(req->index | 0x80000000); 1508 ackp->MsgContext = htole32(req->index | 0x80000000);
1509 mpt_check_doorbell(mpt); 1509 mpt_check_doorbell(mpt);
1510 mpt_send_cmd(mpt, req); 1510 mpt_send_cmd(mpt, req);
1511 } 1511 }
1512} 1512}
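
Most of the event decoding above is shift-and-mask arithmetic on msg->Data[0]; for the SAS phy link status event, the phy number sits in the low byte and the link-rate code in bits 12-15. A sketch with a sample value only:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint32_t data0 = 0x00009003;	/* sample: phy 3, rate code 0x9 */

    	printf("phy %u, link rate code 0x%x\n",
    	    (unsigned)(data0 & 0xff), (unsigned)((data0 >> 12) & 0x0f));
    	return 0;
    }
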
1513 1513
1514static void 1514static void
1515mpt_bus_reset(mpt_softc_t *mpt) 1515mpt_bus_reset(mpt_softc_t *mpt)
1516{ 1516{
1517 request_t *req; 1517 request_t *req;
1518 MSG_SCSI_TASK_MGMT *mngt_req; 1518 MSG_SCSI_TASK_MGMT *mngt_req;
1519 int s; 1519 int s;
1520 1520
1521 s = splbio(); 1521 s = splbio();
1522 if (mpt->mngt_req) { 1522 if (mpt->mngt_req) {
1523 /* request already queued; can't do more */ 1523 /* request already queued; can't do more */
1524 splx(s); 1524 splx(s);
1525 return; 1525 return;
1526 } 1526 }
1527 req = mpt_get_request(mpt); 1527 req = mpt_get_request(mpt);
1528 if (__predict_false(req == NULL)) { 1528 if (__predict_false(req == NULL)) {
1529 mpt_prt(mpt, "no mngt request"); 1529 mpt_prt(mpt, "no mngt request");
1530 splx(s); 1530 splx(s);
1531 return; 1531 return;
1532 } 1532 }
1533 mpt->mngt_req = req; 1533 mpt->mngt_req = req;
1534 splx(s); 1534 splx(s);
1535 mngt_req = req->req_vbuf; 1535 mngt_req = req->req_vbuf;
1536 memset(mngt_req, 0, sizeof(*mngt_req)); 1536 memset(mngt_req, 0, sizeof(*mngt_req));
1537 mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 1537 mngt_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
1538 mngt_req->Bus = mpt->bus; 1538 mngt_req->Bus = mpt->bus;
1539 mngt_req->TargetID = 0; 1539 mngt_req->TargetID = 0;
1540 mngt_req->ChainOffset = 0; 1540 mngt_req->ChainOffset = 0;
1541 mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; 1541 mngt_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
1542 mngt_req->Reserved1 = 0; 1542 mngt_req->Reserved1 = 0;
1543 mngt_req->MsgFlags = 1543 mngt_req->MsgFlags =
1544 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0; 1544 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0;
1545 mngt_req->MsgContext = req->index; 1545 mngt_req->MsgContext = req->index;
1546 mngt_req->TaskMsgContext = 0; 1546 mngt_req->TaskMsgContext = 0;
1547 s = splbio(); 1547 s = splbio();
1548 mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req); 1548 mpt_send_handshake_cmd(mpt, sizeof(*mngt_req), mngt_req);
1549 splx(s); 1549 splx(s);
1550} 1550}
1551 1551
1552/***************************************************************************** 1552/*****************************************************************************
1553 * SCSI interface routines 1553 * SCSI interface routines
1554 *****************************************************************************/ 1554 *****************************************************************************/
1555 1555
1556static void 1556static void
1557mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req, 1557mpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
1558 void *arg) 1558 void *arg)
1559{ 1559{
1560 struct scsipi_adapter *adapt = chan->chan_adapter; 1560 struct scsipi_adapter *adapt = chan->chan_adapter;
1561 mpt_softc_t *mpt = device_private(adapt->adapt_dev); 1561 mpt_softc_t *mpt = device_private(adapt->adapt_dev);
1562 1562
1563 switch (req) { 1563 switch (req) {
1564 case ADAPTER_REQ_RUN_XFER: 1564 case ADAPTER_REQ_RUN_XFER:
1565 mpt_run_xfer(mpt, (struct scsipi_xfer *) arg); 1565 mpt_run_xfer(mpt, (struct scsipi_xfer *) arg);
1566 return; 1566 return;
1567 1567
1568 case ADAPTER_REQ_GROW_RESOURCES: 1568 case ADAPTER_REQ_GROW_RESOURCES:
1569 /* Not supported. */ 1569 /* Not supported. */
1570 return; 1570 return;
1571 1571
1572 case ADAPTER_REQ_SET_XFER_MODE: 1572 case ADAPTER_REQ_SET_XFER_MODE:
1573 mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg); 1573 mpt_set_xfer_mode(mpt, (struct scsipi_xfer_mode *) arg);
1574 return; 1574 return;
1575 } 1575 }
1576} 1576}
1577 1577
1578static void 1578static void
1579mpt_minphys(struct buf *bp) 1579mpt_minphys(struct buf *bp)
1580{ 1580{
1581 1581
1582/* 1582/*
1583 * Subtract one from the SGL limit, since we need an extra one to handle 1583 * Subtract one from the SGL limit, since we need an extra one to handle
1584 * a non-page-aligned transfer. 1584 * a non-page-aligned transfer.
1585 */ 1585 */
1586#define MPT_MAX_XFER ((MPT_SGL_MAX - 1) * PAGE_SIZE) 1586#define MPT_MAX_XFER ((MPT_SGL_MAX - 1) * PAGE_SIZE)
1587 1587
1588 if (bp->b_bcount > MPT_MAX_XFER) 1588 if (bp->b_bcount > MPT_MAX_XFER)
1589 bp->b_bcount = MPT_MAX_XFER; 1589 bp->b_bcount = MPT_MAX_XFER;
1590 minphys(bp); 1590 minphys(bp);
1591} 1591}
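
As a worked example of the clamp above, assuming a PAGE_SIZE of 4096 and a hypothetical MPT_SGL_MAX of 128: MPT_MAX_XFER comes to (128 - 1) * 4096 = 520192 bytes, so a 1 MiB request is trimmed before minphys() sees it:

    #include <stdio.h>

    int
    main(void)
    {
    	const long page_size = 4096;	/* assumed PAGE_SIZE */
    	const long sgl_max = 128;	/* hypothetical MPT_SGL_MAX */
    	const long max_xfer = (sgl_max - 1) * page_size;
    	long bcount = 1024 * 1024;	/* a 1 MiB request */

    	if (bcount > max_xfer)
    		bcount = max_xfer;
    	printf("clamped to %ld bytes\n", bcount);	/* 520192 */
    	return 0;
    }
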
1592 1592
1593static int 1593static int
1594mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg, 1594mpt_ioctl(struct scsipi_channel *chan, u_long cmd, void *arg,
1595 int flag, struct proc *p) 1595 int flag, struct proc *p)
1596{ 1596{
1597 mpt_softc_t *mpt; 1597 mpt_softc_t *mpt;
1598 int s; 1598 int s;
1599 1599
1600 mpt = device_private(chan->chan_adapter->adapt_dev); 1600 mpt = device_private(chan->chan_adapter->adapt_dev);
1601 switch (cmd) { 1601 switch (cmd) {
1602 case SCBUSIORESET: 1602 case SCBUSIORESET:
1603 mpt_bus_reset(mpt); 1603 mpt_bus_reset(mpt);
1604 s = splbio(); 1604 s = splbio();
1605 mpt_intr(mpt); 1605 mpt_intr(mpt);
1606 splx(s); 1606 splx(s);
1607 return (0); 1607 return (0);
1608 default: 1608 default:
1609 return (ENOTTY); 1609 return (ENOTTY);
1610 } 1610 }
1611} 1611}
1612 1612
1613#if NBIO > 0 1613#if NBIO > 0
1614static fCONFIG_PAGE_IOC_2 * 1614static fCONFIG_PAGE_IOC_2 *
1615mpt_get_cfg_page_ioc2(mpt_softc_t *mpt) 1615mpt_get_cfg_page_ioc2(mpt_softc_t *mpt)
1616{ 1616{
1617 fCONFIG_PAGE_HEADER hdr; 1617 fCONFIG_PAGE_HEADER hdr;
1618 fCONFIG_PAGE_IOC_2 *ioc2; 1618 fCONFIG_PAGE_IOC_2 *ioc2;
1619 int rv; 1619 int rv;
1620 1620
1621 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr); 1621 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr);
1622 if (rv) 1622 if (rv)
1623 return NULL; 1623 return NULL;
1624 1624
1625 ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); 1625 ioc2 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1626 if (ioc2 == NULL) 1626 if (ioc2 == NULL)
1627 return NULL; 1627 return NULL;
1628 1628
1629 memcpy(ioc2, &hdr, sizeof(hdr)); 1629 memcpy(ioc2, &hdr, sizeof(hdr));
1630 1630
1631 rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header); 1631 rv = mpt_read_cfg_page(mpt, 0, &ioc2->Header);
1632 if (rv) 1632 if (rv)
1633 goto fail; 1633 goto fail;
1634 mpt2host_config_page_ioc_2(ioc2); 1634 mpt2host_config_page_ioc_2(ioc2);
1635 1635
1636 return ioc2; 1636 return ioc2;
1637 1637
1638fail: 1638fail:
1639 free(ioc2, M_DEVBUF); 1639 free(ioc2, M_DEVBUF);
1640 return NULL; 1640 return NULL;
1641} 1641}
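
This helper and the three that follow share one pattern: read the page header, allocate PageLength * 4 bytes (PageLength counts 32-bit words), seed the buffer with the header, then read the full page through that header. A stand-alone sketch of the sizing step, with struct page_header standing in for fCONFIG_PAGE_HEADER and calloc() standing in for malloc(..., M_ZERO):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-in for fCONFIG_PAGE_HEADER */
    struct page_header {
    	uint8_t PageVersion;
    	uint8_t PageLength;	/* length in 32-bit words */
    	uint8_t PageNumber;
    	uint8_t PageType;
    };

    int
    main(void)
    {
    	struct page_header hdr = { 0, 12, 2, 1 };	/* pretend header read */
    	size_t bytes = (size_t)hdr.PageLength * 4;	/* words -> bytes */
    	void *page = calloc(1, bytes);

    	if (page == NULL)
    		return 1;
    	memcpy(page, &hdr, sizeof(hdr));	/* header leads the full page */
    	printf("page buffer: %zu bytes\n", bytes);
    	free(page);
    	return 0;
    }
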
1642 1642
1643static fCONFIG_PAGE_IOC_3 * 1643static fCONFIG_PAGE_IOC_3 *
1644mpt_get_cfg_page_ioc3(mpt_softc_t *mpt) 1644mpt_get_cfg_page_ioc3(mpt_softc_t *mpt)
1645{ 1645{
1646 fCONFIG_PAGE_HEADER hdr; 1646 fCONFIG_PAGE_HEADER hdr;
1647 fCONFIG_PAGE_IOC_3 *ioc3; 1647 fCONFIG_PAGE_IOC_3 *ioc3;
1648 int rv; 1648 int rv;
1649 1649
1650 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr); 1650 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 3, 0, &hdr);
1651 if (rv) 1651 if (rv)
1652 return NULL; 1652 return NULL;
1653 1653
1654 ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); 1654 ioc3 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1655 if (ioc3 == NULL) 1655 if (ioc3 == NULL)
1656 return NULL; 1656 return NULL;
1657 1657
1658 memcpy(ioc3, &hdr, sizeof(hdr)); 1658 memcpy(ioc3, &hdr, sizeof(hdr));
1659 1659
1660 rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header); 1660 rv = mpt_read_cfg_page(mpt, 0, &ioc3->Header);
1661 if (rv) 1661 if (rv)
1662 goto fail; 1662 goto fail;
1663 1663
1664 return ioc3; 1664 return ioc3;
1665 1665
1666fail: 1666fail:
1667 free(ioc3, M_DEVBUF); 1667 free(ioc3, M_DEVBUF);
1668 return NULL; 1668 return NULL;
1669} 1669}
1670 1670
1671 1671
1672static fCONFIG_PAGE_RAID_VOL_0 * 1672static fCONFIG_PAGE_RAID_VOL_0 *
1673mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address) 1673mpt_get_cfg_page_raid_vol0(mpt_softc_t *mpt, int address)
1674{ 1674{
1675 fCONFIG_PAGE_HEADER hdr; 1675 fCONFIG_PAGE_HEADER hdr;
1676 fCONFIG_PAGE_RAID_VOL_0 *rvol0; 1676 fCONFIG_PAGE_RAID_VOL_0 *rvol0;
1677 int rv; 1677 int rv;
1678 1678
1679 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0, 1679 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0,
1680 address, &hdr); 1680 address, &hdr);
1681 if (rv) 1681 if (rv)
1682 return NULL; 1682 return NULL;
1683 1683
1684 rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); 1684 rvol0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1685 if (rvol0 == NULL) 1685 if (rvol0 == NULL)
1686 return NULL; 1686 return NULL;
1687 1687
1688 memcpy(rvol0, &hdr, sizeof(hdr)); 1688 memcpy(rvol0, &hdr, sizeof(hdr));
1689 1689
1690 rv = mpt_read_cfg_page(mpt, address, &rvol0->Header); 1690 rv = mpt_read_cfg_page(mpt, address, &rvol0->Header);
1691 if (rv) 1691 if (rv)
1692 goto fail; 1692 goto fail;
1693 mpt2host_config_page_raid_vol_0(rvol0); 1693 mpt2host_config_page_raid_vol_0(rvol0);
1694 1694
1695 return rvol0; 1695 return rvol0;
1696 1696
1697fail: 1697fail:
1698 free(rvol0, M_DEVBUF); 1698 free(rvol0, M_DEVBUF);
1699 return NULL; 1699 return NULL;
1700} 1700}
1701 1701
1702static fCONFIG_PAGE_RAID_PHYS_DISK_0 * 1702static fCONFIG_PAGE_RAID_PHYS_DISK_0 *
1703mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address) 1703mpt_get_cfg_page_raid_phys_disk0(mpt_softc_t *mpt, int address)
1704{ 1704{
1705 fCONFIG_PAGE_HEADER hdr; 1705 fCONFIG_PAGE_HEADER hdr;
1706 fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0; 1706 fCONFIG_PAGE_RAID_PHYS_DISK_0 *physdisk0;
1707 int rv; 1707 int rv;
1708 1708
1709 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0, 1709 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, 0,
1710 address, &hdr); 1710 address, &hdr);
1711 if (rv) 1711 if (rv)
1712 return NULL; 1712 return NULL;
1713 1713
1714 physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO); 1714 physdisk0 = malloc(hdr.PageLength * 4, M_DEVBUF, M_WAITOK | M_ZERO);
1715 if (physdisk0 == NULL) 1715 if (physdisk0 == NULL)
1716 return NULL; 1716 return NULL;
1717 1717
1718 memcpy(physdisk0, &hdr, sizeof(hdr)); 1718 memcpy(physdisk0, &hdr, sizeof(hdr));
1719 1719
1720 rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header); 1720 rv = mpt_read_cfg_page(mpt, address, &physdisk0->Header);
1721 if (rv) 1721 if (rv)
1722 goto fail; 1722 goto fail;
1723 mpt2host_config_page_raid_phys_disk_0(physdisk0); 1723 mpt2host_config_page_raid_phys_disk_0(physdisk0);
1724 1724
1725 return physdisk0; 1725 return physdisk0;
1726 1726
1727fail: 1727fail:
1728 free(physdisk0, M_DEVBUF); 1728 free(physdisk0, M_DEVBUF);
1729 return NULL; 1729 return NULL;
1730} 1730}
1731 1731
1732static bool 1732static bool
1733mpt_is_raid(mpt_softc_t *mpt) 1733mpt_is_raid(mpt_softc_t *mpt)
1734{ 1734{
1735 fCONFIG_PAGE_IOC_2 *ioc2; 1735 fCONFIG_PAGE_IOC_2 *ioc2;
1736 bool is_raid = false; 1736 bool is_raid = false;
1737 1737
1738 ioc2 = mpt_get_cfg_page_ioc2(mpt); 1738 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1739 if (ioc2 == NULL) 1739 if (ioc2 == NULL)
1740 return false; 1740 return false;
1741 1741
1742 if (ioc2->CapabilitiesFlags != 0xdeadbeef) { 1742 if (ioc2->CapabilitiesFlags != 0xdeadbeef) {
1743 is_raid = !!(ioc2->CapabilitiesFlags & 1743 is_raid = !!(ioc2->CapabilitiesFlags &
1744 (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT| 1744 (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT|
1745 MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT| 1745 MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT|
1746 MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)); 1746 MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT));
1747 } 1747 }
1748 1748
1749 free(ioc2, M_DEVBUF); 1749 free(ioc2, M_DEVBUF);
1750 1750
1751 return is_raid; 1751 return is_raid;
1752} 1752}
1753 1753
1754static int 1754static int
1755mpt_bio_ioctl(device_t dev, u_long cmd, void *addr) 1755mpt_bio_ioctl(device_t dev, u_long cmd, void *addr)
1756{ 1756{
1757 mpt_softc_t *mpt = device_private(dev); 1757 mpt_softc_t *mpt = device_private(dev);
1758 int error, s; 1758 int error, s;
1759 1759
1760 KERNEL_LOCK(1, curlwp); 1760 KERNEL_LOCK(1, curlwp);
1761 s = splbio(); 1761 s = splbio();
1762 1762
1763 switch (cmd) { 1763 switch (cmd) {
1764 case BIOCINQ: 1764 case BIOCINQ:
1765 error = mpt_bio_ioctl_inq(mpt, addr); 1765 error = mpt_bio_ioctl_inq(mpt, addr);
1766 break; 1766 break;
1767 case BIOCVOL: 1767 case BIOCVOL:
1768 error = mpt_bio_ioctl_vol(mpt, addr); 1768 error = mpt_bio_ioctl_vol(mpt, addr);
1769 break; 1769 break;
1770 case BIOCDISK_NOVOL: 1770 case BIOCDISK_NOVOL:
1771 error = mpt_bio_ioctl_disk_novol(mpt, addr); 1771 error = mpt_bio_ioctl_disk_novol(mpt, addr);
1772 break; 1772 break;
1773 case BIOCDISK: 1773 case BIOCDISK:
1774 error = mpt_bio_ioctl_disk(mpt, addr); 1774 error = mpt_bio_ioctl_disk(mpt, addr);
1775 break; 1775 break;
1776 case BIOCSETSTATE: 1776 case BIOCSETSTATE:
1777 error = mpt_bio_ioctl_setstate(mpt, addr); 1777 error = mpt_bio_ioctl_setstate(mpt, addr);
1778 break; 1778 break;
1779 default: 1779 default:
1780 error = EINVAL; 1780 error = EINVAL;
1781 break; 1781 break;
1782 } 1782 }
1783 1783
1784 splx(s); 1784 splx(s);
1785 KERNEL_UNLOCK_ONE(curlwp); 1785 KERNEL_UNLOCK_ONE(curlwp);
1786 1786
1787 return error; 1787 return error;
1788} 1788}
1789 1789
1790static int 1790static int
1791mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi) 1791mpt_bio_ioctl_inq(mpt_softc_t *mpt, struct bioc_inq *bi)
1792{  1792{
1793 fCONFIG_PAGE_IOC_2 *ioc2; 1793 fCONFIG_PAGE_IOC_2 *ioc2;
1794 fCONFIG_PAGE_IOC_3 *ioc3; 1794 fCONFIG_PAGE_IOC_3 *ioc3;
1795 1795
1796 ioc2 = mpt_get_cfg_page_ioc2(mpt); 1796 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1797 if (ioc2 == NULL) 1797 if (ioc2 == NULL)
1798 return EIO; 1798 return EIO;
1799 ioc3 = mpt_get_cfg_page_ioc3(mpt); 1799 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1800 if (ioc3 == NULL) { 1800 if (ioc3 == NULL) {
1801 free(ioc2, M_DEVBUF); 1801 free(ioc2, M_DEVBUF);
1802 return EIO; 1802 return EIO;
1803 } 1803 }
1804 1804
1805 strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev)); 1805 strlcpy(bi->bi_dev, device_xname(mpt->sc_dev), sizeof(bi->bi_dev));
1806 bi->bi_novol = ioc2->NumActiveVolumes; 1806 bi->bi_novol = ioc2->NumActiveVolumes;
1807 bi->bi_nodisk = ioc3->NumPhysDisks; 1807 bi->bi_nodisk = ioc3->NumPhysDisks;
1808 1808
1809 free(ioc2, M_DEVBUF); 1809 free(ioc2, M_DEVBUF);
1810 free(ioc3, M_DEVBUF); 1810 free(ioc3, M_DEVBUF);
1811 1811
1812 return 0; 1812 return 0;
1813} 1813}
1814 1814
1815static int 1815static int
1816mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv) 1816mpt_bio_ioctl_vol(mpt_softc_t *mpt, struct bioc_vol *bv)
1817{ 1817{
1818 fCONFIG_PAGE_IOC_2 *ioc2 = NULL; 1818 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1819 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol; 1819 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1820 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL; 1820 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1821 struct scsipi_periph *periph; 1821 struct scsipi_periph *periph;
1822 struct scsipi_inquiry_data inqbuf; 1822 struct scsipi_inquiry_data inqbuf;
1823 char vendor[9], product[17], revision[5]; 1823 char vendor[9], product[17], revision[5];
1824 int address; 1824 int address;
1825 1825
1826 ioc2 = mpt_get_cfg_page_ioc2(mpt); 1826 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1827 if (ioc2 == NULL) 1827 if (ioc2 == NULL)
1828 return EIO; 1828 return EIO;
1829 1829
1830 if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes) 1830 if (bv->bv_volid < 0 || bv->bv_volid >= ioc2->NumActiveVolumes)
1831 goto fail; 1831 goto fail;
1832 1832
1833 ioc2rvol = &ioc2->RaidVolume[bv->bv_volid]; 1833 ioc2rvol = &ioc2->RaidVolume[bv->bv_volid];
1834 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8); 1834 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1835 1835
1836 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address); 1836 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1837 if (rvol0 == NULL) 1837 if (rvol0 == NULL)
1838 goto fail; 1838 goto fail;
1839 1839
1840 bv->bv_dev[0] = '\0'; 1840 bv->bv_dev[0] = '\0';
1841 bv->bv_vendor[0] = '\0'; 1841 bv->bv_vendor[0] = '\0';
1842 1842
1843 periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0); 1843 periph = scsipi_lookup_periph(&mpt->sc_channel, ioc2rvol->VolumeBus, 0);
1844 if (periph != NULL) { 1844 if (periph != NULL) {
1845 if (periph->periph_dev != NULL) { 1845 if (periph->periph_dev != NULL) {
1846 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s", 1846 snprintf(bv->bv_dev, sizeof(bv->bv_dev), "%s",
1847 device_xname(periph->periph_dev)); 1847 device_xname(periph->periph_dev));
1848 } 1848 }
1849 memset(&inqbuf, 0, sizeof(inqbuf)); 1849 memset(&inqbuf, 0, sizeof(inqbuf));
1850 if (scsipi_inquire(periph, &inqbuf, 1850 if (scsipi_inquire(periph, &inqbuf,
1851 XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) { 1851 XS_CTL_DISCOVERY | XS_CTL_SILENT) == 0) {
1852 scsipi_strvis(vendor, sizeof(vendor), 1852 scsipi_strvis(vendor, sizeof(vendor),
1853 inqbuf.vendor, sizeof(inqbuf.vendor)); 1853 inqbuf.vendor, sizeof(inqbuf.vendor));
1854 scsipi_strvis(product, sizeof(product), 1854 scsipi_strvis(product, sizeof(product),
1855 inqbuf.product, sizeof(inqbuf.product)); 1855 inqbuf.product, sizeof(inqbuf.product));
1856 scsipi_strvis(revision, sizeof(revision), 1856 scsipi_strvis(revision, sizeof(revision),
1857 inqbuf.revision, sizeof(inqbuf.revision)); 1857 inqbuf.revision, sizeof(inqbuf.revision));
1858 1858
1859 snprintf(bv->bv_vendor, sizeof(bv->bv_vendor), 1859 snprintf(bv->bv_vendor, sizeof(bv->bv_vendor),
1860 "%s %s %s", vendor, product, revision); 1860 "%s %s %s", vendor, product, revision);
1861 } 1861 }
1865 } 1865 }
1866 bv->bv_nodisk = rvol0->NumPhysDisks; 1866 bv->bv_nodisk = rvol0->NumPhysDisks;
1867 bv->bv_size = (uint64_t)rvol0->MaxLBA * 512; 1867 bv->bv_size = (uint64_t)rvol0->MaxLBA * 512;
1868 bv->bv_stripe_size = rvol0->StripeSize; 1868 bv->bv_stripe_size = rvol0->StripeSize;
1869 bv->bv_percent = -1; 1869 bv->bv_percent = -1;
1870 bv->bv_seconds = 0; 1870 bv->bv_seconds = 0;
1871 1871
1872 switch (rvol0->VolumeStatus.State) { 1872 switch (rvol0->VolumeStatus.State) {
1873 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: 1873 case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
1874 bv->bv_status = BIOC_SVONLINE; 1874 bv->bv_status = BIOC_SVONLINE;
1875 break; 1875 break;
1876 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: 1876 case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
1877 bv->bv_status = BIOC_SVDEGRADED; 1877 bv->bv_status = BIOC_SVDEGRADED;
1878 break; 1878 break;
1879 case MPI_RAIDVOL0_STATUS_STATE_FAILED: 1879 case MPI_RAIDVOL0_STATUS_STATE_FAILED:
1880 bv->bv_status = BIOC_SVOFFLINE; 1880 bv->bv_status = BIOC_SVOFFLINE;
1881 break; 1881 break;
1882 default: 1882 default:
1883 bv->bv_status = BIOC_SVINVALID; 1883 bv->bv_status = BIOC_SVINVALID;
1884 break; 1884 break;
1885 } 1885 }
1886 1886
1887 switch (ioc2rvol->VolumeType) { 1887 switch (ioc2rvol->VolumeType) {
1888 case MPI_RAID_VOL_TYPE_IS: 1888 case MPI_RAID_VOL_TYPE_IS:
1889 bv->bv_level = 0; 1889 bv->bv_level = 0;
1890 break; 1890 break;
1891 case MPI_RAID_VOL_TYPE_IME: 1891 case MPI_RAID_VOL_TYPE_IME:
1892 case MPI_RAID_VOL_TYPE_IM: 1892 case MPI_RAID_VOL_TYPE_IM:
1893 bv->bv_level = 1; 1893 bv->bv_level = 1;
1894 break; 1894 break;
1895 default: 1895 default:
1896 bv->bv_level = -1; 1896 bv->bv_level = -1;
1897 break; 1897 break;
1898 } 1898 }
1899 1899
1900 free(ioc2, M_DEVBUF); 1900 free(ioc2, M_DEVBUF);
1901 free(rvol0, M_DEVBUF); 1901 free(rvol0, M_DEVBUF);
1902 1902
1903 return 0; 1903 return 0;
1904 1904
1905fail: 1905fail:
1906 if (ioc2) free(ioc2, M_DEVBUF); 1906 if (ioc2) free(ioc2, M_DEVBUF);
1907 if (rvol0) free(rvol0, M_DEVBUF); 1907 if (rvol0) free(rvol0, M_DEVBUF);
1908 return EINVAL; 1908 return EINVAL;
1909} 1909}
1910 1910
1911static void 1911static void
1912mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd, 1912mpt_bio_ioctl_disk_common(mpt_softc_t *mpt, struct bioc_disk *bd,
1913 int address) 1913 int address)
1914{ 1914{
1915 fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL; 1915 fCONFIG_PAGE_RAID_PHYS_DISK_0 *phys = NULL;
1916 char vendor_id[9], product_id[17], product_rev_level[5]; 1916 char vendor_id[9], product_id[17], product_rev_level[5];
1917 1917
1918 phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address); 1918 phys = mpt_get_cfg_page_raid_phys_disk0(mpt, address);
1919 if (phys == NULL) 1919 if (phys == NULL)
1920 return; 1920 return;
1921 1921
1922 scsipi_strvis(vendor_id, sizeof(vendor_id), 1922 scsipi_strvis(vendor_id, sizeof(vendor_id),
1923 phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID)); 1923 phys->InquiryData.VendorID, sizeof(phys->InquiryData.VendorID));
1924 scsipi_strvis(product_id, sizeof(product_id), 1924 scsipi_strvis(product_id, sizeof(product_id),
1925 phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID)); 1925 phys->InquiryData.ProductID, sizeof(phys->InquiryData.ProductID));
1926 scsipi_strvis(product_rev_level, sizeof(product_rev_level), 1926 scsipi_strvis(product_rev_level, sizeof(product_rev_level),
1927 phys->InquiryData.ProductRevLevel, 1927 phys->InquiryData.ProductRevLevel,
1928 sizeof(phys->InquiryData.ProductRevLevel)); 1928 sizeof(phys->InquiryData.ProductRevLevel));
1929 1929
1930 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s", 1930 snprintf(bd->bd_vendor, sizeof(bd->bd_vendor), "%s %s %s",
1931 vendor_id, product_id, product_rev_level); 1931 vendor_id, product_id, product_rev_level);
1932 strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial)); 1932 strlcpy(bd->bd_serial, phys->InquiryData.Info, sizeof(bd->bd_serial));
1933 bd->bd_procdev[0] = '\0'; 1933 bd->bd_procdev[0] = '\0';
1934 bd->bd_channel = phys->PhysDiskBus; 1934 bd->bd_channel = phys->PhysDiskBus;
1935 bd->bd_target = phys->PhysDiskID; 1935 bd->bd_target = phys->PhysDiskID;
1936 bd->bd_lun = 0; 1936 bd->bd_lun = 0;
1937 bd->bd_size = (uint64_t)phys->MaxLBA * 512; 1937 bd->bd_size = (uint64_t)phys->MaxLBA * 512;
1938 1938
1939 switch (phys->PhysDiskStatus.State) { 1939 switch (phys->PhysDiskStatus.State) {
1940 case MPI_PHYSDISK0_STATUS_ONLINE: 1940 case MPI_PHYSDISK0_STATUS_ONLINE:
1941 bd->bd_status = BIOC_SDONLINE; 1941 bd->bd_status = BIOC_SDONLINE;
1942 break; 1942 break;
1943 case MPI_PHYSDISK0_STATUS_MISSING: 1943 case MPI_PHYSDISK0_STATUS_MISSING:
1944 case MPI_PHYSDISK0_STATUS_FAILED: 1944 case MPI_PHYSDISK0_STATUS_FAILED:
1945 bd->bd_status = BIOC_SDFAILED; 1945 bd->bd_status = BIOC_SDFAILED;
1946 break; 1946 break;
1947 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: 1947 case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
1948 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: 1948 case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
1949 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: 1949 case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
1950 bd->bd_status = BIOC_SDOFFLINE; 1950 bd->bd_status = BIOC_SDOFFLINE;
1951 break; 1951 break;
1952 case MPI_PHYSDISK0_STATUS_INITIALIZING: 1952 case MPI_PHYSDISK0_STATUS_INITIALIZING:
1953 bd->bd_status = BIOC_SDSCRUB; 1953 bd->bd_status = BIOC_SDSCRUB;
1954 break; 1954 break;
1955 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: 1955 case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
1956 default: 1956 default:
1957 bd->bd_status = BIOC_SDINVALID; 1957 bd->bd_status = BIOC_SDINVALID;
1958 break; 1958 break;
1959 } 1959 }
1960 1960
1961 free(phys, M_DEVBUF); 1961 free(phys, M_DEVBUF);
1962} 1962}
1963 1963
1964static int 1964static int
1965mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd) 1965mpt_bio_ioctl_disk_novol(mpt_softc_t *mpt, struct bioc_disk *bd)
1966{ 1966{
1967 fCONFIG_PAGE_IOC_2 *ioc2 = NULL; 1967 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
1968 fCONFIG_PAGE_IOC_3 *ioc3 = NULL; 1968 fCONFIG_PAGE_IOC_3 *ioc3 = NULL;
1969 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL; 1969 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
1970 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol; 1970 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
1971 int address, v, d; 1971 int address, v, d;
1972 1972
1973 ioc2 = mpt_get_cfg_page_ioc2(mpt); 1973 ioc2 = mpt_get_cfg_page_ioc2(mpt);
1974 if (ioc2 == NULL) 1974 if (ioc2 == NULL)
1975 return EIO; 1975 return EIO;
1976 ioc3 = mpt_get_cfg_page_ioc3(mpt); 1976 ioc3 = mpt_get_cfg_page_ioc3(mpt);
1977 if (ioc3 == NULL) { 1977 if (ioc3 == NULL) {
1978 free(ioc2, M_DEVBUF); 1978 free(ioc2, M_DEVBUF);
1979 return EIO; 1979 return EIO;
1980 } 1980 }
1981 1981
1982 if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks) 1982 if (bd->bd_diskid < 0 || bd->bd_diskid >= ioc3->NumPhysDisks)
1983 goto fail; 1983 goto fail;
1984 1984
1985 address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum; 1985 address = ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum;
1986 1986
1987 mpt_bio_ioctl_disk_common(mpt, bd, address); 1987 mpt_bio_ioctl_disk_common(mpt, bd, address);
1988 1988
1989 bd->bd_disknovol = true; 1989 bd->bd_disknovol = true;
1990 for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) { 1990 for (v = 0; bd->bd_disknovol && v < ioc2->NumActiveVolumes; v++) {
1991 ioc2rvol = &ioc2->RaidVolume[v]; 1991 ioc2rvol = &ioc2->RaidVolume[v];
1992 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8); 1992 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
1993 1993
1994 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address); 1994 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
1995 if (rvol0 == NULL) 1995 if (rvol0 == NULL)
1996 continue; 1996 continue;
1997 1997
1998 for (d = 0; d < rvol0->NumPhysDisks; d++) { 1998 for (d = 0; d < rvol0->NumPhysDisks; d++) {
1999 if (rvol0->PhysDisk[d].PhysDiskNum == 1999 if (rvol0->PhysDisk[d].PhysDiskNum ==
2000 ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) { 2000 ioc3->PhysDisk[bd->bd_diskid].PhysDiskNum) {
2001 bd->bd_disknovol = false; 2001 bd->bd_disknovol = false;
 2002 bd->bd_volid = v;
2002 break; 2003 break;
2003 } 2004 }
2004 } 2005 }
2005 free(rvol0, M_DEVBUF); 2006 free(rvol0, M_DEVBUF);
2006 } 2007 }
2007 2008
2008 free(ioc3, M_DEVBUF); 2009 free(ioc3, M_DEVBUF);
2009 free(ioc2, M_DEVBUF); 2010 free(ioc2, M_DEVBUF);
2010 2011
2011 return 0; 2012 return 0;
2012 2013
2013fail: 2014fail:
2014 if (ioc3) free(ioc3, M_DEVBUF); 2015 if (ioc3) free(ioc3, M_DEVBUF);
2015 if (ioc2) free(ioc2, M_DEVBUF); 2016 if (ioc2) free(ioc2, M_DEVBUF);
2016 return EINVAL; 2017 return EINVAL;
2017} 2018}
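
The scan above is the point of this routine: bd_disknovol stays true only if no active volume lists the disk's PhysDiskNum, and on a match the owning volume index is recorded in bd_volid. A sketch of the same scan over made-up member lists:

    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void)
    {
    	/* made-up data: members[v][d] is a volume's member PhysDiskNum */
    	const int members[2][3] = { { 3, 4, 5 }, { 7, 8, 9 } };
    	const int diskno = 8;		/* disk being looked up */
    	int volid = -1;
    	bool disknovol = true;

    	for (int v = 0; disknovol && v < 2; v++) {
    		for (int d = 0; d < 3; d++) {
    			if (members[v][d] == diskno) {
    				disknovol = false;
    				volid = v;	/* record the owning volume */
    				break;
    			}
    		}
    	}
    	printf("disknovol=%d volid=%d\n", disknovol, volid);
    	return 0;
    }
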
2018 2019
2019 2020
2020static int 2021static int
2021mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd) 2022mpt_bio_ioctl_disk(mpt_softc_t *mpt, struct bioc_disk *bd)
2022{ 2023{
2023 fCONFIG_PAGE_IOC_2 *ioc2 = NULL; 2024 fCONFIG_PAGE_IOC_2 *ioc2 = NULL;
2024 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL; 2025 fCONFIG_PAGE_RAID_VOL_0 *rvol0 = NULL;
2025 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol; 2026 fCONFIG_PAGE_IOC_2_RAID_VOL *ioc2rvol;
2026 int address; 2027 int address;
2027 2028
2028 ioc2 = mpt_get_cfg_page_ioc2(mpt); 2029 ioc2 = mpt_get_cfg_page_ioc2(mpt);
2029 if (ioc2 == NULL) 2030 if (ioc2 == NULL)
2030 return EIO; 2031 return EIO;
2031 2032
2032 if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes) 2033 if (bd->bd_volid < 0 || bd->bd_volid >= ioc2->NumActiveVolumes)
2033 goto fail; 2034 goto fail;
2034 2035
2035 ioc2rvol = &ioc2->RaidVolume[bd->bd_volid]; 2036 ioc2rvol = &ioc2->RaidVolume[bd->bd_volid];
2036 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8); 2037 address = ioc2rvol->VolumeID | (ioc2rvol->VolumeBus << 8);
2037 2038
2038 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address); 2039 rvol0 = mpt_get_cfg_page_raid_vol0(mpt, address);
2039 if (rvol0 == NULL) 2040 if (rvol0 == NULL)
2040 goto fail; 2041 goto fail;
2041 2042
2042 if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks) 2043 if (bd->bd_diskid < 0 || bd->bd_diskid >= rvol0->NumPhysDisks)
2043 goto fail; 2044 goto fail;
2044 2045
2045 address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum; 2046 address = rvol0->PhysDisk[bd->bd_diskid].PhysDiskNum;
2046 2047
2047 mpt_bio_ioctl_disk_common(mpt, bd, address); 2048 mpt_bio_ioctl_disk_common(mpt, bd, address);
2048 2049
2049 free(ioc2, M_DEVBUF); 2050 free(ioc2, M_DEVBUF);
2050 2051
2051 return 0; 2052 return 0;
2052 2053
2053fail: 2054fail:
2054 if (ioc2) free(ioc2, M_DEVBUF); 2055 if (ioc2) free(ioc2, M_DEVBUF);
2055 return EINVAL; 2056 return EINVAL;
2056} 2057}
2057 2058
2058static int 2059static int
2059mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs) 2060mpt_bio_ioctl_setstate(mpt_softc_t *mpt, struct bioc_setstate *bs)
2060{ 2061{
2061 return ENOTTY; 2062 return ENOTTY;
2062} 2063}
2063#endif 2064#endif
2064 2065