Sat Sep 17 20:12:53 2016 UTC
#ifdef out MSI-related code on platforms that don't support MSI, so that the driver
can be loaded as a module on those platforms (module load fails to resolve the weak
symbols and claims they don't exist); this makes it possible to load the driver on
e.g. sparc64, where the driver is supposed to work under OpenBSD
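
in outline, the change wraps the MSI/MSI-X paths in #ifdef __HAVE_PCI_MSI_MSIX and,
where the platform lacks them, maps the helpers onto their INTx equivalents; a
condensed sketch, taken from the diff below (not a drop-in patch):

    #ifndef __HAVE_PCI_MSI_MSIX
    /* no MSI/MSI-X here: releasing just frees the handle array we allocated */
    #define pci_intr_release(pc, intrs, nintrs) \
            kmem_free(intrs, sizeof(*intrs) * nintrs)
    /* and the xname variant falls back to plain pci_intr_establish() */
    #define pci_intr_establish_xname(pc, ih, level, intrhand, intrarg, xname) \
            pci_intr_establish(pc, ih, level, intrhand, intrarg)
    #endif

    /* nvme_pci_setup_intr() then just maps the single legacy INTx interrupt: */
    pci_intr_handle_t ih;

    if (pci_intr_map(pa, &ih)) {
            aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
            return EBUSY;
    }
    psc->psc_intrs = kmem_zalloc(sizeof(ih), KM_SLEEP);
    psc->psc_intrs[0] = ih;
    psc->psc_nintrs = 1;
    sc->sc_use_mq = 0;      /* single queue only without MSI/MSI-X */
    sc->sc_nq = 1;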

unfortunately the QEMU sparc64 emulator triggers data access errors on the first
device register read in nvme_attach(), so I can't confirm the driver actually works
on sparc64; the same happens in QEMU when booting an OpenBSD image, so it looks like
an emulator bug


(jdolecek)
diff -r1.10 -r1.11 src/sys/dev/pci/nvme_pci.c

--- src/sys/dev/pci/nvme_pci.c 2016/09/17 12:58:51 1.10
+++ src/sys/dev/pci/nvme_pci.c 2016/09/17 20:12:53 1.11
@@ -1,14 +1,14 @@
1/* $NetBSD: nvme_pci.c,v 1.10 2016/09/17 12:58:51 jdolecek Exp $ */ 1/* $NetBSD: nvme_pci.c,v 1.11 2016/09/17 20:12:53 jdolecek Exp $ */
2/* $OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $ */ 2/* $OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $ */
3 3
4/* 4/*
5 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org> 5 * Copyright (c) 2014 David Gwynne <dlg@openbsd.org>
6 * 6 *
7 * Permission to use, copy, modify, and distribute this software for any 7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above 8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies. 9 * copyright notice and this permission notice appear in all copies.
10 * 10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
@@ -33,52 +33,59 @@
33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 33 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 34 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 35 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 36 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 38 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 39 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 40 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 41 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 42 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 */ 43 */
44 44
45#include <sys/cdefs.h> 45#include <sys/cdefs.h>
46__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.10 2016/09/17 12:58:51 jdolecek Exp $"); 46__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.11 2016/09/17 20:12:53 jdolecek Exp $");
47 47
48#include <sys/param.h> 48#include <sys/param.h>
49#include <sys/systm.h> 49#include <sys/systm.h>
50#include <sys/kernel.h> 50#include <sys/kernel.h>
51#include <sys/device.h> 51#include <sys/device.h>
52#include <sys/bitops.h> 52#include <sys/bitops.h>
53#include <sys/bus.h> 53#include <sys/bus.h>
54#include <sys/cpu.h> 54#include <sys/cpu.h>
55#include <sys/interrupt.h> 55#include <sys/interrupt.h>
56#include <sys/kmem.h> 56#include <sys/kmem.h>
57#include <sys/pmf.h> 57#include <sys/pmf.h>
58#include <sys/module.h> 58#include <sys/module.h>
59 59
60#include <dev/pci/pcireg.h> 60#include <dev/pci/pcireg.h>
61#include <dev/pci/pcivar.h> 61#include <dev/pci/pcivar.h>
62 62
63#include <dev/ic/nvmereg.h> 63#include <dev/ic/nvmereg.h>
64#include <dev/ic/nvmevar.h> 64#include <dev/ic/nvmevar.h>
65 65
66int nvme_pci_force_intx = 0; 66int nvme_pci_force_intx = 0;
67int nvme_pci_mpsafe = 0; 67int nvme_pci_mpsafe = 0;
68int nvme_pci_mq = 1; /* INTx: ioq=1, MSI/MSI-X: ioq=ncpu */ 68int nvme_pci_mq = 1; /* INTx: ioq=1, MSI/MSI-X: ioq=ncpu */
69 69
70#define NVME_PCI_BAR 0x10 70#define NVME_PCI_BAR 0x10
71 71
 72#ifndef __HAVE_PCI_MSI_MSIX
 73#define pci_intr_release(pc, intrs, nintrs) \
 74 kmem_free(intrs, sizeof(*intrs) * nintrs)
 75#define pci_intr_establish_xname(pc, ih, level, intrhand, intrarg, xname) \
 76 pci_intr_establish(pc, ih, level, intrhand, intrarg)
 77#endif
 78
72struct nvme_pci_softc { 79struct nvme_pci_softc {
73 struct nvme_softc psc_nvme; 80 struct nvme_softc psc_nvme;
74 81
75 pci_chipset_tag_t psc_pc; 82 pci_chipset_tag_t psc_pc;
76 pci_intr_handle_t *psc_intrs; 83 pci_intr_handle_t *psc_intrs;
77 int psc_nintrs; 84 int psc_nintrs;
78}; 85};
79 86
80static int nvme_pci_match(device_t, cfdata_t, void *); 87static int nvme_pci_match(device_t, cfdata_t, void *);
81static void nvme_pci_attach(device_t, device_t, void *); 88static void nvme_pci_attach(device_t, device_t, void *);
82static int nvme_pci_detach(device_t, int); 89static int nvme_pci_detach(device_t, int);
83 90
84CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc), 91CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc),
@@ -102,28 +109,30 @@ nvme_pci_match(device_t parent, cfdata_t
102 return 1; 109 return 1;
103 110
104 return 0; 111 return 0;
105} 112}
106 113
107static void 114static void
108nvme_pci_attach(device_t parent, device_t self, void *aux) 115nvme_pci_attach(device_t parent, device_t self, void *aux)
109{ 116{
110 struct nvme_pci_softc *psc = device_private(self); 117 struct nvme_pci_softc *psc = device_private(self);
111 struct nvme_softc *sc = &psc->psc_nvme; 118 struct nvme_softc *sc = &psc->psc_nvme;
112 struct pci_attach_args *pa = aux; 119 struct pci_attach_args *pa = aux;
113 pcireg_t memtype, reg; 120 pcireg_t memtype, reg;
114 bus_addr_t memaddr; 121 bus_addr_t memaddr;
115 int flags, msixoff; 122 int flags, error;
116 int error; 123#ifdef __HAVE_PCI_MSI_MSIX
 124 int msixoff;
 125#endif
117 126
118 sc->sc_dev = self; 127 sc->sc_dev = self;
119 psc->psc_pc = pa->pa_pc; 128 psc->psc_pc = pa->pa_pc;
120 if (pci_dma64_available(pa)) 129 if (pci_dma64_available(pa))
121 sc->sc_dmat = pa->pa_dmat64; 130 sc->sc_dmat = pa->pa_dmat64;
122 else 131 else
123 sc->sc_dmat = pa->pa_dmat; 132 sc->sc_dmat = pa->pa_dmat;
124 133
125 pci_aprint_devinfo(pa, NULL); 134 pci_aprint_devinfo(pa, NULL);
126 135
127 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 136 reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
128 if ((reg & PCI_COMMAND_MASTER_ENABLE) == 0) { 137 if ((reg & PCI_COMMAND_MASTER_ENABLE) == 0) {
129 reg |= PCI_COMMAND_MASTER_ENABLE; 138 reg |= PCI_COMMAND_MASTER_ENABLE;
@@ -133,73 +142,86 @@ nvme_pci_attach(device_t parent, device_
133 /* Map registers */ 142 /* Map registers */
134 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR); 143 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NVME_PCI_BAR);
135 if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) { 144 if (PCI_MAPREG_TYPE(memtype) != PCI_MAPREG_TYPE_MEM) {
136 aprint_error_dev(self, "invalid type (type=0x%x)\n", memtype); 145 aprint_error_dev(self, "invalid type (type=0x%x)\n", memtype);
137 return; 146 return;
138 } 147 }
139 sc->sc_iot = pa->pa_memt; 148 sc->sc_iot = pa->pa_memt;
140 error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START, 149 error = pci_mapreg_info(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START,
141 memtype, &memaddr, &sc->sc_ios, &flags); 150 memtype, &memaddr, &sc->sc_ios, &flags);
142 if (error) { 151 if (error) {
143 aprint_error_dev(self, "can't get map info\n"); 152 aprint_error_dev(self, "can't get map info\n");
144 return; 153 return;
145 } 154 }
 155
 156#ifdef __HAVE_PCI_MSI_MSIX
146 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff, 157 if (pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &msixoff,
147 NULL)) { 158 NULL)) {
148 pcireg_t msixtbl; 159 pcireg_t msixtbl;
149 uint32_t table_offset; 160 uint32_t table_offset;
150 int bir; 161 int bir;
151 162
152 msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag, 163 msixtbl = pci_conf_read(pa->pa_pc, pa->pa_tag,
153 msixoff + PCI_MSIX_TBLOFFSET); 164 msixoff + PCI_MSIX_TBLOFFSET);
154 table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK; 165 table_offset = msixtbl & PCI_MSIX_TBLOFFSET_MASK;
155 bir = msixtbl & PCI_MSIX_PBABIR_MASK; 166 bir = msixtbl & PCI_MSIX_PBABIR_MASK;
156 if (bir == 0) { 167 if (bir == 0) {
157 sc->sc_ios = table_offset; 168 sc->sc_ios = table_offset;
158 } 169 }
159 } 170 }
 171#endif /* __HAVE_PCI_MSI_MSIX */
 172
160 error = bus_space_map(sc->sc_iot, memaddr, sc->sc_ios, flags, 173 error = bus_space_map(sc->sc_iot, memaddr, sc->sc_ios, flags,
161 &sc->sc_ioh); 174 &sc->sc_ioh);
162 if (error != 0) { 175 if (error != 0) {
163 aprint_error_dev(self, "can't map mem space (error=%d)\n", 176 aprint_error_dev(self, "can't map mem space (error=%d)\n",
164 error); 177 error);
165 return; 178 return;
166 } 179 }
167 180
168 /* Establish interrupts */ 181 /* Establish interrupts */
169 if (nvme_pci_setup_intr(pa, psc) != 0) { 182 if (nvme_pci_setup_intr(pa, psc) != 0) {
170 aprint_error_dev(self, "unable to allocate interrupt\n"); 183 aprint_error_dev(self, "unable to allocate interrupt\n");
171 goto unmap; 184 goto unmap;
172 } 185 }
173 sc->sc_intr_establish = nvme_pci_intr_establish; 186 sc->sc_intr_establish = nvme_pci_intr_establish;
174 sc->sc_intr_disestablish = nvme_pci_intr_disestablish; 187 sc->sc_intr_disestablish = nvme_pci_intr_disestablish;
175 188
176 sc->sc_ih = kmem_zalloc(sizeof(*sc->sc_ih) * psc->psc_nintrs, KM_SLEEP); 189 sc->sc_ih = kmem_zalloc(sizeof(*sc->sc_ih) * psc->psc_nintrs, KM_SLEEP);
177 if (sc->sc_ih == NULL) { 190 if (sc->sc_ih == NULL) {
178 aprint_error_dev(self, "unable to allocate ih memory\n"); 191 aprint_error_dev(self, "unable to allocate ih memory\n");
179 goto intr_release; 192 goto intr_release;
180 } 193 }
181 194
 195 sc->sc_softih = kmem_zalloc(sizeof(*sc->sc_softih) * psc->psc_nintrs,
 196 KM_SLEEP);
 197 if (sc->sc_softih == NULL) {
 198 aprint_error_dev(self, "unable to allocate softih memory\n");
 199 goto intr_free;
 200 }
 201
182 if (nvme_attach(sc) != 0) { 202 if (nvme_attach(sc) != 0) {
183 /* error printed by nvme_attach() */ 203 /* error printed by nvme_attach() */
184 goto intr_free; 204 goto softintr_free;
185 } 205 }
186 206
187 if (!pmf_device_register(self, NULL, NULL)) 207 if (!pmf_device_register(self, NULL, NULL))
188 aprint_error_dev(self, "couldn't establish power handler\n"); 208 aprint_error_dev(self, "couldn't establish power handler\n");
189 209
190 SET(sc->sc_flags, NVME_F_ATTACHED); 210 SET(sc->sc_flags, NVME_F_ATTACHED);
191 return; 211 return;
192 212
 213softintr_free:
 214 kmem_free(sc->sc_softih, sizeof(*sc->sc_softih) * psc->psc_nintrs);
193intr_free: 215intr_free:
194 kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs); 216 kmem_free(sc->sc_ih, sizeof(*sc->sc_ih) * psc->psc_nintrs);
195 sc->sc_nq = 0; 217 sc->sc_nq = 0;
196intr_release: 218intr_release:
197 pci_intr_release(pa->pa_pc, psc->psc_intrs, psc->psc_nintrs); 219 pci_intr_release(pa->pa_pc, psc->psc_intrs, psc->psc_nintrs);
198 psc->psc_nintrs = 0; 220 psc->psc_nintrs = 0;
199unmap: 221unmap:
200 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); 222 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
201 sc->sc_ios = 0; 223 sc->sc_ios = 0;
202} 224}
203 225
204static int 226static int
205nvme_pci_detach(device_t self, int flags) 227nvme_pci_detach(device_t self, int flags)
@@ -221,116 +243,131 @@ nvme_pci_detach(device_t self, int flags
221 return 0; 243 return 0;
222} 244}
223 245
224static int 246static int
225nvme_pci_intr_establish(struct nvme_softc *sc, uint16_t qid, 247nvme_pci_intr_establish(struct nvme_softc *sc, uint16_t qid,
226 struct nvme_queue *q) 248 struct nvme_queue *q)
227{ 249{
228 struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc; 250 struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;
229 char intr_xname[INTRDEVNAMEBUF]; 251 char intr_xname[INTRDEVNAMEBUF];
230 char intrbuf[PCI_INTRSTR_LEN]; 252 char intrbuf[PCI_INTRSTR_LEN];
231 const char *intrstr = NULL; 253 const char *intrstr = NULL;
232 int (*ih_func)(void *); 254 int (*ih_func)(void *);
233 void *ih_arg; 255 void *ih_arg;
234 kcpuset_t *affinity; 256#ifdef __HAVE_PCI_MSI_MSIX
235 cpuid_t affinity_to; 
236 int error; 257 int error;
 258#endif
237 259
238 if (!sc->sc_use_mq && qid > 0) 260 KASSERT(sc->sc_use_mq || qid == NVME_ADMIN_Q);
239 return 0; 
240 
241 KASSERT(sc->sc_ih[qid] == NULL); 261 KASSERT(sc->sc_ih[qid] == NULL);
242 262
243 if (nvme_pci_mpsafe) { 263 if (nvme_pci_mpsafe) {
244 pci_intr_setattr(psc->psc_pc, &psc->psc_intrs[qid], 264 pci_intr_setattr(psc->psc_pc, &psc->psc_intrs[qid],
245 PCI_INTR_MPSAFE, true); 265 PCI_INTR_MPSAFE, true);
246 } 266 }
 267
 268#ifdef __HAVE_PCI_MSI_MSIX
247 if (!sc->sc_use_mq) { 269 if (!sc->sc_use_mq) {
 270#endif
248 snprintf(intr_xname, sizeof(intr_xname), "%s", 271 snprintf(intr_xname, sizeof(intr_xname), "%s",
249 device_xname(sc->sc_dev)); 272 device_xname(sc->sc_dev));
250 ih_arg = sc; 273 ih_arg = sc;
251 ih_func = nvme_intr; 274 ih_func = nvme_intr;
252 } else { 275#ifdef __HAVE_PCI_MSI_MSIX
253 if (qid == 0) { 276 }
 277 else {
 278 if (qid == NVME_ADMIN_Q) {
254 snprintf(intr_xname, sizeof(intr_xname), "%s adminq", 279 snprintf(intr_xname, sizeof(intr_xname), "%s adminq",
255 device_xname(sc->sc_dev)); 280 device_xname(sc->sc_dev));
256 } else { 281 } else {
257 snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d", 282 snprintf(intr_xname, sizeof(intr_xname), "%s ioq%d",
258 device_xname(sc->sc_dev), qid); 283 device_xname(sc->sc_dev), qid);
259 } 284 }
260 ih_arg = q; 285 ih_arg = q;
261 if (pci_intr_type(psc->psc_pc, psc->psc_intrs[qid]) 286 if (pci_intr_type(psc->psc_pc, psc->psc_intrs[qid])
262 == PCI_INTR_TYPE_MSIX) 287 == PCI_INTR_TYPE_MSIX)
263 ih_func = nvme_mq_msix_intr; 288 ih_func = nvme_mq_msix_intr;
264 else 289 else
265 ih_func = nvme_mq_msi_intr; 290 ih_func = nvme_mq_msi_intr;
266 } 291 }
 292#endif /* __HAVE_PCI_MSI_MSIX */
267 sc->sc_ih[qid] = pci_intr_establish_xname(psc->psc_pc, 293 sc->sc_ih[qid] = pci_intr_establish_xname(psc->psc_pc,
268 psc->psc_intrs[qid], IPL_BIO, ih_func, ih_arg, intr_xname); 294 psc->psc_intrs[qid], IPL_BIO, ih_func, ih_arg, intr_xname);
269 if (sc->sc_ih[qid] == NULL) { 295 if (sc->sc_ih[qid] == NULL) {
270 aprint_error_dev(sc->sc_dev, 296 aprint_error_dev(sc->sc_dev,
271 "unable to establish %s interrupt\n", intr_xname); 297 "unable to establish %s interrupt\n", intr_xname);
272 return 1; 298 return 1;
273 } 299 }
274 intrstr = pci_intr_string(psc->psc_pc, psc->psc_intrs[qid], intrbuf, 300 intrstr = pci_intr_string(psc->psc_pc, psc->psc_intrs[qid], intrbuf,
275 sizeof(intrbuf)); 301 sizeof(intrbuf));
276 if (!sc->sc_use_mq) { 302 if (!sc->sc_use_mq) {
277 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 303 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
278 } else if (qid == NVME_ADMIN_Q) { 304 }
 305#ifdef __HAVE_PCI_MSI_MSIX
 306 else if (qid == NVME_ADMIN_Q) {
279 aprint_normal_dev(sc->sc_dev, 307 aprint_normal_dev(sc->sc_dev,
280 "for admin queue interrupting at %s\n", intrstr); 308 "for admin queue interrupting at %s\n", intrstr);
281 } else if (!nvme_pci_mpsafe) { 309 } else if (!nvme_pci_mpsafe) {
282 aprint_normal_dev(sc->sc_dev, 310 aprint_normal_dev(sc->sc_dev,
283 "for io queue %d interrupting at %s\n", qid, intrstr); 311 "for io queue %d interrupting at %s\n", qid, intrstr);
284 } else { 312 } else {
 313 kcpuset_t *affinity;
 314 cpuid_t affinity_to;
 315
285 kcpuset_create(&affinity, true); 316 kcpuset_create(&affinity, true);
286 affinity_to = (qid - 1) % ncpu; 317 affinity_to = (qid - 1) % ncpu;
287 kcpuset_set(affinity, affinity_to); 318 kcpuset_set(affinity, affinity_to);
288 error = interrupt_distribute(sc->sc_ih[qid], affinity, NULL); 319 error = interrupt_distribute(sc->sc_ih[qid], affinity, NULL);
289 kcpuset_destroy(affinity); 320 kcpuset_destroy(affinity);
290 aprint_normal_dev(sc->sc_dev, 321 aprint_normal_dev(sc->sc_dev,
291 "for io queue %d interrupting at %s", qid, intrstr); 322 "for io queue %d interrupting at %s", qid, intrstr);
292 if (error == 0) 323 if (error == 0)
293 aprint_normal(" affinity to cpu%lu", affinity_to); 324 aprint_normal(" affinity to cpu%lu", affinity_to);
294 aprint_normal("\n"); 325 aprint_normal("\n");
295 } 326 }
 327#endif
296 return 0; 328 return 0;
297} 329}
298 330
299static int 331static int
300nvme_pci_intr_disestablish(struct nvme_softc *sc, uint16_t qid) 332nvme_pci_intr_disestablish(struct nvme_softc *sc, uint16_t qid)
301{ 333{
302 struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc; 334 struct nvme_pci_softc *psc = (struct nvme_pci_softc *)sc;
303 335
304 if (!sc->sc_use_mq && qid > 0) 336 if (!sc->sc_use_mq && qid > 0)
305 return 0; 337 return 0;
306 338
307 KASSERT(sc->sc_ih[qid] != NULL); 339 KASSERT(sc->sc_ih[qid] != NULL);
308 340
309 pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]); 341 pci_intr_disestablish(psc->psc_pc, sc->sc_ih[qid]);
310 sc->sc_ih[qid] = NULL; 342 sc->sc_ih[qid] = NULL;
311 343
312 return 0; 344 return 0;
313} 345}
314 346
315static int 347static int
316nvme_pci_setup_intr(struct pci_attach_args *pa, struct nvme_pci_softc *psc) 348nvme_pci_setup_intr(struct pci_attach_args *pa, struct nvme_pci_softc *psc)
317{ 349{
318 struct nvme_softc *sc = &psc->psc_nvme; 350 struct nvme_softc *sc = &psc->psc_nvme;
319 pci_intr_handle_t *ihps; 351#ifdef __HAVE_PCI_MSI_MSIX
 352 int error;
320 int counts[PCI_INTR_TYPE_SIZE], alloced_counts[PCI_INTR_TYPE_SIZE]; 353 int counts[PCI_INTR_TYPE_SIZE], alloced_counts[PCI_INTR_TYPE_SIZE];
 354 pci_intr_handle_t *ihps;
321 int max_type, intr_type; 355 int max_type, intr_type;
322 int error; 356#else
 357 pci_intr_handle_t ih;
 358#endif /* __HAVE_PCI_MSI_MSIX */
323 359
 360#ifdef __HAVE_PCI_MSI_MSIX
324 if (nvme_pci_force_intx) { 361 if (nvme_pci_force_intx) {
325 max_type = PCI_INTR_TYPE_INTX; 362 max_type = PCI_INTR_TYPE_INTX;
326 goto force_intx; 363 goto force_intx;
327 } 364 }
328 365
329 /* MSI-X */ 366 /* MSI-X */
330 max_type = PCI_INTR_TYPE_MSIX; 367 max_type = PCI_INTR_TYPE_MSIX;
331 counts[PCI_INTR_TYPE_MSIX] = min(pci_msix_count(pa->pa_pc, pa->pa_tag), 368 counts[PCI_INTR_TYPE_MSIX] = min(pci_msix_count(pa->pa_pc, pa->pa_tag),
332 ncpu + 1); 369 ncpu + 1);
333 if (counts[PCI_INTR_TYPE_MSIX] > 0) { 370 if (counts[PCI_INTR_TYPE_MSIX] > 0) {
334 memset(alloced_counts, 0, sizeof(alloced_counts)); 371 memset(alloced_counts, 0, sizeof(alloced_counts));
335 alloced_counts[PCI_INTR_TYPE_MSIX] = counts[PCI_INTR_TYPE_MSIX]; 372 alloced_counts[PCI_INTR_TYPE_MSIX] = counts[PCI_INTR_TYPE_MSIX];
336 if (pci_intr_alloc(pa, &ihps, alloced_counts, 373 if (pci_intr_alloc(pa, &ihps, alloced_counts,
@@ -410,26 +447,40 @@ retry:
410 goto retry; 447 goto retry;
411 } 448 }
412 return EBUSY; 449 return EBUSY;
413 } 450 }
414 451
415 psc->psc_intrs = ihps; 452 psc->psc_intrs = ihps;
416 psc->psc_nintrs = alloced_counts[intr_type]; 453 psc->psc_nintrs = alloced_counts[intr_type];
417 if (intr_type == PCI_INTR_TYPE_MSI) { 454 if (intr_type == PCI_INTR_TYPE_MSI) {
418 if (alloced_counts[intr_type] > ncpu + 1) 455 if (alloced_counts[intr_type] > ncpu + 1)
419 alloced_counts[intr_type] = ncpu + 1; 456 alloced_counts[intr_type] = ncpu + 1;
420 } 457 }
421 sc->sc_use_mq = alloced_counts[intr_type] > 1; 458 sc->sc_use_mq = alloced_counts[intr_type] > 1;
422 sc->sc_nq = sc->sc_use_mq ? alloced_counts[intr_type] - 1 : 1; 459 sc->sc_nq = sc->sc_use_mq ? alloced_counts[intr_type] - 1 : 1;
 460
 461#else /* !__HAVE_PCI_MSI_MSIX */
 462 if (pci_intr_map(pa, &ih)) {
 463 aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
 464 return EBUSY;
 465 }
 466
 467 psc->psc_intrs = kmem_zalloc(sizeof(ih), KM_SLEEP);
 468 psc->psc_intrs[0] = ih;
 469 psc->psc_nintrs = 1;
 470 sc->sc_use_mq = 0;
 471 sc->sc_nq = 1;
 472#endif /* __HAVE_PCI_MSI_MSIX */
 473
423 return 0; 474 return 0;
424} 475}
425 476
426MODULE(MODULE_CLASS_DRIVER, nvme, "pci,dk_subr"); 477MODULE(MODULE_CLASS_DRIVER, nvme, "pci,dk_subr");
427 478
428#ifdef _MODULE 479#ifdef _MODULE
429#include "ioconf.c" 480#include "ioconf.c"
430 481
431extern const struct bdevsw ld_bdevsw; 482extern const struct bdevsw ld_bdevsw;
432extern const struct cdevsw ld_cdevsw; 483extern const struct cdevsw ld_cdevsw;
433#endif 484#endif
434 485
435static int 486static int