| @@ -1,239 +1,244 @@ | | | @@ -1,239 +1,244 @@ |
1 | /* $NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.3 2018/08/27 07:40:40 riastradh Exp $ */ | | 1 | /* $NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.4 2018/12/19 09:20:56 maya Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2015 Red Hat Inc. | | 4 | * Copyright 2015 Red Hat Inc. |
5 | * | | 5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | | 6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), | | 7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation | | 8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | | 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the | | 10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: | | 11 | * Software is furnished to do so, subject to the following conditions: |
12 | * | | 12 | * |
13 | * The above copyright notice and this permission notice shall be included in | | 13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. | | 14 | * all copies or substantial portions of the Software. |
15 | * | | 15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | | 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | | 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | | 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | | 19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | | 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | | 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. | | 22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * | | 23 | * |
24 | * Authors: Ben Skeggs <bskeggs@redhat.com> | | 24 | * Authors: Ben Skeggs <bskeggs@redhat.com> |
25 | */ | | 25 | */ |
26 | #include <sys/cdefs.h> | | 26 | #include <sys/cdefs.h> |
27 | __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.3 2018/08/27 07:40:40 riastradh Exp $"); | | 27 | __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_pci_base.c,v 1.4 2018/12/19 09:20:56 maya Exp $"); |
28 | | | 28 | |
29 | #include "priv.h" | | 29 | #include "priv.h" |
30 | #include "agp.h" | | 30 | #include "agp.h" |
31 | | | 31 | |
32 | #include <core/option.h> | | 32 | #include <core/option.h> |
33 | #include <core/pci.h> | | 33 | #include <core/pci.h> |
34 | #include <subdev/mc.h> | | 34 | #include <subdev/mc.h> |
35 | | | 35 | |
/*
 * nvkm_pci_rd32: read a 32-bit value at byte offset "addr" via the
 * chip-specific backend in pci->func.
 */
u32
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
{
	return pci->func->rd32(pci, addr);
}
41 | | | 41 | |
/*
 * nvkm_pci_wr08: write the 8-bit value "data" at byte offset "addr" via
 * the chip-specific backend in pci->func.
 */
void
nvkm_pci_wr08(struct nvkm_pci *pci, u16 addr, u8 data)
{
	pci->func->wr08(pci, addr, data);
}
47 | | | 47 | |
/*
 * nvkm_pci_wr32: write the 32-bit value "data" at byte offset "addr" via
 * the chip-specific backend in pci->func.
 */
void
nvkm_pci_wr32(struct nvkm_pci *pci, u16 addr, u32 data)
{
	pci->func->wr32(pci, addr, data);
}
53 | | | 53 | |
54 | u32 | | 54 | u32 |
55 | nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value) | | 55 | nvkm_pci_mask(struct nvkm_pci *pci, u16 addr, u32 mask, u32 value) |
56 | { | | 56 | { |
57 | u32 data = pci->func->rd32(pci, addr); | | 57 | u32 data = pci->func->rd32(pci, addr); |
58 | pci->func->wr32(pci, addr, (data & ~mask) | value); | | 58 | pci->func->wr32(pci, addr, (data & ~mask) | value); |
59 | return data; | | 59 | return data; |
60 | } | | 60 | } |
61 | | | 61 | |
62 | void | | 62 | void |
63 | nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow) | | 63 | nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow) |
64 | { | | 64 | { |
65 | u32 data = nvkm_pci_rd32(pci, 0x0050); | | 65 | u32 data = nvkm_pci_rd32(pci, 0x0050); |
66 | if (shadow) | | 66 | if (shadow) |
67 | data |= 0x00000001; | | 67 | data |= 0x00000001; |
68 | else | | 68 | else |
69 | data &= ~0x00000001; | | 69 | data &= ~0x00000001; |
70 | nvkm_pci_wr32(pci, 0x0050, data); | | 70 | nvkm_pci_wr32(pci, 0x0050, data); |
71 | } | | 71 | } |
72 | | | 72 | |
/*
 * Interrupt handler (shared line).  Unarms the master interrupt
 * controller before dispatch, rearms MSI first when MSI is in use
 * (backend-specific; presumably required before further MSIs can be
 * delivered -- confirm against the per-chip msi_rearm implementations),
 * dispatches pending interrupts through the MC, then rearms.  Reports
 * IRQ_HANDLED only when nvkm_mc_intr() says something was serviced, so
 * shared-IRQ accounting stays honest.
 */
static irqreturn_t
nvkm_pci_intr(DRM_IRQ_ARGS)
{
	struct nvkm_pci *pci = arg;
	struct nvkm_mc *mc = pci->subdev.device->mc;
	bool handled = false;

	/* MC may not exist yet during early bring-up; do nothing then. */
	if (likely(mc)) {
		nvkm_mc_intr_unarm(mc);
		if (pci->msi)
			pci->func->msi_rearm(pci);
		nvkm_mc_intr(mc, &handled);
		nvkm_mc_intr_rearm(mc);
	}
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
88 | | | 88 | |
89 | static int | | 89 | static int |
90 | nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend) | | 90 | nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend) |
91 | { | | 91 | { |
92 | struct nvkm_pci *pci = nvkm_pci(subdev); | | 92 | struct nvkm_pci *pci = nvkm_pci(subdev); |
93 | | | 93 | |
94 | #ifdef __NetBSD__ | | 94 | #ifdef __NetBSD__ |
95 | const struct pci_attach_args *pa = &pci->pdev->pd_pa; | | 95 | const struct pci_attach_args *pa = &pci->pdev->pd_pa; |
96 | if (pci->pci_intrcookie != NULL) { | | 96 | if (pci->pci_intrcookie != NULL) { |
97 | pci_intr_disestablish(pa->pa_pc, pci->pci_intrcookie); | | 97 | pci_intr_disestablish(pa->pa_pc, pci->pci_intrcookie); |
98 | pci->pci_intrcookie = NULL; | | 98 | pci->pci_intrcookie = NULL; |
99 | } | | 99 | } |
100 | if (pci->pci_ihp != NULL) { | | 100 | if (pci->pci_ihp != NULL) { |
101 | pci_intr_release(pa->pa_pc, pci->pci_ihp, 1); | | 101 | pci_intr_release(pa->pa_pc, pci->pci_ihp, 1); |
102 | pci->pci_ihp = NULL; | | 102 | pci->pci_ihp = NULL; |
103 | } | | 103 | } |
104 | #else | | 104 | #else |
105 | if (pci->irq >= 0) { | | 105 | if (pci->irq >= 0) { |
106 | free_irq(pci->irq, pci); | | 106 | free_irq(pci->irq, pci); |
107 | pci->irq = -1; | | 107 | pci->irq = -1; |
108 | }; | | 108 | }; |
109 | #endif | | 109 | #endif |
110 | | | 110 | |
111 | if (pci->agp.bridge) | | 111 | if (pci->agp.bridge) |
112 | nvkm_agp_fini(pci); | | 112 | nvkm_agp_fini(pci); |
113 | | | 113 | |
114 | return 0; | | 114 | return 0; |
115 | } | | 115 | } |
116 | | | 116 | |
/*
 * Subdev preinit hook: give AGP (when a bridge was found at ctor time)
 * a chance to do its early setup.  Always returns 0.
 */
static int
nvkm_pci_preinit(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	if (pci->agp.bridge)
		nvkm_agp_preinit(pci);
	return 0;
}
125 | | | 125 | |
/*
 * Subdev init: bring up AGP (if a bridge is present), run the
 * chip-specific init hook, install the interrupt handler, and finally
 * make sure MSI delivery is armed in case interrupts were already
 * pending at load time.  Returns 0 on success or a negative errno.
 */
static int
nvkm_pci_init(struct nvkm_subdev *subdev)
{
	struct nvkm_pci *pci = nvkm_pci(subdev);
	struct pci_dev *pdev = pci->pdev;
	int ret;

	if (pci->agp.bridge) {
		ret = nvkm_agp_init(pci);
		if (ret)
			return ret;
	}

	if (pci->func->init)
		pci->func->init(pci);

#ifdef __NetBSD__
	{
		const struct pci_attach_args *pa = &pdev->pd_pa;
		/* Request exactly one legacy INTx vector; MSI/MSI-X
		 * allocation is explicitly disabled here. */
		int counts[PCI_INTR_TYPE_SIZE] = {
			[PCI_INTR_TYPE_INTX] = 1,
			[PCI_INTR_TYPE_MSI] = 0,
			[PCI_INTR_TYPE_MSIX] = 0,
		};

		/* XXX errno NetBSD->Linux */
		/* pci_intr_alloc returns a positive NetBSD errno;
		 * negate it to match the Linux-style convention used
		 * by callers of this hook. */
		ret = -pci_intr_alloc(pa, &pci->pci_ihp, counts, PCI_INTR_TYPE_INTX);
		if (ret)
			return ret;
		pci->pci_intrcookie = pci_intr_establish_xname(pa->pa_pc,
		    pci->pci_ihp[0], IPL_DRM, nvkm_pci_intr, pci,
		    device_xname(pci_dev_dev(pdev)));
		if (pci->pci_intrcookie == NULL)
			return -EIO; /* XXX er? */
	}
#else
	ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
	if (ret)
		return ret;

	/* Remember the line so nvkm_pci_fini() can free it. */
	pci->irq = pdev->irq;
#endif

	/* Ensure MSI interrupts are armed, for the case where there are
	 * already interrupts pending (for whatever reason) at load time.
	 */
	if (pci->msi)
		pci->func->msi_rearm(pci);

	/* ret is 0 on every path that reaches this point. */
	return ret;
}
172 | | | 177 | |
173 | static void * | | 178 | static void * |
174 | nvkm_pci_dtor(struct nvkm_subdev *subdev) | | 179 | nvkm_pci_dtor(struct nvkm_subdev *subdev) |
175 | { | | 180 | { |
176 | struct nvkm_pci *pci = nvkm_pci(subdev); | | 181 | struct nvkm_pci *pci = nvkm_pci(subdev); |
177 | nvkm_agp_dtor(pci); | | 182 | nvkm_agp_dtor(pci); |
178 | if (pci->msi) | | 183 | if (pci->msi) |
179 | pci_disable_msi(pci->pdev); | | 184 | pci_disable_msi(pci->pdev); |
180 | return nvkm_pci(subdev); | | 185 | return nvkm_pci(subdev); |
181 | } | | 186 | } |
182 | | | 187 | |
/* Hook table wiring the PCI subdev into the nvkm device core. */
static const struct nvkm_subdev_func
nvkm_pci_func = {
	.dtor = nvkm_pci_dtor,
	.preinit = nvkm_pci_preinit,
	.init = nvkm_pci_init,
	.fini = nvkm_pci_fini,
};
190 | | | 195 | |
/*
 * Common constructor for the PCI subdev.  Allocates and initializes
 * *ppci, wires in the chip-specific function table "func", and decides
 * whether MSI may be used -- honouring known-broken hardware, the
 * build's endianness, and the user's "NvMSI" config option.  Returns 0
 * on success or -ENOMEM if the allocation fails.
 */
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, index, 0, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
#ifndef __NetBSD__
	/* No IRQ installed until nvkm_pci_init(). */
	pci->irq = -1;
#endif

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	/* Default MSI policy: enable except on specific device ranges
	 * and chipsets known to misbehave. */
	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* BR02? NFI how these would be handled yet exactly */
		break;
	default:
		switch (device->chipset) {
		case 0xaa:
			/* reported broken, nv also disable it */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

#ifdef __BIG_ENDIAN
	/* MSI is force-disabled on big-endian builds. */
	pci->msi = false;
#endif

	/* User config ("NvMSI") overrides the computed default. */
	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		/* Only claim MSI if enabling it actually succeeded. */
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		pci->msi = false;
	}

	return 0;
}