Mon Jul 20 13:30:41 2020 UTC
Fix confusion between ipi bitmask and mbox register bit assignments.


(jmcneill)
diff -r1.18 -r1.19 src/sys/arch/mips/cavium/octeon_intr.c
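
The change keeps apart two bit spaces that the previous revision used interchangeably: ci_request_ipis and ipi_process() work in IPI-number space (bit N means IPI number N), while the CIU mailbox registers use a (priority group, number) layout, with IPL_SCHED requests in bits 0-15 and IPL_HIGH requests in bits 16-31. Below is a condensed, userland-only sketch of that mapping; it mirrors only the OCTEON_IPI_SCHED()/OCTEON_IPI_HIGH() layout from the file, while the IPI_* numbering, the helper names, and the plain variables standing in for ci_request_ipis and the CIU mailbox registers are illustrative assumptions, not the kernel's definitions.

/*
 * Userland sketch (illustrative only) of the two bit spaces the commit
 * keeps apart.  Only the SCHED/HIGH mailbox layout mirrors the patch;
 * the IPI_* values and helper names below are stand-ins.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		((uint32_t)1 << (n))

/* IPI numbers: the space ci_request_ipis and ipi_process() work in. */
enum { IPI_NOP, IPI_AST, IPI_SHOOTDOWN, IPI_SYNCICACHE, IPI_KPREEMPT,
       IPI_SUSPEND, IPI_HALT, IPI_XCALL, IPI_GENERIC, IPI_WDOG, NIPIS };

/* Mailbox layout: low 16 bits are serviced at IPL_SCHED, high 16 bits
 * at IPL_HIGH, the same shape as OCTEON_IPI_SCHED()/OCTEON_IPI_HIGH(). */
#define MBOX_SCHED(n)	BIT((n) + 0)
#define MBOX_HIGH(n)	BIT((n) + 16)

static uint32_t request_ipis;	/* stand-in for ci->ci_request_ipis */
static uint32_t mbox_reg;	/* stand-in for the CIU MBOX_SET register */

/* Sender: record the IPI number for the MI code, poke the mailbox with
 * the mailbox-encoded bit -- the split octeon_send_ipi() now makes. */
static void
send_ipi(int req, int high_prio)
{
	request_ipis |= BIT(req);
	mbox_reg |= high_prio ? MBOX_HIGH(req) : MBOX_SCHED(req);
}

/* Receiver: fold mailbox bits back into IPI-number space before looking
 * at the request mask, as octeon_ipi_intr() now does.  Here the caller
 * says which mailbox half it serves; the kernel derives that from the
 * priority level it is running at. */
static uint32_t
decode_mbox(uint32_t mbox_mask, int high_prio_handler)
{
	uint32_t ipi_mask = mbox_mask;

	if (high_prio_handler)
		ipi_mask >>= 16;
	assert(ipi_mask < BIT(NIPIS));
	return ipi_mask & request_ipis;
}

int
main(void)
{
	send_ipi(IPI_XCALL, 1);		/* a high-priority IPI */
	uint32_t pending = decode_mbox(mbox_reg & 0xffff0000u, 1);
	printf("mbox %#" PRIx32 " -> ipi mask %#" PRIx32 "\n",
	    mbox_reg, pending);
	return 0;
}

Compiled with any C99 compiler and run, it shows the high half of the mailbox word being folded back onto the low, IPI-numbered half before the request mask is consulted, which is the translation octeon_ipi_intr() gains in this revision.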

cvs diff -r1.18 -r1.19 src/sys/arch/mips/cavium/octeon_intr.c

--- src/sys/arch/mips/cavium/octeon_intr.c 2020/07/17 21:59:30 1.18
+++ src/sys/arch/mips/cavium/octeon_intr.c 2020/07/20 13:30:41 1.19
@@ -1,584 +1,590 @@
-/* $NetBSD: octeon_intr.c,v 1.18 2020/07/17 21:59:30 jmcneill Exp $ */
+/* $NetBSD: octeon_intr.c,v 1.19 2020/07/20 13:30:41 jmcneill Exp $ */
2/* 2/*
3 * Copyright 2001, 2002 Wasabi Systems, Inc. 3 * Copyright 2001, 2002 Wasabi Systems, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 6 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software 16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement: 17 * must display the following acknowledgement:
18 * This product includes software developed for the NetBSD Project by 18 * This product includes software developed for the NetBSD Project by
19 * Wasabi Systems, Inc. 19 * Wasabi Systems, Inc.
20 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 20 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
21 * or promote products derived from this software without specific prior 21 * or promote products derived from this software without specific prior
22 * written permission. 22 * written permission.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 24 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37/* 37/*
38 * Platform-specific interrupt support for the Cavium Octeon. 38 * Platform-specific interrupt support for the Cavium Octeon.
39 */ 39 */
40 40
41#include "opt_multiprocessor.h" 41#include "opt_multiprocessor.h"
42 42
43#include "cpunode.h" 43#include "cpunode.h"
44#define __INTR_PRIVATE 44#define __INTR_PRIVATE
45 45
46#include <sys/cdefs.h> 46#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.18 2020/07/17 21:59:30 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.19 2020/07/20 13:30:41 jmcneill Exp $");
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/cpu.h> 50#include <sys/cpu.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52#include <sys/device.h> 52#include <sys/device.h>
53#include <sys/intr.h> 53#include <sys/intr.h>
54#include <sys/kernel.h> 54#include <sys/kernel.h>
55#include <sys/kmem.h> 55#include <sys/kmem.h>
56#include <sys/atomic.h> 56#include <sys/atomic.h>
57 57
58#include <lib/libkern/libkern.h> 58#include <lib/libkern/libkern.h>
59 59
60#include <mips/locore.h> 60#include <mips/locore.h>
61 61
62#include <mips/cavium/dev/octeon_ciureg.h> 62#include <mips/cavium/dev/octeon_ciureg.h>
63#include <mips/cavium/octeonvar.h> 63#include <mips/cavium/octeonvar.h>
64 64
65/* 65/*
66 * This is a mask of bits to clear in the SR when we go to a 66 * This is a mask of bits to clear in the SR when we go to a
67 * given hardware interrupt priority level. 67 * given hardware interrupt priority level.
68 */ 68 */
69static const struct ipl_sr_map octeon_ipl_sr_map = { 69static const struct ipl_sr_map octeon_ipl_sr_map = {
70 .sr_bits = { 70 .sr_bits = {
71 [IPL_NONE] = 0, 71 [IPL_NONE] = 0,
72 [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0, 72 [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
73 [IPL_SOFTNET] = MIPS_SOFT_INT_MASK, 73 [IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
74 [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0, 74 [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
75 [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0 75 [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
76 | MIPS_INT_MASK_1 | MIPS_INT_MASK_5, 76 | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
77 [IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0 77 [IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
78 | MIPS_INT_MASK_1 | MIPS_INT_MASK_5, 78 | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
79 [IPL_HIGH] = MIPS_INT_MASK, 79 [IPL_HIGH] = MIPS_INT_MASK,
80 }, 80 },
81}; 81};
82 82
83const char * octeon_intrnames[NIRQS] = { 83const char * octeon_intrnames[NIRQS] = {
84 "workq 0", 84 "workq 0",
85 "workq 1", 85 "workq 1",
86 "workq 2", 86 "workq 2",
87 "workq 3", 87 "workq 3",
88 "workq 4", 88 "workq 4",
89 "workq 5", 89 "workq 5",
90 "workq 6", 90 "workq 6",
91 "workq 7", 91 "workq 7",
92 "workq 8", 92 "workq 8",
93 "workq 9", 93 "workq 9",
94 "workq 10", 94 "workq 10",
95 "workq 11", 95 "workq 11",
96 "workq 12", 96 "workq 12",
97 "workq 13", 97 "workq 13",
98 "workq 14", 98 "workq 14",
99 "workq 15", 99 "workq 15",
100 "gpio 0", 100 "gpio 0",
101 "gpio 1", 101 "gpio 1",
102 "gpio 2", 102 "gpio 2",
103 "gpio 3", 103 "gpio 3",
104 "gpio 4", 104 "gpio 4",
105 "gpio 5", 105 "gpio 5",
106 "gpio 6", 106 "gpio 6",
107 "gpio 7", 107 "gpio 7",
108 "gpio 8", 108 "gpio 8",
109 "gpio 9", 109 "gpio 9",
110 "gpio 10", 110 "gpio 10",
111 "gpio 11", 111 "gpio 11",
112 "gpio 12", 112 "gpio 12",
113 "gpio 13", 113 "gpio 13",
114 "gpio 14", 114 "gpio 14",
115 "gpio 15", 115 "gpio 15",
116 "mbox 0-15", 116 "mbox 0-15",
117 "mbox 16-31", 117 "mbox 16-31",
118 "uart 0", 118 "uart 0",
119 "uart 1", 119 "uart 1",
120 "pci inta", 120 "pci inta",
121 "pci intb", 121 "pci intb",
122 "pci intc", 122 "pci intc",
123 "pci intd", 123 "pci intd",
124 "pci msi 0-15", 124 "pci msi 0-15",
125 "pci msi 16-31", 125 "pci msi 16-31",
126 "pci msi 32-47", 126 "pci msi 32-47",
127 "pci msi 48-63", 127 "pci msi 48-63",
128 "wdog summary", 128 "wdog summary",
129 "twsi", 129 "twsi",
130 "rml", 130 "rml",
131 "trace", 131 "trace",
132 "gmx drop", 132 "gmx drop",
133 "reserved", 133 "reserved",
134 "ipd drop", 134 "ipd drop",
135 "reserved", 135 "reserved",
136 "timer 0", 136 "timer 0",
137 "timer 1", 137 "timer 1",
138 "timer 2", 138 "timer 2",
139 "timer 3", 139 "timer 3",
140 "usb", 140 "usb",
141 "pcm/tdm", 141 "pcm/tdm",
142 "mpi/spi", 142 "mpi/spi",
143 "reserved", 143 "reserved",
144 "reserved", 144 "reserved",
145 "reserved", 145 "reserved",
146 "reserved", 146 "reserved",
147 "reserved", 147 "reserved",
148}; 148};
149 149
150struct octeon_intrhand { 150struct octeon_intrhand {
151 int (*ih_func)(void *); 151 int (*ih_func)(void *);
152 void *ih_arg; 152 void *ih_arg;
153 int ih_irq; 153 int ih_irq;
154 int ih_ipl; 154 int ih_ipl;
155}; 155};
156 156
157#ifdef MULTIPROCESSOR 157#ifdef MULTIPROCESSOR
158static int octeon_send_ipi(struct cpu_info *, int); 158static int octeon_send_ipi(struct cpu_info *, int);
159static int octeon_ipi_intr(void *); 159static int octeon_ipi_intr(void *);
160 160
161struct octeon_intrhand ipi_intrhands[2] = { 161struct octeon_intrhand ipi_intrhands[2] = {
162 [0] = { 162 [0] = {
163 .ih_func = octeon_ipi_intr, 163 .ih_func = octeon_ipi_intr,
164 .ih_arg = (void *)(uintptr_t)__BITS(15,0), 164 .ih_arg = (void *)(uintptr_t)__BITS(15,0),
165 .ih_irq = CIU_INT_MBOX_15_0, 165 .ih_irq = CIU_INT_MBOX_15_0,
166 .ih_ipl = IPL_SCHED, 166 .ih_ipl = IPL_SCHED,
167 }, 167 },
168 [1] = { 168 [1] = {
169 .ih_func = octeon_ipi_intr, 169 .ih_func = octeon_ipi_intr,
170 .ih_arg = (void *)(uintptr_t)__BITS(31,16), 170 .ih_arg = (void *)(uintptr_t)__BITS(31,16),
171 .ih_irq = CIU_INT_MBOX_31_16, 171 .ih_irq = CIU_INT_MBOX_31_16,
172 .ih_ipl = IPL_HIGH, 172 .ih_ipl = IPL_HIGH,
173 }, 173 },
174}; 174};
175 175
176#define OCTEON_IPI_SCHED(n) __BIT((n) + 0) 176#define OCTEON_IPI_SCHED(n) __BIT((n) + 0)
177#define OCTEON_IPI_HIGH(n) __BIT((n) + 16) 177#define OCTEON_IPI_HIGH(n) __BIT((n) + 16)
178 178
-static uint64_t octeon_ipi_mask[NIPIS] = {
+static uint32_t octeon_ipi_mbox_mask[NIPIS] = {
180 [IPI_NOP] = OCTEON_IPI_SCHED(IPI_NOP), 180 [IPI_NOP] = OCTEON_IPI_SCHED(IPI_NOP),
181 [IPI_AST] = OCTEON_IPI_SCHED(IPI_AST), 181 [IPI_AST] = OCTEON_IPI_SCHED(IPI_AST),
182 [IPI_SHOOTDOWN] = OCTEON_IPI_SCHED(IPI_SHOOTDOWN), 182 [IPI_SHOOTDOWN] = OCTEON_IPI_SCHED(IPI_SHOOTDOWN),
183 [IPI_SYNCICACHE] = OCTEON_IPI_SCHED(IPI_SYNCICACHE), 183 [IPI_SYNCICACHE] = OCTEON_IPI_SCHED(IPI_SYNCICACHE),
184 [IPI_KPREEMPT] = OCTEON_IPI_SCHED(IPI_KPREEMPT), 184 [IPI_KPREEMPT] = OCTEON_IPI_SCHED(IPI_KPREEMPT),
185 [IPI_SUSPEND] = OCTEON_IPI_HIGH(IPI_SUSPEND), 185 [IPI_SUSPEND] = OCTEON_IPI_HIGH(IPI_SUSPEND),
186 [IPI_HALT] = OCTEON_IPI_HIGH(IPI_HALT), 186 [IPI_HALT] = OCTEON_IPI_HIGH(IPI_HALT),
187 [IPI_XCALL] = OCTEON_IPI_HIGH(IPI_XCALL), 187 [IPI_XCALL] = OCTEON_IPI_HIGH(IPI_XCALL),
188 [IPI_GENERIC] = OCTEON_IPI_HIGH(IPI_GENERIC), 188 [IPI_GENERIC] = OCTEON_IPI_HIGH(IPI_GENERIC),
189 [IPI_WDOG] = OCTEON_IPI_HIGH(IPI_WDOG), 189 [IPI_WDOG] = OCTEON_IPI_HIGH(IPI_WDOG),
190}; 190};
191#endif 191#endif
192 192
193struct octeon_intrhand *octciu_intrs[NIRQS] = { 193struct octeon_intrhand *octciu_intrs[NIRQS] = {
194#ifdef MULTIPROCESSOR 194#ifdef MULTIPROCESSOR
195 [CIU_INT_MBOX_15_0] = &ipi_intrhands[0], 195 [CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
196 [CIU_INT_MBOX_31_16] = &ipi_intrhands[1], 196 [CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
197#endif 197#endif
198}; 198};
199 199
200kmutex_t octeon_intr_lock; 200kmutex_t octeon_intr_lock;
201 201
202#if defined(MULTIPROCESSOR) 202#if defined(MULTIPROCESSOR)
203#define OCTEON_NCPU MAXCPUS 203#define OCTEON_NCPU MAXCPUS
204#else 204#else
205#define OCTEON_NCPU 1 205#define OCTEON_NCPU 1
206#endif 206#endif
207 207
208struct cpu_softc octeon_cpu_softc[OCTEON_NCPU]; 208struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];
209 209
210static void 210static void
211octeon_intr_setup(void) 211octeon_intr_setup(void)
212{ 212{
213 struct cpu_softc *cpu; 213 struct cpu_softc *cpu;
214 int cpunum; 214 int cpunum;
215 215
216#define X(a) MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a)) 216#define X(a) MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))
217 217
218 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) { 218 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
219 cpu = &octeon_cpu_softc[cpunum]; 219 cpu = &octeon_cpu_softc[cpunum];
220 220
221 cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum)); 221 cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
222 cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum)); 222 cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
223 cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum)); 223 cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));
224 224
225 cpu->cpu_int_sum1 = X(CIU_INT_SUM1); 225 cpu->cpu_int_sum1 = X(CIU_INT_SUM1);
226 226
227 cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum)); 227 cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
228 cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum)); 228 cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
229 cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum)); 229 cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));
230 230
231 cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum)); 231 cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
232 cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum)); 232 cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
233 cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum)); 233 cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));
234 234
235 cpu->cpu_wdog = X(CIU_WDOG(cpunum)); 235 cpu->cpu_wdog = X(CIU_WDOG(cpunum));
236 cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum)); 236 cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));
237 237
238#ifdef MULTIPROCESSOR 238#ifdef MULTIPROCESSOR
239 cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum)); 239 cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
240 cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum)); 240 cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
241#endif 241#endif
242 } 242 }
243 243
244#undef X 244#undef X
245 245
246} 246}
247 247
248void 248void
249octeon_intr_init(struct cpu_info *ci) 249octeon_intr_init(struct cpu_info *ci)
250{ 250{
251 const int cpunum = cpu_index(ci); 251 const int cpunum = cpu_index(ci);
252 struct cpu_softc *cpu = &octeon_cpu_softc[cpunum]; 252 struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
253 const char * const xname = cpu_name(ci); 253 const char * const xname = cpu_name(ci);
254 int bank; 254 int bank;
255 255
256 cpu->cpu_ci = ci; 256 cpu->cpu_ci = ci;
257 ci->ci_softc = cpu; 257 ci->ci_softc = cpu;
258 258
259 KASSERT(cpunum == ci->ci_cpuid); 259 KASSERT(cpunum == ci->ci_cpuid);
260 260
261 if (ci->ci_cpuid == 0) { 261 if (ci->ci_cpuid == 0) {
262 ipl_sr_map = octeon_ipl_sr_map; 262 ipl_sr_map = octeon_ipl_sr_map;
263 mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH); 263 mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
264#ifdef MULTIPROCESSOR 264#ifdef MULTIPROCESSOR
265 mips_locoresw.lsw_send_ipi = octeon_send_ipi; 265 mips_locoresw.lsw_send_ipi = octeon_send_ipi;
266#endif 266#endif
267 267
268 octeon_intr_setup(); 268 octeon_intr_setup();
269 } 269 }
270 270
271#ifdef MULTIPROCESSOR 271#ifdef MULTIPROCESSOR
272 // Enable the IPIs 272 // Enable the IPIs
273 cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_15_0); 273 cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
274 cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_31_16); 274 cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_31_16);
275#endif 275#endif
276 276
277 if (ci->ci_dev) { 277 if (ci->ci_dev) {
278 for (bank = 0; bank < NBANKS; bank++) { 278 for (bank = 0; bank < NBANKS; bank++) {
279 aprint_verbose_dev(ci->ci_dev, 279 aprint_verbose_dev(ci->ci_dev,
280 "enabling intr masks %u " 280 "enabling intr masks %u "
281 " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n", 281 " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
282 bank, 282 bank,
283 cpu->cpu_ip2_enable[bank], 283 cpu->cpu_ip2_enable[bank],
284 cpu->cpu_ip3_enable[bank], 284 cpu->cpu_ip3_enable[bank],
285 cpu->cpu_ip4_enable[bank]); 285 cpu->cpu_ip4_enable[bank]);
286 } 286 }
287 } 287 }
288 288
289 for (bank = 0; bank < NBANKS; bank++) { 289 for (bank = 0; bank < NBANKS; bank++) {
290 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]); 290 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
291 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]); 291 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
292 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]); 292 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
293 } 293 }
294 294
295#ifdef MULTIPROCESSOR 295#ifdef MULTIPROCESSOR
296 mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0)); 296 mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
297#endif 297#endif
298 298
299 for (int i = 0; i < NIRQS; i++) { 299 for (int i = 0; i < NIRQS; i++) {
300 if (octeon_intrnames[i] == NULL) 300 if (octeon_intrnames[i] == NULL)
301 octeon_intrnames[i] = kmem_asprintf("irq %d", i); 301 octeon_intrnames[i] = kmem_asprintf("irq %d", i);
302 evcnt_attach_dynamic(&cpu->cpu_intr_evs[i], 302 evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
303 EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]); 303 EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
304 } 304 }
305} 305}
306 306
307void 307void
308octeon_cal_timer(int corefreq) 308octeon_cal_timer(int corefreq)
309{ 309{
310 /* Compute the number of cycles per second. */ 310 /* Compute the number of cycles per second. */
311 curcpu()->ci_cpu_freq = corefreq; 311 curcpu()->ci_cpu_freq = corefreq;
312 312
313 /* Compute the number of ticks for hz. */ 313 /* Compute the number of ticks for hz. */
314 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz; 314 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
315 315
316 /* Compute the delay divisor and reciprocal. */ 316 /* Compute the delay divisor and reciprocal. */
317 curcpu()->ci_divisor_delay = 317 curcpu()->ci_divisor_delay =
318 ((curcpu()->ci_cpu_freq + 500000) / 1000000); 318 ((curcpu()->ci_cpu_freq + 500000) / 1000000);
319#if 0 319#if 0
320 MIPS_SET_CI_RECIPRICAL(curcpu()); 320 MIPS_SET_CI_RECIPRICAL(curcpu());
321#endif 321#endif
322 322
323 mips3_cp0_count_write(0); 323 mips3_cp0_count_write(0);
324 mips3_cp0_compare_write(0); 324 mips3_cp0_compare_write(0);
325} 325}
326 326
327void * 327void *
328octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg) 328octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
329{ 329{
330 struct octeon_intrhand *ih; 330 struct octeon_intrhand *ih;
331 struct cpu_softc *cpu; 331 struct cpu_softc *cpu;
332 int cpunum; 332 int cpunum;
333 333
334 if (irq >= NIRQS) 334 if (irq >= NIRQS)
335 panic("octeon_intr_establish: bogus IRQ %d", irq); 335 panic("octeon_intr_establish: bogus IRQ %d", irq);
336 if (ipl < IPL_VM) 336 if (ipl < IPL_VM)
337 panic("octeon_intr_establish: bogus IPL %d", ipl); 337 panic("octeon_intr_establish: bogus IPL %d", ipl);
338 338
339 ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP); 339 ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
340 if (ih == NULL) 340 if (ih == NULL)
341 return (NULL); 341 return (NULL);
342 342
343 ih->ih_func = func; 343 ih->ih_func = func;
344 ih->ih_arg = arg; 344 ih->ih_arg = arg;
345 ih->ih_irq = irq; 345 ih->ih_irq = irq;
346 ih->ih_ipl = ipl; 346 ih->ih_ipl = ipl;
347 347
348 mutex_enter(&octeon_intr_lock); 348 mutex_enter(&octeon_intr_lock);
349 349
350 /* 350 /*
351 * First, make it known. 351 * First, make it known.
352 */ 352 */
353 KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)", 353 KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
354 irq, octciu_intrs[irq]); 354 irq, octciu_intrs[irq]);
355 355
356 octciu_intrs[irq] = ih; 356 octciu_intrs[irq] = ih;
357 membar_producer(); 357 membar_producer();
358 358
359 /* 359 /*
360 * Now enable it. 360 * Now enable it.
361 */ 361 */
362 const int bank = irq / 64; 362 const int bank = irq / 64;
363 const uint64_t irq_mask = __BIT(irq % 64); 363 const uint64_t irq_mask = __BIT(irq % 64);
364 364
365 switch (ipl) { 365 switch (ipl) {
366 case IPL_VM: 366 case IPL_VM:
367 cpu = &octeon_cpu_softc[0]; 367 cpu = &octeon_cpu_softc[0];
368 cpu->cpu_ip2_enable[bank] |= irq_mask; 368 cpu->cpu_ip2_enable[bank] |= irq_mask;
369 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]); 369 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
370 break; 370 break;
371 371
372 case IPL_SCHED: 372 case IPL_SCHED:
373 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) { 373 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
374 cpu = &octeon_cpu_softc[cpunum]; 374 cpu = &octeon_cpu_softc[cpunum];
375 if (cpu->cpu_ci == NULL) 375 if (cpu->cpu_ci == NULL)
376 break; 376 break;
377 cpu->cpu_ip3_enable[bank] |= irq_mask; 377 cpu->cpu_ip3_enable[bank] |= irq_mask;
378 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]); 378 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
379 } 379 }
380 break; 380 break;
381 381
382 case IPL_DDB: 382 case IPL_DDB:
383 case IPL_HIGH: 383 case IPL_HIGH:
384 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) { 384 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
385 cpu = &octeon_cpu_softc[cpunum]; 385 cpu = &octeon_cpu_softc[cpunum];
386 if (cpu->cpu_ci == NULL) 386 if (cpu->cpu_ci == NULL)
387 break; 387 break;
388 cpu->cpu_ip4_enable[bank] |= irq_mask; 388 cpu->cpu_ip4_enable[bank] |= irq_mask;
389 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]); 389 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
390 } 390 }
391 break; 391 break;
392 } 392 }
393 393
394 mutex_exit(&octeon_intr_lock); 394 mutex_exit(&octeon_intr_lock);
395 395
396 return ih; 396 return ih;
397} 397}
398 398
399void 399void
400octeon_intr_disestablish(void *cookie) 400octeon_intr_disestablish(void *cookie)
401{ 401{
402 struct octeon_intrhand * const ih = cookie; 402 struct octeon_intrhand * const ih = cookie;
403 struct cpu_softc *cpu; 403 struct cpu_softc *cpu;
404 const int irq = ih->ih_irq & (NIRQS-1); 404 const int irq = ih->ih_irq & (NIRQS-1);
405 const int ipl = ih->ih_ipl; 405 const int ipl = ih->ih_ipl;
406 int cpunum; 406 int cpunum;
407 407
408 mutex_enter(&octeon_intr_lock); 408 mutex_enter(&octeon_intr_lock);
409 409
410 /* 410 /*
411 * First disable it. 411 * First disable it.
412 */ 412 */
413 const int bank = irq / 64; 413 const int bank = irq / 64;
414 const uint64_t irq_mask = ~__BIT(irq % 64); 414 const uint64_t irq_mask = ~__BIT(irq % 64);
415 415
416 switch (ipl) { 416 switch (ipl) {
417 case IPL_VM: 417 case IPL_VM:
418 cpu = &octeon_cpu_softc[0]; 418 cpu = &octeon_cpu_softc[0];
419 cpu->cpu_ip2_enable[bank] &= ~irq_mask; 419 cpu->cpu_ip2_enable[bank] &= ~irq_mask;
420 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]); 420 mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
421 break; 421 break;
422 422
423 case IPL_SCHED: 423 case IPL_SCHED:
424 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) { 424 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
425 cpu = &octeon_cpu_softc[cpunum]; 425 cpu = &octeon_cpu_softc[cpunum];
426 if (cpu->cpu_ci == NULL) 426 if (cpu->cpu_ci == NULL)
427 break; 427 break;
428 cpu->cpu_ip3_enable[bank] &= ~irq_mask; 428 cpu->cpu_ip3_enable[bank] &= ~irq_mask;
429 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]); 429 mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
430 } 430 }
431 break; 431 break;
432 432
433 case IPL_DDB: 433 case IPL_DDB:
434 case IPL_HIGH: 434 case IPL_HIGH:
435 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) { 435 for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
436 cpu = &octeon_cpu_softc[cpunum]; 436 cpu = &octeon_cpu_softc[cpunum];
437 if (cpu->cpu_ci == NULL) 437 if (cpu->cpu_ci == NULL)
438 break; 438 break;
439 cpu->cpu_ip4_enable[bank] &= ~irq_mask; 439 cpu->cpu_ip4_enable[bank] &= ~irq_mask;
440 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]); 440 mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
441 } 441 }
442 break; 442 break;
443 } 443 }
444 444
445 /* 445 /*
446 * Now remove it since we shouldn't get interrupts for it. 446 * Now remove it since we shouldn't get interrupts for it.
447 */ 447 */
448 octciu_intrs[irq] = NULL; 448 octciu_intrs[irq] = NULL;
449 449
450 mutex_exit(&octeon_intr_lock); 450 mutex_exit(&octeon_intr_lock);
451 451
452 kmem_free(ih, sizeof(*ih)); 452 kmem_free(ih, sizeof(*ih));
453} 453}
454 454
455void 455void
456octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending) 456octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
457{ 457{
458 struct cpu_info * const ci = curcpu(); 458 struct cpu_info * const ci = curcpu();
459 struct cpu_softc * const cpu = ci->ci_softc; 459 struct cpu_softc * const cpu = ci->ci_softc;
460 int bank; 460 int bank;
461 461
462 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE); 462 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
463 KASSERT((ipending & ~MIPS_INT_MASK) == 0); 463 KASSERT((ipending & ~MIPS_INT_MASK) == 0);
464 KASSERT(ipending & MIPS_HARD_INT_MASK); 464 KASSERT(ipending & MIPS_HARD_INT_MASK);
465 uint64_t hwpend[2] = { 0, 0 }; 465 uint64_t hwpend[2] = { 0, 0 };
466 466
467 const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1); 467 const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);
468 468
469 if (ipending & MIPS_INT_MASK_2) { 469 if (ipending & MIPS_INT_MASK_2) {
470 hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0) 470 hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
471 & cpu->cpu_ip4_enable[0]; 471 & cpu->cpu_ip4_enable[0];
472 hwpend[1] = sum1 & cpu->cpu_ip4_enable[1]; 472 hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
473 } else if (ipending & MIPS_INT_MASK_1) { 473 } else if (ipending & MIPS_INT_MASK_1) {
474 hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0) 474 hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
475 & cpu->cpu_ip3_enable[0]; 475 & cpu->cpu_ip3_enable[0];
476 hwpend[1] = sum1 & cpu->cpu_ip3_enable[1]; 476 hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
477 } else if (ipending & MIPS_INT_MASK_0) { 477 } else if (ipending & MIPS_INT_MASK_0) {
478 hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0) 478 hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
479 & cpu->cpu_ip2_enable[0]; 479 & cpu->cpu_ip2_enable[0];
480 hwpend[1] = sum1 & cpu->cpu_ip2_enable[1]; 480 hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
481 } else { 481 } else {
482 panic("octeon_iointr: unexpected ipending %#x", ipending); 482 panic("octeon_iointr: unexpected ipending %#x", ipending);
483 } 483 }
484 for (bank = 0; bank <= 1; bank++) { 484 for (bank = 0; bank <= 1; bank++) {
485 while (hwpend[bank] != 0) { 485 while (hwpend[bank] != 0) {
486 const int bit = ffs64(hwpend[bank]) - 1; 486 const int bit = ffs64(hwpend[bank]) - 1;
487 const int irq = (bank * 64) + bit; 487 const int irq = (bank * 64) + bit;
488 hwpend[bank] &= ~__BIT(bit); 488 hwpend[bank] &= ~__BIT(bit);
489 489
490 struct octeon_intrhand * const ih = octciu_intrs[irq]; 490 struct octeon_intrhand * const ih = octciu_intrs[irq];
491 cpu->cpu_intr_evs[irq].ev_count++; 491 cpu->cpu_intr_evs[irq].ev_count++;
492 if (__predict_true(ih != NULL)) { 492 if (__predict_true(ih != NULL)) {
493#ifdef MULTIPROCESSOR 493#ifdef MULTIPROCESSOR
494 if (ipl == IPL_VM) { 494 if (ipl == IPL_VM) {
495 KERNEL_LOCK(1, NULL); 495 KERNEL_LOCK(1, NULL);
496#endif 496#endif
497 (*ih->ih_func)(ih->ih_arg); 497 (*ih->ih_func)(ih->ih_arg);
498#ifdef MULTIPROCESSOR 498#ifdef MULTIPROCESSOR
499 KERNEL_UNLOCK_ONE(NULL); 499 KERNEL_UNLOCK_ONE(NULL);
500 } else { 500 } else {
501 (*ih->ih_func)(ih->ih_arg); 501 (*ih->ih_func)(ih->ih_arg);
502 } 502 }
503#endif 503#endif
504 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE); 504 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
505 } 505 }
506 } 506 }
507 } 507 }
508 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE); 508 KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
509} 509}
510 510
511#ifdef MULTIPROCESSOR 511#ifdef MULTIPROCESSOR
512__CTASSERT(NIPIS < 16); 512__CTASSERT(NIPIS < 16);
513 513
514int 514int
515octeon_ipi_intr(void *arg) 515octeon_ipi_intr(void *arg)
516{ 516{
517 struct cpu_info * const ci = curcpu(); 517 struct cpu_info * const ci = curcpu();
518 struct cpu_softc * const cpu = ci->ci_softc; 518 struct cpu_softc * const cpu = ci->ci_softc;
-	uint32_t ipi_mask = (uintptr_t) arg;
+	uint32_t mbox_mask = (uintptr_t) arg;
+	uint32_t ipi_mask;
 
-	KASSERTMSG((ipi_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
-	    "ipi_mask %#"PRIx32" cpl %d", ipi_mask, ci->ci_cpl);
+	KASSERTMSG((mbox_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
+	    "mbox_mask %#"PRIx32" cpl %d", mbox_mask, ci->ci_cpl);
 
-	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
-	if (ipi_mask == 0)
+	mbox_mask &= mips3_ld(cpu->cpu_mbox_set);
+	if (mbox_mask == 0)
 		return 0;
 
-	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);
+	mips3_sd(cpu->cpu_mbox_clr, mbox_mask);
+
+	ipi_mask = mbox_mask;
+	if (ci->ci_cpl >= IPL_SCHED)
+		ipi_mask >>= 16;
529 534
530 KASSERT(ipi_mask < __BIT(NIPIS)); 535 KASSERT(ipi_mask < __BIT(NIPIS));
531 536
532#if NWDOG > 0 537#if NWDOG > 0
533 // Handle WDOG requests ourselves. 538 // Handle WDOG requests ourselves.
534 if (ipi_mask & __BIT(IPI_WDOG)) { 539 if (ipi_mask & __BIT(IPI_WDOG)) {
535 softint_schedule(cpu->cpu_wdog_sih); 540 softint_schedule(cpu->cpu_wdog_sih);
536 atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG)); 541 atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
537 ipi_mask &= ~__BIT(IPI_WDOG); 542 ipi_mask &= ~__BIT(IPI_WDOG);
538 ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++; 543 ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
539 if (__predict_true(ipi_mask == 0)) 544 if (__predict_true(ipi_mask == 0))
540 return 1; 545 return 1;
541 } 546 }
542#endif 547#endif
543 548
544 /* if the request is clear, it was previously processed */ 549 /* if the request is clear, it was previously processed */
545 if ((ci->ci_request_ipis & ipi_mask) == 0) 550 if ((ci->ci_request_ipis & ipi_mask) == 0)
546 return 0; 551 return 0;
547 552
548 atomic_or_64(&ci->ci_active_ipis, ipi_mask); 553 atomic_or_64(&ci->ci_active_ipis, ipi_mask);
549 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask); 554 atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);
550 555
551 ipi_process(ci, ipi_mask); 556 ipi_process(ci, ipi_mask);
552 557
553 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask); 558 atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);
554 559
555 return 1; 560 return 1;
556} 561}
557 562
558int 563int
559octeon_send_ipi(struct cpu_info *ci, int req) 564octeon_send_ipi(struct cpu_info *ci, int req)
560{ 565{
561 KASSERT(req < NIPIS); 566 KASSERT(req < NIPIS);
562 if (ci == NULL) { 567 if (ci == NULL) {
563 CPU_INFO_ITERATOR cii; 568 CPU_INFO_ITERATOR cii;
564 for (CPU_INFO_FOREACH(cii, ci)) { 569 for (CPU_INFO_FOREACH(cii, ci)) {
565 if (ci != curcpu()) { 570 if (ci != curcpu()) {
566 octeon_send_ipi(ci, req); 571 octeon_send_ipi(ci, req);
567 } 572 }
568 } 573 }
569 return 0; 574 return 0;
570 } 575 }
571 KASSERT(cold || ci->ci_softc != NULL); 576 KASSERT(cold || ci->ci_softc != NULL);
572 if (ci->ci_softc == NULL) 577 if (ci->ci_softc == NULL)
573 return -1; 578 return -1;
574 579
575 struct cpu_softc * const cpu = ci->ci_softc; 580 struct cpu_softc * const cpu = ci->ci_softc;
-	const uint64_t ipi_mask = octeon_ipi_mask[req];
+	const uint32_t mbox_mask = octeon_ipi_mbox_mask[req];
+	const uint32_t ipi_mask = __BIT(req);
 
 	atomic_or_64(&ci->ci_request_ipis, ipi_mask);
 
-	mips3_sd(cpu->cpu_mbox_set, ipi_mask);
+	mips3_sd(cpu->cpu_mbox_set, mbox_mask);
581 587
582 return 0; 588 return 0;
583} 589}
584#endif /* MULTIPROCESSOR */ 590#endif /* MULTIPROCESSOR */
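
A closing note on the layout, as far as the diff itself shows: the two mailbox halves exist so that urgent requests (IPI_SUSPEND, IPI_HALT, IPI_XCALL, IPI_GENERIC, IPI_WDOG) are taken at IPL_HIGH through CIU_INT_MBOX_31_16, while routine ones (AST, shootdown, icache sync, preemption) are taken at IPL_SCHED through CIU_INT_MBOX_15_0, matching ipi_intrhands[0] and [1]. Carrying both encodings in a single ipi_mask meant the high group's mailbox bits (16 and up) leaked into the IPI-number space that ci_request_ipis and ipi_process() expect; this revision keeps them in separate mbox_mask and ipi_mask variables on both the send and receive paths.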