Sun Nov 1 12:13:21 2020 UTC ()
Add an isb() barrier after ICC_SGI1R_EL1 write to prevent reordering with
subsequent wfi/wfe instructions. Haven't seen this in practice but I would
rather be safe here.


(jmcneill)
diff -r1.29 -r1.30 src/sys/arch/arm/cortex/gicv3.c

cvs diff -r1.29 -r1.30 src/sys/arch/arm/cortex/gicv3.c (switch to unified diff)

--- src/sys/arch/arm/cortex/gicv3.c 2020/11/01 11:17:20 1.29
+++ src/sys/arch/arm/cortex/gicv3.c 2020/11/01 12:13:21 1.30
@@ -1,867 +1,868 @@ @@ -1,867 +1,868 @@
1/* $NetBSD: gicv3.c,v 1.29 2020/11/01 11:17:20 jmcneill Exp $ */ 1/* $NetBSD: gicv3.c,v 1.30 2020/11/01 12:13:21 jmcneill Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca> 4 * Copyright (c) 2018 Jared McNeill <jmcneill@invisible.ca>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE. 26 * SUCH DAMAGE.
27 */ 27 */
28 28
29#include "opt_multiprocessor.h" 29#include "opt_multiprocessor.h"
30 30
31#define _INTR_PRIVATE 31#define _INTR_PRIVATE
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.29 2020/11/01 11:17:20 jmcneill Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: gicv3.c,v 1.30 2020/11/01 12:13:21 jmcneill Exp $");
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/bus.h> 38#include <sys/bus.h>
39#include <sys/device.h> 39#include <sys/device.h>
40#include <sys/intr.h> 40#include <sys/intr.h>
41#include <sys/systm.h> 41#include <sys/systm.h>
42#include <sys/cpu.h> 42#include <sys/cpu.h>
43#include <sys/vmem.h> 43#include <sys/vmem.h>
44 44
45#include <machine/cpufunc.h> 45#include <machine/cpufunc.h>
46 46
47#include <arm/locore.h> 47#include <arm/locore.h>
48#include <arm/armreg.h> 48#include <arm/armreg.h>
49 49
50#include <arm/cortex/gicv3.h> 50#include <arm/cortex/gicv3.h>
51#include <arm/cortex/gic_reg.h> 51#include <arm/cortex/gic_reg.h>
52 52
53#define PICTOSOFTC(pic) \ 53#define PICTOSOFTC(pic) \
54 ((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic))) 54 ((void *)((uintptr_t)(pic) - offsetof(struct gicv3_softc, sc_pic)))
55#define LPITOSOFTC(lpi) \ 55#define LPITOSOFTC(lpi) \
56 ((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi))) 56 ((void *)((uintptr_t)(lpi) - offsetof(struct gicv3_softc, sc_lpi)))
57 57
58#define IPL_TO_PRIORITY(sc, ipl) (((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff) 58#define IPL_TO_PRIORITY(sc, ipl) (((0xff - (ipl)) << (sc)->sc_priority_shift) & 0xff)
59#define IPL_TO_PMR(sc, ipl) (((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff) 59#define IPL_TO_PMR(sc, ipl) (((0xff - (ipl)) << (sc)->sc_pmr_shift) & 0xff)
60#define IPL_TO_LPIPRIO(sc, ipl) (((0xff - (ipl)) << 4) & 0xff) 60#define IPL_TO_LPIPRIO(sc, ipl) (((0xff - (ipl)) << 4) & 0xff)
61 61
62static struct gicv3_softc *gicv3_softc; 62static struct gicv3_softc *gicv3_softc;
63 63
64static inline uint32_t 64static inline uint32_t
65gicd_read_4(struct gicv3_softc *sc, bus_size_t reg) 65gicd_read_4(struct gicv3_softc *sc, bus_size_t reg)
66{ 66{
67 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg); 67 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_d, reg);
68} 68}
69 69
70static inline void 70static inline void
71gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val) 71gicd_write_4(struct gicv3_softc *sc, bus_size_t reg, uint32_t val)
72{ 72{
73 bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val); 73 bus_space_write_4(sc->sc_bst, sc->sc_bsh_d, reg, val);
74} 74}
75 75
76static inline uint64_t 76static inline uint64_t
77gicd_read_8(struct gicv3_softc *sc, bus_size_t reg) 77gicd_read_8(struct gicv3_softc *sc, bus_size_t reg)
78{ 78{
79 return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg); 79 return bus_space_read_8(sc->sc_bst, sc->sc_bsh_d, reg);
80} 80}
81 81
82static inline void 82static inline void
83gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val) 83gicd_write_8(struct gicv3_softc *sc, bus_size_t reg, uint64_t val)
84{ 84{
85 bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val); 85 bus_space_write_8(sc->sc_bst, sc->sc_bsh_d, reg, val);
86} 86}
87 87
88static inline uint32_t 88static inline uint32_t
89gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg) 89gicr_read_4(struct gicv3_softc *sc, u_int index, bus_size_t reg)
90{ 90{
91 KASSERT(index < sc->sc_bsh_r_count); 91 KASSERT(index < sc->sc_bsh_r_count);
92 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg); 92 return bus_space_read_4(sc->sc_bst, sc->sc_bsh_r[index], reg);
93} 93}
94 94
95static inline void 95static inline void
96gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val) 96gicr_write_4(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint32_t val)
97{ 97{
98 KASSERT(index < sc->sc_bsh_r_count); 98 KASSERT(index < sc->sc_bsh_r_count);
99 bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val); 99 bus_space_write_4(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
100} 100}
101 101
102static inline uint64_t 102static inline uint64_t
103gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg) 103gicr_read_8(struct gicv3_softc *sc, u_int index, bus_size_t reg)
104{ 104{
105 KASSERT(index < sc->sc_bsh_r_count); 105 KASSERT(index < sc->sc_bsh_r_count);
106 return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg); 106 return bus_space_read_8(sc->sc_bst, sc->sc_bsh_r[index], reg);
107} 107}
108 108
109static inline void 109static inline void
110gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val) 110gicr_write_8(struct gicv3_softc *sc, u_int index, bus_size_t reg, uint64_t val)
111{ 111{
112 KASSERT(index < sc->sc_bsh_r_count); 112 KASSERT(index < sc->sc_bsh_r_count);
113 bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val); 113 bus_space_write_8(sc->sc_bst, sc->sc_bsh_r[index], reg, val);
114} 114}
115 115
116static void 116static void
117gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask) 117gicv3_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
118{ 118{
119 struct gicv3_softc * const sc = PICTOSOFTC(pic); 119 struct gicv3_softc * const sc = PICTOSOFTC(pic);
120 struct cpu_info * const ci = curcpu(); 120 struct cpu_info * const ci = curcpu();
121 const u_int group = irqbase / 32; 121 const u_int group = irqbase / 32;
122 122
123 if (group == 0) { 123 if (group == 0) {
124 sc->sc_enabled_sgippi |= mask; 124 sc->sc_enabled_sgippi |= mask;
125 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask); 125 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, mask);
126 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP) 126 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
127 ; 127 ;
128 } else { 128 } else {
129 gicd_write_4(sc, GICD_ISENABLERn(group), mask); 129 gicd_write_4(sc, GICD_ISENABLERn(group), mask);
130 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP) 130 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
131 ; 131 ;
132 } 132 }
133} 133}
134 134
135static void 135static void
136gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask) 136gicv3_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
137{ 137{
138 struct gicv3_softc * const sc = PICTOSOFTC(pic); 138 struct gicv3_softc * const sc = PICTOSOFTC(pic);
139 struct cpu_info * const ci = curcpu(); 139 struct cpu_info * const ci = curcpu();
140 const u_int group = irqbase / 32; 140 const u_int group = irqbase / 32;
141 141
142 if (group == 0) { 142 if (group == 0) {
143 sc->sc_enabled_sgippi &= ~mask; 143 sc->sc_enabled_sgippi &= ~mask;
144 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask); 144 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, mask);
145 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP) 145 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
146 ; 146 ;
147 } else { 147 } else {
148 gicd_write_4(sc, GICD_ICENABLERn(group), mask); 148 gicd_write_4(sc, GICD_ICENABLERn(group), mask);
149 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP) 149 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
150 ; 150 ;
151 } 151 }
152} 152}
153 153
154static void 154static void
155gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is) 155gicv3_establish_irq(struct pic_softc *pic, struct intrsource *is)
156{ 156{
157 struct gicv3_softc * const sc = PICTOSOFTC(pic); 157 struct gicv3_softc * const sc = PICTOSOFTC(pic);
158 const u_int group = is->is_irq / 32; 158 const u_int group = is->is_irq / 32;
159 uint32_t ipriority, icfg; 159 uint32_t ipriority, icfg;
160 uint64_t irouter; 160 uint64_t irouter;
161 u_int n; 161 u_int n;
162 162
163 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl); 163 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
164 const u_int ipriority_shift = (is->is_irq & 0x3) * 8; 164 const u_int ipriority_shift = (is->is_irq & 0x3) * 8;
165 const u_int icfg_shift = (is->is_irq & 0xf) * 2; 165 const u_int icfg_shift = (is->is_irq & 0xf) * 2;
166 166
167 if (group == 0) { 167 if (group == 0) {
168 /* SGIs and PPIs are always MP-safe */ 168 /* SGIs and PPIs are always MP-safe */
169 is->is_mpsafe = true; 169 is->is_mpsafe = true;
170 170
171 /* Update interrupt configuration and priority on all redistributors */ 171 /* Update interrupt configuration and priority on all redistributors */
172 for (n = 0; n < sc->sc_bsh_r_count; n++) { 172 for (n = 0; n < sc->sc_bsh_r_count; n++) {
173 icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16)); 173 icfg = gicr_read_4(sc, n, GICR_ICFGRn(is->is_irq / 16));
174 if (is->is_type == IST_LEVEL) 174 if (is->is_type == IST_LEVEL)
175 icfg &= ~(0x2 << icfg_shift); 175 icfg &= ~(0x2 << icfg_shift);
176 if (is->is_type == IST_EDGE) 176 if (is->is_type == IST_EDGE)
177 icfg |= (0x2 << icfg_shift); 177 icfg |= (0x2 << icfg_shift);
178 gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg); 178 gicr_write_4(sc, n, GICR_ICFGRn(is->is_irq / 16), icfg);
179 179
180 ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4)); 180 ipriority = gicr_read_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4));
181 ipriority &= ~(0xffU << ipriority_shift); 181 ipriority &= ~(0xffU << ipriority_shift);
182 ipriority |= (ipriority_val << ipriority_shift); 182 ipriority |= (ipriority_val << ipriority_shift);
183 gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority); 183 gicr_write_4(sc, n, GICR_IPRIORITYRn(is->is_irq / 4), ipriority);
184 } 184 }
185 } else { 185 } else {
186 if (is->is_mpsafe) { 186 if (is->is_mpsafe) {
187 /* Route MP-safe interrupts to all participating PEs */ 187 /* Route MP-safe interrupts to all participating PEs */
188 irouter = GICD_IROUTER_Interrupt_Routing_mode; 188 irouter = GICD_IROUTER_Interrupt_Routing_mode;
189 } else { 189 } else {
190 /* Route non-MP-safe interrupts to the primary PE only */ 190 /* Route non-MP-safe interrupts to the primary PE only */
191 irouter = sc->sc_irouter[0]; 191 irouter = sc->sc_irouter[0];
192 } 192 }
193 gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter); 193 gicd_write_8(sc, GICD_IROUTER(is->is_irq), irouter);
194 194
195 /* Update interrupt configuration */ 195 /* Update interrupt configuration */
196 icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16)); 196 icfg = gicd_read_4(sc, GICD_ICFGRn(is->is_irq / 16));
197 if (is->is_type == IST_LEVEL) 197 if (is->is_type == IST_LEVEL)
198 icfg &= ~(0x2 << icfg_shift); 198 icfg &= ~(0x2 << icfg_shift);
199 if (is->is_type == IST_EDGE) 199 if (is->is_type == IST_EDGE)
200 icfg |= (0x2 << icfg_shift); 200 icfg |= (0x2 << icfg_shift);
201 gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg); 201 gicd_write_4(sc, GICD_ICFGRn(is->is_irq / 16), icfg);
202 202
203 /* Update interrupt priority */ 203 /* Update interrupt priority */
204 ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4)); 204 ipriority = gicd_read_4(sc, GICD_IPRIORITYRn(is->is_irq / 4));
205 ipriority &= ~(0xffU << ipriority_shift); 205 ipriority &= ~(0xffU << ipriority_shift);
206 ipriority |= (ipriority_val << ipriority_shift); 206 ipriority |= (ipriority_val << ipriority_shift);
207 gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority); 207 gicd_write_4(sc, GICD_IPRIORITYRn(is->is_irq / 4), ipriority);
208 } 208 }
209} 209}
210 210
211static void 211static void
212gicv3_set_priority(struct pic_softc *pic, int ipl) 212gicv3_set_priority(struct pic_softc *pic, int ipl)
213{ 213{
214 struct gicv3_softc * const sc = PICTOSOFTC(pic); 214 struct gicv3_softc * const sc = PICTOSOFTC(pic);
215 215
216 icc_pmr_write(IPL_TO_PMR(sc, ipl)); 216 icc_pmr_write(IPL_TO_PMR(sc, ipl));
217} 217}
218 218
219static void 219static void
220gicv3_dist_enable(struct gicv3_softc *sc) 220gicv3_dist_enable(struct gicv3_softc *sc)
221{ 221{
222 uint32_t gicd_ctrl; 222 uint32_t gicd_ctrl;
223 u_int n; 223 u_int n;
224 224
225 /* Disable the distributor */ 225 /* Disable the distributor */
226 gicd_write_4(sc, GICD_CTRL, 0); 226 gicd_write_4(sc, GICD_CTRL, 0);
227 227
228 /* Wait for register write to complete */ 228 /* Wait for register write to complete */
229 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP) 229 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
230 ; 230 ;
231 231
232 /* Clear all INTID enable bits */ 232 /* Clear all INTID enable bits */
233 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) 233 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32)
234 gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0); 234 gicd_write_4(sc, GICD_ICENABLERn(n / 32), ~0);
235 235
236 /* Set default priorities to lowest */ 236 /* Set default priorities to lowest */
237 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4) 237 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 4)
238 gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0); 238 gicd_write_4(sc, GICD_IPRIORITYRn(n / 4), ~0);
239 239
240 /* Set all interrupts to G1NS */ 240 /* Set all interrupts to G1NS */
241 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) { 241 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 32) {
242 gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0); 242 gicd_write_4(sc, GICD_IGROUPRn(n / 32), ~0);
243 gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0); 243 gicd_write_4(sc, GICD_IGRPMODRn(n / 32), 0);
244 } 244 }
245 245
246 /* Set all interrupts level-sensitive by default */ 246 /* Set all interrupts level-sensitive by default */
247 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16) 247 for (n = 32; n < sc->sc_pic.pic_maxsources; n += 16)
248 gicd_write_4(sc, GICD_ICFGRn(n / 16), 0); 248 gicd_write_4(sc, GICD_ICFGRn(n / 16), 0);
249 249
250 /* Wait for register writes to complete */ 250 /* Wait for register writes to complete */
251 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP) 251 while (gicd_read_4(sc, GICD_CTRL) & GICD_CTRL_RWP)
252 ; 252 ;
253 253
254 /* Enable Affinity routing and G1NS interrupts */ 254 /* Enable Affinity routing and G1NS interrupts */
255 gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS; 255 gicd_ctrl = GICD_CTRL_EnableGrp1A | GICD_CTRL_ARE_NS;
256 gicd_write_4(sc, GICD_CTRL, gicd_ctrl); 256 gicd_write_4(sc, GICD_CTRL, gicd_ctrl);
257} 257}
258 258
259static void 259static void
260gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci) 260gicv3_redist_enable(struct gicv3_softc *sc, struct cpu_info *ci)
261{ 261{
262 uint32_t icfg; 262 uint32_t icfg;
263 u_int n, o; 263 u_int n, o;
264 264
265 /* Clear INTID enable bits */ 265 /* Clear INTID enable bits */
266 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0); 266 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICENABLER0, ~0);
267 267
268 /* Wait for register write to complete */ 268 /* Wait for register write to complete */
269 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP) 269 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
270 ; 270 ;
271 271
272 /* Set default priorities */ 272 /* Set default priorities */
273 for (n = 0; n < 32; n += 4) { 273 for (n = 0; n < 32; n += 4) {
274 uint32_t priority = 0; 274 uint32_t priority = 0;
275 size_t byte_shift = 0; 275 size_t byte_shift = 0;
276 for (o = 0; o < 4; o++, byte_shift += 8) { 276 for (o = 0; o < 4; o++, byte_shift += 8) {
277 struct intrsource * const is = sc->sc_pic.pic_sources[n + o]; 277 struct intrsource * const is = sc->sc_pic.pic_sources[n + o];
278 if (is == NULL) 278 if (is == NULL)
279 priority |= (0xffU << byte_shift); 279 priority |= (0xffU << byte_shift);
280 else { 280 else {
281 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl); 281 const u_int ipriority_val = IPL_TO_PRIORITY(sc, is->is_ipl);
282 priority |= ipriority_val << byte_shift; 282 priority |= ipriority_val << byte_shift;
283 } 283 }
284 } 284 }
285 gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority); 285 gicr_write_4(sc, ci->ci_gic_redist, GICR_IPRIORITYRn(n / 4), priority);
286 } 286 }
287 287
288 /* Set all interrupts to G1NS */ 288 /* Set all interrupts to G1NS */
289 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0); 289 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGROUPR0, ~0);
290 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0); 290 gicr_write_4(sc, ci->ci_gic_redist, GICR_IGRPMODR0, 0);
291 291
292 /* Restore PPI configs */ 292 /* Restore PPI configs */
293 for (n = 0, icfg = 0; n < 16; n++) { 293 for (n = 0, icfg = 0; n < 16; n++) {
294 struct intrsource * const is = sc->sc_pic.pic_sources[16 + n]; 294 struct intrsource * const is = sc->sc_pic.pic_sources[16 + n];
295 if (is != NULL && is->is_type == IST_EDGE) 295 if (is != NULL && is->is_type == IST_EDGE)
296 icfg |= (0x2 << (n * 2)); 296 icfg |= (0x2 << (n * 2));
297 } 297 }
298 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg); 298 gicr_write_4(sc, ci->ci_gic_redist, GICR_ICFGRn(1), icfg);
299 299
300 /* Restore current enable bits */ 300 /* Restore current enable bits */
301 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi); 301 gicr_write_4(sc, ci->ci_gic_redist, GICR_ISENABLER0, sc->sc_enabled_sgippi);
302 302
303 /* Wait for register write to complete */ 303 /* Wait for register write to complete */
304 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP) 304 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR) & GICR_CTLR_RWP)
305 ; 305 ;
306} 306}
307 307
308static uint64_t 308static uint64_t
309gicv3_cpu_identity(void) 309gicv3_cpu_identity(void)
310{ 310{
311 u_int aff3, aff2, aff1, aff0; 311 u_int aff3, aff2, aff1, aff0;
312 312
313 const register_t mpidr = cpu_mpidr_aff_read(); 313 const register_t mpidr = cpu_mpidr_aff_read();
314 aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0); 314 aff0 = __SHIFTOUT(mpidr, MPIDR_AFF0);
315 aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1); 315 aff1 = __SHIFTOUT(mpidr, MPIDR_AFF1);
316 aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2); 316 aff2 = __SHIFTOUT(mpidr, MPIDR_AFF2);
317 aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3); 317 aff3 = __SHIFTOUT(mpidr, MPIDR_AFF3);
318 318
319 return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) | 319 return __SHIFTIN(aff0, GICR_TYPER_Affinity_Value_Aff0) |
320 __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) | 320 __SHIFTIN(aff1, GICR_TYPER_Affinity_Value_Aff1) |
321 __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) | 321 __SHIFTIN(aff2, GICR_TYPER_Affinity_Value_Aff2) |
322 __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3); 322 __SHIFTIN(aff3, GICR_TYPER_Affinity_Value_Aff3);
323} 323}
324 324
325static u_int 325static u_int
326gicv3_find_redist(struct gicv3_softc *sc) 326gicv3_find_redist(struct gicv3_softc *sc)
327{ 327{
328 uint64_t gicr_typer; 328 uint64_t gicr_typer;
329 u_int n; 329 u_int n;
330 330
331 const uint64_t cpu_identity = gicv3_cpu_identity(); 331 const uint64_t cpu_identity = gicv3_cpu_identity();
332 332
333 for (n = 0; n < sc->sc_bsh_r_count; n++) { 333 for (n = 0; n < sc->sc_bsh_r_count; n++) {
334 gicr_typer = gicr_read_8(sc, n, GICR_TYPER); 334 gicr_typer = gicr_read_8(sc, n, GICR_TYPER);
335 if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity) 335 if ((gicr_typer & GICR_TYPER_Affinity_Value) == cpu_identity)
336 return n; 336 return n;
337 } 337 }
338 338
339 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0); 339 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
340 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1); 340 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
341 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2); 341 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
342 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3); 342 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
343 343
344 panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d", 344 panic("%s: could not find GICv3 redistributor for cpu %d.%d.%d.%d",
345 cpu_name(curcpu()), aff3, aff2, aff1, aff0); 345 cpu_name(curcpu()), aff3, aff2, aff1, aff0);
346} 346}
347 347
348static uint64_t 348static uint64_t
349gicv3_sgir(struct gicv3_softc *sc) 349gicv3_sgir(struct gicv3_softc *sc)
350{ 350{
351 const uint64_t cpu_identity = gicv3_cpu_identity(); 351 const uint64_t cpu_identity = gicv3_cpu_identity();
352 352
353 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0); 353 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
354 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1); 354 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
355 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2); 355 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
356 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3); 356 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
357 357
358 return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) | 358 return __SHIFTIN(__BIT(aff0), ICC_SGIR_EL1_TargetList) |
359 __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) | 359 __SHIFTIN(aff1, ICC_SGIR_EL1_Aff1) |
360 __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) | 360 __SHIFTIN(aff2, ICC_SGIR_EL1_Aff2) |
361 __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3); 361 __SHIFTIN(aff3, ICC_SGIR_EL1_Aff3);
362} 362}
363 363
364static void 364static void
365gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci) 365gicv3_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
366{ 366{
367 struct gicv3_softc * const sc = PICTOSOFTC(pic); 367 struct gicv3_softc * const sc = PICTOSOFTC(pic);
368 uint32_t icc_sre, icc_ctlr, gicr_waker; 368 uint32_t icc_sre, icc_ctlr, gicr_waker;
369 369
370 ci->ci_gic_redist = gicv3_find_redist(sc); 370 ci->ci_gic_redist = gicv3_find_redist(sc);
371 ci->ci_gic_sgir = gicv3_sgir(sc); 371 ci->ci_gic_sgir = gicv3_sgir(sc);
372 372
373 /* Store route to CPU for SPIs */ 373 /* Store route to CPU for SPIs */
374 const uint64_t cpu_identity = gicv3_cpu_identity(); 374 const uint64_t cpu_identity = gicv3_cpu_identity();
375 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0); 375 const u_int aff0 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff0);
376 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1); 376 const u_int aff1 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff1);
377 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2); 377 const u_int aff2 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff2);
378 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3); 378 const u_int aff3 = __SHIFTOUT(cpu_identity, GICR_TYPER_Affinity_Value_Aff3);
379 sc->sc_irouter[cpu_index(ci)] = 379 sc->sc_irouter[cpu_index(ci)] =
380 __SHIFTIN(aff0, GICD_IROUTER_Aff0) | 380 __SHIFTIN(aff0, GICD_IROUTER_Aff0) |
381 __SHIFTIN(aff1, GICD_IROUTER_Aff1) | 381 __SHIFTIN(aff1, GICD_IROUTER_Aff1) |
382 __SHIFTIN(aff2, GICD_IROUTER_Aff2) | 382 __SHIFTIN(aff2, GICD_IROUTER_Aff2) |
383 __SHIFTIN(aff3, GICD_IROUTER_Aff3); 383 __SHIFTIN(aff3, GICD_IROUTER_Aff3);
384 384
385 /* Enable System register access and disable IRQ/FIQ bypass */ 385 /* Enable System register access and disable IRQ/FIQ bypass */
386 icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB; 386 icc_sre = ICC_SRE_EL1_SRE | ICC_SRE_EL1_DFB | ICC_SRE_EL1_DIB;
387 icc_sre_write(icc_sre); 387 icc_sre_write(icc_sre);
388 388
389 /* Mark the connected PE as being awake */ 389 /* Mark the connected PE as being awake */
390 gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER); 390 gicr_waker = gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER);
391 gicr_waker &= ~GICR_WAKER_ProcessorSleep; 391 gicr_waker &= ~GICR_WAKER_ProcessorSleep;
392 gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker); 392 gicr_write_4(sc, ci->ci_gic_redist, GICR_WAKER, gicr_waker);
393 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep) 393 while (gicr_read_4(sc, ci->ci_gic_redist, GICR_WAKER) & GICR_WAKER_ChildrenAsleep)
394 ; 394 ;
395 395
396 /* Set initial priority mask */ 396 /* Set initial priority mask */
397 gicv3_set_priority(pic, IPL_HIGH); 397 gicv3_set_priority(pic, IPL_HIGH);
398 398
399 /* Set the binary point field to the minimum value */ 399 /* Set the binary point field to the minimum value */
400 icc_bpr1_write(0); 400 icc_bpr1_write(0);
401 401
402 /* Enable group 1 interrupt signaling */ 402 /* Enable group 1 interrupt signaling */
403 icc_igrpen1_write(ICC_IGRPEN_EL1_Enable); 403 icc_igrpen1_write(ICC_IGRPEN_EL1_Enable);
404 404
405 /* Set EOI mode */ 405 /* Set EOI mode */
406 icc_ctlr = icc_ctlr_read(); 406 icc_ctlr = icc_ctlr_read();
407 icc_ctlr &= ~ICC_CTLR_EL1_EOImode; 407 icc_ctlr &= ~ICC_CTLR_EL1_EOImode;
408 icc_ctlr_write(icc_ctlr); 408 icc_ctlr_write(icc_ctlr);
409 409
410 /* Enable redistributor */ 410 /* Enable redistributor */
411 gicv3_redist_enable(sc, ci); 411 gicv3_redist_enable(sc, ci);
412 412
413 /* Allow IRQ exceptions */ 413 /* Allow IRQ exceptions */
414 cpsie(I32_bit); 414 cpsie(I32_bit);
415} 415}
416 416
417#ifdef MULTIPROCESSOR 417#ifdef MULTIPROCESSOR
418static void 418static void
419gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi) 419gicv3_ipi_send(struct pic_softc *pic, const kcpuset_t *kcp, u_long ipi)
420{ 420{
421 struct cpu_info *ci; 421 struct cpu_info *ci;
422 uint64_t sgir; 422 uint64_t sgir;
423 423
424 sgir = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID); 424 sgir = __SHIFTIN(ipi, ICC_SGIR_EL1_INTID);
425 if (kcp == NULL) { 425 if (kcp == NULL) {
426 /* Interrupts routed to all PEs, excluding "self" */ 426 /* Interrupts routed to all PEs, excluding "self" */
427 if (ncpu == 1) 427 if (ncpu == 1)
428 return; 428 return;
429 sgir |= ICC_SGIR_EL1_IRM; 429 sgir |= ICC_SGIR_EL1_IRM;
430 } else { 430 } else {
431 /* Interrupt to exactly one PE */ 431 /* Interrupt to exactly one PE */
432 ci = cpu_lookup(kcpuset_ffs(kcp) - 1); 432 ci = cpu_lookup(kcpuset_ffs(kcp) - 1);
433 if (ci == curcpu()) 433 if (ci == curcpu())
434 return; 434 return;
435 sgir |= ci->ci_gic_sgir; 435 sgir |= ci->ci_gic_sgir;
436 } 436 }
437 icc_sgi1r_write(sgir); 437 icc_sgi1r_write(sgir);
 438 isb();
438} 439}
439 440
440static void 441static void
441gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity) 442gicv3_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
442{ 443{
443 struct gicv3_softc * const sc = PICTOSOFTC(pic); 444 struct gicv3_softc * const sc = PICTOSOFTC(pic);
444 const size_t group = irq / 32; 445 const size_t group = irq / 32;
445 int n; 446 int n;
446 447
447 kcpuset_zero(affinity); 448 kcpuset_zero(affinity);
448 if (group == 0) { 449 if (group == 0) {
449 /* All CPUs are targets for group 0 (SGI/PPI) */ 450 /* All CPUs are targets for group 0 (SGI/PPI) */
450 for (n = 0; n < ncpu; n++) { 451 for (n = 0; n < ncpu; n++) {
451 if (sc->sc_irouter[n] != UINT64_MAX) 452 if (sc->sc_irouter[n] != UINT64_MAX)
452 kcpuset_set(affinity, n); 453 kcpuset_set(affinity, n);
453 } 454 }
454 } else { 455 } else {
455 /* Find distributor targets (SPI) */ 456 /* Find distributor targets (SPI) */
456 const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq)); 457 const uint64_t irouter = gicd_read_8(sc, GICD_IROUTER(irq));
457 for (n = 0; n < ncpu; n++) { 458 for (n = 0; n < ncpu; n++) {
458 if (irouter == GICD_IROUTER_Interrupt_Routing_mode || 459 if (irouter == GICD_IROUTER_Interrupt_Routing_mode ||
459 irouter == sc->sc_irouter[n]) 460 irouter == sc->sc_irouter[n])
460 kcpuset_set(affinity, n); 461 kcpuset_set(affinity, n);
461 } 462 }
462 } 463 }
463} 464}
464 465
465static int 466static int
466gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity) 467gicv3_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
467{ 468{
468 struct gicv3_softc * const sc = PICTOSOFTC(pic); 469 struct gicv3_softc * const sc = PICTOSOFTC(pic);
469 const size_t group = irq / 32; 470 const size_t group = irq / 32;
470 uint64_t irouter; 471 uint64_t irouter;
471 472
472 if (group == 0) 473 if (group == 0)
473 return EINVAL; 474 return EINVAL;
474 475
475 const int set = kcpuset_countset(affinity); 476 const int set = kcpuset_countset(affinity);
476 if (set == ncpu) 477 if (set == ncpu)
477 irouter = GICD_IROUTER_Interrupt_Routing_mode; 478 irouter = GICD_IROUTER_Interrupt_Routing_mode;
478 else if (set == 1) 479 else if (set == 1)
479 irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1]; 480 irouter = sc->sc_irouter[kcpuset_ffs(affinity) - 1];
480 else 481 else
481 return EINVAL; 482 return EINVAL;
482 483
483 gicd_write_8(sc, GICD_IROUTER(irq), irouter); 484 gicd_write_8(sc, GICD_IROUTER(irq), irouter);
484 485
485 return 0; 486 return 0;
486} 487}
487#endif 488#endif
488 489
/* Operations for the main GIC pic (SGI/PPI/SPI interrupt sources). */
static const struct pic_ops gicv3_picops = {
	.pic_unblock_irqs = gicv3_unblock_irqs,
	.pic_block_irqs = gicv3_block_irqs,
	.pic_establish_irq = gicv3_establish_irq,
	.pic_set_priority = gicv3_set_priority,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_cpu_init,
	.pic_ipi_send = gicv3_ipi_send,
	.pic_get_affinity = gicv3_get_affinity,
	.pic_set_affinity = gicv3_set_affinity,
#endif
};
501 502
502static void 503static void
503gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask) 504gicv3_lpi_unblock_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
504{ 505{
505 struct gicv3_softc * const sc = LPITOSOFTC(pic); 506 struct gicv3_softc * const sc = LPITOSOFTC(pic);
506 int bit; 507 int bit;
507 508
508 while ((bit = ffs(mask)) != 0) { 509 while ((bit = ffs(mask)) != 0) {
509 sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable; 510 sc->sc_lpiconf.base[irqbase + bit - 1] |= GIC_LPICONF_Enable;
510 if (sc->sc_lpiconf_flush) 511 if (sc->sc_lpiconf_flush)
511 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1); 512 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
512 mask &= ~__BIT(bit - 1); 513 mask &= ~__BIT(bit - 1);
513 } 514 }
514 515
515 if (!sc->sc_lpiconf_flush) 516 if (!sc->sc_lpiconf_flush)
516 dsb(ishst); 517 dsb(ishst);
517} 518}
518 519
519static void 520static void
520gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask) 521gicv3_lpi_block_irqs(struct pic_softc *pic, size_t irqbase, uint32_t mask)
521{ 522{
522 struct gicv3_softc * const sc = LPITOSOFTC(pic); 523 struct gicv3_softc * const sc = LPITOSOFTC(pic);
523 int bit; 524 int bit;
524 525
525 while ((bit = ffs(mask)) != 0) { 526 while ((bit = ffs(mask)) != 0) {
526 sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable; 527 sc->sc_lpiconf.base[irqbase + bit - 1] &= ~GIC_LPICONF_Enable;
527 if (sc->sc_lpiconf_flush) 528 if (sc->sc_lpiconf_flush)
528 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1); 529 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[irqbase + bit - 1], 1);
529 mask &= ~__BIT(bit - 1); 530 mask &= ~__BIT(bit - 1);
530 } 531 }
531 532
532 if (!sc->sc_lpiconf_flush) 533 if (!sc->sc_lpiconf_flush)
533 dsb(ishst); 534 dsb(ishst);
534} 535}
535 536
536static void 537static void
537gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is) 538gicv3_lpi_establish_irq(struct pic_softc *pic, struct intrsource *is)
538{ 539{
539 struct gicv3_softc * const sc = LPITOSOFTC(pic); 540 struct gicv3_softc * const sc = LPITOSOFTC(pic);
540 541
541 sc->sc_lpiconf.base[is->is_irq] = IPL_TO_LPIPRIO(sc, is->is_ipl) | GIC_LPICONF_Res1; 542 sc->sc_lpiconf.base[is->is_irq] = IPL_TO_LPIPRIO(sc, is->is_ipl) | GIC_LPICONF_Res1;
542 543
543 if (sc->sc_lpiconf_flush) 544 if (sc->sc_lpiconf_flush)
544 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1); 545 cpu_dcache_wb_range((vaddr_t)&sc->sc_lpiconf.base[is->is_irq], 1);
545 else 546 else
546 dsb(ishst); 547 dsb(ishst);
547} 548}
548 549
/*
 * Per-CPU LPI setup: program this CPU's redistributor with the shared
 * LPI configuration table and its private pending table, then enable
 * LPI delivery and let any registered ITS initialize its per-CPU state.
 */
static void
gicv3_lpi_cpu_init(struct pic_softc *pic, struct cpu_info *ci)
{
	struct gicv3_softc * const sc = LPITOSOFTC(pic);
	struct gicv3_lpi_callback *cb;
	uint64_t propbase, pendbase;
	uint32_t ctlr;

	/* If physical LPIs are not supported on this redistributor, just return. */
	const uint64_t typer = gicr_read_8(sc, ci->ci_gic_redist, GICR_TYPER);
	if ((typer & GICR_TYPER_PLPIS) == 0)
		return;

	/* Interrupt target address for this CPU, used by ITS when GITS_TYPER.PTA == 0 */
	sc->sc_processor_id[cpu_index(ci)] = __SHIFTOUT(typer, GICR_TYPER_Processor_Number);

	/* Disable LPIs before making changes */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr &= ~GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/*
	 * Setup the LPI configuration table.  Request an inner-shareable,
	 * write-back cacheable mapping, then read the register back to
	 * learn what the implementation actually accepted.
	 */
	propbase = sc->sc_lpiconf.segs[0].ds_addr |
	    __SHIFTIN(ffs(pic->pic_maxsources) - 1, GICR_PROPBASER_IDbits) |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PROPBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PROPBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
	propbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PROPBASER);
	if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) != GICR_Shareability_IS) {
		if (__SHIFTOUT(propbase, GICR_PROPBASER_Shareability) == GICR_Shareability_NS) {
			/* Fall back to a non-shareable, non-cacheable mapping. */
			propbase &= ~GICR_PROPBASER_Shareability;
			propbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PROPBASER_Shareability);
			propbase &= ~GICR_PROPBASER_InnerCache;
			propbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PROPBASER_InnerCache);
			gicr_write_8(sc, ci->ci_gic_redist, GICR_PROPBASER, propbase);
		}
		/*
		 * The redistributor will not snoop our caches, so table
		 * updates must be cleaned to memory explicitly (see the
		 * cpu_dcache_wb_range() calls keyed off this flag).
		 */
		sc->sc_lpiconf_flush = true;
	}

	/* Setup the LPI pending table (per-CPU), with the same fallback. */
	pendbase = sc->sc_lpipend[cpu_index(ci)].segs[0].ds_addr |
	    __SHIFTIN(GICR_Shareability_IS, GICR_PENDBASER_Shareability) |
	    __SHIFTIN(GICR_Cache_NORMAL_RA_WA_WB, GICR_PENDBASER_InnerCache);
	gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	pendbase = gicr_read_8(sc, ci->ci_gic_redist, GICR_PENDBASER);
	if (__SHIFTOUT(pendbase, GICR_PENDBASER_Shareability) == GICR_Shareability_NS) {
		pendbase &= ~GICR_PENDBASER_Shareability;
		pendbase |= __SHIFTIN(GICR_Shareability_NS, GICR_PENDBASER_Shareability);
		pendbase &= ~GICR_PENDBASER_InnerCache;
		pendbase |= __SHIFTIN(GICR_Cache_NORMAL_NC, GICR_PENDBASER_InnerCache);
		gicr_write_8(sc, ci->ci_gic_redist, GICR_PENDBASER, pendbase);
	}

	/* Enable LPIs */
	ctlr = gicr_read_4(sc, ci->ci_gic_redist, GICR_CTLR);
	ctlr |= GICR_CTLR_Enable_LPIs;
	gicr_write_4(sc, ci->ci_gic_redist, GICR_CTLR, ctlr);
	dsb(sy);

	/* Setup ITS if present */
	LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
		cb->cpu_init(cb->priv, ci);
}
613 614
614#ifdef MULTIPROCESSOR 615#ifdef MULTIPROCESSOR
615static void 616static void
616gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity) 617gicv3_lpi_get_affinity(struct pic_softc *pic, size_t irq, kcpuset_t *affinity)
617{ 618{
618 struct gicv3_softc * const sc = LPITOSOFTC(pic); 619 struct gicv3_softc * const sc = LPITOSOFTC(pic);
619 struct gicv3_lpi_callback *cb; 620 struct gicv3_lpi_callback *cb;
620 621
621 kcpuset_zero(affinity); 622 kcpuset_zero(affinity);
622 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) 623 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list)
623 cb->get_affinity(cb->priv, irq, affinity); 624 cb->get_affinity(cb->priv, irq, affinity);
624} 625}
625 626
626static int 627static int
627gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity) 628gicv3_lpi_set_affinity(struct pic_softc *pic, size_t irq, const kcpuset_t *affinity)
628{ 629{
629 struct gicv3_softc * const sc = LPITOSOFTC(pic); 630 struct gicv3_softc * const sc = LPITOSOFTC(pic);
630 struct gicv3_lpi_callback *cb; 631 struct gicv3_lpi_callback *cb;
631 int error = EINVAL; 632 int error = EINVAL;
632 633
633 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) { 634 LIST_FOREACH(cb, &sc->sc_lpi_callbacks, list) {
634 error = cb->set_affinity(cb->priv, irq, affinity); 635 error = cb->set_affinity(cb->priv, irq, affinity);
635 if (error != EPASSTHROUGH) 636 if (error != EPASSTHROUGH)
636 return error; 637 return error;
637 } 638 }
638 639
639 return EINVAL; 640 return EINVAL;
640} 641}
641#endif 642#endif
642 643
/* Operations for the LPI pic (locality-specific peripheral interrupts). */
static const struct pic_ops gicv3_lpiops = {
	.pic_unblock_irqs = gicv3_lpi_unblock_irqs,
	.pic_block_irqs = gicv3_lpi_block_irqs,
	.pic_establish_irq = gicv3_lpi_establish_irq,
#ifdef MULTIPROCESSOR
	.pic_cpu_init = gicv3_lpi_cpu_init,
	.pic_get_affinity = gicv3_lpi_get_affinity,
	.pic_set_affinity = gicv3_lpi_set_affinity,
#endif
};
653 654
/*
 * Allocate, map and load a single physically contiguous DMA buffer of
 * 'len' bytes with the requested alignment, and zero it.  These buffers
 * back GIC tables the hardware requires, so any failure is fatal.
 */
void
gicv3_dma_alloc(struct gicv3_softc *sc, struct gicv3_dma *dma, bus_size_t len, bus_size_t align)
{
	int nsegs, error;

	dma->len = len;
	/* One segment only: the GIC needs a single contiguous region. */
	error = bus_dmamem_alloc(sc->sc_dmat, dma->len, align, 0, dma->segs, 1, &nsegs, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_alloc failed: %d", error);
	error = bus_dmamem_map(sc->sc_dmat, dma->segs, nsegs, len, (void **)&dma->base, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamem_map failed: %d", error);
	error = bus_dmamap_create(sc->sc_dmat, len, 1, len, 0, BUS_DMA_WAITOK, &dma->map);
	if (error)
		panic("bus_dmamap_create failed: %d", error);
	error = bus_dmamap_load(sc->sc_dmat, dma->map, dma->base, dma->len, NULL, BUS_DMA_WAITOK);
	if (error)
		panic("bus_dmamap_load failed: %d", error);

	/* Zero the buffer and make the zeroes visible to the device. */
	memset(dma->base, 0, dma->len);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, dma->len, BUS_DMASYNC_PREWRITE);
}
676 677
/* Allocate the DMA-backed tables required for LPI support. */
static void
gicv3_lpi_init(struct gicv3_softc *sc)
{
	/*
	 * Allocate LPI configuration table (shared by all redistributors;
	 * 4KB-aligned for GICR_PROPBASER).
	 */
	gicv3_dma_alloc(sc, &sc->sc_lpiconf, sc->sc_lpi.pic_maxsources, 0x1000);
	KASSERT((sc->sc_lpiconf.segs[0].ds_addr & ~GICR_PROPBASER_Physical_Address) == 0);

	/*
	 * Allocate LPI pending tables, one per CPU (one bit per interrupt
	 * ID including the first 8192 non-LPI IDs; 64KB-aligned for
	 * GICR_PENDBASER).
	 */
	const bus_size_t lpipend_sz = (8192 + sc->sc_lpi.pic_maxsources) / NBBY;
	for (int cpuindex = 0; cpuindex < ncpu; cpuindex++) {
		gicv3_dma_alloc(sc, &sc->sc_lpipend[cpuindex], lpipend_sz, 0x10000);
		KASSERT((sc->sc_lpipend[cpuindex].segs[0].ds_addr & ~GICR_PENDBASER_Physical_Address) == 0);
	}
}
695 696
/*
 * Main interrupt entry point: acknowledge and dispatch interrupts from
 * the CPU interface until none remain pending, then restore the IPL in
 * effect at entry.
 */
void
gicv3_irq_handler(void *frame)
{
	struct cpu_info * const ci = curcpu();
	struct gicv3_softc * const sc = gicv3_softc;
	struct pic_softc *pic;
	const int oldipl = ci->ci_cpl;

	ci->ci_data.cpu_nintr++;

	for (;;) {
		/* Acknowledge the highest priority pending interrupt. */
		const uint32_t iar = icc_iar1_read();
		dsb(sy);	/* order the IAR read before the work below */
		const uint32_t irq = __SHIFTOUT(iar, ICC_IAR_INTID);
		if (irq == ICC_IAR_INTID_SPURIOUS)
			break;

		/* LPIs and SGI/PPI/SPI sources live on separate pics. */
		pic = irq >= GIC_LPI_BASE ? &sc->sc_lpi : &sc->sc_pic;
		if (irq - pic->pic_irqbase >= pic->pic_maxsources)
			continue;

		struct intrsource * const is = pic->pic_sources[irq - pic->pic_irqbase];
		KASSERT(is != NULL);

		/* Edge-triggered non-LPI sources are EOI'd before dispatch. */
		const bool early_eoi = irq < GIC_LPI_BASE && is->is_type == IST_EDGE;

		/*
		 * Bring ci_cpl to the source's IPL before re-enabling
		 * interrupts; if the IPL would drop, process pending
		 * interrupts at the lower level first.
		 */
		const int ipl = is->is_ipl;
		if (__predict_false(ipl < ci->ci_cpl)) {
			pic_do_pending_ints(I32_bit, ipl, frame);
		} else if (ci->ci_cpl != ipl) {
			gicv3_set_priority(pic, ipl);
			ci->ci_cpl = ipl;
		}

		if (early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}

		/* Dispatch with interrupts enabled so higher IPLs can preempt. */
		cpsie(I32_bit);
		pic_dispatch(is, frame);
		cpsid(I32_bit);

		if (!early_eoi) {
			icc_eoi1r_write(iar);
			isb();
		}
	}

	/* Restore the IPL that was in effect when we were entered. */
	pic_do_pending_ints(I32_bit, oldipl, frame);
}
747 748
748static int 749static int
749gicv3_detect_pmr_bits(struct gicv3_softc *sc) 750gicv3_detect_pmr_bits(struct gicv3_softc *sc)
750{ 751{
751 const uint32_t opmr = icc_pmr_read(); 752 const uint32_t opmr = icc_pmr_read();
752 icc_pmr_write(0xbf); 753 icc_pmr_write(0xbf);
753 const uint32_t npmr = icc_pmr_read(); 754 const uint32_t npmr = icc_pmr_read();
754 icc_pmr_write(opmr); 755 icc_pmr_write(opmr);
755 756
756 return NBBY - (ffs(npmr) - 1); 757 return NBBY - (ffs(npmr) - 1);
757} 758}
758 759
759static int 760static int
760gicv3_detect_ipriority_bits(struct gicv3_softc *sc) 761gicv3_detect_ipriority_bits(struct gicv3_softc *sc)
761{ 762{
762 const uint32_t oipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8)); 763 const uint32_t oipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8));
763 gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr | 0xff); 764 gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr | 0xff);
764 const uint32_t nipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8)); 765 const uint32_t nipriorityr = gicd_read_4(sc, GICD_IPRIORITYRn(8));
765 gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr); 766 gicd_write_4(sc, GICD_IPRIORITYRn(8), oipriorityr);
766 767
767 return NBBY - (ffs(nipriorityr & 0xff) - 1); 768 return NBBY - (ffs(nipriorityr & 0xff) - 1);
768} 769}
769 770
/*
 * One-time driver initialization, run on the primary CPU: probe the
 * distributor, register the main pic (and the LPI pic when supported),
 * initialize the boot CPU's interface, and establish softint and IPI
 * interrupt sources.
 */
int
gicv3_init(struct gicv3_softc *sc)
{
	const uint32_t gicd_typer = gicd_read_4(sc, GICD_TYPER);
	const uint32_t gicd_ctrl = gicd_read_4(sc, GICD_CTRL);
	int n;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	LIST_INIT(&sc->sc_lpi_callbacks);

	/* No per-CPU routing information is known yet. */
	for (n = 0; n < MAXCPUS; n++)
		sc->sc_irouter[n] = UINT64_MAX;

	/* Assume 4 implemented priority bits in both views by default. */
	sc->sc_priority_shift = 4;
	sc->sc_pmr_shift = 4;

	if ((gicd_ctrl & GICD_CTRL_DS) == 0) {
		/*
		 * Security is enabled.  Measure the priority bits seen
		 * through PMR and through IPRIORITYR and compensate for
		 * any difference between the two views.
		 */
		const int pmr_bits = gicv3_detect_pmr_bits(sc);
		const int ipriority_bits = gicv3_detect_ipriority_bits(sc);

		if (ipriority_bits != pmr_bits)
			--sc->sc_priority_shift;

		aprint_verbose_dev(sc->sc_dev, "%d pmr bits, %d ipriority bits\n",
		    pmr_bits, ipriority_bits);
	} else {
		aprint_verbose_dev(sc->sc_dev, "security disabled\n");
	}

	aprint_verbose_dev(sc->sc_dev, "priority shift %d, pmr shift %d\n",
	    sc->sc_priority_shift, sc->sc_pmr_shift);

	/* Register the main pic covering all SPI lines. */
	sc->sc_pic.pic_ops = &gicv3_picops;
	sc->sc_pic.pic_maxsources = GICD_TYPER_LINES(gicd_typer);
	snprintf(sc->sc_pic.pic_name, sizeof(sc->sc_pic.pic_name), "gicv3");
#ifdef MULTIPROCESSOR
	sc->sc_pic.pic_cpus = kcpuset_running;
#endif
	pic_add(&sc->sc_pic, 0);

	if ((gicd_typer & GICD_TYPER_LPIS) != 0) {
		/* LPIs are supported: register the LPI pic and its ID pool. */
		sc->sc_lpi.pic_ops = &gicv3_lpiops;
		sc->sc_lpi.pic_maxsources = 8192;	/* Min. required by GICv3 spec */
		snprintf(sc->sc_lpi.pic_name, sizeof(sc->sc_lpi.pic_name), "gicv3-lpi");
		pic_add(&sc->sc_lpi, GIC_LPI_BASE);

		sc->sc_lpi_pool = vmem_create("gicv3-lpi", 0, sc->sc_lpi.pic_maxsources,
		    1, NULL, NULL, NULL, 0, VM_SLEEP, IPL_HIGH);
		if (sc->sc_lpi_pool == NULL)
			panic("failed to create gicv3 lpi pool\n");

		gicv3_lpi_init(sc);
	}

	KASSERT(gicv3_softc == NULL);
	gicv3_softc = sc;

	/* Report the affinity of each discovered redistributor. */
	for (int i = 0; i < sc->sc_bsh_r_count; i++) {
		const uint64_t gicr_typer = gicr_read_8(sc, i, GICR_TYPER);
		const u_int aff0 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff0);
		const u_int aff1 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff1);
		const u_int aff2 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff2);
		const u_int aff3 = __SHIFTOUT(gicr_typer, GICR_TYPER_Affinity_Value_Aff3);

		aprint_debug_dev(sc->sc_dev, "redist %d: cpu %d.%d.%d.%d\n",
		    i, aff3, aff2, aff1, aff0);
	}

	gicv3_dist_enable(sc);

	/* Initialize the boot CPU's interface now; APs do this in cpu_init. */
	gicv3_cpu_init(&sc->sc_pic, curcpu());
	if ((gicd_typer & GICD_TYPER_LPIS) != 0)
		gicv3_lpi_cpu_init(&sc->sc_lpi, curcpu());

#ifdef __HAVE_PIC_FAST_SOFTINTS
	/* Establish software interrupt sources. */
	intr_establish_xname(SOFTINT_BIO, IPL_SOFTBIO, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_BIO, "softint bio");
	intr_establish_xname(SOFTINT_CLOCK, IPL_SOFTCLOCK, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_CLOCK, "softint clock");
	intr_establish_xname(SOFTINT_NET, IPL_SOFTNET, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_NET, "softint net");
	intr_establish_xname(SOFTINT_SERIAL, IPL_SOFTSERIAL, IST_MPSAFE | IST_EDGE, pic_handle_softint, (void *)SOFTINT_SERIAL, "softint serial");
#endif

#ifdef MULTIPROCESSOR
	/* Establish inter-processor interrupt sources. */
	intr_establish_xname(IPI_AST, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_ast, (void *)-1, "IPI ast");
	intr_establish_xname(IPI_XCALL, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_xcall, (void *)-1, "IPI xcall");
	intr_establish_xname(IPI_GENERIC, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_generic, (void *)-1, "IPI generic");
	intr_establish_xname(IPI_NOP, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_nop, (void *)-1, "IPI nop");
	intr_establish_xname(IPI_SHOOTDOWN, IPL_SCHED, IST_MPSAFE | IST_EDGE, pic_ipi_shootdown, (void *)-1, "IPI shootdown");
#ifdef DDB
	intr_establish_xname(IPI_DDB, IPL_HIGH, IST_MPSAFE | IST_EDGE, pic_ipi_ddb, NULL, "IPI ddb");
#endif
#ifdef __HAVE_PREEMPTION
	intr_establish_xname(IPI_KPREEMPT, IPL_VM, IST_MPSAFE | IST_EDGE, pic_ipi_kpreempt, (void *)-1, "IPI kpreempt");
#endif
#endif

	return 0;
}