Sun Aug 8 19:28:09 2021 UTC ()
Re-apply

Move 'struct pic_pending' from percpu to struct cpu_info. Saves a few
instructions in splx.

There is(/was) no need to use atomic operations on the percpu / cpu_info
members, so don't.

Finally, removing the use of percpu should help avoid problems with "late"
attaching cpus.


(skrll)
diff -r1.36 -r1.37 src/sys/arch/aarch64/include/cpu.h
diff -r1.117 -r1.118 src/sys/arch/arm/include/cpu.h
diff -r1.70 -r1.71 src/sys/arch/arm/pic/pic.c

cvs diff -r1.36 -r1.37 src/sys/arch/aarch64/include/cpu.h (expand / switch to unified diff)

--- src/sys/arch/aarch64/include/cpu.h 2021/05/29 06:54:20 1.36
+++ src/sys/arch/aarch64/include/cpu.h 2021/08/08 19:28:08 1.37
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.36 2021/05/29 06:54:20 skrll Exp $ */ 1/* $NetBSD: cpu.h,v 1.37 2021/08/08 19:28:08 skrll Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry. 8 * by Matt Thomas of 3am Software Foundry.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -94,26 +94,29 @@ struct cpu_info { @@ -94,26 +94,29 @@ struct cpu_info {
94 * largely CPU-private. 94 * largely CPU-private.
95 */ 95 */
96 struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT); 96 struct lwp *ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
97 97
98 uint64_t ci_lastintr; 98 uint64_t ci_lastintr;
99 99
100 int ci_mtx_oldspl; 100 int ci_mtx_oldspl;
101 int ci_mtx_count; 101 int ci_mtx_count;
102 102
103 int ci_cpl; /* current processor level (spl) */ 103 int ci_cpl; /* current processor level (spl) */
104 int ci_hwpl; /* current hardware priority */ 104 int ci_hwpl; /* current hardware priority */
105 volatile u_int ci_softints; 105 volatile u_int ci_softints;
106 volatile u_int ci_intr_depth; 106 volatile u_int ci_intr_depth;
 107 volatile uint32_t ci_blocked_pics;
 108 volatile uint32_t ci_pending_pics;
 109 volatile uint32_t ci_pending_ipls;
107 110
108 int ci_kfpu_spl; 111 int ci_kfpu_spl;
109 112
110 /* event counters */ 113 /* event counters */
111 struct evcnt ci_vfp_use; 114 struct evcnt ci_vfp_use;
112 struct evcnt ci_vfp_reuse; 115 struct evcnt ci_vfp_reuse;
113 struct evcnt ci_vfp_save; 116 struct evcnt ci_vfp_save;
114 struct evcnt ci_vfp_release; 117 struct evcnt ci_vfp_release;
115 struct evcnt ci_uct_trap; 118 struct evcnt ci_uct_trap;
116 struct evcnt ci_intr_preempt; 119 struct evcnt ci_intr_preempt;
117 120
118 /* FDT or similar supplied "cpu capacity" */ 121 /* FDT or similar supplied "cpu capacity" */
119 uint32_t ci_capacity_dmips_mhz; 122 uint32_t ci_capacity_dmips_mhz;

cvs diff -r1.117 -r1.118 src/sys/arch/arm/include/cpu.h (expand / switch to unified diff)

--- src/sys/arch/arm/include/cpu.h 2021/03/27 12:15:08 1.117
+++ src/sys/arch/arm/include/cpu.h 2021/08/08 19:28:08 1.118
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.117 2021/03/27 12:15:08 jmcneill Exp $ */ 1/* $NetBSD: cpu.h,v 1.118 2021/08/08 19:28:08 skrll Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994-1996 Mark Brinicombe. 4 * Copyright (c) 1994-1996 Mark Brinicombe.
5 * Copyright (c) 1994 Brini. 5 * Copyright (c) 1994 Brini.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software written for Brini by Mark Brinicombe 8 * This code is derived from software written for Brini by Mark Brinicombe
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -180,26 +180,29 @@ struct cpu_info { @@ -180,26 +180,29 @@ struct cpu_info {
180 * largely CPU-private. 180 * largely CPU-private.
181 */ 181 */
182 lwp_t * ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT); 182 lwp_t * ci_softlwps[SOFTINT_COUNT] __aligned(COHERENCY_UNIT);
183 183
184 struct cpu_softc * 184 struct cpu_softc *
185 ci_softc; /* platform softc */ 185 ci_softc; /* platform softc */
186 186
187 int ci_cpl; /* current processor level (spl) */ 187 int ci_cpl; /* current processor level (spl) */
188 int ci_hwpl; /* current hardware priority */ 188 int ci_hwpl; /* current hardware priority */
189 int ci_kfpu_spl; 189 int ci_kfpu_spl;
190 190
191 volatile u_int ci_intr_depth; /* */ 191 volatile u_int ci_intr_depth; /* */
192 volatile u_int ci_softints; 192 volatile u_int ci_softints;
 193 volatile uint32_t ci_blocked_pics;
 194 volatile uint32_t ci_pending_pics;
 195 volatile uint32_t ci_pending_ipls;
193 196
194 lwp_t * ci_lastlwp; /* last lwp */ 197 lwp_t * ci_lastlwp; /* last lwp */
195 198
196 struct evcnt ci_arm700bugcount; 199 struct evcnt ci_arm700bugcount;
197 int32_t ci_mtx_count; 200 int32_t ci_mtx_count;
198 int ci_mtx_oldspl; 201 int ci_mtx_oldspl;
199 register_t ci_undefsave[3]; 202 register_t ci_undefsave[3];
200 uint32_t ci_vfp_id; 203 uint32_t ci_vfp_id;
201 uint64_t ci_lastintr; 204 uint64_t ci_lastintr;
202 205
203 struct pmap_tlb_info * 206 struct pmap_tlb_info *
204 ci_tlb_info; 207 ci_tlb_info;
205 struct pmap * ci_pmap_lastuser; 208 struct pmap * ci_pmap_lastuser;

cvs diff -r1.70 -r1.71 src/sys/arch/arm/pic/pic.c (expand / switch to unified diff)

--- src/sys/arch/arm/pic/pic.c 2021/03/27 12:15:09 1.70
+++ src/sys/arch/arm/pic/pic.c 2021/08/08 19:28:08 1.71
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pic.c,v 1.70 2021/03/27 12:15:09 jmcneill Exp $ */ 1/* $NetBSD: pic.c,v 1.71 2021/08/08 19:28:08 skrll Exp $ */
2/*- 2/*-
3 * Copyright (c) 2008 The NetBSD Foundation, Inc. 3 * Copyright (c) 2008 The NetBSD Foundation, Inc.
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * This code is derived from software contributed to The NetBSD Foundation 6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Matt Thomas. 7 * by Matt Thomas.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -23,27 +23,27 @@ @@ -23,27 +23,27 @@
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE. 28 * POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#define _INTR_PRIVATE 31#define _INTR_PRIVATE
32#include "opt_ddb.h" 32#include "opt_ddb.h"
33#include "opt_multiprocessor.h" 33#include "opt_multiprocessor.h"
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.70 2021/03/27 12:15:09 jmcneill Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.71 2021/08/08 19:28:08 skrll Exp $");
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/atomic.h> 39#include <sys/atomic.h>
40#include <sys/cpu.h> 40#include <sys/cpu.h>
41#include <sys/evcnt.h> 41#include <sys/evcnt.h>
42#include <sys/interrupt.h> 42#include <sys/interrupt.h>
43#include <sys/intr.h> 43#include <sys/intr.h>
44#include <sys/ipi.h> 44#include <sys/ipi.h>
45#include <sys/kernel.h> 45#include <sys/kernel.h>
46#include <sys/kmem.h> 46#include <sys/kmem.h>
47#include <sys/mutex.h> 47#include <sys/mutex.h>
48#include <sys/once.h> 48#include <sys/once.h>
49#include <sys/xcall.h> 49#include <sys/xcall.h>
@@ -55,57 +55,35 @@ __KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.70 @@ -55,57 +55,35 @@ __KERNEL_RCSID(0, "$NetBSD: pic.c,v 1.70
55#ifdef DDB 55#ifdef DDB
56#include <arm/db_machdep.h> 56#include <arm/db_machdep.h>
57#endif 57#endif
58 58
59#include <arm/pic/picvar.h> 59#include <arm/pic/picvar.h>
60 60
61#if defined(__HAVE_PIC_PENDING_INTRS) 61#if defined(__HAVE_PIC_PENDING_INTRS)
62/* 62/*
63 * This implementation of pending interrupts on a MULTIPROCESSOR system makes 63 * This implementation of pending interrupts on a MULTIPROCESSOR system makes
64 * the assumption that a PIC (pic_softc) shall only have all its interrupts 64 * the assumption that a PIC (pic_softc) shall only have all its interrupts
65 * come from the same CPU. In other words, interrupts from a single PIC will 65 * come from the same CPU. In other words, interrupts from a single PIC will
66 * not be distributed among multiple CPUs. 66 * not be distributed among multiple CPUs.
67 */ 67 */
68struct pic_pending { 
69 volatile uint32_t blocked_pics; 
70 volatile uint32_t pending_pics; 
71 volatile uint32_t pending_ipls; 
72}; 
73static uint32_t 68static uint32_t
74 pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int); 69 pic_find_pending_irqs_by_ipl(struct pic_softc *, size_t, uint32_t, int);
75static struct pic_softc * 70static struct pic_softc *
76 pic_list_find_pic_by_pending_ipl(struct pic_pending *, uint32_t); 71 pic_list_find_pic_by_pending_ipl(struct cpu_info *, uint32_t);
77static void 72static void
78 pic_deliver_irqs(struct pic_pending *, struct pic_softc *, int, void *); 73 pic_deliver_irqs(struct cpu_info *, struct pic_softc *, int, void *);
79static void 74static void
80 pic_list_deliver_irqs(struct pic_pending *, register_t, int, void *); 75 pic_list_deliver_irqs(struct cpu_info *, register_t, int, void *);
81 76
82#ifdef MULTIPROCESSOR 
83percpu_t *pic_pending_percpu; 
84static struct pic_pending * 
85pic_pending_get(void) 
86{ 
87 return percpu_getref(pic_pending_percpu); 
88} 
89static void 
90pic_pending_put(struct pic_pending *pend) 
91{ 
92 percpu_putref(pic_pending_percpu); 
93} 
94#else 
95struct pic_pending pic_pending; 
96#define pic_pending_get() (&pic_pending) 
97#define pic_pending_put(pend) __nothing 
98#endif /* MULTIPROCESSOR */ 
99#endif /* __HAVE_PIC_PENDING_INTRS */ 77#endif /* __HAVE_PIC_PENDING_INTRS */
100 78
101struct pic_softc *pic_list[PIC_MAXPICS]; 79struct pic_softc *pic_list[PIC_MAXPICS];
102#if PIC_MAXPICS > 32 80#if PIC_MAXPICS > 32
103#error PIC_MAXPICS > 32 not supported 81#error PIC_MAXPICS > 32 not supported
104#endif 82#endif
105struct intrsource *pic_sources[PIC_MAXMAXSOURCES]; 83struct intrsource *pic_sources[PIC_MAXMAXSOURCES];
106struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES]; 84struct intrsource *pic__iplsources[PIC_MAXMAXSOURCES];
107struct intrsource **pic_iplsource[NIPL] = { 85struct intrsource **pic_iplsource[NIPL] = {
108 [0 ... NIPL-1] = pic__iplsources, 86 [0 ... NIPL-1] = pic__iplsources,
109}; 87};
110size_t pic_ipl_offset[NIPL+1]; 88size_t pic_ipl_offset[NIPL+1];
111 89
@@ -254,81 +232,80 @@ pic_handle_intr(void *arg) @@ -254,81 +232,80 @@ pic_handle_intr(void *arg)
254 struct pic_softc * const pic = arg; 232 struct pic_softc * const pic = arg;
255 int rv; 233 int rv;
256 234
257 rv = (*pic->pic_ops->pic_find_pending_irqs)(pic); 235 rv = (*pic->pic_ops->pic_find_pending_irqs)(pic);
258 236
259 return rv > 0; 237 return rv > 0;
260} 238}
261 239
262#if defined(__HAVE_PIC_PENDING_INTRS) 240#if defined(__HAVE_PIC_PENDING_INTRS)
263void 241void
264pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is) 242pic_mark_pending_source(struct pic_softc *pic, struct intrsource *is)
265{ 243{
266 const uint32_t ipl_mask = __BIT(is->is_ipl); 244 const uint32_t ipl_mask = __BIT(is->is_ipl);
 245 struct cpu_info * const ci = curcpu();
267 246
268 atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5], 247 atomic_or_32(&pic->pic_pending_irqs[is->is_irq >> 5],
269 __BIT(is->is_irq & 0x1f)); 248 __BIT(is->is_irq & 0x1f));
270 249
271 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 250 atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
272 struct pic_pending *pend = pic_pending_get(); 251 ci->ci_pending_ipls |= ipl_mask;
273 atomic_or_32(&pend->pending_ipls, ipl_mask); 252 ci->ci_pending_pics |= __BIT(pic->pic_id);
274 atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id)); 
275 pic_pending_put(pend); 
276} 253}
277 254
278void 255void
279pic_mark_pending(struct pic_softc *pic, int irq) 256pic_mark_pending(struct pic_softc *pic, int irq)
280{ 257{
281 struct intrsource * const is = pic->pic_sources[irq]; 258 struct intrsource * const is = pic->pic_sources[irq];
282 259
283 KASSERT(irq < pic->pic_maxsources); 260 KASSERT(irq < pic->pic_maxsources);
284 KASSERT(is != NULL); 261 KASSERT(is != NULL);
285 262
286 pic_mark_pending_source(pic, is); 263 pic_mark_pending_source(pic, is);
287} 264}
288 265
289uint32_t 266uint32_t
290pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base, 267pic_mark_pending_sources(struct pic_softc *pic, size_t irq_base,
291 uint32_t pending) 268 uint32_t pending)
292{ 269{
293 struct intrsource ** const isbase = &pic->pic_sources[irq_base]; 270 struct intrsource ** const isbase = &pic->pic_sources[irq_base];
 271 struct cpu_info * const ci = curcpu();
294 struct intrsource *is; 272 struct intrsource *is;
295 volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5]; 273 volatile uint32_t *ipending = &pic->pic_pending_irqs[irq_base >> 5];
296 uint32_t ipl_mask = 0; 274 uint32_t ipl_mask = 0;
297 275
298 if (pending == 0) 276 if (pending == 0)
299 return ipl_mask; 277 return ipl_mask;
300 278
301 KASSERT((irq_base & 31) == 0); 279 KASSERT((irq_base & 31) == 0);
302 280
303 (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending); 281 (*pic->pic_ops->pic_block_irqs)(pic, irq_base, pending);
304 282
305 atomic_or_32(ipending, pending); 283 atomic_or_32(ipending, pending);
306 while (pending != 0) { 284 while (pending != 0) {
307 int n = ffs(pending); 285 int n = ffs(pending);
308 if (n-- == 0) 286 if (n-- == 0)
309 break; 287 break;
310 is = isbase[n]; 288 is = isbase[n];
311 KASSERT(is != NULL); 289 KASSERT(is != NULL);
312 KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32); 290 KASSERT(irq_base <= is->is_irq && is->is_irq < irq_base + 32);
313 pending &= ~__BIT(n); 291 pending &= ~__BIT(n);
314 ipl_mask |= __BIT(is->is_ipl); 292 ipl_mask |= __BIT(is->is_ipl);
315 } 293 }
316 294
317 atomic_or_32(&pic->pic_pending_ipls, ipl_mask); 295 atomic_or_32(&pic->pic_pending_ipls, ipl_mask);
318 struct pic_pending *pend = pic_pending_get(); 296 ci->ci_pending_ipls |= ipl_mask;
319 atomic_or_32(&pend->pending_ipls, ipl_mask); 297 ci->ci_pending_pics |= __BIT(pic->pic_id);
320 atomic_or_32(&pend->pending_pics, __BIT(pic->pic_id)); 298
321 pic_pending_put(pend); 
322 return ipl_mask; 299 return ipl_mask;
323} 300}
324 301
325uint32_t 302uint32_t
326pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base, 303pic_find_pending_irqs_by_ipl(struct pic_softc *pic, size_t irq_base,
327 uint32_t pending, int ipl) 304 uint32_t pending, int ipl)
328{ 305{
329 uint32_t ipl_irq_mask = 0; 306 uint32_t ipl_irq_mask = 0;
330 uint32_t irq_mask; 307 uint32_t irq_mask;
331 308
332 for (;;) { 309 for (;;) {
333 int irq = ffs(pending); 310 int irq = ffs(pending);
334 if (irq-- == 0) 311 if (irq-- == 0)
@@ -377,27 +354,27 @@ pic_dispatch(struct intrsource *is, void @@ -377,27 +354,27 @@ pic_dispatch(struct intrsource *is, void
377 KERNEL_UNLOCK_ONE(NULL); 354 KERNEL_UNLOCK_ONE(NULL);
378 } else 355 } else
379#endif 356#endif
380 (void)(*func)(arg); 357 (void)(*func)(arg);
381 358
382 struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu); 359 struct pic_percpu * const pcpu = percpu_getref(is->is_pic->pic_percpu);
383 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC); 360 KASSERT(pcpu->pcpu_magic == PICPERCPU_MAGIC);
384 pcpu->pcpu_evs[is->is_irq].ev_count++; 361 pcpu->pcpu_evs[is->is_irq].ev_count++;
385 percpu_putref(is->is_pic->pic_percpu); 362 percpu_putref(is->is_pic->pic_percpu);
386} 363}
387 364
388#if defined(__HAVE_PIC_PENDING_INTRS) 365#if defined(__HAVE_PIC_PENDING_INTRS)
389void 366void
390pic_deliver_irqs(struct pic_pending *pend, struct pic_softc *pic, int ipl, 367pic_deliver_irqs(struct cpu_info *ci, struct pic_softc *pic, int ipl,
391 void *frame) 368 void *frame)
392{ 369{
393 const uint32_t ipl_mask = __BIT(ipl); 370 const uint32_t ipl_mask = __BIT(ipl);
394 struct intrsource *is; 371 struct intrsource *is;
395 volatile uint32_t *ipending = pic->pic_pending_irqs; 372 volatile uint32_t *ipending = pic->pic_pending_irqs;
396 volatile uint32_t *iblocked = pic->pic_blocked_irqs; 373 volatile uint32_t *iblocked = pic->pic_blocked_irqs;
397 size_t irq_base; 374 size_t irq_base;
398#if PIC_MAXSOURCES > 32 375#if PIC_MAXSOURCES > 32
399 size_t irq_count; 376 size_t irq_count;
400 int poi = 0; /* Possibility of interrupting */ 377 int poi = 0; /* Possibility of interrupting */
401#endif 378#endif
402 uint32_t pending_irqs; 379 uint32_t pending_irqs;
403 uint32_t blocked_irqs; 380 uint32_t blocked_irqs;
@@ -458,45 +435,45 @@ pic_deliver_irqs(struct pic_pending *pen @@ -458,45 +435,45 @@ pic_deliver_irqs(struct pic_pending *pen
458 * DISABLE_INTERRUPT(). 435 * DISABLE_INTERRUPT().
459 */ 436 */
460 poi = 1; 437 poi = 1;
461#endif 438#endif
462 blocked_irqs |= __BIT(irq); 439 blocked_irqs |= __BIT(irq);
463 } else { 440 } else {
464 KASSERT(0); 441 KASSERT(0);
465 } 442 }
466 pending_irqs = pic_find_pending_irqs_by_ipl(pic, 443 pending_irqs = pic_find_pending_irqs_by_ipl(pic,
467 irq_base, *ipending, ipl); 444 irq_base, *ipending, ipl);
468 } while (pending_irqs); 445 } while (pending_irqs);
469 if (blocked_irqs) { 446 if (blocked_irqs) {
470 atomic_or_32(iblocked, blocked_irqs); 447 atomic_or_32(iblocked, blocked_irqs);
471 atomic_or_32(&pend->blocked_pics, __BIT(pic->pic_id)); 448 ci->ci_blocked_pics |= __BIT(pic->pic_id);
472 } 449 }
473 } 450 }
474 451
475 KASSERT(progress); 452 KASSERT(progress);
476 /* 453 /*
477 * Since interrupts are disabled, we don't have to be too careful 454 * Since interrupts are disabled, we don't have to be too careful
478 * about these. 455 * about these.
479 */ 456 */
480 if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0) 457 if (atomic_and_32_nv(&pic->pic_pending_ipls, ~ipl_mask) == 0)
481 atomic_and_32(&pend->pending_pics, ~__BIT(pic->pic_id)); 458 ci->ci_pending_pics &= ~__BIT(pic->pic_id);
482} 459}
483 460
484static void 461static void
485pic_list_unblock_irqs(struct pic_pending *pend) 462pic_list_unblock_irqs(struct cpu_info *ci)
486{ 463{
487 uint32_t blocked_pics = pend->blocked_pics; 464 uint32_t blocked_pics = ci->ci_blocked_pics;
488 465
489 pend->blocked_pics = 0; 466 ci->ci_blocked_pics = 0;
490 467
491 for (;;) { 468 for (;;) {
492 struct pic_softc *pic; 469 struct pic_softc *pic;
493#if PIC_MAXSOURCES > 32 470#if PIC_MAXSOURCES > 32
494 volatile uint32_t *iblocked; 471 volatile uint32_t *iblocked;
495 uint32_t blocked; 472 uint32_t blocked;
496 size_t irq_base; 473 size_t irq_base;
497#endif 474#endif
498 475
499 int pic_id = ffs(blocked_pics); 476 int pic_id = ffs(blocked_pics);
500 if (pic_id-- == 0) 477 if (pic_id-- == 0)
501 return; 478 return;
502 479
@@ -513,83 +490,81 @@ pic_list_unblock_irqs(struct pic_pending @@ -513,83 +490,81 @@ pic_list_unblock_irqs(struct pic_pending
513 } 490 }
514 } 491 }
515#else 492#else
516 KASSERT(pic->pic_blocked_irqs[0] != 0); 493 KASSERT(pic->pic_blocked_irqs[0] != 0);
517 (*pic->pic_ops->pic_unblock_irqs)(pic, 494 (*pic->pic_ops->pic_unblock_irqs)(pic,
518 0, pic->pic_blocked_irqs[0]); 495 0, pic->pic_blocked_irqs[0]);
519 pic->pic_blocked_irqs[0] = 0; 496 pic->pic_blocked_irqs[0] = 0;
520#endif 497#endif
521 blocked_pics &= ~__BIT(pic_id); 498 blocked_pics &= ~__BIT(pic_id);
522 } 499 }
523} 500}
524 501
525struct pic_softc * 502struct pic_softc *
526pic_list_find_pic_by_pending_ipl(struct pic_pending *pend, uint32_t ipl_mask) 503pic_list_find_pic_by_pending_ipl(struct cpu_info *ci, uint32_t ipl_mask)
527{ 504{
528 uint32_t pending_pics = pend->pending_pics; 505 uint32_t pending_pics = ci->ci_pending_pics;
529 struct pic_softc *pic; 506 struct pic_softc *pic;
530 507
531 for (;;) { 508 for (;;) {
532 int pic_id = ffs(pending_pics); 509 int pic_id = ffs(pending_pics);
533 if (pic_id-- == 0) 510 if (pic_id-- == 0)
534 return NULL; 511 return NULL;
535 512
536 pic = pic_list[pic_id]; 513 pic = pic_list[pic_id];
537 KASSERT(pic != NULL); 514 KASSERT(pic != NULL);
538 if (pic->pic_pending_ipls & ipl_mask) 515 if (pic->pic_pending_ipls & ipl_mask)
539 return pic; 516 return pic;
540 pending_pics &= ~__BIT(pic_id); 517 pending_pics &= ~__BIT(pic_id);
541 } 518 }
542} 519}
543 520
544void 521void
545pic_list_deliver_irqs(struct pic_pending *pend, register_t psw, int ipl, 522pic_list_deliver_irqs(struct cpu_info *ci, register_t psw, int ipl,
546 void *frame) 523 void *frame)
547{ 524{
548 const uint32_t ipl_mask = __BIT(ipl); 525 const uint32_t ipl_mask = __BIT(ipl);
549 struct pic_softc *pic; 526 struct pic_softc *pic;
550 527
551 while ((pic = pic_list_find_pic_by_pending_ipl(pend, ipl_mask)) != NULL) { 528 while ((pic = pic_list_find_pic_by_pending_ipl(ci, ipl_mask)) != NULL) {
552 pic_deliver_irqs(pend, pic, ipl, frame); 529 pic_deliver_irqs(ci, pic, ipl, frame);
553 KASSERT((pic->pic_pending_ipls & ipl_mask) == 0); 530 KASSERT((pic->pic_pending_ipls & ipl_mask) == 0);
554 } 531 }
555 atomic_and_32(&pend->pending_ipls, ~ipl_mask); 532 ci->ci_pending_ipls &= ~ipl_mask;
556} 533}
557#endif /* __HAVE_PIC_PENDING_INTRS */ 534#endif /* __HAVE_PIC_PENDING_INTRS */
558 535
559void 536void
560pic_do_pending_ints(register_t psw, int newipl, void *frame) 537pic_do_pending_ints(register_t psw, int newipl, void *frame)
561{ 538{
562 struct cpu_info * const ci = curcpu(); 539 struct cpu_info * const ci = curcpu();
563 if (__predict_false(newipl == IPL_HIGH)) { 540 if (__predict_false(newipl == IPL_HIGH)) {
564 KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl); 541 KASSERTMSG(ci->ci_cpl == IPL_HIGH, "cpl %d", ci->ci_cpl);
565 return; 542 return;
566 } 543 }
567#if defined(__HAVE_PIC_PENDING_INTRS) 544#if defined(__HAVE_PIC_PENDING_INTRS)
568 struct pic_pending *pend = pic_pending_get(); 545 while ((ci->ci_pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) {
569 while ((pend->pending_ipls & ~__BIT(newipl)) > __BIT(newipl)) { 546 KASSERT(ci->ci_pending_ipls < __BIT(NIPL));
570 KASSERT(pend->pending_ipls < __BIT(NIPL)); 
571 for (;;) { 547 for (;;) {
572 int ipl = 31 - __builtin_clz(pend->pending_ipls); 548 int ipl = 31 - __builtin_clz(ci->ci_pending_ipls);
573 KASSERT(ipl < NIPL); 549 KASSERT(ipl < NIPL);
574 if (ipl <= newipl) 550 if (ipl <= newipl)
575 break; 551 break;
576 552
577 pic_set_priority(ci, ipl); 553 pic_set_priority(ci, ipl);
578 pic_list_deliver_irqs(pend, psw, ipl, frame); 554 pic_list_deliver_irqs(ci, psw, ipl, frame);
579 pic_list_unblock_irqs(pend); 555 pic_list_unblock_irqs(ci);
580 } 556 }
581 } 557 }
582 pic_pending_put(pend); 
583#endif /* __HAVE_PIC_PENDING_INTRS */ 558#endif /* __HAVE_PIC_PENDING_INTRS */
584#ifdef __HAVE_PREEMPTION 559#ifdef __HAVE_PREEMPTION
585 if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) { 560 if (newipl == IPL_NONE && (ci->ci_astpending & __BIT(1))) {
586 pic_set_priority(ci, IPL_SCHED); 561 pic_set_priority(ci, IPL_SCHED);
587 kpreempt(0); 562 kpreempt(0);
588 } 563 }
589#endif 564#endif
590 if (ci->ci_cpl != newipl) 565 if (ci->ci_cpl != newipl)
591 pic_set_priority(ci, newipl); 566 pic_set_priority(ci, newipl);
592} 567}
593 568
594static void 569static void
595pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci) 570pic_percpu_allocate(void *v0, void *v1, struct cpu_info *ci)
@@ -632,31 +607,26 @@ pic_init(void) @@ -632,31 +607,26 @@ pic_init(void)
632} 607}
633 608
634int 609int
635pic_add(struct pic_softc *pic, int irqbase) 610pic_add(struct pic_softc *pic, int irqbase)
636{ 611{
637 int slot, maybe_slot = -1; 612 int slot, maybe_slot = -1;
638 size_t sourcebase; 613 size_t sourcebase;
639 static ONCE_DECL(pic_once); 614 static ONCE_DECL(pic_once);
640 615
641 RUN_ONCE(&pic_once, pic_init); 616 RUN_ONCE(&pic_once, pic_init);
642 617
643 KASSERT(strlen(pic->pic_name) > 0); 618 KASSERT(strlen(pic->pic_name) > 0);
644 619
645#if defined(__HAVE_PIC_PENDING_INTRS) && defined(MULTIPROCESSOR) 
646 if (__predict_false(pic_pending_percpu == NULL)) 
647 pic_pending_percpu = percpu_alloc(sizeof(struct pic_pending)); 
648#endif /* __HAVE_PIC_PENDING_INTRS && MULTIPROCESSOR */ 
649 
650 mutex_enter(&pic_lock); 620 mutex_enter(&pic_lock);
651 if (irqbase == PIC_IRQBASE_ALLOC) { 621 if (irqbase == PIC_IRQBASE_ALLOC) {
652 irqbase = pic_lastbase; 622 irqbase = pic_lastbase;
653 } 623 }
654 for (slot = 0; slot < PIC_MAXPICS; slot++) { 624 for (slot = 0; slot < PIC_MAXPICS; slot++) {
655 struct pic_softc * const xpic = pic_list[slot]; 625 struct pic_softc * const xpic = pic_list[slot];
656 if (xpic == NULL) { 626 if (xpic == NULL) {
657 if (maybe_slot < 0) 627 if (maybe_slot < 0)
658 maybe_slot = slot; 628 maybe_slot = slot;
659 if (irqbase < 0) 629 if (irqbase < 0)
660 break; 630 break;
661 continue; 631 continue;
662 } 632 }