Tue Apr 11 13:11:01 2023 UTC
x86: Omit needless membar_sync in intr_disestablish_xcall.

Details in comments.


(riastradh)
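The membar_sync in question is removed from intr_disestablish_xcall(), which falls past the end of the excerpt below, so the explanatory comments the commit adds are not visible here. As a hedged userland sketch of the general memory-ordering point (assuming the pattern used throughout this file: the unlink runs on the interrupt's bound CPU via xc_unicast() with interrupts disabled, so the surrounding synchronization already orders the stores), C11 atomics make the same argument: a release/acquire pair publishes data without an extra full fence. Names below are illustrative, not kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static int payload;		/* plain data being published */
static atomic_int ready;	/* synchronizing flag */

static int
consumer(void *arg)
{
	(void)arg;
	/*
	 * The acquire load pairs with the release store below: once
	 * ready is observed, payload is visible too.  No additional
	 * full fence is required for that guarantee.
	 */
	while (atomic_load_explicit(&ready, memory_order_acquire) == 0)
		thrd_yield();
	printf("payload = %d\n", payload);
	return 0;
}

int
main(void)
{
	thrd_t t;

	thrd_create(&t, consumer, NULL);
	payload = 42;
	/*
	 * A seq_cst fence here would be the analogue of the needless
	 * membar_sync: the release store already publishes payload.
	 */
	atomic_store_explicit(&ready, 1, memory_order_release);
	thrd_join(t, NULL);
	return 0;
}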
cvs diff -r1.164 -r1.165 src/sys/arch/x86/x86/intr.c

--- src/sys/arch/x86/x86/intr.c 2023/01/25 15:54:53 1.164
+++ src/sys/arch/x86/x86/intr.c 2023/04/11 13:11:01 1.165
@@ -1,2168 +1,2172 @@
-/*	$NetBSD: intr.c,v 1.164 2023/01/25 15:54:53 riastradh Exp $	*/
+/*	$NetBSD: intr.c,v 1.165 2023/04/11 13:11:01 riastradh Exp $	*/

/*
 * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

/*-
 * Copyright (c) 1993, 1994 Charles Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)isa.c	7.2 (Berkeley) 5/13/91
 */

#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.164 2023/01/25 15:54:53 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.165 2023/04/11 13:11:01 riastradh Exp $");

#include "opt_intrdebug.h"
#include "opt_multiprocessor.h"
#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/xcall.h>
#include <sys/interrupt.h>
#include <sys/reboot.h>		/* for AB_VERBOSE */
#include <sys/sdt.h>

#include <sys/kauth.h>
#include <sys/conf.h>

#include <uvm/uvm_extern.h>

#include <machine/i8259.h>
#include <machine/pio.h>

#include <x86/intr_private.h>

#include "ioapic.h"
#include "lapic.h"
#include "pci.h"
#include "acpica.h"
#ifndef XENPV
#include "hyperv.h"
#if NHYPERV > 0
#include <dev/hyperv/hypervvar.h>

extern void Xresume_hyperv_hypercall(void);
extern void Xrecurse_hyperv_hypercall(void);
#endif
#endif

#if NIOAPIC > 0 || NACPICA > 0
#include <machine/i82093var.h>
#include <machine/mpbiosvar.h>
#include <machine/mpacpi.h>
#endif

#if NLAPIC > 0
#include <machine/i82489var.h>
#endif

#if NPCI > 0
#include <dev/pci/ppbreg.h>
#endif

#include <x86/pci/msipic.h>
#include <x86/pci/pci_msi_machdep.h>

#if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX)
#define msipic_is_msi_pic(PIC)	(false)
#endif

#include <ddb/db_active.h>

#ifdef DDB
#include <ddb/db_output.h>
#endif

#ifdef INTRDEBUG
#define DPRINTF(msg) printf msg
#else
#define DPRINTF(msg)
#endif

static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources =
	SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources);

static kmutex_t intr_distribute_lock;

static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *,
				  struct intrsource *);
static int __noinline intr_allocate_slot(struct pic *, int, int,
					 struct cpu_info **, int *, int *,
					 struct intrsource *);

static void intr_source_free(struct cpu_info *, int, struct pic *, int);

static void intr_establish_xcall(void *, void *);
static void intr_disestablish_xcall(void *, void *);

static const char *legacy_intr_string(int, char *, size_t, struct pic *);

static const char *xen_intr_string(int, char *, size_t, struct pic *);

#if defined(INTRSTACKSIZE)
static inline bool redzone_const_or_false(bool);
static inline int redzone_const_or_zero(int);
#endif

static void intr_redistribute_xc_t(void *, void *);
static void intr_redistribute_xc_s1(void *, void *);
static void intr_redistribute_xc_s2(void *, void *);
static bool intr_redistribute(struct cpu_info *);
static struct intrsource *intr_get_io_intrsource(const char *);
static void intr_free_io_intrsource_direct(struct intrsource *);
static int intr_num_handlers(struct intrsource *);
static int intr_find_unused_slot(struct cpu_info *, int *);
static void intr_activate_xcall(void *, void *);
static void intr_deactivate_xcall(void *, void *);
static void intr_get_affinity(struct intrsource *, kcpuset_t *);
static int intr_set_affinity(struct intrsource *, const kcpuset_t *);

SDT_PROBE_DEFINE3(sdt, kernel, intr, entry,
    "int (*)(void *)"/*func*/,
    "void *"/*arg*/,
    "struct intrhand *"/*ih*/);
SDT_PROBE_DEFINE4(sdt, kernel, intr, return,
    "int (*)(void *)"/*func*/,
    "void *"/*arg*/,
    "struct intrhand *"/*ih*/,
    "int"/*handled*/);

/*
 * Fill in default interrupt table (in case of spurious interrupt
 * during configuration of kernel), setup interrupt control unit
 */
void
intr_default_setup(void)
{
	struct idt_vec *iv = &(cpu_info_primary.ci_idtvec);
	int i;

	/* icu vectors */
	for (i = 0; i < NUM_LEGACY_IRQS; i++) {
		idt_vec_reserve(iv, ICU_OFFSET + i);
		idt_vec_set(iv, ICU_OFFSET + i, legacy_stubs[i].ist_entry);
	}

	/*
	 * Eventually might want to check if it's actually there.
	 */
	i8259_default_setup();

	mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Handle a NMI, possibly a machine check.
 * return true to panic system, false to ignore.
 */
void
x86_nmi(void)
{

	log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
}

/*
 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
 * by MI code and intrctl(8).
 */
const char *
intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf,
    size_t len)
{
	int ih = 0;

#if NPCI > 0
#if defined(__HAVE_PCI_MSI_MSIX)
	if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
		uint64_t pih;
		int dev, vec;

		dev = msipic_get_devid(pic);
		vec = pin;
		pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
		    | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
		    | APIC_INT_VIA_MSI;
		if (pic->pic_type == PIC_MSI)
			MSI_INT_MAKE_MSI(pih);
		else if (pic->pic_type == PIC_MSIX)
			MSI_INT_MAKE_MSIX(pih);

		return x86_pci_msi_string(NULL, pih, buf, len);
	}
#endif /* __HAVE_PCI_MSI_MSIX */
#endif

	if (pic->pic_type == PIC_XEN) {
		ih = pin;	/* Port == pin */
		return xen_intr_string(pin, buf, len, pic);
	}

	/*
	 * If the device is pci, "legacy_irq" is always -1. Least 8 bit of "ih"
	 * is only used in intr_string() to show the irq number.
	 * If the device is "legacy"(such as floppy), it should not use
	 * intr_string().
	 */
	if (pic->pic_type == PIC_I8259) {
		ih = legacy_irq;
		return legacy_intr_string(ih, buf, len, pic);
	}

#if NIOAPIC > 0 || NACPICA > 0
	ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
	    | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
	if (pic->pic_type == PIC_IOAPIC) {
		ih |= APIC_INT_VIA_APIC;
	}
	ih |= pin;
	return intr_string(ih, buf, len);
#endif

	return NULL; /* No pic found! */
}

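A hedged aside on the handle encoding used above: __SHIFTIN() (a NetBSD macro) shifts a value into the field selected by a mask, and intr_create_intrid() packs the APIC id and pin (or MSI device id and vector) into one integer that way. A self-contained sketch with made-up masks and a __SHIFTIN work-alike (GCC/Clang __builtin_ctz assumed):

#include <stdint.h>
#include <stdio.h>

#define DEMO_APIC_MASK	0x0000ff00u	/* hypothetical: APIC id field */
#define DEMO_PIN_MASK	0x000000ffu	/* hypothetical: pin field */

/* Minimal __SHIFTIN work-alike for nonzero constant masks. */
static inline uint32_t
demo_shiftin(uint32_t v, uint32_t mask)
{
	return (v << __builtin_ctz(mask)) & mask;
}

int
main(void)
{
	uint32_t ih = demo_shiftin(2, DEMO_APIC_MASK) |
	    demo_shiftin(9, DEMO_PIN_MASK);

	/* Unpacking reverses the shift: apic=2 pin=9. */
	printf("ih = 0x%08x apic=%u pin=%u\n", ih,
	    (ih & DEMO_APIC_MASK) >> __builtin_ctz(DEMO_APIC_MASK),
	    ih & DEMO_PIN_MASK);
	return 0;
}
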
/*
 * Find intrsource from io_interrupt_sources list.
 */
static struct intrsource *
intr_get_io_intrsource(const char *intrid)
{
	struct intrsource *isp;

	KASSERT(mutex_owned(&cpu_lock));

	SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) {
		KASSERT(isp->is_intrid != NULL);
		if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0)
			return isp;
	}
	return NULL;
}

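intr_get_io_intrsource() above is a linear scan of a queue(3) SIMPLEQ keyed on the intrid string. A self-contained userland equivalent, using NetBSD's <sys/queue.h> (type and field names here are illustrative, not the kernel's):

#include <sys/queue.h>
#include <stdio.h>
#include <string.h>

struct demo_source {
	char id[64];
	SIMPLEQ_ENTRY(demo_source) link;
};

static SIMPLEQ_HEAD(, demo_source) demo_sources =
	SIMPLEQ_HEAD_INITIALIZER(demo_sources);

/* Walk the list until the id matches, as the kernel routine does. */
static struct demo_source *
demo_lookup(const char *id)
{
	struct demo_source *sp;

	SIMPLEQ_FOREACH(sp, &demo_sources, link) {
		if (strcmp(sp->id, id) == 0)
			return sp;
	}
	return NULL;
}

int
main(void)
{
	static struct demo_source a = { .id = "ioapic0 pin 9" };

	SIMPLEQ_INSERT_TAIL(&demo_sources, &a, link);
	printf("%s\n", demo_lookup("ioapic0 pin 9") ? "found" : "missing");
	return 0;
}
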
/*
 * Allocate intrsource and add to io_interrupt_sources list.
 */
struct intrsource *
intr_allocate_io_intrsource(const char *intrid)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct intrsource *isp;
	struct percpu_evcnt *pep;

	KASSERT(mutex_owned(&cpu_lock));

	if (intrid == NULL)
		return NULL;

	isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
	pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP);
	isp->is_saved_evcnt = pep;
	for (CPU_INFO_FOREACH(cii, ci)) {
		pep->cpuid = ci->ci_cpuid;
		pep++;
	}
	strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid));

	SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list);

	return isp;
}

/*
 * Remove from io_interrupt_sources list and free by the intrsource pointer.
 */
static void
intr_free_io_intrsource_direct(struct intrsource *isp)
{
	KASSERT(mutex_owned(&cpu_lock));

	SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list);

	/* Is this interrupt established? */
	if (isp->is_evname[0] != '\0') {
		evcnt_detach(&isp->is_evcnt);
		isp->is_evname[0] = '\0';
	}

	kmem_free(isp->is_saved_evcnt,
	    sizeof(*(isp->is_saved_evcnt)) * ncpu);

	kmem_free(isp, sizeof(*isp));
}

/*
 * Remove from io_interrupt_sources list and free by the interrupt id.
 * This function can be used by MI code.
 */
void
intr_free_io_intrsource(const char *intrid)
{
	struct intrsource *isp;

	KASSERT(mutex_owned(&cpu_lock));

	if (intrid == NULL)
		return;

	if ((isp = intr_get_io_intrsource(intrid)) == NULL) {
		return;
	}

	/* If the interrupt uses shared IRQ, don't free yet. */
	if (isp->is_handlers != NULL) {
		return;
	}

	intr_free_io_intrsource_direct(isp);
}

static int
intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
    int *index, struct intrsource *chained)
{
	int slot, i;
	struct intrsource *isp;

	KASSERT(mutex_owned(&cpu_lock));

	if (pic == &i8259_pic) {
		KASSERT(CPU_IS_PRIMARY(ci));
		slot = pin;
	} else {
		int start = 0;
		int max = MAX_INTR_SOURCES;
		slot = -1;

		/* avoid reserved slots for legacy interrupts. */
		if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic))
			start = NUM_LEGACY_IRQS;
		/* don't step over Xen's slots */
		if (vm_guest == VM_GUEST_XENPVH)
			max = SIR_XENIPL_VM;
		/*
		 * intr_allocate_slot has checked for an existing mapping.
		 * Now look for a free slot.
		 */
		for (i = start; i < max ; i++) {
			if (ci->ci_isources[i] == NULL) {
				slot = i;
				break;
			}
		}
		if (slot == -1) {
			return EBUSY;
		}
	}

	isp = ci->ci_isources[slot];
	if (isp == NULL) {
		const char *via;

		isp = chained;
		KASSERT(isp != NULL);
		if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX)
			via = "vec";
		else
			via = "pin";
		snprintf(isp->is_evname, sizeof (isp->is_evname),
		    "%s %d", via, pin);
		evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, isp->is_evname);
		isp->is_active_cpu = ci->ci_cpuid;
		ci->ci_isources[slot] = isp;
	}

	*index = slot;
	return 0;
}

/*
 * A simple round-robin allocator to assign interrupts to CPUs.
 */
static int __noinline
intr_allocate_slot(struct pic *pic, int pin, int level,
    struct cpu_info **cip, int *index, int *idt_slot,
    struct intrsource *chained)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *lci;
	struct intrsource *isp;
	int slot = 0, idtvec, error;

	KASSERT(mutex_owned(&cpu_lock));

	/* First check if this pin is already used by an interrupt vector. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) {
			if ((isp = ci->ci_isources[slot]) == NULL) {
				continue;
			}
			if (isp->is_pic == pic &&
			    pin != -1 && isp->is_pin == pin) {
				*idt_slot = isp->is_idtvec;
				*index = slot;
				*cip = ci;
				return 0;
			}
		}
	}

	/*
	 * The pic/pin combination doesn't have an existing mapping.
	 * Find a slot for a new interrupt source.  For the i8259 case,
	 * we always use reserved slots of the primary CPU.  Otherwise,
	 * we make an attempt to balance the interrupt load.
	 *
	 * PIC and APIC usage are essentially exclusive, so the reservation
	 * of the ISA slots is ignored when assigning IOAPIC slots.
	 */
	if (pic == &i8259_pic) {
		/*
		 * Must be directed to BP.
		 */
		ci = &cpu_info_primary;
		error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained);
	} else {
		/*
		 * Find least loaded AP/BP and try to allocate there.
		 */
		ci = NULL;
		for (CPU_INFO_FOREACH(cii, lci)) {
			if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
				continue;
			}
#if 0
			if (ci == NULL ||
			    ci->ci_nintrhand > lci->ci_nintrhand) {
				ci = lci;
			}
#else
			ci = &cpu_info_primary;
#endif
		}
		KASSERT(ci != NULL);
		error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained);

		/*
		 * If that did not work, allocate anywhere.
		 */
		if (error != 0) {
			for (CPU_INFO_FOREACH(cii, ci)) {
				if ((ci->ci_schedstate.spc_flags &
				    SPCF_NOINTR) != 0) {
					continue;
				}
				error = intr_allocate_slot_cpu(ci, pic,
				    pin, &slot, chained);
				if (error == 0) {
					break;
				}
			}
		}
	}
	if (error != 0) {
		return error;
	}
	KASSERT(ci != NULL);

	/*
	 * Now allocate an IDT vector.
	 * For the 8259 these are reserved up front.
	 */
	if (pic == &i8259_pic) {
		idtvec = ICU_OFFSET + pin;
	} else {
		/*
		 * TODO to support MSI (not MSI-X) multiple vectors
		 *
		 * PCI Local Bus Specification Revision 3.0 says the devices
		 * which use MSI multiple vectors increment the low order bits
		 * of MSI message data.
		 * On the other hand, Intel SDM "10.11.2 Message Data Register
		 * Format" says the 7:0 bits of MSI message data mean Interrupt
		 * Descriptor Table(IDT) vector.
		 * As the result of these two documents, the IDT vectors which
		 * are used by a device using MSI multiple vectors must be
		 * continuous.
		 */
		struct idt_vec *iv;

		iv = idt_vec_ref(&ci->ci_idtvec);
		idtvec = idt_vec_alloc(iv, APIC_LEVEL(level), IDT_INTR_HIGH);
	}
	if (idtvec < 0) {
		evcnt_detach(&ci->ci_isources[slot]->is_evcnt);
		ci->ci_isources[slot]->is_evname[0] = '\0';
		ci->ci_isources[slot] = NULL;
		return EBUSY;
	}
	ci->ci_isources[slot]->is_idtvec = idtvec;
	*idt_slot = idtvec;
	*index = slot;
	*cip = ci;
	return 0;
}

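The allocation policy of intr_allocate_slot() above, reduced to a hedged userland sketch: reuse an existing pic/pin mapping first, then try the chosen CPU, then fall back to any CPU that accepts interrupts. All names are illustrative; the kernel tracks this state in ci_isources[] and reports EBUSY on failure. Note that the kernel currently short-circuits the least-loaded scan (the #if 0 block) and always starts from the primary CPU.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_NCPU	4
#define DEMO_NSLOT	8

/* 0 = free, otherwise pin + 1 (so pin 0 is representable). */
static int demo_slots[DEMO_NCPU][DEMO_NSLOT];

/* Try one CPU; return the slot or -1 (cf. intr_allocate_slot_cpu/EBUSY). */
static int
demo_alloc_on_cpu(int cpu, int pin)
{
	for (int s = 0; s < DEMO_NSLOT; s++) {
		if (demo_slots[cpu][s] == 0) {
			demo_slots[cpu][s] = pin + 1;
			return s;
		}
	}
	return -1;
}

static bool
demo_alloc(int pin, int preferred, int *cpu, int *slot)
{
	int s;

	/* 1. An existing mapping for this pin wins (shared interrupt). */
	for (int c = 0; c < DEMO_NCPU; c++) {
		for (s = 0; s < DEMO_NSLOT; s++) {
			if (demo_slots[c][s] == pin + 1) {
				*cpu = c;
				*slot = s;
				return true;
			}
		}
	}

	/* 2. Then the chosen (e.g. least loaded) CPU... */
	if ((s = demo_alloc_on_cpu(preferred, pin)) >= 0) {
		*cpu = preferred;
		*slot = s;
		return true;
	}

	/* 3. ...and finally anywhere at all. */
	for (int c = 0; c < DEMO_NCPU; c++) {
		if ((s = demo_alloc_on_cpu(c, pin)) >= 0) {
			*cpu = c;
			*slot = s;
			return true;
		}
	}
	return false;
}

int
main(void)
{
	int cpu, slot;

	if (demo_alloc(9, 1, &cpu, &slot))
		printf("pin 9 -> cpu %d slot %d\n", cpu, slot);
	return 0;
}
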
static void
intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec)
{
	struct intrsource *isp;
	struct idt_vec *iv;

	isp = ci->ci_isources[slot];
	iv = idt_vec_ref(&ci->ci_idtvec);

	if (isp->is_handlers != NULL)
		return;
	ci->ci_isources[slot] = NULL;
	if (pic != &i8259_pic)
		idt_vec_free(iv, idtvec);

	isp->is_recurse = NULL;
	isp->is_resume = NULL;
}

#ifdef MULTIPROCESSOR
static int intr_biglock_wrapper(void *);

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 */

static int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int locks;
	int ret;

	KERNEL_LOCK(1, NULL);

	locks = curcpu()->ci_biglock_count;
	SDT_PROBE3(sdt, kernel, intr, entry,
	    ih->ih_realfun, ih->ih_realarg, ih);
	ret = (*ih->ih_realfun)(ih->ih_realarg);
	SDT_PROBE4(sdt, kernel, intr, return,
	    ih->ih_realfun, ih->ih_realarg, ih, ret);
	KASSERTMSG(locks == curcpu()->ci_biglock_count,
	    "%s @ %p slipped locks %d -> %d",
	    ih->ih_xname, ih->ih_realfun, locks, curcpu()->ci_biglock_count);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */

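A hedged userland model of intr_biglock_wrapper() above: run the real handler under a global lock and check that it did not leak lock state. In the kernel, KERNEL_LOCK() is recursive and counted per CPU; a pthread mutex with an explicit depth counter stands in for it here, and the assert plays the role of the KASSERTMSG.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_biglock = PTHREAD_MUTEX_INITIALIZER;
static int demo_biglock_depth;

struct demo_intrhand {
	int (*ih_realfun)(void *);	/* the real handler */
	void *ih_realarg;		/* its argument */
};

static int
demo_biglock_wrapper(void *vp)
{
	struct demo_intrhand *ih = vp;
	int depth, ret;

	pthread_mutex_lock(&demo_biglock);
	depth = ++demo_biglock_depth;

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	/* cf. KASSERTMSG(locks == curcpu()->ci_biglock_count, ...) */
	assert(depth == demo_biglock_depth);
	demo_biglock_depth--;
	pthread_mutex_unlock(&demo_biglock);
	return ret;
}

static int
demo_handler(void *arg)
{
	printf("handled %s\n", (const char *)arg);
	return 1;
}

int
main(void)
{
	struct demo_intrhand ih = { demo_handler, "irq9" };

	return demo_biglock_wrapper(&ih) == 1 ? 0 : 1;
}
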
#ifdef KDTRACE_HOOKS
static int
intr_kdtrace_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	SDT_PROBE3(sdt, kernel, intr, entry,
	    ih->ih_realfun, ih->ih_realarg, ih);
	ret = (*ih->ih_realfun)(ih->ih_realarg);
	SDT_PROBE4(sdt, kernel, intr, return,
	    ih->ih_realfun, ih->ih_realarg, ih, ret);

	return ret;
}
#endif

/*
 * Append device name to intrsource. If device A and device B share IRQ number,
 * the device name of the interrupt id is "device A, device B".
 */
static void
intr_append_intrsource_xname(struct intrsource *isp, const char *xname)
{

	if (isp->is_xname[0] != '\0')
		strlcat(isp->is_xname, ", ", sizeof(isp->is_xname));
	strlcat(isp->is_xname, xname, sizeof(isp->is_xname));
}

/*
 * Called on bound CPU to handle calling pic_hwunmask from contexts
 * that are not already running on the bound CPU.
 *
 * => caller (on initiating CPU) holds cpu_lock on our behalf
 * => arg1: struct intrhand *ih
 */
static void
intr_hwunmask_xcall(void *arg1, void *arg2)
{
	struct intrhand * const ih = arg1;
	struct cpu_info * const ci = ih->ih_cpu;

	KASSERT(ci == curcpu() || !mp_online);

	const u_long psl = x86_read_psl();
	x86_disable_intr();

	struct intrsource * const source = ci->ci_isources[ih->ih_slot];
	struct pic * const pic = source->is_pic;

	if (source->is_mask_count == 0) {
		(*pic->pic_hwunmask)(pic, ih->ih_pin);
	}

	x86_write_psl(psl);
}

/*
 * Handle per-CPU component of interrupt establish.
 *
 * => caller (on initiating CPU) holds cpu_lock on our behalf
 * => arg1: struct intrhand *ih
 * => arg2: int idt_vec
 */
static void
intr_establish_xcall(void *arg1, void *arg2)
{
	struct idt_vec *iv;
	struct intrsource *source;
	struct intrstub *stubp;
	struct intrhand *ih;
	struct cpu_info *ci;
	int idt_vec;
	u_long psl;

	ih = arg1;

	KASSERT(ih->ih_cpu == curcpu() || !mp_online);

	ci = ih->ih_cpu;
	source = ci->ci_isources[ih->ih_slot];
	idt_vec = (int)(intptr_t)arg2;
	iv = idt_vec_ref(&ci->ci_idtvec);

	/* Disable interrupts locally. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Link in the handler and re-calculate masks. */
	*(ih->ih_prevp) = ih;
	x86_intr_calculatemasks(ci);

	/* Hook in new IDT vector and SPL state. */
	if (source->is_resume == NULL || source->is_idtvec != idt_vec) {
		if (source->is_idtvec != 0 && source->is_idtvec != idt_vec)
			idt_vec_free(iv, source->is_idtvec);
		source->is_idtvec = idt_vec;
		if (source->is_type == IST_LEVEL) {
			stubp = &source->is_pic->pic_level_stubs[ih->ih_slot];
		} else {
			stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot];
		}
		source->is_resume = stubp->ist_resume;
		source->is_recurse = stubp->ist_recurse;
		idt_vec_set(iv, idt_vec, stubp->ist_entry);
	}

	/* Re-enable interrupts locally. */
	x86_write_psl(psl);
}

void *
intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type,
    int level, int (*handler)(void *), void *arg,
    bool known_mpsafe, const char *xname)
{
	struct intrhand **p, *q, *ih;
	struct cpu_info *ci;
	int slot, error, idt_vec;
	struct intrsource *chained, *source;
#ifdef MULTIPROCESSOR
	bool mpsafe = (known_mpsafe || level != IPL_VM);
#endif /* MULTIPROCESSOR */
	uint64_t where;
	const char *intrstr;
	char intrstr_buf[INTRIDBUF];

	KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)),
	    "bad legacy IRQ value: %d", legacy_irq);
	KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic),
	    "non-legacy IRQ on i8259");

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
	    sizeof(intrstr_buf));
	KASSERT(intrstr != NULL);

	mutex_enter(&cpu_lock);

	/* allocate intrsource pool, if not yet. */
	chained = intr_get_io_intrsource(intrstr);
	if (chained == NULL) {
		if (msipic_is_msi_pic(pic)) {
			mutex_exit(&cpu_lock);
			kmem_free(ih, sizeof(*ih));
			printf("%s: %s has no intrsource\n", __func__, intrstr);
			return NULL;
		}
		chained = intr_allocate_io_intrsource(intrstr);
		if (chained == NULL) {
			mutex_exit(&cpu_lock);
			kmem_free(ih, sizeof(*ih));
			printf("%s: can't allocate io_intersource\n", __func__);
			return NULL;
		}
	}

	error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec,
	    chained);
	if (error != 0) {
		intr_free_io_intrsource_direct(chained);
		mutex_exit(&cpu_lock);
		kmem_free(ih, sizeof(*ih));
		printf("failed to allocate interrupt slot for PIC %s pin %d\n",
		    pic->pic_name, pin);
		return NULL;
	}

	source = ci->ci_isources[slot];

	if (source->is_handlers != NULL &&
	    source->is_pic->pic_type != pic->pic_type) {
		intr_free_io_intrsource_direct(chained);
		mutex_exit(&cpu_lock);
		kmem_free(ih, sizeof(*ih));
		printf("%s: can't share intr source between "
		    "different PIC types (legacy_irq %d pin %d slot %d)\n",
		    __func__, legacy_irq, pin, slot);
		return NULL;
	}

	source->is_pin = pin;
	source->is_pic = pic;
	intr_append_intrsource_xname(source, xname);
	switch (source->is_type) {
	case IST_NONE:
		source->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (source->is_type == type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			intr_source_free(ci, slot, pic, idt_vec);
			intr_free_io_intrsource_direct(chained);
			mutex_exit(&cpu_lock);
			kmem_free(ih, sizeof(*ih));
			printf("%s: pic %s pin %d: can't share "
			    "type %d with %d\n",
			    __func__, pic->pic_name, pin,
			    source->is_type, type);
			return NULL;
		}
		break;
	default:
		panic("%s: bad intr type %d for pic %s pin %d\n",
		    __func__, source->is_type, pic->pic_name, pin);
		/* NOTREACHED */
	}

	/*
	 * If the establishing interrupt uses shared IRQ, the interrupt uses
	 * "ci->ci_isources[slot]" instead of allocated by the establishing
	 * device's pci_intr_alloc() or this function.
	 */
	if (source->is_handlers != NULL) {
		struct intrsource *isp, *nisp;

		SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources,
		    is_list, nisp) {
			if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0
			    && isp->is_handlers == NULL)
				intr_free_io_intrsource_direct(isp);
		}
	}

	/*
	 * We're now committed.  Mask the interrupt in hardware and
	 * count it for load distribution.
	 */
	(*pic->pic_hwmask)(pic, pin);
	(ci->ci_nintrhand)++;

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &ci->ci_isources[slot]->is_handlers;
	     (q = *p) != NULL && q->ih_level > level;
	     p = &q->ih_next) {
		/* nothing */;
	}

	ih->ih_pic = pic;
	ih->ih_fun = ih->ih_realfun = handler;
	ih->ih_arg = ih->ih_realarg = arg;
	ih->ih_prevp = p;
	ih->ih_next = *p;
	ih->ih_level = level;
	ih->ih_pin = pin;
	ih->ih_cpu = ci;
	ih->ih_slot = slot;
	strlcpy(ih->ih_xname, xname, sizeof(ih->ih_xname));
#ifdef KDTRACE_HOOKS
	/*
	 * XXX i8254_clockintr is special -- takes a magic extra
	 * argument.  This should be fixed properly in some way that
	 * doesn't involve sketchy function pointer casts.  See also
	 * the comments in x86/isa/clock.c.
	 */
	if (handler != __FPTRCAST(int (*)(void *), i8254_clockintr)) {
		ih->ih_fun = intr_kdtrace_wrapper;
		ih->ih_arg = ih;
	}
#endif
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		KASSERT(handler != /* XXX */
		    __FPTRCAST(int (*)(void *), i8254_clockintr));
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	/*
	 * Call out to the remote CPU to update its interrupt state.
	 * Only make RPCs if the APs are up and running.
	 */
	if (ci == curcpu() || !mp_online) {
		intr_establish_xcall(ih, (void *)(intptr_t)idt_vec);
	} else {
		where = xc_unicast(0, intr_establish_xcall, ih,
		    (void *)(intptr_t)idt_vec, ci);
978 xc_wait(where); 978 xc_wait(where);
979 } 979 }
980 980
981 /* All set up, so add a route for the interrupt and unmask it. */ 981 /* All set up, so add a route for the interrupt and unmask it. */
982 (*pic->pic_addroute)(pic, ci, pin, idt_vec, type); 982 (*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
983 if (ci == curcpu() || !mp_online) { 983 if (ci == curcpu() || !mp_online) {
984 intr_hwunmask_xcall(ih, NULL); 984 intr_hwunmask_xcall(ih, NULL);
985 } else { 985 } else {
986 where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci); 986 where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci);
987 xc_wait(where); 987 xc_wait(where);
988 } 988 }
989 mutex_exit(&cpu_lock); 989 mutex_exit(&cpu_lock);
990 990
991 if (bootverbose || cpu_index(ci) != 0) 991 if (bootverbose || cpu_index(ci) != 0)
992 aprint_verbose("allocated pic %s type %s pin %d level %d to " 992 aprint_verbose("allocated pic %s type %s pin %d level %d to "
993 "%s slot %d idt entry %d\n", 993 "%s slot %d idt entry %d\n",
994 pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, 994 pic->pic_name, type == IST_EDGE ? "edge" : "level", pin,
995 level, device_xname(ci->ci_dev), slot, idt_vec); 995 level, device_xname(ci->ci_dev), slot, idt_vec);
996 996
997 return ih; 997 return ih;
998} 998}
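
A minimal, self-contained sketch (editor's illustration, not part of intr.c) of the pointer-to-pointer insertion idiom the "Figure out where to put the handler" loop above uses to keep the handler chain sorted by descending ih_level; the node/insert_sorted names are invented for the example:

    struct node {
            struct node *next;
            int level;
    };

    /* Insert n before the first entry whose level is <= n->level. */
    static void
    insert_sorted(struct node **head, struct node *n)
    {
            struct node **p, *q;

            for (p = head; (q = *p) != NULL && q->level > n->level;
                p = &q->next)
                    continue;
            n->next = q;    /* q is the first lower-or-equal entry, or NULL */
            *p = n;         /* *p still points at q's slot in the chain */
    }
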
999 999
1000void * 1000void *
1001intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level, 1001intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
1002 int (*handler)(void *), void *arg, bool known_mpsafe) 1002 int (*handler)(void *), void *arg, bool known_mpsafe)
1003{ 1003{
1004 1004
1005 return intr_establish_xname(legacy_irq, pic, pin, type, 1005 return intr_establish_xname(legacy_irq, pic, pin, type,
1006 level, handler, arg, known_mpsafe, "unknown"); 1006 level, handler, arg, known_mpsafe, "unknown");
1007} 1007}
1008 1008
1009/* 1009/*
1010 * Called on bound CPU to handle intr_mask() / intr_unmask(). 1010 * Called on bound CPU to handle intr_mask() / intr_unmask().
1011 * 1011 *
1012 * => caller (on initiating CPU) holds cpu_lock on our behalf 1012 * => caller (on initiating CPU) holds cpu_lock on our behalf
1013 * => arg1: struct intrhand *ih 1013 * => arg1: struct intrhand *ih
1014 * => arg2: true -> mask, false -> unmask. 1014 * => arg2: true -> mask, false -> unmask.
1015 */ 1015 */
1016static void 1016static void
1017intr_mask_xcall(void *arg1, void *arg2) 1017intr_mask_xcall(void *arg1, void *arg2)
1018{ 1018{
1019 struct intrhand * const ih = arg1; 1019 struct intrhand * const ih = arg1;
1020 const uintptr_t mask = (uintptr_t)arg2; 1020 const uintptr_t mask = (uintptr_t)arg2;
1021 struct cpu_info * const ci = ih->ih_cpu; 1021 struct cpu_info * const ci = ih->ih_cpu;
1022 bool force_pending = false; 1022 bool force_pending = false;
1023 1023
1024 KASSERT(ci == curcpu() || !mp_online); 1024 KASSERT(ci == curcpu() || !mp_online);
1025 1025
1026 /* 1026 /*
1027 * We need to disable interrupts to hold off the interrupt 1027 * We need to disable interrupts to hold off the interrupt
1028 * vectors. 1028 * vectors.
1029 */ 1029 */
1030 const u_long psl = x86_read_psl(); 1030 const u_long psl = x86_read_psl();
1031 x86_disable_intr(); 1031 x86_disable_intr();
1032 1032
1033 struct intrsource * const source = ci->ci_isources[ih->ih_slot]; 1033 struct intrsource * const source = ci->ci_isources[ih->ih_slot];
1034 struct pic * const pic = source->is_pic; 1034 struct pic * const pic = source->is_pic;
1035 1035
1036 if (mask) { 1036 if (mask) {
1037 source->is_mask_count++; 1037 source->is_mask_count++;
1038 KASSERT(source->is_mask_count != 0); 1038 KASSERT(source->is_mask_count != 0);
1039 if (source->is_mask_count == 1) { 1039 if (source->is_mask_count == 1) {
1040 (*pic->pic_hwmask)(pic, ih->ih_pin); 1040 (*pic->pic_hwmask)(pic, ih->ih_pin);
1041 } 1041 }
1042 } else { 1042 } else {
1043 KASSERT(source->is_mask_count != 0); 1043 KASSERT(source->is_mask_count != 0);
1044 if (--source->is_mask_count == 0) { 1044 if (--source->is_mask_count == 0) {
1045 /* 1045 /*
1046 * If this interrupt source is being moved, don't 1046 * If this interrupt source is being moved, don't
1047 * unmask it at the hw. 1047 * unmask it at the hw.
1048 */ 1048 */
1049 if (! source->is_distribute_pending) { 1049 if (! source->is_distribute_pending) {
1050 (*pic->pic_hwunmask)(pic, ih->ih_pin); 1050 (*pic->pic_hwunmask)(pic, ih->ih_pin);
1051 } 1051 }
1052 1052
1053 /* 1053 /*
1054 * For level-sensitive interrupts, the hardware 1054 * For level-sensitive interrupts, the hardware
1055 * will let us know. For everything else, we 1055 * will let us know. For everything else, we
1056 * need to explicitly handle interrupts that 1056 * need to explicitly handle interrupts that
1057 * arrived while the source was masked. 1057 * arrived while the source was masked.
1058 */ 1058 */
1059 const uint64_t bit = (1ULL << ih->ih_slot); 1059 const uint64_t bit = (1ULL << ih->ih_slot);
1060 if (ci->ci_imasked & bit) { 1060 if (ci->ci_imasked & bit) {
1061 ci->ci_imasked &= ~bit; 1061 ci->ci_imasked &= ~bit;
1062 if (source->is_type != IST_LEVEL) { 1062 if (source->is_type != IST_LEVEL) {
1063 ci->ci_ipending |= bit; 1063 ci->ci_ipending |= bit;
1064 force_pending = true; 1064 force_pending = true;
1065 } 1065 }
1066 } 1066 }
1067 } 1067 }
1068 } 1068 }
1069 1069
1070 /* Re-enable interrupts. */ 1070 /* Re-enable interrupts. */
1071 x86_write_psl(psl); 1071 x86_write_psl(psl);
1072 1072
1073 if (force_pending) { 1073 if (force_pending) {
1074 /* Force processing of any pending interrupts. */ 1074 /* Force processing of any pending interrupts. */
1075 splx(splhigh()); 1075 splx(splhigh());
1076 } 1076 }
1077} 1077}
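
A stripped-down sketch of the is_mask_count nesting implemented above, with hypothetical hw_mask()/hw_unmask() standing in for the pic_hwmask/pic_hwunmask hooks: only the 0 -> 1 and 1 -> 0 transitions touch the hardware, so nested mask/unmask pairs balance out.

    static unsigned int mask_count;     /* models source->is_mask_count */

    static void hw_mask(void)   { /* stands in for (*pic->pic_hwmask)() */ }
    static void hw_unmask(void) { /* stands in for (*pic->pic_hwunmask)() */ }

    static void
    mask_nested(void)
    {
            if (mask_count++ == 0)
                    hw_mask();      /* first masker masks the hardware */
    }

    static void
    unmask_nested(void)
    {
            if (--mask_count == 0)
                    hw_unmask();    /* last unmasker unmasks it again */
    }
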
1078 1078
1079static void 1079static void
1080intr_mask_internal(struct intrhand * const ih, const bool mask) 1080intr_mask_internal(struct intrhand * const ih, const bool mask)
1081{ 1081{
1082 1082
1083 /* 1083 /*
1084 * Call out to the remote CPU to update its interrupt state. 1084 * Call out to the remote CPU to update its interrupt state.
1085 * Only make RPCs if the APs are up and running. 1085 * Only make RPCs if the APs are up and running.
1086 */ 1086 */
1087 mutex_enter(&cpu_lock); 1087 mutex_enter(&cpu_lock);
1088 struct cpu_info * const ci = ih->ih_cpu; 1088 struct cpu_info * const ci = ih->ih_cpu;
1089 void * const mask_arg = (void *)(uintptr_t)mask; 1089 void * const mask_arg = (void *)(uintptr_t)mask;
1090 if (ci == curcpu() || !mp_online) { 1090 if (ci == curcpu() || !mp_online) {
1091 intr_mask_xcall(ih, mask_arg); 1091 intr_mask_xcall(ih, mask_arg);
1092 } else { 1092 } else {
1093 const uint64_t where = 1093 const uint64_t where =
1094 xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci); 1094 xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
1095 xc_wait(where); 1095 xc_wait(where);
1096 } 1096 }
1097 mutex_exit(&cpu_lock); 1097 mutex_exit(&cpu_lock);
1098} 1098}
1099 1099
1100void 1100void
1101intr_mask(struct intrhand *ih) 1101intr_mask(struct intrhand *ih)
1102{ 1102{
1103 1103
1104 if (cpu_intr_p()) { 1104 if (cpu_intr_p()) {
1105 /* 1105 /*
1106 * Special case of calling intr_mask() from an interrupt 1106 * Special case of calling intr_mask() from an interrupt
1107 * handler: we MUST be called from the bound CPU for this 1107 * handler: we MUST be called from the bound CPU for this
1108 * interrupt (presumably from a handler we're about to 1108 * interrupt (presumably from a handler we're about to
1109 * mask). 1109 * mask).
1110 * 1110 *
1111 * We can't take the cpu_lock in this case, and we must 1111 * We can't take the cpu_lock in this case, and we must
1112 * therefore be extra careful. 1112 * therefore be extra careful.
1113 */ 1113 */
1114 KASSERT(ih->ih_cpu == curcpu() || !mp_online); 1114 KASSERT(ih->ih_cpu == curcpu() || !mp_online);
1115 intr_mask_xcall(ih, (void *)(uintptr_t)true); 1115 intr_mask_xcall(ih, (void *)(uintptr_t)true);
1116 return; 1116 return;
1117 } 1117 }
1118 1118
1119 intr_mask_internal(ih, true); 1119 intr_mask_internal(ih, true);
1120} 1120}
1121 1121
1122void 1122void
1123intr_unmask(struct intrhand *ih) 1123intr_unmask(struct intrhand *ih)
1124{ 1124{
1125 1125
1126 /* 1126 /*
1127 * This is not safe to call from an interrupt context because 1127 * This is not safe to call from an interrupt context because
1128 * we don't want to accidentally unmask an interrupt source 1128 * we don't want to accidentally unmask an interrupt source
1129 * that's masked because it's being serviced. 1129 * that's masked because it's being serviced.
1130 */ 1130 */
1131 KASSERT(!cpu_intr_p()); 1131 KASSERT(!cpu_intr_p());
1132 intr_mask_internal(ih, false); 1132 intr_mask_internal(ih, false);
1133} 1133}
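
A hypothetical usage sketch for the intr_mask()/intr_unmask() pair: a driver masks its source from the handler (legal, since the handler runs on the bound CPU) and unmasks from soft interrupt context once the work is drained. The mydrv_* names and softc layout are invented for illustration:

    struct mydrv_softc {
            struct intrhand *sc_ih;     /* from intr_establish_xname() */
            void *sc_sih;               /* from softint_establish() */
    };

    static int
    mydrv_intr(void *arg)
    {
            struct mydrv_softc *sc = arg;

            intr_mask(sc->sc_ih);       /* ok: we run on the bound CPU */
            softint_schedule(sc->sc_sih);
            return 1;
    }

    static void
    mydrv_softintr(void *arg)
    {
            struct mydrv_softc *sc = arg;

            /* ... drain the device ... */
            intr_unmask(sc->sc_ih);     /* never from interrupt context */
    }
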
1134 1134
1135/* 1135/*
1136 * Called on bound CPU to handle intr_disestablish(). 1136 * Called on bound CPU to handle intr_disestablish().
1137 * 1137 *
1138 * => caller (on initiating CPU) holds cpu_lock on our behalf 1138 * => caller (on initiating CPU) holds cpu_lock on our behalf
1139 * => arg1: struct intrhand *ih 1139 * => arg1: struct intrhand *ih
1140 * => arg2: unused 1140 * => arg2: unused
1141 */ 1141 */
1142static void 1142static void
1143intr_disestablish_xcall(void *arg1, void *arg2) 1143intr_disestablish_xcall(void *arg1, void *arg2)
1144{ 1144{
1145 struct intrhand **p, *q; 1145 struct intrhand **p, *q;
1146 struct cpu_info *ci; 1146 struct cpu_info *ci;
1147 struct pic *pic; 1147 struct pic *pic;
1148 struct intrsource *source; 1148 struct intrsource *source;
1149 struct intrhand *ih; 1149 struct intrhand *ih;
1150 u_long psl; 1150 u_long psl;
1151 int idtvec; 1151 int idtvec;
1152 1152
1153 ih = arg1; 1153 ih = arg1;
1154 ci = ih->ih_cpu; 1154 ci = ih->ih_cpu;
1155 1155
1156 KASSERT(ci == curcpu() || !mp_online); 1156 KASSERT(ci == curcpu() || !mp_online);
1157 1157
1158 /* Disable interrupts locally. */ 1158 /* Disable interrupts locally. */
1159 psl = x86_read_psl(); 1159 psl = x86_read_psl();
1160 x86_disable_intr(); 1160 x86_disable_intr();
1161 1161
1162 pic = ci->ci_isources[ih->ih_slot]->is_pic; 1162 pic = ci->ci_isources[ih->ih_slot]->is_pic;
1163 source = ci->ci_isources[ih->ih_slot]; 1163 source = ci->ci_isources[ih->ih_slot];
1164 idtvec = source->is_idtvec; 1164 idtvec = source->is_idtvec;
1165 1165
1166 (*pic->pic_hwmask)(pic, ih->ih_pin); 1166 (*pic->pic_hwmask)(pic, ih->ih_pin);
1167 membar_sync(); 1167
 1168 /*
 1169 * ci_ipending is stable on the current CPU while interrupts are
 1170 * blocked, and we only need to synchronize with interrupt
 1171 * vectors on the same CPU, so no need for atomics or membars.
 1172 */
1168 ci->ci_ipending &= ~(1ULL << ih->ih_slot); 1173 ci->ci_ipending &= ~(1ULL << ih->ih_slot);
1169 membar_sync(); 
1170 1174
1171 /* 1175 /*
1172 * Remove the handler from the chain. 1176 * Remove the handler from the chain.
1173 */ 1177 */
1174 for (p = &source->is_handlers; (q = *p) != NULL && q != ih; 1178 for (p = &source->is_handlers; (q = *p) != NULL && q != ih;
1175 p = &q->ih_next) 1179 p = &q->ih_next)
1176 ; 1180 ;
1177 if (q == NULL) { 1181 if (q == NULL) {
1178 x86_write_psl(psl); 1182 x86_write_psl(psl);
1179 panic("%s: handler not registered", __func__); 1183 panic("%s: handler not registered", __func__);
1180 /* NOTREACHED */ 1184 /* NOTREACHED */
1181 } 1185 }
1182 1186
1183 *p = q->ih_next; 1187 *p = q->ih_next;
1184 1188
1185 x86_intr_calculatemasks(ci); 1189 x86_intr_calculatemasks(ci);
1186 /* 1190 /*
1187 * If no handlers remain, 1) do delroute, because the slot no longer 1191 * If no handlers remain, 1) do delroute, because the slot no longer
1188 * has a source, and 2) don't hwunmask, to avoid spurious interrupts. 1192 * has a source, and 2) don't hwunmask, to avoid spurious interrupts.
1189 * 1193 *
1190 * If any handler remains, 1) don't delroute, because the slot still 1194 * If any handler remains, 1) don't delroute, because the slot still
1191 * has a source, and 2) do hwunmask, so the interrupt can fire again. 1195 * has a source, and 2) do hwunmask, so the interrupt can fire again.
1192 * 1196 *
1193 */ 1197 */
1194 if (source->is_handlers == NULL) 1198 if (source->is_handlers == NULL)
1195 (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, 1199 (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec,
1196 source->is_type); 1200 source->is_type);
1197 else if (source->is_mask_count == 0) 1201 else if (source->is_mask_count == 0)
1198 (*pic->pic_hwunmask)(pic, ih->ih_pin); 1202 (*pic->pic_hwunmask)(pic, ih->ih_pin);
1199 1203
1200 /* If the source is free we can drop it now. */ 1204 /* If the source is free we can drop it now. */
1201 intr_source_free(ci, ih->ih_slot, pic, idtvec); 1205 intr_source_free(ci, ih->ih_slot, pic, idtvec);
1202 1206
1203 /* Re-enable interrupts. */ 1207 /* Re-enable interrupts. */
1204 x86_write_psl(psl); 1208 x86_write_psl(psl);
1205 1209
1206 DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n", 1210 DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n",
1207 device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name, 1211 device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name,
1208 ih->ih_pin, idtvec)); 1212 ih->ih_pin, idtvec));
1209} 1213}
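
A small sketch of the invariant behind the two membar_sync() removals above: a per-CPU word updated with interrupts disabled cannot race with that CPU's own interrupt vectors, so plain loads and stores suffice. percpu_word is an illustrative stand-in for ci_ipending, not a real kernel symbol:

    static volatile uint64_t percpu_word;   /* models ci->ci_ipending */

    static void
    clear_slot(int slot)
    {
            const u_long psl = x86_read_psl();

            x86_disable_intr();             /* our vectors cannot run here */
            percpu_word &= ~(1ULL << slot); /* no atomics or membars needed */
            x86_write_psl(psl);             /* restore the interrupt state */
    }
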
1210 1214
1211static int 1215static int
1212intr_num_handlers(struct intrsource *isp) 1216intr_num_handlers(struct intrsource *isp)
1213{ 1217{
1214 struct intrhand *ih; 1218 struct intrhand *ih;
1215 int num; 1219 int num;
1216 1220
1217 num = 0; 1221 num = 0;
1218 for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) 1222 for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next)
1219 num++; 1223 num++;
1220 1224
1221 return num; 1225 return num;
1222} 1226}
1223 1227
1224/* 1228/*
1225 * Deregister an interrupt handler. 1229 * Deregister an interrupt handler.
1226 */ 1230 */
1227void 1231void
1228intr_disestablish(struct intrhand *ih) 1232intr_disestablish(struct intrhand *ih)
1229{ 1233{
1230 struct cpu_info *ci; 1234 struct cpu_info *ci;
1231 struct intrsource *isp; 1235 struct intrsource *isp;
1232 uint64_t where; 1236 uint64_t where;
1233 1237
1234 /* 1238 /*
1235 * Count the removal for load balancing. 1239 * Count the removal for load balancing.
1236 * Call out to the remote CPU to update its interrupt state. 1240 * Call out to the remote CPU to update its interrupt state.
1237 * Only make RPCs if the APs are up and running. 1241 * Only make RPCs if the APs are up and running.
1238 */ 1242 */
1239 mutex_enter(&cpu_lock); 1243 mutex_enter(&cpu_lock);
1240 ci = ih->ih_cpu; 1244 ci = ih->ih_cpu;
1241 (ci->ci_nintrhand)--; 1245 (ci->ci_nintrhand)--;
1242 KASSERT(ci->ci_nintrhand >= 0); 1246 KASSERT(ci->ci_nintrhand >= 0);
1243 isp = ci->ci_isources[ih->ih_slot]; 1247 isp = ci->ci_isources[ih->ih_slot];
1244 if (ci == curcpu() || !mp_online) { 1248 if (ci == curcpu() || !mp_online) {
1245 intr_disestablish_xcall(ih, NULL); 1249 intr_disestablish_xcall(ih, NULL);
1246 } else { 1250 } else {
1247 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); 1251 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci);
1248 xc_wait(where); 1252 xc_wait(where);
1249 } 1253 }
1250 if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { 1254 if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) {
1251 intr_free_io_intrsource_direct(isp); 1255 intr_free_io_intrsource_direct(isp);
1252 } 1256 }
1253 mutex_exit(&cpu_lock); 1257 mutex_exit(&cpu_lock);
1254 kmem_free(ih, sizeof(*ih)); 1258 kmem_free(ih, sizeof(*ih));
1255} 1259}
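
The run-locally-or-cross-call dispatch above recurs throughout this file (establish, mask/unmask, disestablish, affinity changes). A condensed sketch of the idiom, with run_on_cpu as an invented name; xc_unicast()/xc_wait() are the real xcall(9) interfaces:

    static void
    run_on_cpu(struct cpu_info *ci, xcfunc_t fn, void *arg1, void *arg2)
    {
            if (ci == curcpu() || !mp_online) {
                    (*fn)(arg1, arg2);      /* already on the target CPU */
            } else {
                    const uint64_t where = xc_unicast(0, fn, arg1, arg2, ci);
                    xc_wait(where);         /* synchronous remote call */
            }
    }
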
1256 1260
1257static const char * 1261static const char *
1258xen_intr_string(int port, char *buf, size_t len, struct pic *pic) 1262xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
1259{ 1263{
1260 KASSERT(pic->pic_type == PIC_XEN); 1264 KASSERT(pic->pic_type == PIC_XEN);
1261 1265
1262 KASSERT(port >= 0); 1266 KASSERT(port >= 0);
1263 1267
1264 snprintf(buf, len, "%s chan %d", pic->pic_name, port); 1268 snprintf(buf, len, "%s chan %d", pic->pic_name, port);
1265 1269
1266 return buf; 1270 return buf;
1267} 1271}
1268 1272
1269static const char * 1273static const char *
1270legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) 1274legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
1271{ 1275{
1272 int legacy_irq; 1276 int legacy_irq;
1273 1277
1274 KASSERT(pic->pic_type == PIC_I8259); 1278 KASSERT(pic->pic_type == PIC_I8259);
1275#if NLAPIC > 0 1279#if NLAPIC > 0
1276 KASSERT(APIC_IRQ_ISLEGACY(ih)); 1280 KASSERT(APIC_IRQ_ISLEGACY(ih));
1277 1281
1278 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); 1282 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
1279#else 1283#else
1280 legacy_irq = ih; 1284 legacy_irq = ih;
1281#endif 1285#endif
1282 KASSERT(legacy_irq >= 0 && legacy_irq < 16); 1286 KASSERT(legacy_irq >= 0 && legacy_irq < 16);
1283 1287
1284 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); 1288 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
1285 1289
1286 return buf; 1290 return buf;
1287} 1291}
1288 1292
1289const char * 1293const char *
1290intr_string(intr_handle_t ih, char *buf, size_t len) 1294intr_string(intr_handle_t ih, char *buf, size_t len)
1291{ 1295{
1292#if NIOAPIC > 0 1296#if NIOAPIC > 0
1293 struct ioapic_softc *pic; 1297 struct ioapic_softc *pic;
1294#endif 1298#endif
1295 1299
1296 if (ih == 0) 1300 if (ih == 0)
1297 panic("%s: bogus handle 0x%" PRIx64, __func__, ih); 1301 panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
1298 1302
1299#if NIOAPIC > 0 1303#if NIOAPIC > 0
1300 if (ih & APIC_INT_VIA_APIC) { 1304 if (ih & APIC_INT_VIA_APIC) {
1301 pic = ioapic_find(APIC_IRQ_APIC(ih)); 1305 pic = ioapic_find(APIC_IRQ_APIC(ih));
1302 if (pic != NULL) { 1306 if (pic != NULL) {
1303 snprintf(buf, len, "%s pin %d", 1307 snprintf(buf, len, "%s pin %d",
1304 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); 1308 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
1305 } else { 1309 } else {
1306 snprintf(buf, len, 1310 snprintf(buf, len,
1307 "apic %d int %d (irq %d)", 1311 "apic %d int %d (irq %d)",
1308 APIC_IRQ_APIC(ih), 1312 APIC_IRQ_APIC(ih),
1309 APIC_IRQ_PIN(ih), 1313 APIC_IRQ_PIN(ih),
1310 APIC_IRQ_LEGACY_IRQ(ih)); 1314 APIC_IRQ_LEGACY_IRQ(ih));
1311 } 1315 }
1312 } else 1316 } else
1313 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); 1317 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
1314 1318
1315#elif NLAPIC > 0 1319#elif NLAPIC > 0
1316 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); 1320 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
1317#else 1321#else
1318 snprintf(buf, len, "irq %d", (int) ih); 1322 snprintf(buf, len, "irq %d", (int) ih);
1319#endif 1323#endif
1320 return buf; 1324 return buf;
1321 1325
1322} 1326}
1323 1327
1324/* 1328/*
1325 * Fake interrupt handler structures for the benefit of symmetry with 1329 * Fake interrupt handler structures for the benefit of symmetry with
1326 * other interrupt sources, and the benefit of x86_intr_calculatemasks() 1330 * other interrupt sources, and the benefit of x86_intr_calculatemasks()
1327 */ 1331 */
1328struct intrhand fake_timer_intrhand; 1332struct intrhand fake_timer_intrhand;
1329struct intrhand fake_ipi_intrhand; 1333struct intrhand fake_ipi_intrhand;
1330#if NHYPERV > 0 1334#if NHYPERV > 0
1331struct intrhand fake_hyperv_intrhand; 1335struct intrhand fake_hyperv_intrhand;
1332#endif 1336#endif
1333 1337
1334#if NLAPIC > 0 && defined(MULTIPROCESSOR) 1338#if NLAPIC > 0 && defined(MULTIPROCESSOR)
1335static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES; 1339static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES;
1336#endif 1340#endif
1337 1341
1338#if defined(INTRSTACKSIZE) 1342#if defined(INTRSTACKSIZE)
1339static inline bool 1343static inline bool
1340redzone_const_or_false(bool x) 1344redzone_const_or_false(bool x)
1341{ 1345{
1342#ifdef DIAGNOSTIC 1346#ifdef DIAGNOSTIC
1343 return x; 1347 return x;
1344#else 1348#else
1345 return false; 1349 return false;
1346#endif /* !DIAGNOSTIC */ 1350#endif /* !DIAGNOSTIC */
1347} 1351}
1348 1352
1349static inline int 1353static inline int
1350redzone_const_or_zero(int x) 1354redzone_const_or_zero(int x)
1351{ 1355{
1352 return redzone_const_or_false(true) ? x : 0; 1356 return redzone_const_or_false(true) ? x : 0;
1353} 1357}
1354#endif 1358#endif
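
Illustrative stack layout for the DIAGNOSTIC case, as set up by cpu_intr_init() below; on non-DIAGNOSTIC kernels redzone_const_or_zero() folds to 0 at compile time and both guard pages disappear:

    /*
     * istack                                              allocation end
     * +------------+-----------------------------------+------------+
     * | guard page |      INTRSTACKSIZE usable stack    | guard page |
     * | (unmapped) |   ci_intrstack set near the top ^  | (unmapped) |
     * +------------+-----------------------------------+------------+
     */
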
1355 1359
1356/* 1360/*
1357 * Initialize all handlers that aren't dynamically allocated, and exist 1361 * Initialize all handlers that aren't dynamically allocated, and exist
1358 * for each CPU. 1362 * for each CPU.
1359 */ 1363 */
1360void 1364void
1361cpu_intr_init(struct cpu_info *ci) 1365cpu_intr_init(struct cpu_info *ci)
1362{ 1366{
1363#if (NLAPIC > 0) || defined(MULTIPROCESSOR) || \ 1367#if (NLAPIC > 0) || defined(MULTIPROCESSOR) || \
1364 (NHYPERV > 0) 1368 (NHYPERV > 0)
1365 struct intrsource *isp; 1369 struct intrsource *isp;
1366#endif 1370#endif
1367#if NLAPIC > 0 1371#if NLAPIC > 0
1368 static int first = 1; 1372 static int first = 1;
1369#if defined(MULTIPROCESSOR) 1373#if defined(MULTIPROCESSOR)
1370 int i; 1374 int i;
1371#endif 1375#endif
1372#endif 1376#endif
1373 1377
1374#if NLAPIC > 0 1378#if NLAPIC > 0
1375 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1379 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1376 isp->is_recurse = Xrecurse_lapic_ltimer; 1380 isp->is_recurse = Xrecurse_lapic_ltimer;
1377 isp->is_resume = Xresume_lapic_ltimer; 1381 isp->is_resume = Xresume_lapic_ltimer;
1378 fake_timer_intrhand.ih_pic = &local_pic; 1382 fake_timer_intrhand.ih_pic = &local_pic;
1379 fake_timer_intrhand.ih_level = IPL_CLOCK; 1383 fake_timer_intrhand.ih_level = IPL_CLOCK;
1380 isp->is_handlers = &fake_timer_intrhand; 1384 isp->is_handlers = &fake_timer_intrhand;
1381 isp->is_pic = &local_pic; 1385 isp->is_pic = &local_pic;
1382 ci->ci_isources[LIR_TIMER] = isp; 1386 ci->ci_isources[LIR_TIMER] = isp;
1383 evcnt_attach_dynamic(&isp->is_evcnt, 1387 evcnt_attach_dynamic(&isp->is_evcnt,
1384 first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL, 1388 first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL,
1385 device_xname(ci->ci_dev), "timer"); 1389 device_xname(ci->ci_dev), "timer");
1386 first = 0; 1390 first = 0;
1387 1391
1388#ifdef MULTIPROCESSOR 1392#ifdef MULTIPROCESSOR
1389 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1393 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1390 isp->is_recurse = Xrecurse_lapic_ipi; 1394 isp->is_recurse = Xrecurse_lapic_ipi;
1391 isp->is_resume = Xresume_lapic_ipi; 1395 isp->is_resume = Xresume_lapic_ipi;
1392 fake_ipi_intrhand.ih_pic = &local_pic; 1396 fake_ipi_intrhand.ih_pic = &local_pic;
1393 fake_ipi_intrhand.ih_level = IPL_HIGH; 1397 fake_ipi_intrhand.ih_level = IPL_HIGH;
1394 isp->is_handlers = &fake_ipi_intrhand; 1398 isp->is_handlers = &fake_ipi_intrhand;
1395 isp->is_pic = &local_pic; 1399 isp->is_pic = &local_pic;
1396 ci->ci_isources[LIR_IPI] = isp; 1400 ci->ci_isources[LIR_IPI] = isp;
1397 1401
1398 for (i = 0; i < X86_NIPI; i++) 1402 for (i = 0; i < X86_NIPI; i++)
1399 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, 1403 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
1400 NULL, device_xname(ci->ci_dev), x86_ipi_names[i]); 1404 NULL, device_xname(ci->ci_dev), x86_ipi_names[i]);
1401#endif 1405#endif
1402 1406
1403#if NHYPERV > 0 1407#if NHYPERV > 0
1404 if (hyperv_hypercall_enabled()) { 1408 if (hyperv_hypercall_enabled()) {
1405 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1409 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1406 isp->is_recurse = Xrecurse_hyperv_hypercall; 1410 isp->is_recurse = Xrecurse_hyperv_hypercall;
1407 isp->is_resume = Xresume_hyperv_hypercall; 1411 isp->is_resume = Xresume_hyperv_hypercall;
1408 fake_hyperv_intrhand.ih_level = IPL_NET; 1412 fake_hyperv_intrhand.ih_level = IPL_NET;
1409 isp->is_handlers = &fake_hyperv_intrhand; 1413 isp->is_handlers = &fake_hyperv_intrhand;
1410 isp->is_pic = &local_pic; 1414 isp->is_pic = &local_pic;
1411 ci->ci_isources[LIR_HV] = isp; 1415 ci->ci_isources[LIR_HV] = isp;
1412 evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, 1416 evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
1413 device_xname(ci->ci_dev), "Hyper-V hypercall"); 1417 device_xname(ci->ci_dev), "Hyper-V hypercall");
1414 } 1418 }
1415#endif 1419#endif
1416#endif 1420#endif
1417 1421
1418#if defined(__HAVE_PREEMPTION) 1422#if defined(__HAVE_PREEMPTION)
1419 x86_init_preempt(ci); 1423 x86_init_preempt(ci);
1420 1424
1421#endif 1425#endif
1422 x86_intr_calculatemasks(ci); 1426 x86_intr_calculatemasks(ci);
1423 1427
1424#if defined(INTRSTACKSIZE) 1428#if defined(INTRSTACKSIZE)
1425 vaddr_t istack; 1429 vaddr_t istack;
1426 1430
1427 /* 1431 /*
1428 * If the red zone is activated, protect both the top and 1432 * If the red zone is activated, protect both the top and
1429 * the bottom of the stack with an unmapped page. 1433 * the bottom of the stack with an unmapped page.
1430 */ 1434 */
1431 istack = uvm_km_alloc(kernel_map, 1435 istack = uvm_km_alloc(kernel_map,
1432 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, 1436 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
1433 UVM_KMF_WIRED | UVM_KMF_ZERO); 1437 UVM_KMF_WIRED | UVM_KMF_ZERO);
1434 if (redzone_const_or_false(true)) { 1438 if (redzone_const_or_false(true)) {
1435 pmap_kremove(istack, PAGE_SIZE); 1439 pmap_kremove(istack, PAGE_SIZE);
1436 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); 1440 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
1437 pmap_update(pmap_kernel()); 1441 pmap_update(pmap_kernel());
1438 } 1442 }
1439 1443
1440 /* 1444 /*
1441 * 33 used to be 1. Arbitrarily reserve 32 more register_t's 1445 * 33 used to be 1. Arbitrarily reserve 32 more register_t's
1442 * of space for ddb(4) to examine some subroutine arguments 1446 * of space for ddb(4) to examine some subroutine arguments
1443 * and to hunt for the next stack frame. 1447 * and to hunt for the next stack frame.
1444 */ 1448 */
1445 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) + 1449 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
1446 INTRSTACKSIZE - 33 * sizeof(register_t); 1450 INTRSTACKSIZE - 33 * sizeof(register_t);
1447#endif 1451#endif
1448 1452
1449 ci->ci_idepth = -1; 1453 ci->ci_idepth = -1;
1450} 1454}
1451 1455
1452#if defined(INTRDEBUG) || defined(DDB) 1456#if defined(INTRDEBUG) || defined(DDB)
1453 1457
1454void 1458void
1455intr_printconfig(void) 1459intr_printconfig(void)
1456{ 1460{
1457 int i; 1461 int i;
1458 struct intrhand *ih; 1462 struct intrhand *ih;
1459 struct intrsource *isp; 1463 struct intrsource *isp;
1460 struct cpu_info *ci; 1464 struct cpu_info *ci;
1461 CPU_INFO_ITERATOR cii; 1465 CPU_INFO_ITERATOR cii;
1462 void (*pr)(const char *, ...); 1466 void (*pr)(const char *, ...);
1463 1467
1464 pr = printf; 1468 pr = printf;
1465#ifdef DDB 1469#ifdef DDB
1466 if (db_active) { 1470 if (db_active) {
1467 pr = db_printf; 1471 pr = db_printf;
1468 } 1472 }
1469#endif 1473#endif
1470 1474
1471 for (CPU_INFO_FOREACH(cii, ci)) { 1475 for (CPU_INFO_FOREACH(cii, ci)) {
1472 (*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev)); 1476 (*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev));
1473 for (i = 0; i < NIPL; i++) 1477 for (i = 0; i < NIPL; i++)
1474 (*pr)("IPL %d mask %016"PRIx64" unmask %016"PRIx64"\n", 1478 (*pr)("IPL %d mask %016"PRIx64" unmask %016"PRIx64"\n",
1475 i, ci->ci_imask[i], ci->ci_iunmask[i]); 1479 i, ci->ci_imask[i], ci->ci_iunmask[i]);
1476 for (i = 0; i < MAX_INTR_SOURCES; i++) { 1480 for (i = 0; i < MAX_INTR_SOURCES; i++) {
1477 isp = ci->ci_isources[i]; 1481 isp = ci->ci_isources[i];
1478 if (isp == NULL) 1482 if (isp == NULL)
1479 continue; 1483 continue;
1480 (*pr)("%s source %d is pin %d from pic %s type %d " 1484 (*pr)("%s source %d is pin %d from pic %s type %d "
1481 "maxlevel %d\n", device_xname(ci->ci_dev), i, 1485 "maxlevel %d\n", device_xname(ci->ci_dev), i,
1482 isp->is_pin, isp->is_pic->pic_name, isp->is_type, 1486 isp->is_pin, isp->is_pic->pic_name, isp->is_type,
1483 isp->is_maxlevel); 1487 isp->is_maxlevel);
1484 for (ih = isp->is_handlers; ih != NULL; 1488 for (ih = isp->is_handlers; ih != NULL;
1485 ih = ih->ih_next) 1489 ih = ih->ih_next)
1486 (*pr)("\thandler %p level %d\n", 1490 (*pr)("\thandler %p level %d\n",
1487 ih->ih_fun, ih->ih_level); 1491 ih->ih_fun, ih->ih_level);
1488#if NIOAPIC > 0 1492#if NIOAPIC > 0
1489 if (isp->is_pic->pic_type == PIC_IOAPIC) { 1493 if (isp->is_pic->pic_type == PIC_IOAPIC) {
1490 struct ioapic_softc *sc; 1494 struct ioapic_softc *sc;
1491 sc = isp->is_pic->pic_ioapic; 1495 sc = isp->is_pic->pic_ioapic;
1492 (*pr)("\tioapic redir 0x%x\n", 1496 (*pr)("\tioapic redir 0x%x\n",
1493 sc->sc_pins[isp->is_pin].ip_map->redir); 1497 sc->sc_pins[isp->is_pin].ip_map->redir);
1494 } 1498 }
1495#endif 1499#endif
1496 1500
1497 } 1501 }
1498 } 1502 }
1499} 1503}
1500 1504
1501#endif 1505#endif
1502 1506
1503/* 1507/*
1504 * Save the interrupt count of the CPU this source is currently bound to. 1508 * Save the interrupt count of the CPU this source is currently bound to.
1505 */ 1509 */
1506static void 1510static void
1507intr_save_evcnt(struct intrsource *source, cpuid_t cpuid) 1511intr_save_evcnt(struct intrsource *source, cpuid_t cpuid)
1508{ 1512{
1509 struct percpu_evcnt *pep; 1513 struct percpu_evcnt *pep;
1510 uint64_t curcnt; 1514 uint64_t curcnt;
1511 int i; 1515 int i;
1512 1516
1513 curcnt = source->is_evcnt.ev_count; 1517 curcnt = source->is_evcnt.ev_count;
1514 pep = source->is_saved_evcnt; 1518 pep = source->is_saved_evcnt;
1515 1519
1516 for (i = 0; i < ncpu; i++) { 1520 for (i = 0; i < ncpu; i++) {
1517 if (pep[i].cpuid == cpuid) { 1521 if (pep[i].cpuid == cpuid) {
1518 pep[i].count = curcnt; 1522 pep[i].count = curcnt;
1519 break; 1523 break;
1520 } 1524 }
1521 } 1525 }
1522} 1526}
1523 1527
1524/* 1528/*
1525 * Restore the interrupt count of the CPU this source is now bound to. 1529 * Restore the interrupt count of the CPU this source is now bound to.
1526 */ 1530 */
1527static void 1531static void
1528intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid) 1532intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid)
1529{ 1533{
1530 struct percpu_evcnt *pep; 1534 struct percpu_evcnt *pep;
1531 int i; 1535 int i;
1532 1536
1533 pep = source->is_saved_evcnt; 1537 pep = source->is_saved_evcnt;
1534 1538
1535 for (i = 0; i < ncpu; i++) { 1539 for (i = 0; i < ncpu; i++) {
1536 if (pep[i].cpuid == cpuid) { 1540 if (pep[i].cpuid == cpuid) {
1537 source->is_evcnt.ev_count = pep[i].count; 1541 source->is_evcnt.ev_count = pep[i].count;
1538 break; 1542 break;
1539 } 1543 }
1540 } 1544 }
1541} 1545}
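
A compact sketch of the checkpointing scheme the two helpers above implement: one live evcnt, one saved count per CPU, keyed by cpuid. The saved_count/checkpoint names are invented for the example:

    struct saved_count {
            cpuid_t cpuid;
            uint64_t count;
    };

    static void
    checkpoint(struct saved_count *tab, int n, cpuid_t id, uint64_t *live,
        bool save)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (tab[i].cpuid != id)
                            continue;
                    if (save)
                            tab[i].count = *live;   /* park old CPU's count */
                    else
                            *live = tab[i].count;   /* adopt new CPU's count */
                    break;
            }
    }
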
1542 1546
1543static void 1547static void
1544intr_redistribute_xc_t(void *arg1, void *arg2) 1548intr_redistribute_xc_t(void *arg1, void *arg2)
1545{ 1549{
1546 struct cpu_info *ci; 1550 struct cpu_info *ci;
1547 struct intrsource *isp; 1551 struct intrsource *isp;
1548 int slot; 1552 int slot;
1549 u_long psl; 1553 u_long psl;
1550 1554
1551 ci = curcpu(); 1555 ci = curcpu();
1552 isp = arg1; 1556 isp = arg1;
1553 slot = (int)(intptr_t)arg2; 1557 slot = (int)(intptr_t)arg2;
1554 1558
1555 /* Disable interrupts locally. */ 1559 /* Disable interrupts locally. */
1556 psl = x86_read_psl(); 1560 psl = x86_read_psl();
1557 x86_disable_intr(); 1561 x86_disable_intr();
1558 1562
1559 /* Hook it in and re-calculate masks. */ 1563 /* Hook it in and re-calculate masks. */
1560 ci->ci_isources[slot] = isp; 1564 ci->ci_isources[slot] = isp;
1561 x86_intr_calculatemasks(curcpu()); 1565 x86_intr_calculatemasks(curcpu());
1562 1566
1563 /* Re-enable interrupts locally. */ 1567 /* Re-enable interrupts locally. */
1564 x86_write_psl(psl); 1568 x86_write_psl(psl);
1565} 1569}
1566 1570
1567static void 1571static void
1568intr_redistribute_xc_s1(void *arg1, void *arg2) 1572intr_redistribute_xc_s1(void *arg1, void *arg2)
1569{ 1573{
1570 struct pic *pic; 1574 struct pic *pic;
1571 struct intrsource *isp; 1575 struct intrsource *isp;
1572 struct cpu_info *nci; 1576 struct cpu_info *nci;
1573 u_long psl; 1577 u_long psl;
1574 1578
1575 isp = arg1; 1579 isp = arg1;
1576 nci = arg2; 1580 nci = arg2;
1577 1581
1578 /* 1582 /*
1579 * Disable interrupts on-chip and mask the pin. Back out 1583 * Disable interrupts on-chip and mask the pin. Back out
1580 * and let the interrupt be processed if one is pending. 1584 * and let the interrupt be processed if one is pending.
1581 */ 1585 */
1582 pic = isp->is_pic; 1586 pic = isp->is_pic;
1583 for (;;) { 1587 for (;;) {
1584 psl = x86_read_psl(); 1588 psl = x86_read_psl();
1585 x86_disable_intr(); 1589 x86_disable_intr();
1586 if ((*pic->pic_trymask)(pic, isp->is_pin)) { 1590 if ((*pic->pic_trymask)(pic, isp->is_pin)) {
1587 break; 1591 break;
1588 } 1592 }
1589 x86_write_psl(psl); 1593 x86_write_psl(psl);
1590 DELAY(1000); 1594 DELAY(1000);
1591 } 1595 }
1592 1596
1593 /* pic_addroute will unmask the interrupt. */ 1597 /* pic_addroute will unmask the interrupt. */
1594 (*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec, 1598 (*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec,
1595 isp->is_type); 1599 isp->is_type);
1596 x86_write_psl(psl); 1600 x86_write_psl(psl);
1597} 1601}
1598 1602
1599static void 1603static void
1600intr_redistribute_xc_s2(void *arg1, void *arg2) 1604intr_redistribute_xc_s2(void *arg1, void *arg2)
1601{ 1605{
1602 struct cpu_info *ci; 1606 struct cpu_info *ci;
1603 u_long psl; 1607 u_long psl;
1604 int slot; 1608 int slot;
1605 1609
1606 ci = curcpu(); 1610 ci = curcpu();
1607 slot = (int)(uintptr_t)arg1; 1611 slot = (int)(uintptr_t)arg1;
1608 1612
1609 /* Disable interrupts locally. */ 1613 /* Disable interrupts locally. */
1610 psl = x86_read_psl(); 1614 psl = x86_read_psl();
1611 x86_disable_intr(); 1615 x86_disable_intr();
1612 1616
1613 /* Patch out the source and re-calculate masks. */ 1617 /* Patch out the source and re-calculate masks. */
1614 ci->ci_isources[slot] = NULL; 1618 ci->ci_isources[slot] = NULL;
1615 x86_intr_calculatemasks(ci); 1619 x86_intr_calculatemasks(ci);
1616 1620
1617 /* Re-enable interrupts locally. */ 1621 /* Re-enable interrupts locally. */
1618 x86_write_psl(psl); 1622 x86_write_psl(psl);
1619} 1623}
1620 1624
1621static bool 1625static bool
1622intr_redistribute(struct cpu_info *oci) 1626intr_redistribute(struct cpu_info *oci)
1623{ 1627{
1624 struct intrsource *isp; 1628 struct intrsource *isp;
1625 struct intrhand *ih; 1629 struct intrhand *ih;
1626 CPU_INFO_ITERATOR cii; 1630 CPU_INFO_ITERATOR cii;
1627 struct cpu_info *nci, *ici; 1631 struct cpu_info *nci, *ici;
1628 int oslot, nslot; 1632 int oslot, nslot;
1629 uint64_t where; 1633 uint64_t where;
1630 1634
1631 KASSERT(mutex_owned(&cpu_lock)); 1635 KASSERT(mutex_owned(&cpu_lock));
1632 1636
1633 /* Look for an interrupt source that we can migrate. */ 1637 /* Look for an interrupt source that we can migrate. */
1634 for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) { 1638 for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) {
1635 if ((isp = oci->ci_isources[oslot]) == NULL) { 1639 if ((isp = oci->ci_isources[oslot]) == NULL) {
1636 continue; 1640 continue;
1637 } 1641 }
1638 if (isp->is_pic->pic_type == PIC_IOAPIC) { 1642 if (isp->is_pic->pic_type == PIC_IOAPIC) {
1639 break; 1643 break;
1640 } 1644 }
1641 } 1645 }
1642 if (oslot == MAX_INTR_SOURCES) { 1646 if (oslot == MAX_INTR_SOURCES) {
1643 return false; 1647 return false;
1644 } 1648 }
1645 1649
1646 /* Find least loaded CPU and try to move there. */ 1650 /* Find least loaded CPU and try to move there. */
1647 nci = NULL; 1651 nci = NULL;
1648 for (CPU_INFO_FOREACH(cii, ici)) { 1652 for (CPU_INFO_FOREACH(cii, ici)) {
1649 if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { 1653 if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1650 continue; 1654 continue;
1651 } 1655 }
1652 KASSERT(ici != oci); 1656 KASSERT(ici != oci);
1653 if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) { 1657 if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) {
1654 nci = ici; 1658 nci = ici;
1655 } 1659 }
1656 } 1660 }
1657 if (nci == NULL) { 1661 if (nci == NULL) {
1658 return false; 1662 return false;
1659 } 1663 }
1660 for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { 1664 for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
1661 if (nci->ci_isources[nslot] == NULL) { 1665 if (nci->ci_isources[nslot] == NULL) {
1662 break; 1666 break;
1663 } 1667 }
1664 } 1668 }
1665 1669
1666 /* If that did not work, allocate anywhere. */ 1670 /* If that did not work, allocate anywhere. */
1667 if (nslot == MAX_INTR_SOURCES) { 1671 if (nslot == MAX_INTR_SOURCES) {
1668 for (CPU_INFO_FOREACH(cii, nci)) { 1672 for (CPU_INFO_FOREACH(cii, nci)) {
1669 if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { 1673 if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1670 continue; 1674 continue;
1671 } 1675 }
1672 KASSERT(nci != oci); 1676 KASSERT(nci != oci);
1673 for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { 1677 for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
1674 if (nci->ci_isources[nslot] == NULL) { 1678 if (nci->ci_isources[nslot] == NULL) {
1675 break; 1679 break;
1676 } 1680 }
1677 } 1681 }
1678 if (nslot != MAX_INTR_SOURCES) { 1682 if (nslot != MAX_INTR_SOURCES) {
1679 break; 1683 break;
1680 } 1684 }
1681 } 1685 }
1682 } 1686 }
1683 if (nslot == MAX_INTR_SOURCES) { 1687 if (nslot == MAX_INTR_SOURCES) {
1684 return false; 1688 return false;
1685 } 1689 }
1686 1690
1687 /* 1691 /*
1688 * Now we have new CPU and new slot. Run a cross-call to set up 1692 * Now we have new CPU and new slot. Run a cross-call to set up
1689 * the new vector on the target CPU. 1693 * the new vector on the target CPU.
1690 */ 1694 */
1691 where = xc_unicast(0, intr_redistribute_xc_t, isp, 1695 where = xc_unicast(0, intr_redistribute_xc_t, isp,
1692 (void *)(intptr_t)nslot, nci); 1696 (void *)(intptr_t)nslot, nci);
1693 xc_wait(where); 1697 xc_wait(where);
1694 1698
1695 /* 1699 /*
1696 * We're ready to go on the target CPU. Run a cross call to 1700 * We're ready to go on the target CPU. Run a cross call to
1697 * reroute the interrupt away from the source CPU. 1701 * reroute the interrupt away from the source CPU.
1698 */ 1702 */
1699 where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci); 1703 where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci);
1700 xc_wait(where); 1704 xc_wait(where);
1701 1705
1702 /* Sleep for (at least) 10ms to allow the change to take hold. */ 1706 /* Sleep for (at least) 10ms to allow the change to take hold. */
1703 (void)kpause("intrdist", false, mstohz(10), NULL); 1707 (void)kpause("intrdist", false, mstohz(10), NULL);
1704 1708
1705 /* Complete removal from the source CPU. */ 1709 /* Complete removal from the source CPU. */
1706 where = xc_unicast(0, intr_redistribute_xc_s2, 1710 where = xc_unicast(0, intr_redistribute_xc_s2,
1707 (void *)(uintptr_t)oslot, NULL, oci); 1711 (void *)(uintptr_t)oslot, NULL, oci);
1708 xc_wait(where); 1712 xc_wait(where);
1709 1713
1710 /* Finally, take care of book-keeping. */ 1714 /* Finally, take care of book-keeping. */
1711 for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) { 1715 for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) {
1712 oci->ci_nintrhand--; 1716 oci->ci_nintrhand--;
1713 nci->ci_nintrhand++; 1717 nci->ci_nintrhand++;
1714 ih->ih_cpu = nci; 1718 ih->ih_cpu = nci;
1715 } 1719 }
1716 intr_save_evcnt(isp, oci->ci_cpuid); 1720 intr_save_evcnt(isp, oci->ci_cpuid);
1717 intr_restore_evcnt(isp, nci->ci_cpuid); 1721 intr_restore_evcnt(isp, nci->ci_cpuid);
1718 isp->is_active_cpu = nci->ci_cpuid; 1722 isp->is_active_cpu = nci->ci_cpuid;
1719 1723
1720 return true; 1724 return true;
1721} 1725}
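
For reference, the migration protocol the function above implements, condensed into one comment (step descriptions are editorial, the function names are from the code):

    /*
     * 1. xc -> target CPU:  hook isp into a free slot, recalc masks
     *                       (intr_redistribute_xc_t)
     * 2. xc -> source CPU:  trymask the pin, pic_addroute to the target
     *                       (intr_redistribute_xc_s1)
     * 3. kpause ~10ms:      let in-flight deliveries settle
     * 4. xc -> source CPU:  clear the old slot, recalc masks
     *                       (intr_redistribute_xc_s2)
     * 5. bookkeeping:       move ih_cpu/ci_nintrhand, swap evcnt state
     */
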
1722 1726
1723void 1727void
1724cpu_intr_redistribute(void) 1728cpu_intr_redistribute(void)
1725{ 1729{
1726 CPU_INFO_ITERATOR cii; 1730 CPU_INFO_ITERATOR cii;
1727 struct cpu_info *ci; 1731 struct cpu_info *ci;
1728 1732
1729 KASSERT(mutex_owned(&cpu_lock)); 1733 KASSERT(mutex_owned(&cpu_lock));
1730 KASSERT(mp_online); 1734 KASSERT(mp_online);
1731 1735
1732 /* Direct interrupts away from shielded CPUs. */ 1736 /* Direct interrupts away from shielded CPUs. */
1733 for (CPU_INFO_FOREACH(cii, ci)) { 1737 for (CPU_INFO_FOREACH(cii, ci)) {
1734 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { 1738 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
1735 continue; 1739 continue;
1736 } 1740 }
1737 while (intr_redistribute(ci)) { 1741 while (intr_redistribute(ci)) {
1738 /* nothing */ 1742 /* nothing */
1739 } 1743 }
1740 } 1744 }
1741 1745
1742 /* XXX should now re-balance */ 1746 /* XXX should now re-balance */
1743} 1747}
1744 1748
1745u_int 1749u_int
1746cpu_intr_count(struct cpu_info *ci) 1750cpu_intr_count(struct cpu_info *ci)
1747{ 1751{
1748 1752
1749 KASSERT(ci->ci_nintrhand >= 0); 1753 KASSERT(ci->ci_nintrhand >= 0);
1750 1754
1751 return ci->ci_nintrhand; 1755 return ci->ci_nintrhand;
1752} 1756}
1753 1757
1754static int 1758static int
1755intr_find_unused_slot(struct cpu_info *ci, int *index) 1759intr_find_unused_slot(struct cpu_info *ci, int *index)
1756{ 1760{
1757 int slot, i; 1761 int slot, i;
1758 1762
1759 KASSERT(mutex_owned(&cpu_lock)); 1763 KASSERT(mutex_owned(&cpu_lock));
1760 1764
1761 slot = -1; 1765 slot = -1;
1762 for (i = 0; i < MAX_INTR_SOURCES ; i++) { 1766 for (i = 0; i < MAX_INTR_SOURCES ; i++) {
1763 if (ci->ci_isources[i] == NULL) { 1767 if (ci->ci_isources[i] == NULL) {
1764 slot = i; 1768 slot = i;
1765 break; 1769 break;
1766 } 1770 }
1767 } 1771 }
1768 if (slot == -1) { 1772 if (slot == -1) {
1769 DPRINTF(("cannot allocate ci_isources\n")); 1773 DPRINTF(("cannot allocate ci_isources\n"));
1770 return EBUSY; 1774 return EBUSY;
1771 } 1775 }
1772 1776
1773 *index = slot; 1777 *index = slot;
1774 return 0; 1778 return 0;
1775} 1779}
1776 1780
1777/* 1781/*
1778 * Make the given cpu_info ready to accept the interrupt. 1782 * Make the given cpu_info ready to accept the interrupt.
1779 */ 1783 */
1780static void 1784static void
1781intr_activate_xcall(void *arg1, void *arg2) 1785intr_activate_xcall(void *arg1, void *arg2)
1782{ 1786{
1783 struct cpu_info *ci; 1787 struct cpu_info *ci;
1784 struct intrsource *source; 1788 struct intrsource *source;
1785 struct intrstub *stubp; 1789 struct intrstub *stubp;
1786 struct intrhand *ih; 1790 struct intrhand *ih;
1787 struct idt_vec *iv; 1791 struct idt_vec *iv;
1788 u_long psl; 1792 u_long psl;
1789 int idt_vec; 1793 int idt_vec;
1790 int slot; 1794 int slot;
1791 1795
1792 ih = arg1; 1796 ih = arg1;
1793 1797
1794 kpreempt_disable(); 1798 kpreempt_disable();
1795 1799
1796 KASSERT(ih->ih_cpu == curcpu() || !mp_online); 1800 KASSERT(ih->ih_cpu == curcpu() || !mp_online);
1797 1801
1798 ci = ih->ih_cpu; 1802 ci = ih->ih_cpu;
1799 slot = ih->ih_slot; 1803 slot = ih->ih_slot;
1800 source = ci->ci_isources[slot]; 1804 source = ci->ci_isources[slot];
1801 idt_vec = source->is_idtvec; 1805 idt_vec = source->is_idtvec;
1802 iv = idt_vec_ref(&ci->ci_idtvec); 1806 iv = idt_vec_ref(&ci->ci_idtvec);
1803 1807
1804 psl = x86_read_psl(); 1808 psl = x86_read_psl();
1805 x86_disable_intr(); 1809 x86_disable_intr();
1806 1810
1807 x86_intr_calculatemasks(ci); 1811 x86_intr_calculatemasks(ci);
1808 1812
1809 if (source->is_type == IST_LEVEL) { 1813 if (source->is_type == IST_LEVEL) {
1810 stubp = &source->is_pic->pic_level_stubs[slot]; 1814 stubp = &source->is_pic->pic_level_stubs[slot];
1811 } else { 1815 } else {
1812 stubp = &source->is_pic->pic_edge_stubs[slot]; 1816 stubp = &source->is_pic->pic_edge_stubs[slot];
1813 } 1817 }
1814 1818
1815 source->is_resume = stubp->ist_resume; 1819 source->is_resume = stubp->ist_resume;
1816 source->is_recurse = stubp->ist_recurse; 1820 source->is_recurse = stubp->ist_recurse;
1817 idt_vec_set(iv, idt_vec, stubp->ist_entry); 1821 idt_vec_set(iv, idt_vec, stubp->ist_entry);
1818 1822
1819 x86_write_psl(psl); 1823 x86_write_psl(psl);
1820 1824
1821 kpreempt_enable(); 1825 kpreempt_enable();
1822} 1826}
1823 1827
1824/* 1828/*
1825 * Make the given cpu_info stop accepting the interrupt. 1829 * Make the given cpu_info stop accepting the interrupt.
1826 */ 1830 */
1827static void 1831static void
1828intr_deactivate_xcall(void *arg1, void *arg2) 1832intr_deactivate_xcall(void *arg1, void *arg2)
1829{ 1833{
1830 struct cpu_info *ci; 1834 struct cpu_info *ci;
1831 struct intrhand *ih, *lih; 1835 struct intrhand *ih, *lih;
1832 struct intrsource *isp; 1836 struct intrsource *isp;
1833 u_long psl; 1837 u_long psl;
1834 int idt_vec; 1838 int idt_vec;
1835 int slot; 1839 int slot;
1836 1840
1837 ih = arg1; 1841 ih = arg1;
1838 1842
1839 kpreempt_disable(); 1843 kpreempt_disable();
1840 1844
1841 KASSERT(ih->ih_cpu == curcpu() || !mp_online); 1845 KASSERT(ih->ih_cpu == curcpu() || !mp_online);
1842 1846
1843 ci = ih->ih_cpu; 1847 ci = ih->ih_cpu;
1844 slot = ih->ih_slot; 1848 slot = ih->ih_slot;
1845 isp = ci->ci_isources[slot]; 1849 isp = ci->ci_isources[slot];
1846 idt_vec = isp->is_idtvec; 1850 idt_vec = isp->is_idtvec;
1847 1851
1848 psl = x86_read_psl(); 1852 psl = x86_read_psl();
1849 x86_disable_intr(); 1853 x86_disable_intr();
1850 1854
1851 /* Move all devices sharing the IRQ number off this CPU. */ 1855 /* Move all devices sharing the IRQ number off this CPU. */
1852 ci->ci_isources[slot] = NULL; 1856 ci->ci_isources[slot] = NULL;
1853 for (lih = ih; lih != NULL; lih = lih->ih_next) { 1857 for (lih = ih; lih != NULL; lih = lih->ih_next) {
1854 ci->ci_nintrhand--; 1858 ci->ci_nintrhand--;
1855 } 1859 }
1856 1860
1857 x86_intr_calculatemasks(ci); 1861 x86_intr_calculatemasks(ci);
1858 1862
1859 if (idt_vec_is_pcpu()) { 1863 if (idt_vec_is_pcpu()) {
1860 idt_vec_free(&ci->ci_idtvec, idt_vec); 1864 idt_vec_free(&ci->ci_idtvec, idt_vec);
1861 } else { 1865 } else {
1862 /* 1866 /*
1863 * Skip unsetgate(), because the same idt[] entry is 1867 * Skip unsetgate(), because the same idt[] entry is
1864 * overwritten in intr_activate_xcall(). 1868 * overwritten in intr_activate_xcall().
1865 */ 1869 */
1866 } 1870 }
1867 1871
1868 x86_write_psl(psl); 1872 x86_write_psl(psl);
1869 1873
1870 kpreempt_enable(); 1874 kpreempt_enable();
1871} 1875}
1872 1876
1873static void 1877static void
1874intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset) 1878intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset)
1875{ 1879{
1876 struct cpu_info *ci; 1880 struct cpu_info *ci;
1877 1881
1878 KASSERT(mutex_owned(&cpu_lock)); 1882 KASSERT(mutex_owned(&cpu_lock));
1879 1883
1880 if (isp == NULL) { 1884 if (isp == NULL) {
1881 kcpuset_zero(cpuset); 1885 kcpuset_zero(cpuset);
1882 return; 1886 return;
1883 } 1887 }
1884 1888
1885 KASSERTMSG(isp->is_handlers != NULL, 1889 KASSERTMSG(isp->is_handlers != NULL,
1886 "Don't get affinity for the device which is not established."); 1890 "Don't get affinity for the device which is not established.");
1887 1891
1888 ci = isp->is_handlers->ih_cpu; 1892 ci = isp->is_handlers->ih_cpu;
1889 if (ci == NULL) { 1893 if (ci == NULL) {
1890 kcpuset_zero(cpuset); 1894 kcpuset_zero(cpuset);
1891 return; 1895 return;
1892 } 1896 }
1893 1897
1894 kcpuset_set(cpuset, cpu_index(ci)); 1898 kcpuset_set(cpuset, cpu_index(ci));
1895 return; 1899 return;
1896} 1900}
1897 1901
1898static int 1902static int
1899intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset) 1903intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
1900{ 1904{
1901 struct cpu_info *oldci, *newci; 1905 struct cpu_info *oldci, *newci;
1902 struct intrhand *ih, *lih; 1906 struct intrhand *ih, *lih;
1903 struct pic *pic; 1907 struct pic *pic;
1904 u_int cpu_idx; 1908 u_int cpu_idx;
1905 int old_idtvec, new_idtvec; 1909 int old_idtvec, new_idtvec;
1906 int oldslot, newslot; 1910 int oldslot, newslot;
1907 int err; 1911 int err;
1908 int pin; 1912 int pin;
1909 1913
1910 KASSERT(mutex_owned(&intr_distribute_lock)); 1914 KASSERT(mutex_owned(&intr_distribute_lock));
1911 KASSERT(mutex_owned(&cpu_lock)); 1915 KASSERT(mutex_owned(&cpu_lock));
1912 1916
1913 /* XXX 1917 /* XXX
1914 * logical destination mode is not supported, so use the lowest-indexed cpu. 1918 * logical destination mode is not supported, so use the lowest-indexed cpu.
1915 */ 1919 */
1916 cpu_idx = kcpuset_ffs(cpuset) - 1; 1920 cpu_idx = kcpuset_ffs(cpuset) - 1;
1917 newci = cpu_lookup(cpu_idx); 1921 newci = cpu_lookup(cpu_idx);
1918 if (newci == NULL) { 1922 if (newci == NULL) {
1919 DPRINTF(("invalid cpu index: %u\n", cpu_idx)); 1923 DPRINTF(("invalid cpu index: %u\n", cpu_idx));
1920 return EINVAL; 1924 return EINVAL;
1921 } 1925 }
1922 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { 1926 if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
1923 DPRINTF(("the cpu is set nointr shield. index:%u\n", cpu_idx)); 1927 DPRINTF(("the cpu is set nointr shield. index:%u\n", cpu_idx));
1924 return EINVAL; 1928 return EINVAL;
1925 } 1929 }
1926 1930
1927 if (isp == NULL) { 1931 if (isp == NULL) {
1928 DPRINTF(("invalid intrctl handler\n")); 1932 DPRINTF(("invalid intrctl handler\n"));
1929 return EINVAL; 1933 return EINVAL;
1930 } 1934 }
1931 1935
1932 /* i8259_pic supports only the primary cpu; see i8259.c. */ 1936 /* i8259_pic supports only the primary cpu; see i8259.c. */
1933 pic = isp->is_pic; 1937 pic = isp->is_pic;
1934 if (pic == &i8259_pic) { 1938 if (pic == &i8259_pic) {
1935 DPRINTF(("i8259 pic does not support set_affinity\n")); 1939 DPRINTF(("i8259 pic does not support set_affinity\n"));
1936 return ENOTSUP; 1940 return ENOTSUP;
1937 } 1941 }
1938 1942
1939 ih = isp->is_handlers; 1943 ih = isp->is_handlers;
1940 KASSERTMSG(ih != NULL, 1944 KASSERTMSG(ih != NULL,
1941 "Don't set affinity for the device which is not established."); 1945 "Don't set affinity for the device which is not established.");
1942 1946
1943 oldci = ih->ih_cpu; 1947 oldci = ih->ih_cpu;
1944 if (newci == oldci) /* nothing to do */ 1948 if (newci == oldci) /* nothing to do */
1945 return 0; 1949 return 0;
1946 1950
1947 oldslot = ih->ih_slot; 1951 oldslot = ih->ih_slot;
1948 1952
1949 err = intr_find_unused_slot(newci, &newslot); 1953 err = intr_find_unused_slot(newci, &newslot);
1950 if (err) { 1954 if (err) {
        DPRINTF(("failed to allocate interrupt slot for PIC %s intrid "
            "%s\n", isp->is_pic->pic_name, isp->is_intrid));
        return err;
    }

    old_idtvec = isp->is_idtvec;

    if (idt_vec_is_pcpu()) {
        new_idtvec = idt_vec_alloc(&newci->ci_idtvec,
            APIC_LEVEL(ih->ih_level), IDT_INTR_HIGH);
        if (new_idtvec == 0)
            return EBUSY;
        DPRINTF(("interrupt from cpu%d vec %d to cpu%d vec %d\n",
            cpu_index(oldci), old_idtvec, cpu_index(newci),
            new_idtvec));
    } else {
        new_idtvec = isp->is_idtvec;
    }

    /* Prevent intr_unmask() from reenabling the source at the hw. */
    isp->is_distribute_pending = true;

    pin = isp->is_pin;
    (*pic->pic_hwmask)(pic, pin);    /* for ci_ipending check */
    membar_sync();
    while (oldci->ci_ipending & (1ULL << oldslot)) {
        (void)kpause("intrdist", false, 1, &cpu_lock);
        membar_sync();
    }

    kpreempt_disable();

    /* deactivate old interrupt setting */
    if (oldci == curcpu() || !mp_online) {
        intr_deactivate_xcall(ih, NULL);
    } else {
        uint64_t where;
        where = xc_unicast(0, intr_deactivate_xcall, ih,
            NULL, oldci);
        xc_wait(where);
    }
    intr_save_evcnt(isp, oldci->ci_cpuid);
    (*pic->pic_delroute)(pic, oldci, pin, old_idtvec, isp->is_type);

    /* activate new interrupt setting */
    isp->is_idtvec = new_idtvec;
    newci->ci_isources[newslot] = isp;
    for (lih = ih; lih != NULL; lih = lih->ih_next) {
        newci->ci_nintrhand++;
        lih->ih_cpu = newci;
        lih->ih_slot = newslot;
    }
    if (newci == curcpu() || !mp_online) {
        intr_activate_xcall(ih, NULL);
    } else {
        uint64_t where;
        where = xc_unicast(0, intr_activate_xcall, ih,
            NULL, newci);
        xc_wait(where);
    }
    intr_restore_evcnt(isp, newci->ci_cpuid);
    isp->is_active_cpu = newci->ci_cpuid;
    (*pic->pic_addroute)(pic, newci, pin, new_idtvec, isp->is_type);

    isp->is_distribute_pending = false;
    if (newci == curcpu() || !mp_online) {
        intr_hwunmask_xcall(ih, NULL);
    } else {
        uint64_t where;
        where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, newci);
        xc_wait(where);
    }

    kpreempt_enable();

    return err;
}
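/*
 * Editorial sketch, not part of intr.c: the idiom used three times in
 * the function above.  A cross-call function is run directly when the
 * target CPU is the current one (or when the other CPUs are not yet
 * online), and is otherwise shipped to the target with xc_unicast()
 * (see xcall(9)) and waited for.  The helper name run_xcall_on_cpu()
 * is hypothetical.
 */
#if 0
static void
run_xcall_on_cpu(xcfunc_t func, void *arg, struct cpu_info *ci)
{

    if (ci == curcpu() || !mp_online) {
        (*func)(arg, NULL);
    } else {
        uint64_t where;

        where = xc_unicast(0, func, arg, NULL, ci);
        xc_wait(where);
    }
}
#endif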

static bool
intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset)
{
    struct cpu_info *ci;

    KASSERT(mutex_owned(&cpu_lock));

    /*
     * The device is already pci_intr_alloc'ed, but it is not
     * established yet.
     */
    if (isp->is_handlers == NULL)
        return false;

    ci = isp->is_handlers->ih_cpu;
    KASSERT(ci != NULL);

    return kcpuset_isset(cpuset, cpu_index(ci));
}

static struct intrhand *
intr_get_handler(const char *intrid)
{
    struct intrsource *isp;

    KASSERT(mutex_owned(&cpu_lock));

    isp = intr_get_io_intrsource(intrid);
    if (isp == NULL)
        return NULL;

    return isp->is_handlers;
}

uint64_t
x86_intr_get_count(const char *intrid, u_int cpu_idx)
{
    struct cpu_info *ci;
    struct intrsource *isp;
    struct intrhand *ih;
    struct percpu_evcnt pep;
    cpuid_t cpuid;
    int i, slot;
    uint64_t count = 0;

    KASSERT(mutex_owned(&cpu_lock));
    ci = cpu_lookup(cpu_idx);
    cpuid = ci->ci_cpuid;

    ih = intr_get_handler(intrid);
    if (ih == NULL) {
        count = 0;
        goto out;
    }
    slot = ih->ih_slot;
    isp = ih->ih_cpu->ci_isources[slot];

    for (i = 0; i < ncpu; i++) {
        pep = isp->is_saved_evcnt[i];
        if (cpuid == pep.cpuid) {
            if (isp->is_active_cpu == pep.cpuid) {
                count = isp->is_evcnt.ev_count;
                goto out;
            } else {
                count = pep.count;
                goto out;
            }
        }
    }

 out:
    return count;
}
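/*
 * Editorial sketch, not part of intr.c: a hypothetical caller of
 * x86_intr_get_count().  cpu_lock must be held, per the KASSERT above.
 * The count for the CPU currently servicing the source is read live
 * from is_evcnt; counts for CPUs it ran on earlier come from the
 * is_saved_evcnt[] snapshots taken by intr_save_evcnt() at migration
 * time, so summing over all CPUs gives a total.  The intrid string
 * "ioapic0 pin 10" is only an example.
 */
#if 0
static uint64_t
example_total_count(void)
{
    uint64_t total = 0;
    u_int i;

    mutex_enter(&cpu_lock);
    for (i = 0; i < (u_int)ncpu; i++)
        total += x86_intr_get_count("ioapic0 pin 10", i);
    mutex_exit(&cpu_lock);
    return total;
}
#endif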

void
x86_intr_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
    struct cpu_info *ci;
    struct intrhand *ih;

    KASSERT(mutex_owned(&cpu_lock));
    kcpuset_zero(cpuset);

    ih = intr_get_handler(intrid);
    if (ih == NULL)
        return;

    ci = ih->ih_cpu;
    kcpuset_set(cpuset, cpu_index(ci));
}
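/*
 * Editorial sketch, not part of intr.c: a hypothetical caller of
 * x86_intr_get_assigned().  The caller allocates the kcpuset, which
 * comes back with at most one bit set (the servicing CPU, or no bits
 * if no handler is established).  The intrid is again only an example.
 */
#if 0
static bool
example_serviced_here(void)
{
    kcpuset_t *cpuset;
    bool here;

    kcpuset_create(&cpuset, true);
    mutex_enter(&cpu_lock);
    x86_intr_get_assigned("ioapic0 pin 10", cpuset);
    mutex_exit(&cpu_lock);
    kpreempt_disable();
    here = kcpuset_isset(cpuset, cpu_index(curcpu()));
    kpreempt_enable();
    kcpuset_destroy(cpuset);
    return here;
}
#endif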

void
x86_intr_get_devname(const char *intrid, char *buf, size_t len)
{
    struct intrsource *isp;
    struct intrhand *ih;
    int slot;

    KASSERT(mutex_owned(&cpu_lock));

    ih = intr_get_handler(intrid);
    if (ih == NULL) {
        buf[0] = '\0';
        return;
    }
    slot = ih->ih_slot;
    isp = ih->ih_cpu->ci_isources[slot];
    strlcpy(buf, isp->is_xname, len);
}

/*
 * MI interface for subr_interrupt.c
 */
uint64_t
interrupt_get_count(const char *intrid, u_int cpu_idx)
{
    struct intrsource *isp;
    uint64_t count = 0;

    mutex_enter(&cpu_lock);
    isp = intr_get_io_intrsource(intrid);
    if (isp != NULL)
        count = isp->is_pic->pic_intr_get_count(intrid, cpu_idx);
    mutex_exit(&cpu_lock);
    return count;
}
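/*
 * Editorial sketch, not part of intr.c: unlike x86_intr_get_count()
 * above, this MI wrapper takes cpu_lock itself, so an MI consumer (for
 * example the subr_interrupt.c backend behind intrctl(8)) can call it
 * without further locking.  The intrid here is again only an example.
 */
#if 0
static void
example_print_count(void)
{
    uint64_t n;

    n = interrupt_get_count("msi0 vec 0", 0);
    printf("msi0 vec 0 on cpu0: %" PRIu64 "\n", n);
}
#endif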

/*
 * MI interface for subr_interrupt.c
 */
void
interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset)
{
    struct intrsource *isp;

    mutex_enter(&cpu_lock);
    isp = intr_get_io_intrsource(intrid);
    if (isp != NULL)
        isp->is_pic->pic_intr_get_assigned(intrid, cpuset);