| @@ -1,2191 +1,2191 @@ | | | @@ -1,2191 +1,2191 @@ |
1 | /* $NetBSD: intr.c,v 1.150.6.5 2020/04/19 19:39:10 bouyer Exp $ */ | | 1 | /* $NetBSD: intr.c,v 1.150.6.6 2020/04/20 20:19:07 bouyer Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran, and by Jason R. Thorpe. | | 8 | * by Andrew Doran, and by Jason R. Thorpe. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Copyright 2002 (c) Wasabi Systems, Inc. | | 33 | * Copyright 2002 (c) Wasabi Systems, Inc. |
34 | * All rights reserved. | | 34 | * All rights reserved. |
35 | * | | 35 | * |
36 | * Written by Frank van der Linden for Wasabi Systems, Inc. | | 36 | * Written by Frank van der Linden for Wasabi Systems, Inc. |
37 | * | | 37 | * |
38 | * Redistribution and use in source and binary forms, with or without | | 38 | * Redistribution and use in source and binary forms, with or without |
39 | * modification, are permitted provided that the following conditions | | 39 | * modification, are permitted provided that the following conditions |
40 | * are met: | | 40 | * are met: |
41 | * 1. Redistributions of source code must retain the above copyright | | 41 | * 1. Redistributions of source code must retain the above copyright |
42 | * notice, this list of conditions and the following disclaimer. | | 42 | * notice, this list of conditions and the following disclaimer. |
43 | * 2. Redistributions in binary form must reproduce the above copyright | | 43 | * 2. Redistributions in binary form must reproduce the above copyright |
44 | * notice, this list of conditions and the following disclaimer in the | | 44 | * notice, this list of conditions and the following disclaimer in the |
45 | * documentation and/or other materials provided with the distribution. | | 45 | * documentation and/or other materials provided with the distribution. |
46 | * 3. All advertising materials mentioning features or use of this software | | 46 | * 3. All advertising materials mentioning features or use of this software |
47 | * must display the following acknowledgement: | | 47 | * must display the following acknowledgement: |
48 | * This product includes software developed for the NetBSD Project by | | 48 | * This product includes software developed for the NetBSD Project by |
49 | * Wasabi Systems, Inc. | | 49 | * Wasabi Systems, Inc. |
50 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 50 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
51 | * or promote products derived from this software without specific prior | | 51 | * or promote products derived from this software without specific prior |
52 | * written permission. | | 52 | * written permission. |
53 | * | | 53 | * |
54 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 54 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 55 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
56 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 56 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
57 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 57 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
58 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 58 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
59 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 59 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
60 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 60 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
61 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 61 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
62 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 62 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
63 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 63 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
64 | * POSSIBILITY OF SUCH DAMAGE. | | 64 | * POSSIBILITY OF SUCH DAMAGE. |
65 | */ | | 65 | */ |
66 | | | 66 | |
67 | /*- | | 67 | /*- |
68 | * Copyright (c) 1991 The Regents of the University of California. | | 68 | * Copyright (c) 1991 The Regents of the University of California. |
69 | * All rights reserved. | | 69 | * All rights reserved. |
70 | * | | 70 | * |
71 | * This code is derived from software contributed to Berkeley by | | 71 | * This code is derived from software contributed to Berkeley by |
72 | * William Jolitz. | | 72 | * William Jolitz. |
73 | * | | 73 | * |
74 | * Redistribution and use in source and binary forms, with or without | | 74 | * Redistribution and use in source and binary forms, with or without |
75 | * modification, are permitted provided that the following conditions | | 75 | * modification, are permitted provided that the following conditions |
76 | * are met: | | 76 | * are met: |
77 | * 1. Redistributions of source code must retain the above copyright | | 77 | * 1. Redistributions of source code must retain the above copyright |
78 | * notice, this list of conditions and the following disclaimer. | | 78 | * notice, this list of conditions and the following disclaimer. |
79 | * 2. Redistributions in binary form must reproduce the above copyright | | 79 | * 2. Redistributions in binary form must reproduce the above copyright |
80 | * notice, this list of conditions and the following disclaimer in the | | 80 | * notice, this list of conditions and the following disclaimer in the |
81 | * documentation and/or other materials provided with the distribution. | | 81 | * documentation and/or other materials provided with the distribution. |
82 | * 3. Neither the name of the University nor the names of its contributors | | 82 | * 3. Neither the name of the University nor the names of its contributors |
83 | * may be used to endorse or promote products derived from this software | | 83 | * may be used to endorse or promote products derived from this software |
84 | * without specific prior written permission. | | 84 | * without specific prior written permission. |
85 | * | | 85 | * |
86 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 86 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
87 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 87 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
88 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 88 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
89 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 89 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
90 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 90 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
91 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 91 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
92 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 92 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
93 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 93 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
94 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 94 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
95 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 95 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
96 | * SUCH DAMAGE. | | 96 | * SUCH DAMAGE. |
97 | * | | 97 | * |
98 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | | 98 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 |
99 | */ | | 99 | */ |
100 | | | 100 | |
101 | /*- | | 101 | /*- |
102 | * Copyright (c) 1993, 1994 Charles Hannum. | | 102 | * Copyright (c) 1993, 1994 Charles Hannum. |
103 | * | | 103 | * |
104 | * Redistribution and use in source and binary forms, with or without | | 104 | * Redistribution and use in source and binary forms, with or without |
105 | * modification, are permitted provided that the following conditions | | 105 | * modification, are permitted provided that the following conditions |
106 | * are met: | | 106 | * are met: |
107 | * 1. Redistributions of source code must retain the above copyright | | 107 | * 1. Redistributions of source code must retain the above copyright |
108 | * notice, this list of conditions and the following disclaimer. | | 108 | * notice, this list of conditions and the following disclaimer. |
109 | * 2. Redistributions in binary form must reproduce the above copyright | | 109 | * 2. Redistributions in binary form must reproduce the above copyright |
110 | * notice, this list of conditions and the following disclaimer in the | | 110 | * notice, this list of conditions and the following disclaimer in the |
111 | * documentation and/or other materials provided with the distribution. | | 111 | * documentation and/or other materials provided with the distribution. |
112 | * 3. All advertising materials mentioning features or use of this software | | 112 | * 3. All advertising materials mentioning features or use of this software |
113 | * must display the following acknowledgement: | | 113 | * must display the following acknowledgement: |
114 | * This product includes software developed by the University of | | 114 | * This product includes software developed by the University of |
115 | * California, Berkeley and its contributors. | | 115 | * California, Berkeley and its contributors. |
116 | * 4. Neither the name of the University nor the names of its contributors | | 116 | * 4. Neither the name of the University nor the names of its contributors |
117 | * may be used to endorse or promote products derived from this software | | 117 | * may be used to endorse or promote products derived from this software |
118 | * without specific prior written permission. | | 118 | * without specific prior written permission. |
119 | * | | 119 | * |
120 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 120 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
121 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 121 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
122 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 122 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
123 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 123 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
124 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 124 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
125 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 125 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
126 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 126 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
127 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 127 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
128 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 128 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
129 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 129 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
130 | * SUCH DAMAGE. | | 130 | * SUCH DAMAGE. |
131 | * | | 131 | * |
132 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 | | 132 | * @(#)isa.c 7.2 (Berkeley) 5/13/91 |
133 | */ | | 133 | */ |
134 | | | 134 | |
135 | #include <sys/cdefs.h> | | 135 | #include <sys/cdefs.h> |
136 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.150.6.5 2020/04/19 19:39:10 bouyer Exp $"); | | 136 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.150.6.6 2020/04/20 20:19:07 bouyer Exp $"); |
137 | | | 137 | |
138 | #include "opt_intrdebug.h" | | 138 | #include "opt_intrdebug.h" |
139 | #include "opt_multiprocessor.h" | | 139 | #include "opt_multiprocessor.h" |
140 | #include "opt_acpi.h" | | 140 | #include "opt_acpi.h" |
141 | | | 141 | |
142 | #include <sys/param.h> | | 142 | #include <sys/param.h> |
143 | #include <sys/systm.h> | | 143 | #include <sys/systm.h> |
144 | #include <sys/kernel.h> | | 144 | #include <sys/kernel.h> |
145 | #include <sys/syslog.h> | | 145 | #include <sys/syslog.h> |
146 | #include <sys/device.h> | | 146 | #include <sys/device.h> |
147 | #include <sys/kmem.h> | | 147 | #include <sys/kmem.h> |
148 | #include <sys/proc.h> | | 148 | #include <sys/proc.h> |
149 | #include <sys/errno.h> | | 149 | #include <sys/errno.h> |
150 | #include <sys/intr.h> | | 150 | #include <sys/intr.h> |
151 | #include <sys/cpu.h> | | 151 | #include <sys/cpu.h> |
152 | #include <sys/atomic.h> | | 152 | #include <sys/atomic.h> |
153 | #include <sys/xcall.h> | | 153 | #include <sys/xcall.h> |
154 | #include <sys/interrupt.h> | | 154 | #include <sys/interrupt.h> |
155 | #include <sys/reboot.h> /* for AB_VERBOSE */ | | 155 | #include <sys/reboot.h> /* for AB_VERBOSE */ |
156 | | | 156 | |
157 | #include <sys/kauth.h> | | 157 | #include <sys/kauth.h> |
158 | #include <sys/conf.h> | | 158 | #include <sys/conf.h> |
159 | | | 159 | |
160 | #include <uvm/uvm_extern.h> | | 160 | #include <uvm/uvm_extern.h> |
161 | | | 161 | |
162 | #include <machine/i8259.h> | | 162 | #include <machine/i8259.h> |
163 | #include <machine/pio.h> | | 163 | #include <machine/pio.h> |
164 | | | 164 | |
165 | #include "ioapic.h" | | 165 | #include "ioapic.h" |
166 | #include "lapic.h" | | 166 | #include "lapic.h" |
167 | #include "pci.h" | | 167 | #include "pci.h" |
168 | #include "acpica.h" | | 168 | #include "acpica.h" |
169 | #ifndef XENPV | | 169 | #ifndef XENPV |
170 | #include "hyperv.h" | | 170 | #include "hyperv.h" |
171 | #if NHYPERV > 0 | | 171 | #if NHYPERV > 0 |
172 | #include <dev/hyperv/hypervvar.h> | | 172 | #include <dev/hyperv/hypervvar.h> |
173 | | | 173 | |
174 | extern void Xresume_hyperv_hypercall(void); | | 174 | extern void Xresume_hyperv_hypercall(void); |
175 | extern void Xrecurse_hyperv_hypercall(void); | | 175 | extern void Xrecurse_hyperv_hypercall(void); |
176 | #endif | | 176 | #endif |
177 | #endif | | 177 | #endif |
178 | | | 178 | |
179 | #if NIOAPIC > 0 || NACPICA > 0 | | 179 | #if NIOAPIC > 0 || NACPICA > 0 |
180 | #include <machine/i82093var.h> | | 180 | #include <machine/i82093var.h> |
181 | #include <machine/mpbiosvar.h> | | 181 | #include <machine/mpbiosvar.h> |
182 | #include <machine/mpacpi.h> | | 182 | #include <machine/mpacpi.h> |
183 | #endif | | 183 | #endif |
184 | | | 184 | |
185 | #if NLAPIC > 0 | | 185 | #if NLAPIC > 0 |
186 | #include <machine/i82489var.h> | | 186 | #include <machine/i82489var.h> |
187 | #endif | | 187 | #endif |
188 | | | 188 | |
189 | #if NPCI > 0 | | 189 | #if NPCI > 0 |
190 | #include <dev/pci/ppbreg.h> | | 190 | #include <dev/pci/ppbreg.h> |
191 | #endif | | 191 | #endif |
192 | | | 192 | |
193 | #include <x86/pci/msipic.h> | | 193 | #include <x86/pci/msipic.h> |
194 | #include <x86/pci/pci_msi_machdep.h> | | 194 | #include <x86/pci/pci_msi_machdep.h> |
195 | | | 195 | |
196 | #if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX) | | 196 | #if NPCI == 0 || !defined(__HAVE_PCI_MSI_MSIX) |
197 | #define msipic_is_msi_pic(PIC) (false) | | 197 | #define msipic_is_msi_pic(PIC) (false) |
198 | #endif | | 198 | #endif |
199 | | | 199 | |
200 | #ifdef DDB | | 200 | #ifdef DDB |
201 | #include <ddb/db_output.h> | | 201 | #include <ddb/db_output.h> |
202 | #endif | | 202 | #endif |
203 | | | 203 | |
204 | #ifdef INTRDEBUG | | 204 | #ifdef INTRDEBUG |
205 | #define DPRINTF(msg) printf msg | | 205 | #define DPRINTF(msg) printf msg |
206 | #else | | 206 | #else |
207 | #define DPRINTF(msg) | | 207 | #define DPRINTF(msg) |
208 | #endif | | 208 | #endif |
209 | | | 209 | |
210 | static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources = | | 210 | static SIMPLEQ_HEAD(, intrsource) io_interrupt_sources = |
211 | SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources); | | 211 | SIMPLEQ_HEAD_INITIALIZER(io_interrupt_sources); |
212 | | | 212 | |
213 | static kmutex_t intr_distribute_lock; | | 213 | static kmutex_t intr_distribute_lock; |
214 | | | 214 | |
215 | static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *, | | 215 | static int intr_allocate_slot_cpu(struct cpu_info *, struct pic *, int, int *, |
216 | struct intrsource *); | | 216 | struct intrsource *); |
217 | static int __noinline intr_allocate_slot(struct pic *, int, int, | | 217 | static int __noinline intr_allocate_slot(struct pic *, int, int, |
218 | struct cpu_info **, int *, int *, | | 218 | struct cpu_info **, int *, int *, |
219 | struct intrsource *); | | 219 | struct intrsource *); |
220 | | | 220 | |
221 | static void intr_source_free(struct cpu_info *, int, struct pic *, int); | | 221 | static void intr_source_free(struct cpu_info *, int, struct pic *, int); |
222 | | | 222 | |
223 | static void intr_establish_xcall(void *, void *); | | 223 | static void intr_establish_xcall(void *, void *); |
224 | static void intr_disestablish_xcall(void *, void *); | | 224 | static void intr_disestablish_xcall(void *, void *); |
225 | | | 225 | |
226 | static const char *legacy_intr_string(int, char *, size_t, struct pic *); | | 226 | static const char *legacy_intr_string(int, char *, size_t, struct pic *); |
227 | | | 227 | |
228 | static const char *xen_intr_string(int, char *, size_t, struct pic *); | | 228 | static const char *xen_intr_string(int, char *, size_t, struct pic *); |
229 | | | 229 | |
230 | #if defined(INTRSTACKSIZE) | | 230 | #if defined(INTRSTACKSIZE) |
231 | static inline bool redzone_const_or_false(bool); | | 231 | static inline bool redzone_const_or_false(bool); |
232 | static inline int redzone_const_or_zero(int); | | 232 | static inline int redzone_const_or_zero(int); |
233 | #endif | | 233 | #endif |
234 | | | 234 | |
235 | static void intr_redistribute_xc_t(void *, void *); | | 235 | static void intr_redistribute_xc_t(void *, void *); |
236 | static void intr_redistribute_xc_s1(void *, void *); | | 236 | static void intr_redistribute_xc_s1(void *, void *); |
237 | static void intr_redistribute_xc_s2(void *, void *); | | 237 | static void intr_redistribute_xc_s2(void *, void *); |
238 | static bool intr_redistribute(struct cpu_info *); | | 238 | static bool intr_redistribute(struct cpu_info *); |
239 | static struct intrsource *intr_get_io_intrsource(const char *); | | 239 | static struct intrsource *intr_get_io_intrsource(const char *); |
240 | static void intr_free_io_intrsource_direct(struct intrsource *); | | 240 | static void intr_free_io_intrsource_direct(struct intrsource *); |
241 | static int intr_num_handlers(struct intrsource *); | | 241 | static int intr_num_handlers(struct intrsource *); |
242 | static int intr_find_unused_slot(struct cpu_info *, int *); | | 242 | static int intr_find_unused_slot(struct cpu_info *, int *); |
243 | static void intr_activate_xcall(void *, void *); | | 243 | static void intr_activate_xcall(void *, void *); |
244 | static void intr_deactivate_xcall(void *, void *); | | 244 | static void intr_deactivate_xcall(void *, void *); |
245 | static void intr_get_affinity(struct intrsource *, kcpuset_t *); | | 245 | static void intr_get_affinity(struct intrsource *, kcpuset_t *); |
246 | static int intr_set_affinity(struct intrsource *, const kcpuset_t *); | | 246 | static int intr_set_affinity(struct intrsource *, const kcpuset_t *); |
247 | | | 247 | |
248 | /* | | 248 | /* |
249 | * Fill in default interrupt table (in case of spurious interrupt | | 249 | * Fill in default interrupt table (in case of spurious interrupt |
250 | * during configuration of kernel), set up the interrupt control unit | | 250 | * during configuration of kernel), set up the interrupt control unit |
251 | */ | | 251 | */ |
252 | void | | 252 | void |
253 | intr_default_setup(void) | | 253 | intr_default_setup(void) |
254 | { | | 254 | { |
255 | int i; | | 255 | int i; |
256 | | | 256 | |
257 | /* icu vectors */ | | 257 | /* icu vectors */ |
258 | for (i = 0; i < NUM_LEGACY_IRQS; i++) { | | 258 | for (i = 0; i < NUM_LEGACY_IRQS; i++) { |
259 | idt_vec_reserve(ICU_OFFSET + i); | | 259 | idt_vec_reserve(ICU_OFFSET + i); |
260 | idt_vec_set(ICU_OFFSET + i, legacy_stubs[i].ist_entry); | | 260 | idt_vec_set(ICU_OFFSET + i, legacy_stubs[i].ist_entry); |
261 | } | | 261 | } |
262 | | | 262 | |
263 | /* | | 263 | /* |
264 | * Eventually might want to check if it's actually there. | | 264 | * Eventually might want to check if it's actually there. |
265 | */ | | 265 | */ |
266 | i8259_default_setup(); | | 266 | i8259_default_setup(); |
267 | | | 267 | |
268 | mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE); | | 268 | mutex_init(&intr_distribute_lock, MUTEX_DEFAULT, IPL_NONE); |
269 | } | | 269 | } |
270 | | | 270 | |
271 | /* | | 271 | /* |
272 | * Handle an NMI, possibly a machine check. | | 272 | * Handle an NMI, possibly a machine check. |
273 | * return true to panic system, false to ignore. | | 273 | * return true to panic system, false to ignore. |
274 | */ | | 274 | */ |
275 | void | | 275 | void |
276 | x86_nmi(void) | | 276 | x86_nmi(void) |
277 | { | | 277 | { |
278 | | | 278 | |
279 | log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); | | 279 | log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); |
280 | } | | 280 | } |
281 | | | 281 | |
282 | /* | | 282 | /* |
283 | * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used | | 283 | * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used |
284 | * by MI code and intrctl(8). | | 284 | * by MI code and intrctl(8). |
285 | */ | | 285 | */ |
286 | const char * | | 286 | const char * |
287 | intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, | | 287 | intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, |
288 | size_t len) | | 288 | size_t len) |
289 | { | | 289 | { |
290 | int ih = 0; | | 290 | int ih = 0; |
291 | | | 291 | |
292 | #if NPCI > 0 | | 292 | #if NPCI > 0 |
293 | #if defined(__HAVE_PCI_MSI_MSIX) | | 293 | #if defined(__HAVE_PCI_MSI_MSIX) |
294 | if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) { | | 294 | if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) { |
295 | uint64_t pih; | | 295 | uint64_t pih; |
296 | int dev, vec; | | 296 | int dev, vec; |
297 | | | 297 | |
298 | dev = msipic_get_devid(pic); | | 298 | dev = msipic_get_devid(pic); |
299 | vec = pin; | | 299 | vec = pin; |
300 | pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) | | 300 | pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) |
301 | | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) | | 301 | | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) |
302 | | APIC_INT_VIA_MSI; | | 302 | | APIC_INT_VIA_MSI; |
303 | if (pic->pic_type == PIC_MSI) | | 303 | if (pic->pic_type == PIC_MSI) |
304 | MSI_INT_MAKE_MSI(pih); | | 304 | MSI_INT_MAKE_MSI(pih); |
305 | else if (pic->pic_type == PIC_MSIX) | | 305 | else if (pic->pic_type == PIC_MSIX) |
306 | MSI_INT_MAKE_MSIX(pih); | | 306 | MSI_INT_MAKE_MSIX(pih); |
307 | | | 307 | |
308 | return x86_pci_msi_string(NULL, pih, buf, len); | | 308 | return x86_pci_msi_string(NULL, pih, buf, len); |
309 | } | | 309 | } |
310 | #endif /* __HAVE_PCI_MSI_MSIX */ | | 310 | #endif /* __HAVE_PCI_MSI_MSIX */ |
311 | #endif | | 311 | #endif |
312 | | | 312 | |
313 | if (pic->pic_type == PIC_XEN) { | | 313 | if (pic->pic_type == PIC_XEN) { |
314 | ih = pin; /* Port == pin */ | | 314 | ih = pin; /* Port == pin */ |
315 | return xen_intr_string(pin, buf, len, pic); | | 315 | return xen_intr_string(pin, buf, len, pic); |
316 | } | | 316 | } |
317 | | | 317 | |
318 | /* | | 318 | /* |
319 | * If the device is PCI, "legacy_irq" is always -1. The least 8 bits of "ih" | | 319 | * If the device is PCI, "legacy_irq" is always -1. The least 8 bits of "ih" |
320 | * are only used in intr_string() to show the irq number. | | 320 | * are only used in intr_string() to show the irq number. |
321 | * If the device is "legacy" (such as a floppy), it should not use | | 321 | * If the device is "legacy" (such as a floppy), it should not use |
322 | * intr_string(). | | 322 | * intr_string(). |
323 | */ | | 323 | */ |
324 | if (pic->pic_type == PIC_I8259) { | | 324 | if (pic->pic_type == PIC_I8259) { |
325 | ih = legacy_irq; | | 325 | ih = legacy_irq; |
326 | return legacy_intr_string(ih, buf, len, pic); | | 326 | return legacy_intr_string(ih, buf, len, pic); |
327 | } | | 327 | } |
328 | | | 328 | |
329 | #if NIOAPIC > 0 || NACPICA > 0 | | 329 | #if NIOAPIC > 0 || NACPICA > 0 |
330 | ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK) | | 330 | ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK) |
331 | | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK); | | 331 | | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK); |
332 | if (pic->pic_type == PIC_IOAPIC) { | | 332 | if (pic->pic_type == PIC_IOAPIC) { |
333 | ih |= APIC_INT_VIA_APIC; | | 333 | ih |= APIC_INT_VIA_APIC; |
334 | } | | 334 | } |
335 | ih |= pin; | | 335 | ih |= pin; |
336 | return intr_string(ih, buf, len); | | 336 | return intr_string(ih, buf, len); |
337 | #endif | | 337 | #endif |
338 | | | 338 | |
339 | return NULL; /* No pic found! */ | | 339 | return NULL; /* No pic found! */ |
340 | } | | 340 | } |
341 | | | 341 | |
342 | /* | | 342 | /* |
343 | * Find intrsource from io_interrupt_sources list. | | 343 | * Find intrsource from io_interrupt_sources list. |
344 | */ | | 344 | */ |
345 | static struct intrsource * | | 345 | static struct intrsource * |
346 | intr_get_io_intrsource(const char *intrid) | | 346 | intr_get_io_intrsource(const char *intrid) |
347 | { | | 347 | { |
348 | struct intrsource *isp; | | 348 | struct intrsource *isp; |
349 | | | 349 | |
350 | KASSERT(mutex_owned(&cpu_lock)); | | 350 | KASSERT(mutex_owned(&cpu_lock)); |
351 | | | 351 | |
352 | SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) { | | 352 | SIMPLEQ_FOREACH(isp, &io_interrupt_sources, is_list) { |
353 | KASSERT(isp->is_intrid != NULL); | | 353 | KASSERT(isp->is_intrid != NULL); |
354 | if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0) | | 354 | if (strncmp(intrid, isp->is_intrid, INTRIDBUF - 1) == 0) |
355 | return isp; | | 355 | return isp; |
356 | } | | 356 | } |
357 | return NULL; | | 357 | return NULL; |
358 | } | | 358 | } |
359 | | | 359 | |
360 | /* | | 360 | /* |
361 | * Allocate intrsource and add to io_interrupt_sources list. | | 361 | * Allocate intrsource and add to io_interrupt_sources list. |
362 | */ | | 362 | */ |
363 | struct intrsource * | | 363 | struct intrsource * |
364 | intr_allocate_io_intrsource(const char *intrid) | | 364 | intr_allocate_io_intrsource(const char *intrid) |
365 | { | | 365 | { |
366 | CPU_INFO_ITERATOR cii; | | 366 | CPU_INFO_ITERATOR cii; |
367 | struct cpu_info *ci; | | 367 | struct cpu_info *ci; |
368 | struct intrsource *isp; | | 368 | struct intrsource *isp; |
369 | struct percpu_evcnt *pep; | | 369 | struct percpu_evcnt *pep; |
370 | | | 370 | |
371 | KASSERT(mutex_owned(&cpu_lock)); | | 371 | KASSERT(mutex_owned(&cpu_lock)); |
372 | | | 372 | |
373 | if (intrid == NULL) | | 373 | if (intrid == NULL) |
374 | return NULL; | | 374 | return NULL; |
375 | | | 375 | |
376 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | | 376 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); |
377 | pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP); | | 377 | pep = kmem_zalloc(sizeof(*pep) * ncpu, KM_SLEEP); |
378 | isp->is_saved_evcnt = pep; | | 378 | isp->is_saved_evcnt = pep; |
379 | for (CPU_INFO_FOREACH(cii, ci)) { | | 379 | for (CPU_INFO_FOREACH(cii, ci)) { |
380 | pep->cpuid = ci->ci_cpuid; | | 380 | pep->cpuid = ci->ci_cpuid; |
381 | pep++; | | 381 | pep++; |
382 | } | | 382 | } |
383 | strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid)); | | 383 | strlcpy(isp->is_intrid, intrid, sizeof(isp->is_intrid)); |
384 | | | 384 | |
385 | SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list); | | 385 | SIMPLEQ_INSERT_TAIL(&io_interrupt_sources, isp, is_list); |
386 | | | 386 | |
387 | return isp; | | 387 | return isp; |
388 | } | | 388 | } |
389 | | | 389 | |
390 | /* | | 390 | /* |
391 | * Remove from io_interrupt_sources list and free by the intrsource pointer. | | 391 | * Remove from io_interrupt_sources list and free by the intrsource pointer. |
392 | */ | | 392 | */ |
393 | static void | | 393 | static void |
394 | intr_free_io_intrsource_direct(struct intrsource *isp) | | 394 | intr_free_io_intrsource_direct(struct intrsource *isp) |
395 | { | | 395 | { |
396 | KASSERT(mutex_owned(&cpu_lock)); | | 396 | KASSERT(mutex_owned(&cpu_lock)); |
397 | | | 397 | |
398 | SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list); | | 398 | SIMPLEQ_REMOVE(&io_interrupt_sources, isp, intrsource, is_list); |
399 | | | 399 | |
400 | /* Is this interrupt established? */ | | 400 | /* Is this interrupt established? */ |
401 | if (isp->is_evname[0] != '\0') { | | 401 | if (isp->is_evname[0] != '\0') { |
402 | evcnt_detach(&isp->is_evcnt); | | 402 | evcnt_detach(&isp->is_evcnt); |
403 | isp->is_evname[0] = '\0'; | | 403 | isp->is_evname[0] = '\0'; |
404 | } | | 404 | } |
405 | | | 405 | |
406 | kmem_free(isp->is_saved_evcnt, | | 406 | kmem_free(isp->is_saved_evcnt, |
407 | sizeof(*(isp->is_saved_evcnt)) * ncpu); | | 407 | sizeof(*(isp->is_saved_evcnt)) * ncpu); |
408 | | | 408 | |
409 | kmem_free(isp, sizeof(*isp)); | | 409 | kmem_free(isp, sizeof(*isp)); |
410 | } | | 410 | } |
411 | | | 411 | |
412 | /* | | 412 | /* |
413 | * Remove from io_interrupt_sources list and free by the interrupt id. | | 413 | * Remove from io_interrupt_sources list and free by the interrupt id. |
414 | * This function can be used by MI code. | | 414 | * This function can be used by MI code. |
415 | */ | | 415 | */ |
416 | void | | 416 | void |
417 | intr_free_io_intrsource(const char *intrid) | | 417 | intr_free_io_intrsource(const char *intrid) |
418 | { | | 418 | { |
419 | struct intrsource *isp; | | 419 | struct intrsource *isp; |
420 | | | 420 | |
421 | KASSERT(mutex_owned(&cpu_lock)); | | 421 | KASSERT(mutex_owned(&cpu_lock)); |
422 | | | 422 | |
423 | if (intrid == NULL) | | 423 | if (intrid == NULL) |
424 | return; | | 424 | return; |
425 | | | 425 | |
426 | if ((isp = intr_get_io_intrsource(intrid)) == NULL) { | | 426 | if ((isp = intr_get_io_intrsource(intrid)) == NULL) { |
427 | return; | | 427 | return; |
428 | } | | 428 | } |
429 | | | 429 | |
430 | /* If the interrupt uses shared IRQ, don't free yet. */ | | 430 | /* If the interrupt uses shared IRQ, don't free yet. */ |
431 | if (isp->is_handlers != NULL) { | | 431 | if (isp->is_handlers != NULL) { |
432 | return; | | 432 | return; |
433 | } | | 433 | } |
434 | | | 434 | |
435 | intr_free_io_intrsource_direct(isp); | | 435 | intr_free_io_intrsource_direct(isp); |
436 | } | | 436 | } |
437 | | | 437 | |
438 | static int | | 438 | static int |
439 | intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin, | | 439 | intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin, |
440 | int *index, struct intrsource *chained) | | 440 | int *index, struct intrsource *chained) |
441 | { | | 441 | { |
442 | int slot, i; | | 442 | int slot, i; |
443 | struct intrsource *isp; | | 443 | struct intrsource *isp; |
444 | | | 444 | |
445 | KASSERT(mutex_owned(&cpu_lock)); | | 445 | KASSERT(mutex_owned(&cpu_lock)); |
446 | | | 446 | |
447 | if (pic == &i8259_pic) { | | 447 | if (pic == &i8259_pic) { |
448 | KASSERT(CPU_IS_PRIMARY(ci)); | | 448 | KASSERT(CPU_IS_PRIMARY(ci)); |
449 | slot = pin; | | 449 | slot = pin; |
450 | } else { | | 450 | } else { |
451 | int start = 0; | | 451 | int start = 0; |
452 | slot = -1; | | 452 | slot = -1; |
453 | | | 453 | |
454 | /* avoid reserved slots for legacy interrupts. */ | | 454 | /* avoid reserved slots for legacy interrupts. */ |
455 | if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic)) | | 455 | if (CPU_IS_PRIMARY(ci) && msipic_is_msi_pic(pic)) |
456 | start = NUM_LEGACY_IRQS; | | 456 | start = NUM_LEGACY_IRQS; |
457 | /* | | 457 | /* |
458 | * intr_allocate_slot has checked for an existing mapping. | | 458 | * intr_allocate_slot has checked for an existing mapping. |
459 | * Now look for a free slot. | | 459 | * Now look for a free slot. |
460 | */ | | 460 | */ |
461 | for (i = start; i < MAX_INTR_SOURCES ; i++) { | | 461 | for (i = start; i < MAX_INTR_SOURCES ; i++) { |
462 | if (ci->ci_isources[i] == NULL) { | | 462 | if (ci->ci_isources[i] == NULL) { |
463 | slot = i; | | 463 | slot = i; |
464 | break; | | 464 | break; |
465 | } | | 465 | } |
466 | } | | 466 | } |
467 | if (slot == -1) { | | 467 | if (slot == -1) { |
468 | return EBUSY; | | 468 | return EBUSY; |
469 | } | | 469 | } |
470 | } | | 470 | } |
471 | | | 471 | |
472 | isp = ci->ci_isources[slot]; | | 472 | isp = ci->ci_isources[slot]; |
473 | if (isp == NULL) { | | 473 | if (isp == NULL) { |
474 | const char *via; | | 474 | const char *via; |
475 | | | 475 | |
476 | isp = chained; | | 476 | isp = chained; |
477 | KASSERT(isp != NULL); | | 477 | KASSERT(isp != NULL); |
478 | if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX) | | 478 | if (pic->pic_type == PIC_MSI || pic->pic_type == PIC_MSIX) |
479 | via = "vec"; | | 479 | via = "vec"; |
480 | else | | 480 | else |
481 | via = "pin"; | | 481 | via = "pin"; |
482 | snprintf(isp->is_evname, sizeof (isp->is_evname), | | 482 | snprintf(isp->is_evname, sizeof (isp->is_evname), |
483 | "%s %d", via, pin); | | 483 | "%s %d", via, pin); |
484 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, | | 484 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, |
485 | pic->pic_name, isp->is_evname); | | 485 | pic->pic_name, isp->is_evname); |
486 | isp->is_active_cpu = ci->ci_cpuid; | | 486 | isp->is_active_cpu = ci->ci_cpuid; |
487 | ci->ci_isources[slot] = isp; | | 487 | ci->ci_isources[slot] = isp; |
488 | } | | 488 | } |
489 | | | 489 | |
490 | *index = slot; | | 490 | *index = slot; |
491 | return 0; | | 491 | return 0; |
492 | } | | 492 | } |
493 | | | 493 | |
494 | /* | | 494 | /* |
495 | * A simple round-robin allocator to assign interrupts to CPUs. | | 495 | * A simple round-robin allocator to assign interrupts to CPUs. |
496 | */ | | 496 | */ |
497 | static int __noinline | | 497 | static int __noinline |
498 | intr_allocate_slot(struct pic *pic, int pin, int level, | | 498 | intr_allocate_slot(struct pic *pic, int pin, int level, |
499 | struct cpu_info **cip, int *index, int *idt_slot, | | 499 | struct cpu_info **cip, int *index, int *idt_slot, |
500 | struct intrsource *chained) | | 500 | struct intrsource *chained) |
501 | { | | 501 | { |
502 | CPU_INFO_ITERATOR cii; | | 502 | CPU_INFO_ITERATOR cii; |
503 | struct cpu_info *ci, *lci; | | 503 | struct cpu_info *ci, *lci; |
504 | struct intrsource *isp; | | 504 | struct intrsource *isp; |
505 | int slot = 0, idtvec, error; | | 505 | int slot = 0, idtvec, error; |
506 | | | 506 | |
507 | KASSERT(mutex_owned(&cpu_lock)); | | 507 | KASSERT(mutex_owned(&cpu_lock)); |
508 | | | 508 | |
509 | /* First check if this pin is already used by an interrupt vector. */ | | 509 | /* First check if this pin is already used by an interrupt vector. */ |
510 | for (CPU_INFO_FOREACH(cii, ci)) { | | 510 | for (CPU_INFO_FOREACH(cii, ci)) { |
511 | for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) { | | 511 | for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) { |
512 | if ((isp = ci->ci_isources[slot]) == NULL) { | | 512 | if ((isp = ci->ci_isources[slot]) == NULL) { |
513 | continue; | | 513 | continue; |
514 | } | | 514 | } |
515 | if (isp->is_pic == pic && | | 515 | if (isp->is_pic == pic && |
516 | pin != -1 && isp->is_pin == pin) { | | 516 | pin != -1 && isp->is_pin == pin) { |
517 | *idt_slot = isp->is_idtvec; | | 517 | *idt_slot = isp->is_idtvec; |
518 | *index = slot; | | 518 | *index = slot; |
519 | *cip = ci; | | 519 | *cip = ci; |
520 | return 0; | | 520 | return 0; |
521 | } | | 521 | } |
522 | } | | 522 | } |
523 | } | | 523 | } |
524 | | | 524 | |
525 | /* | | 525 | /* |
526 | * The pic/pin combination doesn't have an existing mapping. | | 526 | * The pic/pin combination doesn't have an existing mapping. |
527 | * Find a slot for a new interrupt source. For the i8259 case, | | 527 | * Find a slot for a new interrupt source. For the i8259 case, |
528 | * we always use reserved slots of the primary CPU. Otherwise, | | 528 | * we always use reserved slots of the primary CPU. Otherwise, |
529 | * we make an attempt to balance the interrupt load. | | 529 | * we make an attempt to balance the interrupt load. |
530 | * | | 530 | * |
531 | * PIC and APIC usage are essentially exclusive, so the reservation | | 531 | * PIC and APIC usage are essentially exclusive, so the reservation |
532 | * of the ISA slots is ignored when assigning IOAPIC slots. | | 532 | * of the ISA slots is ignored when assigning IOAPIC slots. |
533 | */ | | 533 | */ |
534 | if (pic == &i8259_pic) { | | 534 | if (pic == &i8259_pic) { |
535 | /* | | 535 | /* |
536 | * Must be directed to BP. | | 536 | * Must be directed to BP. |
537 | */ | | 537 | */ |
538 | ci = &cpu_info_primary; | | 538 | ci = &cpu_info_primary; |
539 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | | 539 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); |
540 | } else { | | 540 | } else { |
541 | /* | | 541 | /* |
542 | * Find least loaded AP/BP and try to allocate there. | | 542 | * Find least loaded AP/BP and try to allocate there. |
543 | */ | | 543 | */ |
544 | ci = NULL; | | 544 | ci = NULL; |
545 | for (CPU_INFO_FOREACH(cii, lci)) { | | 545 | for (CPU_INFO_FOREACH(cii, lci)) { |
546 | if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | | 546 | if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { |
547 | continue; | | 547 | continue; |
548 | } | | 548 | } |
549 | #if 0 | | 549 | #if 0 |
550 | if (ci == NULL || | | 550 | if (ci == NULL || |
551 | ci->ci_nintrhand > lci->ci_nintrhand) { | | 551 | ci->ci_nintrhand > lci->ci_nintrhand) { |
552 | ci = lci; | | 552 | ci = lci; |
553 | } | | 553 | } |
554 | #else | | 554 | #else |
555 | ci = &cpu_info_primary; | | 555 | ci = &cpu_info_primary; |
556 | #endif | | 556 | #endif |
557 | } | | 557 | } |
558 | KASSERT(ci != NULL); | | 558 | KASSERT(ci != NULL); |
559 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); | | 559 | error = intr_allocate_slot_cpu(ci, pic, pin, &slot, chained); |
560 | | | 560 | |
561 | /* | | 561 | /* |
562 | * If that did not work, allocate anywhere. | | 562 | * If that did not work, allocate anywhere. |
563 | */ | | 563 | */ |
564 | if (error != 0) { | | 564 | if (error != 0) { |
565 | for (CPU_INFO_FOREACH(cii, ci)) { | | 565 | for (CPU_INFO_FOREACH(cii, ci)) { |
566 | if ((ci->ci_schedstate.spc_flags & | | 566 | if ((ci->ci_schedstate.spc_flags & |
567 | SPCF_NOINTR) != 0) { | | 567 | SPCF_NOINTR) != 0) { |
568 | continue; | | 568 | continue; |
569 | } | | 569 | } |
570 | error = intr_allocate_slot_cpu(ci, pic, | | 570 | error = intr_allocate_slot_cpu(ci, pic, |
571 | pin, &slot, chained); | | 571 | pin, &slot, chained); |
572 | if (error == 0) { | | 572 | if (error == 0) { |
573 | break; | | 573 | break; |
574 | } | | 574 | } |
575 | } | | 575 | } |
576 | } | | 576 | } |
577 | } | | 577 | } |
578 | if (error != 0) { | | 578 | if (error != 0) { |
579 | return error; | | 579 | return error; |
580 | } | | 580 | } |
581 | KASSERT(ci != NULL); | | 581 | KASSERT(ci != NULL); |
582 | | | 582 | |
583 | /* | | 583 | /* |
584 | * Now allocate an IDT vector. | | 584 | * Now allocate an IDT vector. |
585 | * For the 8259 these are reserved up front. | | 585 | * For the 8259 these are reserved up front. |
586 | */ | | 586 | */ |
587 | if (pic == &i8259_pic) { | | 587 | if (pic == &i8259_pic) { |
588 | idtvec = ICU_OFFSET + pin; | | 588 | idtvec = ICU_OFFSET + pin; |
589 | } else { | | 589 | } else { |
590 | /* | | 590 | /* |
591 | * TODO to support MSI (not MSI-X) multiple vectors | | 591 | * TODO to support MSI (not MSI-X) multiple vectors |
592 | * | | 592 | * |
593 | * PCI Local Bus Specification Revision 3.0 says the devices | | 593 | * PCI Local Bus Specification Revision 3.0 says the devices |
594 | * which use MSI multiple vectors increment the low order bits | | 594 | * which use MSI multiple vectors increment the low order bits |
595 | * of MSI message data. | | 595 | * of MSI message data. |
596 | * On the other hand, Intel SDM "10.11.2 Message Data Register | | 596 | * On the other hand, Intel SDM "10.11.2 Message Data Register |
597 | * Format" says the 7:0 bits of MSI message data mean Interrupt | | 597 | * Format" says the 7:0 bits of MSI message data mean Interrupt |
598 | * Descriptor Table (IDT) vector. | | 598 | * Descriptor Table (IDT) vector. |
599 | * As a result of these two documents, the IDT vectors which | | 599 | * As a result of these two documents, the IDT vectors which |
600 | * are used by a device using MSI multiple vectors must be | | 600 | * are used by a device using MSI multiple vectors must be |
601 | * contiguous. | | 601 | * contiguous. |
602 | */ | | 602 | */ |
603 | idtvec = idt_vec_alloc(APIC_LEVEL(level), IDT_INTR_HIGH); | | 603 | idtvec = idt_vec_alloc(APIC_LEVEL(level), IDT_INTR_HIGH); |
604 | } | | 604 | } |
605 | if (idtvec == 0) { | | 605 | if (idtvec == 0) { |
606 | evcnt_detach(&ci->ci_isources[slot]->is_evcnt); | | 606 | evcnt_detach(&ci->ci_isources[slot]->is_evcnt); |
607 | ci->ci_isources[slot]->is_evname[0] = '\0'; | | 607 | ci->ci_isources[slot]->is_evname[0] = '\0'; |
608 | ci->ci_isources[slot] = NULL; | | 608 | ci->ci_isources[slot] = NULL; |
609 | return EBUSY; | | 609 | return EBUSY; |
610 | } | | 610 | } |
611 | ci->ci_isources[slot]->is_idtvec = idtvec; | | 611 | ci->ci_isources[slot]->is_idtvec = idtvec; |
612 | *idt_slot = idtvec; | | 612 | *idt_slot = idtvec; |
613 | *index = slot; | | 613 | *index = slot; |
614 | *cip = ci; | | 614 | *cip = ci; |
615 | return 0; | | 615 | return 0; |
616 | } | | 616 | } |
617 | | | 617 | |
618 | static void | | 618 | static void |
619 | intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec) | | 619 | intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec) |
620 | { | | 620 | { |
621 | struct intrsource *isp; | | 621 | struct intrsource *isp; |
622 | | | 622 | |
623 | isp = ci->ci_isources[slot]; | | 623 | isp = ci->ci_isources[slot]; |
624 | | | 624 | |
625 | if (isp->is_handlers != NULL) | | 625 | if (isp->is_handlers != NULL) |
626 | return; | | 626 | return; |
627 | ci->ci_isources[slot] = NULL; | | 627 | ci->ci_isources[slot] = NULL; |
628 | if (pic != &i8259_pic) | | 628 | if (pic != &i8259_pic) |
629 | idt_vec_free(idtvec); | | 629 | idt_vec_free(idtvec); |
630 | } | | 630 | } |
631 | | | 631 | |
632 | #ifdef MULTIPROCESSOR | | 632 | #ifdef MULTIPROCESSOR |
633 | static int intr_biglock_wrapper(void *); | | 633 | static int intr_biglock_wrapper(void *); |
634 | | | 634 | |
635 | /* | | 635 | /* |
636 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. | | 636 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. |
637 | */ | | 637 | */ |
638 | | | 638 | |
639 | static int | | 639 | static int |
640 | intr_biglock_wrapper(void *vp) | | 640 | intr_biglock_wrapper(void *vp) |
641 | { | | 641 | { |
642 | struct intrhand *ih = vp; | | 642 | struct intrhand *ih = vp; |
643 | int ret; | | 643 | int ret; |
644 | | | 644 | |
645 | KERNEL_LOCK(1, NULL); | | 645 | KERNEL_LOCK(1, NULL); |
646 | | | 646 | |
647 | ret = (*ih->ih_realfun)(ih->ih_realarg); | | 647 | ret = (*ih->ih_realfun)(ih->ih_realarg); |
648 | | | 648 | |
649 | KERNEL_UNLOCK_ONE(NULL); | | 649 | KERNEL_UNLOCK_ONE(NULL); |
650 | | | 650 | |
651 | return ret; | | 651 | return ret; |
652 | } | | 652 | } |
653 | #endif /* MULTIPROCESSOR */ | | 653 | #endif /* MULTIPROCESSOR */ |
654 | | | 654 | |
655 | /* | | 655 | /* |
656 | * Append device name to intrsource. If device A and device B share an IRQ number, | | 656 | * Append device name to intrsource. If device A and device B share an IRQ number, |
657 | * the device name of the interrupt id is "device A, device B". | | 657 | * the device name of the interrupt id is "device A, device B". |
658 | */ | | 658 | */ |
659 | static void | | 659 | static void |
660 | intr_append_intrsource_xname(struct intrsource *isp, const char *xname) | | 660 | intr_append_intrsource_xname(struct intrsource *isp, const char *xname) |
661 | { | | 661 | { |
662 | | | 662 | |
663 | if (isp->is_xname[0] != '\0') | | 663 | if (isp->is_xname[0] != '\0') |
664 | strlcat(isp->is_xname, ", ", sizeof(isp->is_xname)); | | 664 | strlcat(isp->is_xname, ", ", sizeof(isp->is_xname)); |
665 | strlcat(isp->is_xname, xname, sizeof(isp->is_xname)); | | 665 | strlcat(isp->is_xname, xname, sizeof(isp->is_xname)); |
666 | } | | 666 | } |
667 | | | 667 | |
668 | /* | | 668 | /* |
669 | * Called on bound CPU to handle calling pic_hwunmask from contexts | | 669 | * Called on bound CPU to handle calling pic_hwunmask from contexts |
670 | * that are not already running on the bound CPU. | | 670 | * that are not already running on the bound CPU. |
671 | * | | 671 | * |
672 | * => caller (on initiating CPU) holds cpu_lock on our behalf | | 672 | * => caller (on initiating CPU) holds cpu_lock on our behalf |
673 | * => arg1: struct intrhand *ih | | 673 | * => arg1: struct intrhand *ih |
674 | */ | | 674 | */ |
675 | static void | | 675 | static void |
676 | intr_hwunmask_xcall(void *arg1, void *arg2) | | 676 | intr_hwunmask_xcall(void *arg1, void *arg2) |
677 | { | | 677 | { |
678 | struct intrhand * const ih = arg1; | | 678 | struct intrhand * const ih = arg1; |
679 | struct cpu_info * const ci = ih->ih_cpu; | | 679 | struct cpu_info * const ci = ih->ih_cpu; |
680 | | | 680 | |
681 | KASSERT(ci == curcpu() || !mp_online); | | 681 | KASSERT(ci == curcpu() || !mp_online); |
682 | | | 682 | |
683 | const u_long psl = x86_read_psl(); | | 683 | const u_long psl = x86_read_psl(); |
684 | x86_disable_intr(); | | 684 | x86_disable_intr(); |
685 | | | 685 | |
686 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | | 686 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; |
687 | struct pic * const pic = source->is_pic; | | 687 | struct pic * const pic = source->is_pic; |
688 | | | 688 | |
689 | if (source->is_mask_count == 0) { | | 689 | if (source->is_mask_count == 0) { |
690 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | | 690 | (*pic->pic_hwunmask)(pic, ih->ih_pin); |
691 | } | | 691 | } |
692 | | | 692 | |
693 | x86_write_psl(psl); | | 693 | x86_write_psl(psl); |
694 | } | | 694 | } |
695 | | | 695 | |
696 | /* | | 696 | /* |
697 | * Handle per-CPU component of interrupt establish. | | 697 | * Handle per-CPU component of interrupt establish. |
698 | * | | 698 | * |
699 | * => caller (on initiating CPU) holds cpu_lock on our behalf | | 699 | * => caller (on initiating CPU) holds cpu_lock on our behalf |
700 | * => arg1: struct intrhand *ih | | 700 | * => arg1: struct intrhand *ih |
701 | * => arg2: int idt_vec | | 701 | * => arg2: int idt_vec |
702 | */ | | 702 | */ |
703 | static void | | 703 | static void |
704 | intr_establish_xcall(void *arg1, void *arg2) | | 704 | intr_establish_xcall(void *arg1, void *arg2) |
705 | { | | 705 | { |
706 | struct intrsource *source; | | 706 | struct intrsource *source; |
707 | struct intrstub *stubp; | | 707 | struct intrstub *stubp; |
708 | struct intrhand *ih; | | 708 | struct intrhand *ih; |
709 | struct cpu_info *ci; | | 709 | struct cpu_info *ci; |
710 | int idt_vec; | | 710 | int idt_vec; |
711 | u_long psl; | | 711 | u_long psl; |
712 | | | 712 | |
713 | ih = arg1; | | 713 | ih = arg1; |
714 | | | 714 | |
715 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | | 715 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); |
716 | | | 716 | |
717 | ci = ih->ih_cpu; | | 717 | ci = ih->ih_cpu; |
718 | source = ci->ci_isources[ih->ih_slot]; | | 718 | source = ci->ci_isources[ih->ih_slot]; |
719 | idt_vec = (int)(intptr_t)arg2; | | 719 | idt_vec = (int)(intptr_t)arg2; |
720 | | | 720 | |
721 | /* Disable interrupts locally. */ | | 721 | /* Disable interrupts locally. */ |
722 | psl = x86_read_psl(); | | 722 | psl = x86_read_psl(); |
723 | x86_disable_intr(); | | 723 | x86_disable_intr(); |
724 | | | 724 | |
725 | /* Link in the handler and re-calculate masks. */ | | 725 | /* Link in the handler and re-calculate masks. */ |
726 | *(ih->ih_prevp) = ih; | | 726 | *(ih->ih_prevp) = ih; |
727 | x86_intr_calculatemasks(ci); | | 727 | x86_intr_calculatemasks(ci); |
728 | | | 728 | |
729 | /* Hook in new IDT vector and SPL state. */ | | 729 | /* Hook in new IDT vector and SPL state. */ |
730 | if (source->is_resume == NULL || source->is_idtvec != idt_vec) { | | 730 | if (source->is_resume == NULL || source->is_idtvec != idt_vec) { |
731 | if (source->is_idtvec != 0 && source->is_idtvec != idt_vec) | | 731 | if (source->is_idtvec != 0 && source->is_idtvec != idt_vec) |
732 | idt_vec_free(source->is_idtvec); | | 732 | idt_vec_free(source->is_idtvec); |
733 | source->is_idtvec = idt_vec; | | 733 | source->is_idtvec = idt_vec; |
734 | if (source->is_type == IST_LEVEL) { | | 734 | if (source->is_type == IST_LEVEL) { |
735 | stubp = &source->is_pic->pic_level_stubs[ih->ih_slot]; | | 735 | stubp = &source->is_pic->pic_level_stubs[ih->ih_slot]; |
736 | } else { | | 736 | } else { |
737 | stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot]; | | 737 | stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot]; |
738 | } | | 738 | } |
739 | source->is_resume = stubp->ist_resume; | | 739 | source->is_resume = stubp->ist_resume; |
740 | source->is_recurse = stubp->ist_recurse; | | 740 | source->is_recurse = stubp->ist_recurse; |
741 | idt_vec_set(idt_vec, stubp->ist_entry); | | 741 | idt_vec_set(idt_vec, stubp->ist_entry); |
742 | } | | 742 | } |
743 | | | 743 | |
744 | /* Re-enable interrupts locally. */ | | 744 | /* Re-enable interrupts locally. */ |
745 | x86_write_psl(psl); | | 745 | x86_write_psl(psl); |
746 | } | | 746 | } |
747 | | | 747 | |
748 | void * | | 748 | void * |
749 | intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type, | | 749 | intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type, |
750 | int level, int (*handler)(void *), void *arg, | | 750 | int level, int (*handler)(void *), void *arg, |
751 | bool known_mpsafe, const char *xname) | | 751 | bool known_mpsafe, const char *xname) |
752 | { | | 752 | { |
753 | struct intrhand **p, *q, *ih; | | 753 | struct intrhand **p, *q, *ih; |
754 | struct cpu_info *ci; | | 754 | struct cpu_info *ci; |
755 | int slot, error, idt_vec; | | 755 | int slot, error, idt_vec; |
756 | struct intrsource *chained, *source; | | 756 | struct intrsource *chained, *source; |
757 | #ifdef MULTIPROCESSOR | | 757 | #ifdef MULTIPROCESSOR |
758 | bool mpsafe = (known_mpsafe || level != IPL_VM); | | 758 | bool mpsafe = (known_mpsafe || level != IPL_VM); |
759 | #endif /* MULTIPROCESSOR */ | | 759 | #endif /* MULTIPROCESSOR */ |
760 | uint64_t where; | | 760 | uint64_t where; |
761 | const char *intrstr; | | 761 | const char *intrstr; |
762 | char intrstr_buf[INTRIDBUF]; | | 762 | char intrstr_buf[INTRIDBUF]; |
763 | | | 763 | |
764 | KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)), | | 764 | KASSERTMSG((legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < 16)), |
765 | "bad legacy IRQ value: %d", legacy_irq); | | 765 | "bad legacy IRQ value: %d", legacy_irq); |
766 | KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic), | | 766 | KASSERTMSG((legacy_irq != -1 || pic != &i8259_pic), |
767 | "non-legacy IRQ on i8259"); | | 767 | "non-legacy IRQ on i8259"); |
768 | | | 768 | |
769 | ih = kmem_alloc(sizeof(*ih), KM_SLEEP); | | 769 | ih = kmem_alloc(sizeof(*ih), KM_SLEEP); |
770 | intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf, | | 770 | intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf, |
771 | sizeof(intrstr_buf)); | | 771 | sizeof(intrstr_buf)); |
772 | KASSERT(intrstr != NULL); | | 772 | KASSERT(intrstr != NULL); |
773 | | | 773 | |
774 | mutex_enter(&cpu_lock); | | 774 | mutex_enter(&cpu_lock); |
775 | | | 775 | |
776 | /* Allocate an intrsource if not allocated yet. */ | | 776 | /* Allocate an intrsource if not allocated yet. */ |
777 | chained = intr_get_io_intrsource(intrstr); | | 777 | chained = intr_get_io_intrsource(intrstr); |
778 | if (chained == NULL) { | | 778 | if (chained == NULL) { |
779 | if (msipic_is_msi_pic(pic)) { | | 779 | if (msipic_is_msi_pic(pic)) { |
780 | mutex_exit(&cpu_lock); | | 780 | mutex_exit(&cpu_lock); |
781 | kmem_free(ih, sizeof(*ih)); | | 781 | kmem_free(ih, sizeof(*ih)); |
782 | printf("%s: %s has no intrsource\n", __func__, intrstr); | | 782 | printf("%s: %s has no intrsource\n", __func__, intrstr); |
783 | return NULL; | | 783 | return NULL; |
784 | } | | 784 | } |
785 | chained = intr_allocate_io_intrsource(intrstr); | | 785 | chained = intr_allocate_io_intrsource(intrstr); |
786 | if (chained == NULL) { | | 786 | if (chained == NULL) { |
787 | mutex_exit(&cpu_lock); | | 787 | mutex_exit(&cpu_lock); |
788 | kmem_free(ih, sizeof(*ih)); | | 788 | kmem_free(ih, sizeof(*ih)); |
789 | printf("%s: can't allocate io_intrsource\n", __func__); | | 789 | printf("%s: can't allocate io_intrsource\n", __func__); |
790 | return NULL; | | 790 | return NULL; |
791 | } | | 791 | } |
792 | } | | 792 | } |
793 | | | 793 | |
794 | error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec, | | 794 | error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec, |
795 | chained); | | 795 | chained); |
796 | if (error != 0) { | | 796 | if (error != 0) { |
797 | intr_free_io_intrsource_direct(chained); | | 797 | intr_free_io_intrsource_direct(chained); |
798 | mutex_exit(&cpu_lock); | | 798 | mutex_exit(&cpu_lock); |
799 | kmem_free(ih, sizeof(*ih)); | | 799 | kmem_free(ih, sizeof(*ih)); |
800 | printf("failed to allocate interrupt slot for PIC %s pin %d\n", | | 800 | printf("failed to allocate interrupt slot for PIC %s pin %d\n", |
801 | pic->pic_name, pin); | | 801 | pic->pic_name, pin); |
802 | return NULL; | | 802 | return NULL; |
803 | } | | 803 | } |
804 | | | 804 | |
805 | source = ci->ci_isources[slot]; | | 805 | source = ci->ci_isources[slot]; |
806 | | | 806 | |
807 | if (source->is_handlers != NULL && | | 807 | if (source->is_handlers != NULL && |
808 | source->is_pic->pic_type != pic->pic_type) { | | 808 | source->is_pic->pic_type != pic->pic_type) { |
809 | intr_free_io_intrsource_direct(chained); | | 809 | intr_free_io_intrsource_direct(chained); |
810 | mutex_exit(&cpu_lock); | | 810 | mutex_exit(&cpu_lock); |
811 | kmem_free(ih, sizeof(*ih)); | | 811 | kmem_free(ih, sizeof(*ih)); |
812 | printf("%s: can't share intr source between " | | 812 | printf("%s: can't share intr source between " |
813 | "different PIC types (legacy_irq %d pin %d slot %d)\n", | | 813 | "different PIC types (legacy_irq %d pin %d slot %d)\n", |
814 | __func__, legacy_irq, pin, slot); | | 814 | __func__, legacy_irq, pin, slot); |
815 | return NULL; | | 815 | return NULL; |
816 | } | | 816 | } |
817 | | | 817 | |
818 | source->is_pin = pin; | | 818 | source->is_pin = pin; |
819 | source->is_pic = pic; | | 819 | source->is_pic = pic; |
820 | intr_append_intrsource_xname(source, xname); | | 820 | intr_append_intrsource_xname(source, xname); |
821 | switch (source->is_type) { | | 821 | switch (source->is_type) { |
822 | case IST_NONE: | | 822 | case IST_NONE: |
823 | source->is_type = type; | | 823 | source->is_type = type; |
824 | break; | | 824 | break; |
825 | case IST_EDGE: | | 825 | case IST_EDGE: |
826 | case IST_LEVEL: | | 826 | case IST_LEVEL: |
827 | if (source->is_type == type) | | 827 | if (source->is_type == type) |
828 | break; | | 828 | break; |
829 | /* FALLTHROUGH */ | | 829 | /* FALLTHROUGH */ |
830 | case IST_PULSE: | | 830 | case IST_PULSE: |
831 | if (type != IST_NONE) { | | 831 | if (type != IST_NONE) { |
832 | intr_source_free(ci, slot, pic, idt_vec); | | 832 | intr_source_free(ci, slot, pic, idt_vec); |
833 | intr_free_io_intrsource_direct(chained); | | 833 | intr_free_io_intrsource_direct(chained); |
834 | mutex_exit(&cpu_lock); | | 834 | mutex_exit(&cpu_lock); |
835 | kmem_free(ih, sizeof(*ih)); | | 835 | kmem_free(ih, sizeof(*ih)); |
836 | printf("%s: pic %s pin %d: can't share " | | 836 | printf("%s: pic %s pin %d: can't share " |
837 | "type %d with %d\n", | | 837 | "type %d with %d\n", |
838 | __func__, pic->pic_name, pin, | | 838 | __func__, pic->pic_name, pin, |
839 | source->is_type, type); | | 839 | source->is_type, type); |
840 | return NULL; | | 840 | return NULL; |
841 | } | | 841 | } |
842 | break; | | 842 | break; |
843 | default: | | 843 | default: |
844 | panic("%s: bad intr type %d for pic %s pin %d\n", | | 844 | panic("%s: bad intr type %d for pic %s pin %d\n", |
845 | __func__, source->is_type, pic->pic_name, pin); | | 845 | __func__, source->is_type, pic->pic_name, pin); |
846 | /* NOTREACHED */ | | 846 | /* NOTREACHED */ |
847 | } | | 847 | } |
848 | | | 848 | |
849 | /* | | 849 | /* |
850 | * If the interrupt being established shares an IRQ, it uses the | | 850 | * If the interrupt being established shares an IRQ, it uses the |
851 | * existing "ci->ci_isources[slot]" rather than an intrsource allocated | | 851 | * existing "ci->ci_isources[slot]" rather than an intrsource allocated |
852 | * by the establishing device's pci_intr_alloc() or by this function. | | 852 | * by the establishing device's pci_intr_alloc() or by this function. |
853 | */ | | 853 | */ |
854 | if (source->is_handlers != NULL) { | | 854 | if (source->is_handlers != NULL) { |
855 | struct intrsource *isp, *nisp; | | 855 | struct intrsource *isp, *nisp; |
856 | | | 856 | |
857 | SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources, | | 857 | SIMPLEQ_FOREACH_SAFE(isp, &io_interrupt_sources, |
858 | is_list, nisp) { | | 858 | is_list, nisp) { |
859 | if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0 | | 859 | if (strncmp(intrstr, isp->is_intrid, INTRIDBUF - 1) == 0 |
860 | && isp->is_handlers == NULL) | | 860 | && isp->is_handlers == NULL) |
861 | intr_free_io_intrsource_direct(isp); | | 861 | intr_free_io_intrsource_direct(isp); |
862 | } | | 862 | } |
863 | } | | 863 | } |
864 | | | 864 | |
865 | /* | | 865 | /* |
866 | * We're now committed. Mask the interrupt in hardware and | | 866 | * We're now committed. Mask the interrupt in hardware and |
867 | * count it for load distribution. | | 867 | * count it for load distribution. |
868 | */ | | 868 | */ |
869 | (*pic->pic_hwmask)(pic, pin); | | 869 | (*pic->pic_hwmask)(pic, pin); |
870 | (ci->ci_nintrhand)++; | | 870 | (ci->ci_nintrhand)++; |
871 | | | 871 | |
872 | /* | | 872 | /* |
873 | * Figure out where to put the handler. | | 873 | * Figure out where to put the handler. |
874 | * This is O(N^2), but we want to preserve the order, and N is | | 874 | * This is O(N^2), but we want to preserve the order, and N is |
875 | * generally small. | | 875 | * generally small. |
876 | */ | | 876 | */ |
877 | for (p = &ci->ci_isources[slot]->is_handlers; | | 877 | for (p = &ci->ci_isources[slot]->is_handlers; |
878 | (q = *p) != NULL && q->ih_level > level; | | 878 | (q = *p) != NULL && q->ih_level > level; |
879 | p = &q->ih_next) { | | 879 | p = &q->ih_next) { |
880 | /* nothing */; | | 880 | /* nothing */; |
881 | } | | 881 | } |
882 | | | 882 | |
883 | ih->ih_pic = pic; | | 883 | ih->ih_pic = pic; |
884 | ih->ih_fun = ih->ih_realfun = handler; | | 884 | ih->ih_fun = ih->ih_realfun = handler; |
885 | ih->ih_arg = ih->ih_realarg = arg; | | 885 | ih->ih_arg = ih->ih_realarg = arg; |
886 | ih->ih_prevp = p; | | 886 | ih->ih_prevp = p; |
887 | ih->ih_next = *p; | | 887 | ih->ih_next = *p; |
888 | ih->ih_level = level; | | 888 | ih->ih_level = level; |
889 | ih->ih_pin = pin; | | 889 | ih->ih_pin = pin; |
890 | ih->ih_cpu = ci; | | 890 | ih->ih_cpu = ci; |
891 | ih->ih_slot = slot; | | 891 | ih->ih_slot = slot; |
892 | #ifdef MULTIPROCESSOR | | 892 | #ifdef MULTIPROCESSOR |
893 | if (!mpsafe) { | | 893 | if (!mpsafe) { |
894 | ih->ih_fun = intr_biglock_wrapper; | | 894 | ih->ih_fun = intr_biglock_wrapper; |
895 | ih->ih_arg = ih; | | 895 | ih->ih_arg = ih; |
896 | } | | 896 | } |
897 | #endif /* MULTIPROCESSOR */ | | 897 | #endif /* MULTIPROCESSOR */ |
898 | | | 898 | |
899 | /* | | 899 | /* |
900 | * Call out to the remote CPU to update its interrupt state. | | 900 | * Call out to the remote CPU to update its interrupt state. |
901 | * Only make RPCs if the APs are up and running. | | 901 | * Only make RPCs if the APs are up and running. |
902 | */ | | 902 | */ |
903 | if (ci == curcpu() || !mp_online) { | | 903 | if (ci == curcpu() || !mp_online) { |
904 | intr_establish_xcall(ih, (void *)(intptr_t)idt_vec); | | 904 | intr_establish_xcall(ih, (void *)(intptr_t)idt_vec); |
905 | } else { | | 905 | } else { |
906 | where = xc_unicast(0, intr_establish_xcall, ih, | | 906 | where = xc_unicast(0, intr_establish_xcall, ih, |
907 | (void *)(intptr_t)idt_vec, ci); | | 907 | (void *)(intptr_t)idt_vec, ci); |
908 | xc_wait(where); | | 908 | xc_wait(where); |
909 | } | | 909 | } |
910 | | | 910 | |
911 | /* All set up, so add a route for the interrupt and unmask it. */ | | 911 | /* All set up, so add a route for the interrupt and unmask it. */ |
912 | (*pic->pic_addroute)(pic, ci, pin, idt_vec, type); | | 912 | (*pic->pic_addroute)(pic, ci, pin, idt_vec, type); |
913 | if (ci == curcpu() || !mp_online) { | | 913 | if (ci == curcpu() || !mp_online) { |
914 | intr_hwunmask_xcall(ih, NULL); | | 914 | intr_hwunmask_xcall(ih, NULL); |
915 | } else { | | 915 | } else { |
916 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci); | | 916 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci); |
917 | xc_wait(where); | | 917 | xc_wait(where); |
918 | } | | 918 | } |
919 | mutex_exit(&cpu_lock); | | 919 | mutex_exit(&cpu_lock); |
920 | | | 920 | |
921 | if (bootverbose || cpu_index(ci) != 0) | | 921 | if (bootverbose || cpu_index(ci) != 0) |
922 | aprint_verbose("allocated pic %s type %s pin %d level %d to " | | 922 | aprint_verbose("allocated pic %s type %s pin %d level %d to " |
923 | "%s slot %d idt entry %d\n", | | 923 | "%s slot %d idt entry %d\n", |
924 | pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, | | 924 | pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, |
925 | level, device_xname(ci->ci_dev), slot, idt_vec); | | 925 | level, device_xname(ci->ci_dev), slot, idt_vec); |
926 | | | 926 | |
927 | return (ih); | | 927 | return (ih); |
928 | } | | 928 | } |
929 | | | 929 | |
930 | void * | | 930 | void * |
931 | intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level, | | 931 | intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level, |
932 | int (*handler)(void *), void *arg, bool known_mpsafe) | | 932 | int (*handler)(void *), void *arg, bool known_mpsafe) |
933 | { | | 933 | { |
934 | | | 934 | |
935 | return intr_establish_xname(legacy_irq, pic, pin, type, | | 935 | return intr_establish_xname(legacy_irq, pic, pin, type, |
936 | level, handler, arg, known_mpsafe, "unknown"); | | 936 | level, handler, arg, known_mpsafe, "unknown"); |
937 | } | | 937 | } |
938 | | | 938 | |
939 | /* | | 939 | /* |
940 | * Called on bound CPU to handle intr_mask() / intr_unmask(). | | 940 | * Called on bound CPU to handle intr_mask() / intr_unmask(). |
941 | * | | 941 | * |
942 | * => caller (on initiating CPU) holds cpu_lock on our behalf | | 942 | * => caller (on initiating CPU) holds cpu_lock on our behalf |
943 | * => arg1: struct intrhand *ih | | 943 | * => arg1: struct intrhand *ih |
944 | * => arg2: true -> mask, false -> unmask. | | 944 | * => arg2: true -> mask, false -> unmask. |
945 | */ | | 945 | */ |
946 | static void | | 946 | static void |
947 | intr_mask_xcall(void *arg1, void *arg2) | | 947 | intr_mask_xcall(void *arg1, void *arg2) |
948 | { | | 948 | { |
949 | struct intrhand * const ih = arg1; | | 949 | struct intrhand * const ih = arg1; |
950 | const uintptr_t mask = (uintptr_t)arg2; | | 950 | const uintptr_t mask = (uintptr_t)arg2; |
951 | struct cpu_info * const ci = ih->ih_cpu; | | 951 | struct cpu_info * const ci = ih->ih_cpu; |
952 | bool force_pending = false; | | 952 | bool force_pending = false; |
953 | | | 953 | |
954 | KASSERT(ci == curcpu() || !mp_online); | | 954 | KASSERT(ci == curcpu() || !mp_online); |
955 | | | 955 | |
956 | /* | | 956 | /* |
957 | * We need to disable interrupts to hold off the interrupt | | 957 | * We need to disable interrupts to hold off the interrupt |
958 | * vectors. | | 958 | * vectors. |
959 | */ | | 959 | */ |
960 | const u_long psl = x86_read_psl(); | | 960 | const u_long psl = x86_read_psl(); |
961 | x86_disable_intr(); | | 961 | x86_disable_intr(); |
962 | | | 962 | |
963 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; | | 963 | struct intrsource * const source = ci->ci_isources[ih->ih_slot]; |
964 | struct pic * const pic = source->is_pic; | | 964 | struct pic * const pic = source->is_pic; |
965 | | | 965 | |
966 | if (mask) { | | 966 | if (mask) { |
967 | source->is_mask_count++; | | 967 | source->is_mask_count++; |
968 | KASSERT(source->is_mask_count != 0); | | 968 | KASSERT(source->is_mask_count != 0); |
969 | if (source->is_mask_count == 1) { | | 969 | if (source->is_mask_count == 1) { |
970 | (*pic->pic_hwmask)(pic, ih->ih_pin); | | 970 | (*pic->pic_hwmask)(pic, ih->ih_pin); |
971 | } | | 971 | } |
972 | } else { | | 972 | } else { |
973 | KASSERT(source->is_mask_count != 0); | | 973 | KASSERT(source->is_mask_count != 0); |
974 | if (--source->is_mask_count == 0) { | | 974 | if (--source->is_mask_count == 0) { |
975 | /* | | 975 | /* |
976 | * If this interrupt source is being moved, don't | | 976 | * If this interrupt source is being moved, don't |
977 | * unmask it at the hw. | | 977 | * unmask it at the hw. |
978 | */ | | 978 | */ |
979 | if (! source->is_distribute_pending) { | | 979 | if (! source->is_distribute_pending) { |
980 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | | 980 | (*pic->pic_hwunmask)(pic, ih->ih_pin); |
981 | } | | 981 | } |
982 | | | 982 | |
983 | /* | | 983 | /* |
984 | * For level-sensitive interrupts, the hardware | | 984 | * For level-sensitive interrupts, the hardware |
985 | * will let us know. For everything else, we | | 985 | * will let us know. For everything else, we |
986 | * need to explicitly handle interrupts that | | 986 | * need to explicitly handle interrupts that |
987 | * happened while the source was masked. | | 987 | * happened while the source was masked. |
988 | */ | | 988 | */ |
989 | const uint32_t bit = (1U << ih->ih_slot); | | 989 | const uint32_t bit = (1U << ih->ih_slot); |
990 | if (ci->ci_imasked & bit) { | | 990 | if (ci->ci_imasked & bit) { |
991 | ci->ci_imasked &= ~bit; | | 991 | ci->ci_imasked &= ~bit; |
992 | if (source->is_type != IST_LEVEL) { | | 992 | if (source->is_type != IST_LEVEL) { |
993 | ci->ci_ipending |= bit; | | 993 | ci->ci_ipending |= bit; |
994 | force_pending = true; | | 994 | force_pending = true; |
995 | } | | 995 | } |
996 | } | | 996 | } |
997 | } | | 997 | } |
998 | } | | 998 | } |
999 | | | 999 | |
1000 | /* Re-enable interrupts. */ | | 1000 | /* Re-enable interrupts. */ |
1001 | x86_write_psl(psl); | | 1001 | x86_write_psl(psl); |
1002 | | | 1002 | |
1003 | if (force_pending) { | | 1003 | if (force_pending) { |
1004 | /* Force processing of any pending interrupts. */ | | 1004 | /* Force processing of any pending interrupts. */ |
1005 | splx(splhigh()); | | 1005 | splx(splhigh()); |
1006 | } | | 1006 | } |
1007 | } | | 1007 | } |
1008 | | | 1008 | |
1009 | static void | | 1009 | static void |
1010 | intr_mask_internal(struct intrhand * const ih, const bool mask) | | 1010 | intr_mask_internal(struct intrhand * const ih, const bool mask) |
1011 | { | | 1011 | { |
1012 | | | 1012 | |
1013 | /* | | 1013 | /* |
1014 | * Call out to the remote CPU to update its interrupt state. | | 1014 | * Call out to the remote CPU to update its interrupt state. |
1015 | * Only make RPCs if the APs are up and running. | | 1015 | * Only make RPCs if the APs are up and running. |
1016 | */ | | 1016 | */ |
1017 | mutex_enter(&cpu_lock); | | 1017 | mutex_enter(&cpu_lock); |
1018 | struct cpu_info * const ci = ih->ih_cpu; | | 1018 | struct cpu_info * const ci = ih->ih_cpu; |
1019 | void * const mask_arg = (void *)(uintptr_t)mask; | | 1019 | void * const mask_arg = (void *)(uintptr_t)mask; |
1020 | if (ci == curcpu() || !mp_online) { | | 1020 | if (ci == curcpu() || !mp_online) { |
1021 | intr_mask_xcall(ih, mask_arg); | | 1021 | intr_mask_xcall(ih, mask_arg); |
1022 | } else { | | 1022 | } else { |
1023 | const uint64_t where = | | 1023 | const uint64_t where = |
1024 | xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci); | | 1024 | xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci); |
1025 | xc_wait(where); | | 1025 | xc_wait(where); |
1026 | } | | 1026 | } |
1027 | mutex_exit(&cpu_lock); | | 1027 | mutex_exit(&cpu_lock); |
1028 | } | | 1028 | } |
1029 | | | 1029 | |
1030 | void | | 1030 | void |
1031 | intr_mask(struct intrhand *ih) | | 1031 | intr_mask(struct intrhand *ih) |
1032 | { | | 1032 | { |
1033 | | | 1033 | |
1034 | if (cpu_intr_p()) { | | 1034 | if (cpu_intr_p()) { |
1035 | /* | | 1035 | /* |
1036 | * Special case of calling intr_mask() from an interrupt | | 1036 | * Special case of calling intr_mask() from an interrupt |
1037 | * handler: we MUST be called from the bound CPU for this | | 1037 | * handler: we MUST be called from the bound CPU for this |
1038 | * interrupt (presumably from a handler we're about to | | 1038 | * interrupt (presumably from a handler we're about to |
1039 | * mask). | | 1039 | * mask). |
1040 | * | | 1040 | * |
1041 | * We can't take the cpu_lock in this case, and we must | | 1041 | * We can't take the cpu_lock in this case, and we must |
1042 | * therefore be extra careful. | | 1042 | * therefore be extra careful. |
1043 | */ | | 1043 | */ |
1044 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | | 1044 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); |
1045 | intr_mask_xcall(ih, (void *)(uintptr_t)true); | | 1045 | intr_mask_xcall(ih, (void *)(uintptr_t)true); |
1046 | return; | | 1046 | return; |
1047 | } | | 1047 | } |
1048 | | | 1048 | |
1049 | intr_mask_internal(ih, true); | | 1049 | intr_mask_internal(ih, true); |
1050 | } | | 1050 | } |
1051 | | | 1051 | |
1052 | void | | 1052 | void |
1053 | intr_unmask(struct intrhand *ih) | | 1053 | intr_unmask(struct intrhand *ih) |
1054 | { | | 1054 | { |
1055 | | | 1055 | |
1056 | /* | | 1056 | /* |
1057 | * This is not safe to call from an interrupt context because | | 1057 | * This is not safe to call from an interrupt context because |
1058 | * we don't want to accidentally unmask an interrupt source | | 1058 | * we don't want to accidentally unmask an interrupt source |
1059 | * that's masked because it's being serviced. | | 1059 | * that's masked because it's being serviced. |
1060 | */ | | 1060 | */ |
1061 | KASSERT(!cpu_intr_p()); | | 1061 | KASSERT(!cpu_intr_p()); |
1062 | intr_mask_internal(ih, false); | | 1062 | intr_mask_internal(ih, false); |
1063 | } | | 1063 | } |
1064 | | | 1064 | |
1065 | /* | | 1065 | /* |
1066 | * Called on bound CPU to handle intr_disestablish(). | | 1066 | * Called on bound CPU to handle intr_disestablish(). |
1067 | * | | 1067 | * |
1068 | * => caller (on initiating CPU) holds cpu_lock on our behalf | | 1068 | * => caller (on initiating CPU) holds cpu_lock on our behalf |
1069 | * => arg1: struct intrhand *ih | | 1069 | * => arg1: struct intrhand *ih |
1070 | * => arg2: unused | | 1070 | * => arg2: unused |
1071 | */ | | 1071 | */ |
1072 | static void | | 1072 | static void |
1073 | intr_disestablish_xcall(void *arg1, void *arg2) | | 1073 | intr_disestablish_xcall(void *arg1, void *arg2) |
1074 | { | | 1074 | { |
1075 | struct intrhand **p, *q; | | 1075 | struct intrhand **p, *q; |
1076 | struct cpu_info *ci; | | 1076 | struct cpu_info *ci; |
1077 | struct pic *pic; | | 1077 | struct pic *pic; |
1078 | struct intrsource *source; | | 1078 | struct intrsource *source; |
1079 | struct intrhand *ih; | | 1079 | struct intrhand *ih; |
1080 | u_long psl; | | 1080 | u_long psl; |
1081 | int idtvec; | | 1081 | int idtvec; |
1082 | | | 1082 | |
1083 | ih = arg1; | | 1083 | ih = arg1; |
1084 | ci = ih->ih_cpu; | | 1084 | ci = ih->ih_cpu; |
1085 | | | 1085 | |
1086 | KASSERT(ci == curcpu() || !mp_online); | | 1086 | KASSERT(ci == curcpu() || !mp_online); |
1087 | | | 1087 | |
1088 | /* Disable interrupts locally. */ | | 1088 | /* Disable interrupts locally. */ |
1089 | psl = x86_read_psl(); | | 1089 | psl = x86_read_psl(); |
1090 | x86_disable_intr(); | | 1090 | x86_disable_intr(); |
1091 | | | 1091 | |
1092 | pic = ci->ci_isources[ih->ih_slot]->is_pic; | | 1092 | pic = ci->ci_isources[ih->ih_slot]->is_pic; |
1093 | source = ci->ci_isources[ih->ih_slot]; | | 1093 | source = ci->ci_isources[ih->ih_slot]; |
1094 | idtvec = source->is_idtvec; | | 1094 | idtvec = source->is_idtvec; |
1095 | | | 1095 | |
1096 | (*pic->pic_hwmask)(pic, ih->ih_pin); | | 1096 | (*pic->pic_hwmask)(pic, ih->ih_pin); |
1097 | atomic_and_32(&ci->ci_ipending, ~(1 << ih->ih_slot)); | | 1097 | atomic_and_32(&ci->ci_ipending, ~(1 << ih->ih_slot)); |
1098 | | | 1098 | |
1099 | /* | | 1099 | /* |
1100 | * Remove the handler from the chain. | | 1100 | * Remove the handler from the chain. |
1101 | */ | | 1101 | */ |
1102 | for (p = &source->is_handlers; (q = *p) != NULL && q != ih; | | 1102 | for (p = &source->is_handlers; (q = *p) != NULL && q != ih; |
1103 | p = &q->ih_next) | | 1103 | p = &q->ih_next) |
1104 | ; | | 1104 | ; |
1105 | if (q == NULL) { | | 1105 | if (q == NULL) { |
1106 | x86_write_psl(psl); | | 1106 | x86_write_psl(psl); |
1107 | panic("%s: handler not registered", __func__); | | 1107 | panic("%s: handler not registered", __func__); |
1108 | /* NOTREACHED */ | | 1108 | /* NOTREACHED */ |
1109 | } | | 1109 | } |
1110 | | | 1110 | |
1111 | *p = q->ih_next; | | 1111 | *p = q->ih_next; |
1112 | | | 1112 | |
1113 | x86_intr_calculatemasks(ci); | | 1113 | x86_intr_calculatemasks(ci); |
1114 | /* | | 1114 | /* |
1115 | * If no handlers remain, 1) do delroute because the slot no longer | | 1115 | * If no handlers remain, 1) do delroute because the slot no longer |
1116 | * has a source and 2) don't hwunmask, to prevent spurious interrupts. | | 1116 | * has a source and 2) don't hwunmask, to prevent spurious interrupts. |
1117 | * | | 1117 | * |
1118 | * If any handler remains, 1) don't delroute because the slot still | | 1118 | * If any handler remains, 1) don't delroute because the slot still |
1119 | * has a source and 2) do hwunmask so the interrupt can be taken again. | | 1119 | * has a source and 2) do hwunmask so the interrupt can be taken again. |
1120 | * | | 1120 | * |
1121 | */ | | 1121 | */ |
1122 | if (source->is_handlers == NULL) | | 1122 | if (source->is_handlers == NULL) |
1123 | (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, | | 1123 | (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, |
1124 | source->is_type); | | 1124 | source->is_type); |
1125 | else if (source->is_mask_count == 0) | | 1125 | else if (source->is_mask_count == 0) |
1126 | (*pic->pic_hwunmask)(pic, ih->ih_pin); | | 1126 | (*pic->pic_hwunmask)(pic, ih->ih_pin); |
1127 | | | 1127 | |
1128 | /* Re-enable interrupts. */ | | 1128 | /* Re-enable interrupts. */ |
1129 | x86_write_psl(psl); | | 1129 | x86_write_psl(psl); |
1130 | | | 1130 | |
1131 | /* If the source is free we can drop it now. */ | | 1131 | /* If the source is free we can drop it now. */ |
1132 | intr_source_free(ci, ih->ih_slot, pic, idtvec); | | 1132 | intr_source_free(ci, ih->ih_slot, pic, idtvec); |
1133 | | | 1133 | |
1134 | DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n", | | 1134 | DPRINTF(("%s: remove slot %d (pic %s pin %d vec %d)\n", |
1135 | device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name, | | 1135 | device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name, |
1136 | ih->ih_pin, idtvec)); | | 1136 | ih->ih_pin, idtvec)); |
1137 | } | | 1137 | } |
1138 | | | 1138 | |
1139 | static int | | 1139 | static int |
1140 | intr_num_handlers(struct intrsource *isp) | | 1140 | intr_num_handlers(struct intrsource *isp) |
1141 | { | | 1141 | { |
1142 | struct intrhand *ih; | | 1142 | struct intrhand *ih; |
1143 | int num; | | 1143 | int num; |
1144 | | | 1144 | |
1145 | num = 0; | | 1145 | num = 0; |
1146 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) | | 1146 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) |
1147 | num++; | | 1147 | num++; |
1148 | | | 1148 | |
1149 | return num; | | 1149 | return num; |
1150 | } | | 1150 | } |
1151 | | | 1151 | |
1152 | /* | | 1152 | /* |
1153 | * Deregister an interrupt handler. | | 1153 | * Deregister an interrupt handler. |
1154 | */ | | 1154 | */ |
1155 | void | | 1155 | void |
1156 | intr_disestablish(struct intrhand *ih) | | 1156 | intr_disestablish(struct intrhand *ih) |
1157 | { | | 1157 | { |
1158 | struct cpu_info *ci; | | 1158 | struct cpu_info *ci; |
1159 | struct intrsource *isp; | | 1159 | struct intrsource *isp; |
1160 | uint64_t where; | | 1160 | uint64_t where; |
1161 | | | 1161 | |
1162 | /* | | 1162 | /* |
1163 | * Count the removal for load balancing. | | 1163 | * Count the removal for load balancing. |
1164 | * Call out to the remote CPU to update its interrupt state. | | 1164 | * Call out to the remote CPU to update its interrupt state. |
1165 | * Only make RPCs if the APs are up and running. | | 1165 | * Only make RPCs if the APs are up and running. |
1166 | */ | | 1166 | */ |
1167 | mutex_enter(&cpu_lock); | | 1167 | mutex_enter(&cpu_lock); |
1168 | ci = ih->ih_cpu; | | 1168 | ci = ih->ih_cpu; |
1169 | (ci->ci_nintrhand)--; | | 1169 | (ci->ci_nintrhand)--; |
1170 | KASSERT(ci->ci_nintrhand >= 0); | | 1170 | KASSERT(ci->ci_nintrhand >= 0); |
1171 | isp = ci->ci_isources[ih->ih_slot]; | | 1171 | isp = ci->ci_isources[ih->ih_slot]; |
1172 | if (ci == curcpu() || !mp_online) { | | 1172 | if (ci == curcpu() || !mp_online) { |
1173 | intr_disestablish_xcall(ih, NULL); | | 1173 | intr_disestablish_xcall(ih, NULL); |
1174 | } else { | | 1174 | } else { |
1175 | where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); | | 1175 | where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); |
1176 | xc_wait(where); | | 1176 | xc_wait(where); |
1177 | } | | 1177 | } |
1178 | if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { | | 1178 | if (!msipic_is_msi_pic(isp->is_pic) && intr_num_handlers(isp) < 1) { |
1179 | intr_free_io_intrsource_direct(isp); | | 1179 | intr_free_io_intrsource_direct(isp); |
1180 | } | | 1180 | } |
1181 | mutex_exit(&cpu_lock); | | 1181 | mutex_exit(&cpu_lock); |
1182 | kmem_free(ih, sizeof(*ih)); | | 1182 | kmem_free(ih, sizeof(*ih)); |
1183 | } | | 1183 | } |
1184 | | | 1184 | |
1185 | static const char * | | 1185 | static const char * |
1186 | xen_intr_string(int port, char *buf, size_t len, struct pic *pic) | | 1186 | xen_intr_string(int port, char *buf, size_t len, struct pic *pic) |
1187 | { | | 1187 | { |
1188 | KASSERT(pic->pic_type == PIC_XEN); | | 1188 | KASSERT(pic->pic_type == PIC_XEN); |
1189 | | | 1189 | |
1190 | KASSERT(port >= 0); | | 1190 | KASSERT(port >= 0); |
1191 | | | 1191 | |
1192 | snprintf(buf, len, "%s channel %d", pic->pic_name, port); | | 1192 | snprintf(buf, len, "%s chan %d", pic->pic_name, port); |
1193 | | | 1193 | |
1194 | return buf; | | 1194 | return buf; |
1195 | } | | 1195 | } |
1196 | | | 1196 | |
1197 | static const char * | | 1197 | static const char * |
1198 | legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) | | 1198 | legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) |
1199 | { | | 1199 | { |
1200 | int legacy_irq; | | 1200 | int legacy_irq; |
1201 | | | 1201 | |
1202 | KASSERT(pic->pic_type == PIC_I8259); | | 1202 | KASSERT(pic->pic_type == PIC_I8259); |
1203 | #if NLAPIC > 0 | | 1203 | #if NLAPIC > 0 |
1204 | KASSERT(APIC_IRQ_ISLEGACY(ih)); | | 1204 | KASSERT(APIC_IRQ_ISLEGACY(ih)); |
1205 | | | 1205 | |
1206 | legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); | | 1206 | legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); |
1207 | #else | | 1207 | #else |
1208 | legacy_irq = ih; | | 1208 | legacy_irq = ih; |
1209 | #endif | | 1209 | #endif |
1210 | KASSERT(legacy_irq >= 0 && legacy_irq < 16); | | 1210 | KASSERT(legacy_irq >= 0 && legacy_irq < 16); |
1211 | | | 1211 | |
1212 | snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); | | 1212 | snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); |
1213 | | | 1213 | |
1214 | return buf; | | 1214 | return buf; |
1215 | } | | 1215 | } |
1216 | | | 1216 | |
1217 | const char * | | 1217 | const char * |
1218 | intr_string(intr_handle_t ih, char *buf, size_t len) | | 1218 | intr_string(intr_handle_t ih, char *buf, size_t len) |
1219 | { | | 1219 | { |
1220 | #if NIOAPIC > 0 | | 1220 | #if NIOAPIC > 0 |
1221 | struct ioapic_softc *pic; | | 1221 | struct ioapic_softc *pic; |
1222 | #endif | | 1222 | #endif |
1223 | | | 1223 | |
1224 | if (ih == 0) | | 1224 | if (ih == 0) |
1225 | panic("%s: bogus handle 0x%" PRIx64, __func__, ih); | | 1225 | panic("%s: bogus handle 0x%" PRIx64, __func__, ih); |
1226 | | | 1226 | |
1227 | #if NIOAPIC > 0 | | 1227 | #if NIOAPIC > 0 |
1228 | if (ih & APIC_INT_VIA_APIC) { | | 1228 | if (ih & APIC_INT_VIA_APIC) { |
1229 | pic = ioapic_find(APIC_IRQ_APIC(ih)); | | 1229 | pic = ioapic_find(APIC_IRQ_APIC(ih)); |
1230 | if (pic != NULL) { | | 1230 | if (pic != NULL) { |
1231 | snprintf(buf, len, "%s pin %d", | | 1231 | snprintf(buf, len, "%s pin %d", |
1232 | device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); | | 1232 | device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); |
1233 | } else { | | 1233 | } else { |
1234 | snprintf(buf, len, | | 1234 | snprintf(buf, len, |
1235 | "apic %d int %d (irq %d)", | | 1235 | "apic %d int %d (irq %d)", |
1236 | APIC_IRQ_APIC(ih), | | 1236 | APIC_IRQ_APIC(ih), |
1237 | APIC_IRQ_PIN(ih), | | 1237 | APIC_IRQ_PIN(ih), |
1238 | APIC_IRQ_LEGACY_IRQ(ih)); | | 1238 | APIC_IRQ_LEGACY_IRQ(ih)); |
1239 | } | | 1239 | } |
1240 | } else | | 1240 | } else |
1241 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | | 1241 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); |
1242 | | | 1242 | |
1243 | #elif NLAPIC > 0 | | 1243 | #elif NLAPIC > 0 |
1244 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); | | 1244 | snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); |
1245 | #else | | 1245 | #else |
1246 | snprintf(buf, len, "irq %d", (int) ih); | | 1246 | snprintf(buf, len, "irq %d", (int) ih); |
1247 | #endif | | 1247 | #endif |
1248 | return buf; | | 1248 | return buf; |
1249 | | | 1249 | |
1250 | } | | 1250 | } |
1251 | | | 1251 | |
1252 | /* | | 1252 | /* |
1253 | * Fake interrupt handler structures for the benefit of symmetry with | | 1253 | * Fake interrupt handler structures for the benefit of symmetry with |
1254 | * other interrupt sources, and the benefit of x86_intr_calculatemasks() | | 1254 | * other interrupt sources, and the benefit of x86_intr_calculatemasks() |
1255 | */ | | 1255 | */ |
1256 | struct intrhand fake_timer_intrhand; | | 1256 | struct intrhand fake_timer_intrhand; |
1257 | struct intrhand fake_ipi_intrhand; | | 1257 | struct intrhand fake_ipi_intrhand; |
1258 | #if NHYPERV > 0 | | 1258 | #if NHYPERV > 0 |
1259 | struct intrhand fake_hyperv_intrhand; | | 1259 | struct intrhand fake_hyperv_intrhand; |
1260 | #endif | | 1260 | #endif |
1261 | | | 1261 | |
1262 | #if NLAPIC > 0 && defined(MULTIPROCESSOR) | | 1262 | #if NLAPIC > 0 && defined(MULTIPROCESSOR) |
1263 | static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES; | | 1263 | static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES; |
1264 | #endif | | 1264 | #endif |
1265 | | | 1265 | |
1266 | #if defined(INTRSTACKSIZE) | | 1266 | #if defined(INTRSTACKSIZE) |
1267 | static inline bool | | 1267 | static inline bool |
1268 | redzone_const_or_false(bool x) | | 1268 | redzone_const_or_false(bool x) |
1269 | { | | 1269 | { |
1270 | #ifdef DIAGNOSTIC | | 1270 | #ifdef DIAGNOSTIC |
1271 | return x; | | 1271 | return x; |
1272 | #else | | 1272 | #else |
1273 | return false; | | 1273 | return false; |
1274 | #endif /* !DIAGNOSTIC */ | | 1274 | #endif /* !DIAGNOSTIC */ |
1275 | } | | 1275 | } |
1276 | | | 1276 | |
1277 | static inline int | | 1277 | static inline int |
1278 | redzone_const_or_zero(int x) | | 1278 | redzone_const_or_zero(int x) |
1279 | { | | 1279 | { |
1280 | return redzone_const_or_false(true) ? x : 0; | | 1280 | return redzone_const_or_false(true) ? x : 0; |
1281 | } | | 1281 | } |
1282 | #endif | | 1282 | #endif |
1283 | | | 1283 | |
1284 | /* | | 1284 | /* |
1285 | * Initialize all handlers that aren't dynamically allocated, and exist | | 1285 | * Initialize all handlers that aren't dynamically allocated, and exist |
1286 | * for each CPU. | | 1286 | * for each CPU. |
1287 | */ | | 1287 | */ |
1288 | void | | 1288 | void |
1289 | cpu_intr_init(struct cpu_info *ci) | | 1289 | cpu_intr_init(struct cpu_info *ci) |
1290 | { | | 1290 | { |
1291 | #if (NLAPIC > 0) || defined(MULTIPROCESSOR) || \ | | 1291 | #if (NLAPIC > 0) || defined(MULTIPROCESSOR) || \ |
1292 | (NHYPERV > 0) | | 1292 | (NHYPERV > 0) |
1293 | struct intrsource *isp; | | 1293 | struct intrsource *isp; |
1294 | #endif | | 1294 | #endif |
1295 | #if NLAPIC > 0 | | 1295 | #if NLAPIC > 0 |
1296 | static int first = 1; | | 1296 | static int first = 1; |
1297 | #if defined(MULTIPROCESSOR) | | 1297 | #if defined(MULTIPROCESSOR) |
1298 | int i; | | 1298 | int i; |
1299 | #endif | | 1299 | #endif |
1300 | #endif | | 1300 | #endif |
1301 | | | 1301 | |
1302 | #if NLAPIC > 0 | | 1302 | #if NLAPIC > 0 |
1303 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | | 1303 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); |
1304 | isp->is_recurse = Xrecurse_lapic_ltimer; | | 1304 | isp->is_recurse = Xrecurse_lapic_ltimer; |
1305 | isp->is_resume = Xresume_lapic_ltimer; | | 1305 | isp->is_resume = Xresume_lapic_ltimer; |
1306 | fake_timer_intrhand.ih_pic = &local_pic; | | 1306 | fake_timer_intrhand.ih_pic = &local_pic; |
1307 | fake_timer_intrhand.ih_level = IPL_CLOCK; | | 1307 | fake_timer_intrhand.ih_level = IPL_CLOCK; |
1308 | isp->is_handlers = &fake_timer_intrhand; | | 1308 | isp->is_handlers = &fake_timer_intrhand; |
1309 | isp->is_pic = &local_pic; | | 1309 | isp->is_pic = &local_pic; |
1310 | ci->ci_isources[LIR_TIMER] = isp; | | 1310 | ci->ci_isources[LIR_TIMER] = isp; |
1311 | evcnt_attach_dynamic(&isp->is_evcnt, | | 1311 | evcnt_attach_dynamic(&isp->is_evcnt, |
1312 | first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL, | | 1312 | first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL, |
1313 | device_xname(ci->ci_dev), "timer"); | | 1313 | device_xname(ci->ci_dev), "timer"); |
1314 | first = 0; | | 1314 | first = 0; |
1315 | | | 1315 | |
1316 | #ifdef MULTIPROCESSOR | | 1316 | #ifdef MULTIPROCESSOR |
1317 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | | 1317 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); |
1318 | isp->is_recurse = Xrecurse_lapic_ipi; | | 1318 | isp->is_recurse = Xrecurse_lapic_ipi; |
1319 | isp->is_resume = Xresume_lapic_ipi; | | 1319 | isp->is_resume = Xresume_lapic_ipi; |
1320 | fake_ipi_intrhand.ih_pic = &local_pic; | | 1320 | fake_ipi_intrhand.ih_pic = &local_pic; |
1321 | fake_ipi_intrhand.ih_level = IPL_HIGH; | | 1321 | fake_ipi_intrhand.ih_level = IPL_HIGH; |
1322 | isp->is_handlers = &fake_ipi_intrhand; | | 1322 | isp->is_handlers = &fake_ipi_intrhand; |
1323 | isp->is_pic = &local_pic; | | 1323 | isp->is_pic = &local_pic; |
1324 | ci->ci_isources[LIR_IPI] = isp; | | 1324 | ci->ci_isources[LIR_IPI] = isp; |
1325 | | | 1325 | |
1326 | for (i = 0; i < X86_NIPI; i++) | | 1326 | for (i = 0; i < X86_NIPI; i++) |
1327 | evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, | | 1327 | evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, |
1328 | NULL, device_xname(ci->ci_dev), x86_ipi_names[i]); | | 1328 | NULL, device_xname(ci->ci_dev), x86_ipi_names[i]); |
1329 | #endif | | 1329 | #endif |
1330 | | | 1330 | |
1331 | #if NHYPERV > 0 | | 1331 | #if NHYPERV > 0 |
1332 | if (hyperv_hypercall_enabled()) { | | 1332 | if (hyperv_hypercall_enabled()) { |
1333 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); | | 1333 | isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); |
1334 | isp->is_recurse = Xrecurse_hyperv_hypercall; | | 1334 | isp->is_recurse = Xrecurse_hyperv_hypercall; |
1335 | isp->is_resume = Xresume_hyperv_hypercall; | | 1335 | isp->is_resume = Xresume_hyperv_hypercall; |
1336 | fake_hyperv_intrhand.ih_level = IPL_NET; | | 1336 | fake_hyperv_intrhand.ih_level = IPL_NET; |
1337 | isp->is_handlers = &fake_hyperv_intrhand; | | 1337 | isp->is_handlers = &fake_hyperv_intrhand; |
1338 | isp->is_pic = &local_pic; | | 1338 | isp->is_pic = &local_pic; |
1339 | ci->ci_isources[LIR_HV] = isp; | | 1339 | ci->ci_isources[LIR_HV] = isp; |
1340 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, | | 1340 | evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL, |
1341 | device_xname(ci->ci_dev), "Hyper-V hypercall"); | | 1341 | device_xname(ci->ci_dev), "Hyper-V hypercall"); |
1342 | } | | 1342 | } |
1343 | #endif | | 1343 | #endif |
1344 | #endif | | 1344 | #endif |
1345 | | | 1345 | |
1346 | #if defined(__HAVE_PREEMPTION) | | 1346 | #if defined(__HAVE_PREEMPTION) |
1347 | x86_init_preempt(ci); | | 1347 | x86_init_preempt(ci); |
1348 | | | 1348 | |
1349 | #endif | | 1349 | #endif |
1350 | x86_intr_calculatemasks(ci); | | 1350 | x86_intr_calculatemasks(ci); |
1351 | | | 1351 | |
1352 | #if defined(INTRSTACKSIZE) | | 1352 | #if defined(INTRSTACKSIZE) |
1353 | vaddr_t istack; | | 1353 | vaddr_t istack; |
1354 | | | 1354 | |
1355 | /* | | 1355 | /* |
1356 | * If the red zone is activated, protect both the top and | | 1356 | * If the red zone is activated, protect both the top and |
1357 | * the bottom of the stack with an unmapped page. | | 1357 | * the bottom of the stack with an unmapped page. |
1358 | */ | | 1358 | */ |
1359 | istack = uvm_km_alloc(kernel_map, | | 1359 | istack = uvm_km_alloc(kernel_map, |
1360 | INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, | | 1360 | INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, |
1361 | UVM_KMF_WIRED | UVM_KMF_ZERO); | | 1361 | UVM_KMF_WIRED | UVM_KMF_ZERO); |
1362 | if (redzone_const_or_false(true)) { | | 1362 | if (redzone_const_or_false(true)) { |
1363 | pmap_kremove(istack, PAGE_SIZE); | | 1363 | pmap_kremove(istack, PAGE_SIZE); |
1364 | pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); | | 1364 | pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); |
1365 | pmap_update(pmap_kernel()); | | 1365 | pmap_update(pmap_kernel()); |
1366 | } | | 1366 | } |
1367 | | | 1367 | |
1368 | /* | | 1368 | /* |
1369 | * 33 used to be 1. Arbitrarily reserve 32 more register_t's | | 1369 | * 33 used to be 1. Arbitrarily reserve 32 more register_t's |
1370 | * of space for ddb(4) to examine some subroutine arguments | | 1370 | * of space for ddb(4) to examine some subroutine arguments |
1371 | * and to hunt for the next stack frame. | | 1371 | * and to hunt for the next stack frame. |
1372 | */ | | 1372 | */ |
1373 | ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) + | | 1373 | ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) + |
1374 | INTRSTACKSIZE - 33 * sizeof(register_t); | | 1374 | INTRSTACKSIZE - 33 * sizeof(register_t); |
1375 | #endif | | 1375 | #endif |
1376 | | | 1376 | |
1377 | ci->ci_idepth = -1; | | 1377 | ci->ci_idepth = -1; |
1378 | } | | 1378 | } |
1379 | | | 1379 | |
1380 | #if defined(INTRDEBUG) || defined(DDB) | | 1380 | #if defined(INTRDEBUG) || defined(DDB) |
1381 | | | 1381 | |
1382 | void | | 1382 | void |
1383 | intr_printconfig(void) | | 1383 | intr_printconfig(void) |
1384 | { | | 1384 | { |
1385 | int i; | | 1385 | int i; |
1386 | struct intrhand *ih; | | 1386 | struct intrhand *ih; |
1387 | struct intrsource *isp; | | 1387 | struct intrsource *isp; |
1388 | struct cpu_info *ci; | | 1388 | struct cpu_info *ci; |
1389 | CPU_INFO_ITERATOR cii; | | 1389 | CPU_INFO_ITERATOR cii; |
1390 | void (*pr)(const char *, ...); | | 1390 | void (*pr)(const char *, ...); |
1391 | | | 1391 | |
1392 | pr = printf; | | 1392 | pr = printf; |
1393 | #ifdef DDB | | 1393 | #ifdef DDB |
1394 | extern int db_active; | | 1394 | extern int db_active; |
1395 | if (db_active) { | | 1395 | if (db_active) { |
1396 | pr = db_printf; | | 1396 | pr = db_printf; |
1397 | } | | 1397 | } |
1398 | #endif | | 1398 | #endif |
1399 | | | 1399 | |
1400 | for (CPU_INFO_FOREACH(cii, ci)) { | | 1400 | for (CPU_INFO_FOREACH(cii, ci)) { |
1401 | (*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev)); | | 1401 | (*pr)("%s: interrupt masks:\n", device_xname(ci->ci_dev)); |
1402 | for (i = 0; i < NIPL; i++) | | 1402 | for (i = 0; i < NIPL; i++) |
1403 | (*pr)("IPL %d mask %08lx unmask %08lx\n", i, | | 1403 | (*pr)("IPL %d mask %08lx unmask %08lx\n", i, |
1404 | (u_long)ci->ci_imask[i], (u_long)ci->ci_iunmask[i]); | | 1404 | (u_long)ci->ci_imask[i], (u_long)ci->ci_iunmask[i]); |
1405 | for (i = 0; i < MAX_INTR_SOURCES; i++) { | | 1405 | for (i = 0; i < MAX_INTR_SOURCES; i++) { |
1406 | isp = ci->ci_isources[i]; | | 1406 | isp = ci->ci_isources[i]; |
1407 | if (isp == NULL) | | 1407 | if (isp == NULL) |
1408 | continue; | | 1408 | continue; |
1409 | (*pr)("%s source %d is pin %d from pic %s type %d " | | 1409 | (*pr)("%s source %d is pin %d from pic %s type %d " |
1410 | "maxlevel %d\n", device_xname(ci->ci_dev), i, | | 1410 | "maxlevel %d\n", device_xname(ci->ci_dev), i, |
1411 | isp->is_pin, isp->is_pic->pic_name, isp->is_type, | | 1411 | isp->is_pin, isp->is_pic->pic_name, isp->is_type, |
1412 | isp->is_maxlevel); | | 1412 | isp->is_maxlevel); |
1413 | for (ih = isp->is_handlers; ih != NULL; | | 1413 | for (ih = isp->is_handlers; ih != NULL; |
1414 | ih = ih->ih_next) | | 1414 | ih = ih->ih_next) |
1415 | (*pr)("\thandler %p level %d\n", | | 1415 | (*pr)("\thandler %p level %d\n", |
1416 | ih->ih_fun, ih->ih_level); | | 1416 | ih->ih_fun, ih->ih_level); |
1417 | #if NIOAPIC > 0 | | 1417 | #if NIOAPIC > 0 |
1418 | if (isp->is_pic->pic_type == PIC_IOAPIC) { | | 1418 | if (isp->is_pic->pic_type == PIC_IOAPIC) { |
1419 | struct ioapic_softc *sc; | | 1419 | struct ioapic_softc *sc; |
1420 | sc = isp->is_pic->pic_ioapic; | | 1420 | sc = isp->is_pic->pic_ioapic; |
1421 | (*pr)("\tioapic redir 0x%x\n", | | 1421 | (*pr)("\tioapic redir 0x%x\n", |
1422 | sc->sc_pins[isp->is_pin].ip_map->redir); | | 1422 | sc->sc_pins[isp->is_pin].ip_map->redir); |
1423 | } | | 1423 | } |
1424 | #endif | | 1424 | #endif |
1425 | | | 1425 | |
1426 | } | | 1426 | } |
1427 | } | | 1427 | } |
1428 | } | | 1428 | } |
1429 | | | 1429 | |
1430 | #endif | | 1430 | #endif |
1431 | | | 1431 | |
1432 | /* | | 1432 | /* |
1433 | * Save the interrupt count of the CPU this source is currently bound to. | | 1433 | * Save the interrupt count of the CPU this source is currently bound to. |
1434 | */ | | 1434 | */ |
1435 | static void | | 1435 | static void |
1436 | intr_save_evcnt(struct intrsource *source, cpuid_t cpuid) | | 1436 | intr_save_evcnt(struct intrsource *source, cpuid_t cpuid) |
1437 | { | | 1437 | { |
1438 | struct percpu_evcnt *pep; | | 1438 | struct percpu_evcnt *pep; |
1439 | uint64_t curcnt; | | 1439 | uint64_t curcnt; |
1440 | int i; | | 1440 | int i; |
1441 | | | 1441 | |
1442 | curcnt = source->is_evcnt.ev_count; | | 1442 | curcnt = source->is_evcnt.ev_count; |
1443 | pep = source->is_saved_evcnt; | | 1443 | pep = source->is_saved_evcnt; |
1444 | | | 1444 | |
1445 | for (i = 0; i < ncpu; i++) { | | 1445 | for (i = 0; i < ncpu; i++) { |
1446 | if (pep[i].cpuid == cpuid) { | | 1446 | if (pep[i].cpuid == cpuid) { |
1447 | pep[i].count = curcnt; | | 1447 | pep[i].count = curcnt; |
1448 | break; | | 1448 | break; |
1449 | } | | 1449 | } |
1450 | } | | 1450 | } |
1451 | } | | 1451 | } |
1452 | | | 1452 | |
1453 | /* | | 1453 | /* |
1454 | * Restore the interrupt count of the CPU this source is now bound to. | | 1454 | * Restore the interrupt count of the CPU this source is now bound to. |
1455 | */ | | 1455 | */ |
1456 | static void | | 1456 | static void |
1457 | intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid) | | 1457 | intr_restore_evcnt(struct intrsource *source, cpuid_t cpuid) |
1458 | { | | 1458 | { |
1459 | struct percpu_evcnt *pep; | | 1459 | struct percpu_evcnt *pep; |
1460 | int i; | | 1460 | int i; |
1461 | | | 1461 | |
1462 | pep = source->is_saved_evcnt; | | 1462 | pep = source->is_saved_evcnt; |
1463 | | | 1463 | |
1464 | for (i = 0; i < ncpu; i++) { | | 1464 | for (i = 0; i < ncpu; i++) { |
1465 | if (pep[i].cpuid == cpuid) { | | 1465 | if (pep[i].cpuid == cpuid) { |
1466 | source->is_evcnt.ev_count = pep[i].count; | | 1466 | source->is_evcnt.ev_count = pep[i].count; |
1467 | break; | | 1467 | break; |
1468 | } | | 1468 | } |
1469 | } | | 1469 | } |
1470 | } | | 1470 | } |
1471 | | | 1471 | |
1472 | static void | | 1472 | static void |
1473 | intr_redistribute_xc_t(void *arg1, void *arg2) | | 1473 | intr_redistribute_xc_t(void *arg1, void *arg2) |
1474 | { | | 1474 | { |
1475 | struct cpu_info *ci; | | 1475 | struct cpu_info *ci; |
1476 | struct intrsource *isp; | | 1476 | struct intrsource *isp; |
1477 | int slot; | | 1477 | int slot; |
1478 | u_long psl; | | 1478 | u_long psl; |
1479 | | | 1479 | |
1480 | ci = curcpu(); | | 1480 | ci = curcpu(); |
1481 | isp = arg1; | | 1481 | isp = arg1; |
1482 | slot = (int)(intptr_t)arg2; | | 1482 | slot = (int)(intptr_t)arg2; |
1483 | | | 1483 | |
1484 | /* Disable interrupts locally. */ | | 1484 | /* Disable interrupts locally. */ |
1485 | psl = x86_read_psl(); | | 1485 | psl = x86_read_psl(); |
1486 | x86_disable_intr(); | | 1486 | x86_disable_intr(); |
1487 | | | 1487 | |
1488 | /* Hook it in and re-calculate masks. */ | | 1488 | /* Hook it in and re-calculate masks. */ |
1489 | ci->ci_isources[slot] = isp; | | 1489 | ci->ci_isources[slot] = isp; |
1490 | x86_intr_calculatemasks(curcpu()); | | 1490 | x86_intr_calculatemasks(curcpu()); |
1491 | | | 1491 | |
1492 | /* Re-enable interrupts locally. */ | | 1492 | /* Re-enable interrupts locally. */ |
1493 | x86_write_psl(psl); | | 1493 | x86_write_psl(psl); |
1494 | } | | 1494 | } |
1495 | | | 1495 | |
1496 | static void | | 1496 | static void |
1497 | intr_redistribute_xc_s1(void *arg1, void *arg2) | | 1497 | intr_redistribute_xc_s1(void *arg1, void *arg2) |
1498 | { | | 1498 | { |
1499 | struct pic *pic; | | 1499 | struct pic *pic; |
1500 | struct intrsource *isp; | | 1500 | struct intrsource *isp; |
1501 | struct cpu_info *nci; | | 1501 | struct cpu_info *nci; |
1502 | u_long psl; | | 1502 | u_long psl; |
1503 | | | 1503 | |
1504 | isp = arg1; | | 1504 | isp = arg1; |
1505 | nci = arg2; | | 1505 | nci = arg2; |
1506 | | | 1506 | |
1507 | /* | | 1507 | /* |
1508 | * Disable interrupts on-chip and mask the pin. Back out | | 1508 | * Disable interrupts on-chip and mask the pin. Back out |
1509 | * and let the interrupt be processed if one is pending. | | 1509 | * and let the interrupt be processed if one is pending. |
1510 | */ | | 1510 | */ |
1511 | pic = isp->is_pic; | | 1511 | pic = isp->is_pic; |
1512 | for (;;) { | | 1512 | for (;;) { |
1513 | psl = x86_read_psl(); | | 1513 | psl = x86_read_psl(); |
1514 | x86_disable_intr(); | | 1514 | x86_disable_intr(); |
1515 | if ((*pic->pic_trymask)(pic, isp->is_pin)) { | | 1515 | if ((*pic->pic_trymask)(pic, isp->is_pin)) { |
1516 | break; | | 1516 | break; |
1517 | } | | 1517 | } |
1518 | x86_write_psl(psl); | | 1518 | x86_write_psl(psl); |
1519 | DELAY(1000); | | 1519 | DELAY(1000); |
1520 | } | | 1520 | } |
1521 | | | 1521 | |
1522 | /* pic_addroute will unmask the interrupt. */ | | 1522 | /* pic_addroute will unmask the interrupt. */ |
1523 | (*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec, | | 1523 | (*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec, |
1524 | isp->is_type); | | 1524 | isp->is_type); |
1525 | x86_write_psl(psl); | | 1525 | x86_write_psl(psl); |
1526 | } | | 1526 | } |
1527 | | | 1527 | |
1528 | static void | | 1528 | static void |
1529 | intr_redistribute_xc_s2(void *arg1, void *arg2) | | 1529 | intr_redistribute_xc_s2(void *arg1, void *arg2) |
1530 | { | | 1530 | { |
1531 | struct cpu_info *ci; | | 1531 | struct cpu_info *ci; |
1532 | u_long psl; | | 1532 | u_long psl; |
1533 | int slot; | | 1533 | int slot; |
1534 | | | 1534 | |
1535 | ci = curcpu(); | | 1535 | ci = curcpu(); |
1536 | slot = (int)(uintptr_t)arg1; | | 1536 | slot = (int)(uintptr_t)arg1; |
1537 | | | 1537 | |
1538 | /* Disable interrupts locally. */ | | 1538 | /* Disable interrupts locally. */ |
1539 | psl = x86_read_psl(); | | 1539 | psl = x86_read_psl(); |
1540 | x86_disable_intr(); | | 1540 | x86_disable_intr(); |
1541 | | | 1541 | |
1542 | /* Patch out the source and re-calculate masks. */ | | 1542 | /* Patch out the source and re-calculate masks. */ |
1543 | ci->ci_isources[slot] = NULL; | | 1543 | ci->ci_isources[slot] = NULL; |
1544 | x86_intr_calculatemasks(ci); | | 1544 | x86_intr_calculatemasks(ci); |
1545 | | | 1545 | |
1546 | /* Re-enable interrupts locally. */ | | 1546 | /* Re-enable interrupts locally. */ |
1547 | x86_write_psl(psl); | | 1547 | x86_write_psl(psl); |
1548 | } | | 1548 | } |
1549 | | | 1549 | |
1550 | static bool | | 1550 | static bool |
1551 | intr_redistribute(struct cpu_info *oci) | | 1551 | intr_redistribute(struct cpu_info *oci) |
1552 | { | | 1552 | { |
1553 | struct intrsource *isp; | | 1553 | struct intrsource *isp; |
1554 | struct intrhand *ih; | | 1554 | struct intrhand *ih; |
1555 | CPU_INFO_ITERATOR cii; | | 1555 | CPU_INFO_ITERATOR cii; |
1556 | struct cpu_info *nci, *ici; | | 1556 | struct cpu_info *nci, *ici; |
1557 | int oslot, nslot; | | 1557 | int oslot, nslot; |
1558 | uint64_t where; | | 1558 | uint64_t where; |
1559 | | | 1559 | |
1560 | KASSERT(mutex_owned(&cpu_lock)); | | 1560 | KASSERT(mutex_owned(&cpu_lock)); |
1561 | | | 1561 | |
1562 | /* Look for an interrupt source that we can migrate. */ | | 1562 | /* Look for an interrupt source that we can migrate. */ |
1563 | for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) { | | 1563 | for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) { |
1564 | if ((isp = oci->ci_isources[oslot]) == NULL) { | | 1564 | if ((isp = oci->ci_isources[oslot]) == NULL) { |
1565 | continue; | | 1565 | continue; |
1566 | } | | 1566 | } |
1567 | if (isp->is_pic->pic_type == PIC_IOAPIC) { | | 1567 | if (isp->is_pic->pic_type == PIC_IOAPIC) { |
1568 | break; | | 1568 | break; |
1569 | } | | 1569 | } |
1570 | } | | 1570 | } |
1571 | if (oslot == MAX_INTR_SOURCES) { | | 1571 | if (oslot == MAX_INTR_SOURCES) { |
1572 | return false; | | 1572 | return false; |
1573 | } | | 1573 | } |
1574 | | | 1574 | |
1575 | /* Find least loaded CPU and try to move there. */ | | 1575 | /* Find least loaded CPU and try to move there. */ |
1576 | nci = NULL; | | 1576 | nci = NULL; |
1577 | for (CPU_INFO_FOREACH(cii, ici)) { | | 1577 | for (CPU_INFO_FOREACH(cii, ici)) { |
1578 | if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | | 1578 | if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { |
1579 | continue; | | 1579 | continue; |
1580 | } | | 1580 | } |
1581 | KASSERT(ici != oci); | | 1581 | KASSERT(ici != oci); |
1582 | if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) { | | 1582 | if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) { |
1583 | nci = ici; | | 1583 | nci = ici; |
1584 | } | | 1584 | } |
1585 | } | | 1585 | } |
1586 | if (nci == NULL) { | | 1586 | if (nci == NULL) { |
1587 | return false; | | 1587 | return false; |
1588 | } | | 1588 | } |
1589 | for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { | | 1589 | for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { |
1590 | if (nci->ci_isources[nslot] == NULL) { | | 1590 | if (nci->ci_isources[nslot] == NULL) { |
1591 | break; | | 1591 | break; |
1592 | } | | 1592 | } |
1593 | } | | 1593 | } |
1594 | | | 1594 | |
1595 | /* If that did not work, allocate anywhere. */ | | 1595 | /* If that did not work, allocate anywhere. */ |
1596 | if (nslot == MAX_INTR_SOURCES) { | | 1596 | if (nslot == MAX_INTR_SOURCES) { |
1597 | for (CPU_INFO_FOREACH(cii, nci)) { | | 1597 | for (CPU_INFO_FOREACH(cii, nci)) { |
1598 | if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | | 1598 | if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { |
1599 | continue; | | 1599 | continue; |
1600 | } | | 1600 | } |
1601 | KASSERT(nci != oci); | | 1601 | KASSERT(nci != oci); |
1602 | for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { | | 1602 | for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) { |
1603 | if (nci->ci_isources[nslot] == NULL) { | | 1603 | if (nci->ci_isources[nslot] == NULL) { |
1604 | break; | | 1604 | break; |
1605 | } | | 1605 | } |
1606 | } | | 1606 | } |
1607 | if (nslot != MAX_INTR_SOURCES) { | | 1607 | if (nslot != MAX_INTR_SOURCES) { |
1608 | break; | | 1608 | break; |
1609 | } | | 1609 | } |
1610 | } | | 1610 | } |
1611 | } | | 1611 | } |
1612 | if (nslot == MAX_INTR_SOURCES) { | | 1612 | if (nslot == MAX_INTR_SOURCES) { |
1613 | return false; | | 1613 | return false; |
1614 | } | | 1614 | } |
1615 | | | 1615 | |
1616 | /* | | 1616 | /* |
1617 | * Now we have a new CPU and a new slot. Run a cross-call to set up | | 1617 | * Now we have a new CPU and a new slot. Run a cross-call to set up |
1618 | * the new vector on the target CPU. | | 1618 | * the new vector on the target CPU. |
1619 | */ | | 1619 | */ |
1620 | where = xc_unicast(0, intr_redistribute_xc_t, isp, | | 1620 | where = xc_unicast(0, intr_redistribute_xc_t, isp, |
1621 | (void *)(intptr_t)nslot, nci); | | 1621 | (void *)(intptr_t)nslot, nci); |
1622 | xc_wait(where); | | 1622 | xc_wait(where); |
1623 | | | 1623 | |
1624 | /* | | 1624 | /* |
1625 | * We're ready to go on the target CPU. Run a cross call to | | 1625 | * We're ready to go on the target CPU. Run a cross call to |
1626 | * reroute the interrupt away from the source CPU. | | 1626 | * reroute the interrupt away from the source CPU. |
1627 | */ | | 1627 | */ |
1628 | where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci); | | 1628 | where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci); |
1629 | xc_wait(where); | | 1629 | xc_wait(where); |
1630 | | | 1630 | |
1631 | /* Sleep for (at least) 10ms to allow the change to take hold. */ | | 1631 | /* Sleep for (at least) 10ms to allow the change to take hold. */ |
1632 | (void)kpause("intrdist", false, mstohz(10), NULL); | | 1632 | (void)kpause("intrdist", false, mstohz(10), NULL); |
1633 | | | 1633 | |
1634 | /* Complete removal from the source CPU. */ | | 1634 | /* Complete removal from the source CPU. */ |
1635 | where = xc_unicast(0, intr_redistribute_xc_s2, | | 1635 | where = xc_unicast(0, intr_redistribute_xc_s2, |
1636 | (void *)(uintptr_t)oslot, NULL, oci); | | 1636 | (void *)(uintptr_t)oslot, NULL, oci); |
1637 | xc_wait(where); | | 1637 | xc_wait(where); |
1638 | | | 1638 | |
1639 | /* Finally, take care of book-keeping. */ | | 1639 | /* Finally, take care of book-keeping. */ |
1640 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) { | | 1640 | for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) { |
1641 | oci->ci_nintrhand--; | | 1641 | oci->ci_nintrhand--; |
1642 | nci->ci_nintrhand++; | | 1642 | nci->ci_nintrhand++; |
1643 | ih->ih_cpu = nci; | | 1643 | ih->ih_cpu = nci; |
1644 | } | | 1644 | } |
1645 | intr_save_evcnt(isp, oci->ci_cpuid); | | 1645 | intr_save_evcnt(isp, oci->ci_cpuid); |
1646 | intr_restore_evcnt(isp, nci->ci_cpuid); | | 1646 | intr_restore_evcnt(isp, nci->ci_cpuid); |
1647 | isp->is_active_cpu = nci->ci_cpuid; | | 1647 | isp->is_active_cpu = nci->ci_cpuid; |
1648 | | | 1648 | |
1649 | return true; | | 1649 | return true; |
1650 | } | | 1650 | } |
1651 | | | 1651 | |
1652 | void | | 1652 | void |
1653 | cpu_intr_redistribute(void) | | 1653 | cpu_intr_redistribute(void) |
1654 | { | | 1654 | { |
1655 | CPU_INFO_ITERATOR cii; | | 1655 | CPU_INFO_ITERATOR cii; |
1656 | struct cpu_info *ci; | | 1656 | struct cpu_info *ci; |
1657 | | | 1657 | |
1658 | KASSERT(mutex_owned(&cpu_lock)); | | 1658 | KASSERT(mutex_owned(&cpu_lock)); |
1659 | KASSERT(mp_online); | | 1659 | KASSERT(mp_online); |
1660 | | | 1660 | |
1661 | /* Direct interrupts away from shielded CPUs. */ | | 1661 | /* Direct interrupts away from shielded CPUs. */ |
1662 | for (CPU_INFO_FOREACH(cii, ci)) { | | 1662 | for (CPU_INFO_FOREACH(cii, ci)) { |
1663 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { | | 1663 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { |
1664 | continue; | | 1664 | continue; |
1665 | } | | 1665 | } |
1666 | while (intr_redistribute(ci)) { | | 1666 | while (intr_redistribute(ci)) { |
1667 | /* nothing */ | | 1667 | /* nothing */ |
1668 | } | | 1668 | } |
1669 | } | | 1669 | } |
1670 | | | 1670 | |
1671 | /* XXX should now re-balance */ | | 1671 | /* XXX should now re-balance */ |
1672 | } | | 1672 | } |
1673 | | | 1673 | |
1674 | u_int | | 1674 | u_int |
1675 | cpu_intr_count(struct cpu_info *ci) | | 1675 | cpu_intr_count(struct cpu_info *ci) |
1676 | { | | 1676 | { |
1677 | | | 1677 | |
1678 | KASSERT(ci->ci_nintrhand >= 0); | | 1678 | KASSERT(ci->ci_nintrhand >= 0); |
1679 | | | 1679 | |
1680 | return ci->ci_nintrhand; | | 1680 | return ci->ci_nintrhand; |
1681 | } | | 1681 | } |
1682 | | | 1682 | |
1683 | static int | | 1683 | static int |
1684 | intr_find_unused_slot(struct cpu_info *ci, int *index) | | 1684 | intr_find_unused_slot(struct cpu_info *ci, int *index) |
1685 | { | | 1685 | { |
1686 | int slot, i; | | 1686 | int slot, i; |
1687 | | | 1687 | |
1688 | KASSERT(mutex_owned(&cpu_lock)); | | 1688 | KASSERT(mutex_owned(&cpu_lock)); |
1689 | | | 1689 | |
1690 | slot = -1; | | 1690 | slot = -1; |
1691 | for (i = 0; i < MAX_INTR_SOURCES ; i++) { | | 1691 | for (i = 0; i < MAX_INTR_SOURCES ; i++) { |
1692 | if (ci->ci_isources[i] == NULL) { | | 1692 | if (ci->ci_isources[i] == NULL) { |
1693 | slot = i; | | 1693 | slot = i; |
1694 | break; | | 1694 | break; |
1695 | } | | 1695 | } |
1696 | } | | 1696 | } |
1697 | if (slot == -1) { | | 1697 | if (slot == -1) { |
1698 | DPRINTF(("cannot allocate ci_isources\n")); | | 1698 | DPRINTF(("cannot allocate ci_isources\n")); |
1699 | return EBUSY; | | 1699 | return EBUSY; |
1700 | } | | 1700 | } |
1701 | | | 1701 | |
1702 | *index = slot; | | 1702 | *index = slot; |
1703 | return 0; | | 1703 | return 0; |
1704 | } | | 1704 | } |
1705 | | | 1705 | |
1706 | /* | | 1706 | /* |
1707 | * Make the cpu_info ready to accept the interrupt. | | 1707 | * Make the cpu_info ready to accept the interrupt. |
1708 | */ | | 1708 | */ |
1709 | static void | | 1709 | static void |
1710 | intr_activate_xcall(void *arg1, void *arg2) | | 1710 | intr_activate_xcall(void *arg1, void *arg2) |
1711 | { | | 1711 | { |
1712 | struct cpu_info *ci; | | 1712 | struct cpu_info *ci; |
1713 | struct intrsource *source; | | 1713 | struct intrsource *source; |
1714 | struct intrstub *stubp; | | 1714 | struct intrstub *stubp; |
1715 | struct intrhand *ih; | | 1715 | struct intrhand *ih; |
1716 | u_long psl; | | 1716 | u_long psl; |
1717 | int idt_vec; | | 1717 | int idt_vec; |
1718 | int slot; | | 1718 | int slot; |
1719 | | | 1719 | |
1720 | ih = arg1; | | 1720 | ih = arg1; |
1721 | | | 1721 | |
1722 | kpreempt_disable(); | | 1722 | kpreempt_disable(); |
1723 | | | 1723 | |
1724 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | | 1724 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); |
1725 | | | 1725 | |
1726 | ci = ih->ih_cpu; | | 1726 | ci = ih->ih_cpu; |
1727 | slot = ih->ih_slot; | | 1727 | slot = ih->ih_slot; |
1728 | source = ci->ci_isources[slot]; | | 1728 | source = ci->ci_isources[slot]; |
1729 | idt_vec = source->is_idtvec; | | 1729 | idt_vec = source->is_idtvec; |
1730 | | | 1730 | |
1731 | psl = x86_read_psl(); | | 1731 | psl = x86_read_psl(); |
1732 | x86_disable_intr(); | | 1732 | x86_disable_intr(); |
1733 | | | 1733 | |
1734 | x86_intr_calculatemasks(ci); | | 1734 | x86_intr_calculatemasks(ci); |
1735 | | | 1735 | |
1736 | if (source->is_type == IST_LEVEL) { | | 1736 | if (source->is_type == IST_LEVEL) { |
1737 | stubp = &source->is_pic->pic_level_stubs[slot]; | | 1737 | stubp = &source->is_pic->pic_level_stubs[slot]; |
1738 | } else { | | 1738 | } else { |
1739 | stubp = &source->is_pic->pic_edge_stubs[slot]; | | 1739 | stubp = &source->is_pic->pic_edge_stubs[slot]; |
1740 | } | | 1740 | } |
1741 | source->is_resume = stubp->ist_resume; | | 1741 | source->is_resume = stubp->ist_resume; |
1742 | source->is_recurse = stubp->ist_recurse; | | 1742 | source->is_recurse = stubp->ist_recurse; |
1743 | idt_vec_set(idt_vec, stubp->ist_entry); | | 1743 | idt_vec_set(idt_vec, stubp->ist_entry); |
1744 | | | 1744 | |
1745 | x86_write_psl(psl); | | 1745 | x86_write_psl(psl); |
1746 | | | 1746 | |
1747 | kpreempt_enable(); | | 1747 | kpreempt_enable(); |
1748 | } | | 1748 | } |
1749 | | | 1749 | |
1750 | /* | | 1750 | /* |
1751 | * Make the cpu_info stop accepting the interrupt. | | 1751 | * Make the cpu_info stop accepting the interrupt. |
1752 | */ | | 1752 | */ |
1753 | static void | | 1753 | static void |
1754 | intr_deactivate_xcall(void *arg1, void *arg2) | | 1754 | intr_deactivate_xcall(void *arg1, void *arg2) |
1755 | { | | 1755 | { |
1756 | struct cpu_info *ci; | | 1756 | struct cpu_info *ci; |
1757 | struct intrhand *ih, *lih; | | 1757 | struct intrhand *ih, *lih; |
1758 | u_long psl; | | 1758 | u_long psl; |
1759 | int slot; | | 1759 | int slot; |
1760 | | | 1760 | |
1761 | ih = arg1; | | 1761 | ih = arg1; |
1762 | | | 1762 | |
1763 | kpreempt_disable(); | | 1763 | kpreempt_disable(); |
1764 | | | 1764 | |
1765 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); | | 1765 | KASSERT(ih->ih_cpu == curcpu() || !mp_online); |
1766 | | | 1766 | |
1767 | ci = ih->ih_cpu; | | 1767 | ci = ih->ih_cpu; |
1768 | slot = ih->ih_slot; | | 1768 | slot = ih->ih_slot; |
1769 | | | 1769 | |
1770 | psl = x86_read_psl(); | | 1770 | psl = x86_read_psl(); |
1771 | x86_disable_intr(); | | 1771 | x86_disable_intr(); |
1772 | | | 1772 | |
1773 | /* Move all devices sharing the IRQ number. */ | | 1773 | /* Move all devices sharing the IRQ number. */ |
1774 | ci->ci_isources[slot] = NULL; | | 1774 | ci->ci_isources[slot] = NULL; |
1775 | for (lih = ih; lih != NULL; lih = lih->ih_next) { | | 1775 | for (lih = ih; lih != NULL; lih = lih->ih_next) { |
1776 | ci->ci_nintrhand--; | | 1776 | ci->ci_nintrhand--; |
1777 | } | | 1777 | } |
1778 | | | 1778 | |
1779 | x86_intr_calculatemasks(ci); | | 1779 | x86_intr_calculatemasks(ci); |
1780 | | | 1780 | |
1781 | /* | | 1781 | /* |
1782 | * Skip unsetgate(), because the same idt[] entry is overwritten in | | 1782 | * Skip unsetgate(), because the same idt[] entry is overwritten in |
1783 | * intr_activate_xcall(). | | 1783 | * intr_activate_xcall(). |
1784 | */ | | 1784 | */ |
1785 | | | 1785 | |
1786 | x86_write_psl(psl); | | 1786 | x86_write_psl(psl); |
1787 | | | 1787 | |
1788 | kpreempt_enable(); | | 1788 | kpreempt_enable(); |
1789 | } | | 1789 | } |
1790 | | | 1790 | |
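| /* | | | /* |
| * Report, as a cpuset, the CPU that currently handles the interrupt | | | * Report, as a cpuset, the CPU that currently handles the interrupt |
| * source.  The set is left empty if no CPU is assigned. | | | * source.  The set is left empty if no CPU is assigned. |
| */ | | | */ |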
1791 | static void | | 1791 | static void |
1792 | intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset) | | 1792 | intr_get_affinity(struct intrsource *isp, kcpuset_t *cpuset) |
1793 | { | | 1793 | { |
1794 | struct cpu_info *ci; | | 1794 | struct cpu_info *ci; |
1795 | | | 1795 | |
1796 | KASSERT(mutex_owned(&cpu_lock)); | | 1796 | KASSERT(mutex_owned(&cpu_lock)); |
1797 | | | 1797 | |
1798 | if (isp == NULL) { | | 1798 | if (isp == NULL) { |
1799 | kcpuset_zero(cpuset); | | 1799 | kcpuset_zero(cpuset); |
1800 | return; | | 1800 | return; |
1801 | } | | 1801 | } |
1802 | | | 1802 | |
1803 | KASSERTMSG(isp->is_handlers != NULL, | | 1803 | KASSERTMSG(isp->is_handlers != NULL, |
1804 | "Don't get affinity for a device that is not established."); | | 1804 | "Don't get affinity for a device that is not established."); |
1805 | | | 1805 | |
1806 | ci = isp->is_handlers->ih_cpu; | | 1806 | ci = isp->is_handlers->ih_cpu; |
1807 | if (ci == NULL) { | | 1807 | if (ci == NULL) { |
1808 | kcpuset_zero(cpuset); | | 1808 | kcpuset_zero(cpuset); |
1809 | return; | | 1809 | return; |
1810 | } | | 1810 | } |
1811 | | | 1811 | |
1812 | kcpuset_set(cpuset, cpu_index(ci)); | | 1812 | kcpuset_set(cpuset, cpu_index(ci)); |
1813 | return; | | 1813 | return; |
1814 | } | | 1814 | } |
1815 | | | 1815 | |
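| /* | | | /* |
| * Move the interrupt source, and every handler attached to it, to the | | | * Move the interrupt source, and every handler attached to it, to the |
| * lowest-indexed CPU in cpuset.  Called with intr_distribute_lock and | | | * lowest-indexed CPU in cpuset.  Called with intr_distribute_lock and |
| * cpu_lock held. | | | * cpu_lock held. |
| */ | | | */ |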
1816 | static int | | 1816 | static int |
1817 | intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset) | | 1817 | intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset) |
1818 | { | | 1818 | { |
1819 | struct cpu_info *oldci, *newci; | | 1819 | struct cpu_info *oldci, *newci; |
1820 | struct intrhand *ih, *lih; | | 1820 | struct intrhand *ih, *lih; |
1821 | struct pic *pic; | | 1821 | struct pic *pic; |
1822 | u_int cpu_idx; | | 1822 | u_int cpu_idx; |
1823 | int idt_vec; | | 1823 | int idt_vec; |
1824 | int oldslot, newslot; | | 1824 | int oldslot, newslot; |
1825 | int err; | | 1825 | int err; |
1826 | int pin; | | 1826 | int pin; |
1827 | | | 1827 | |
1828 | KASSERT(mutex_owned(&intr_distribute_lock)); | | 1828 | KASSERT(mutex_owned(&intr_distribute_lock)); |
1829 | KASSERT(mutex_owned(&cpu_lock)); | | 1829 | KASSERT(mutex_owned(&cpu_lock)); |
1830 | | | 1830 | |
1831 | /* XXX | | 1831 | /* XXX |
1832 | * logical destination mode is not supported; use the lowest-indexed CPU. | | 1832 | * logical destination mode is not supported; use the lowest-indexed CPU. |
1833 | */ | | 1833 | */ |
1834 | cpu_idx = kcpuset_ffs(cpuset) - 1; | | 1834 | cpu_idx = kcpuset_ffs(cpuset) - 1; |
1835 | newci = cpu_lookup(cpu_idx); | | 1835 | newci = cpu_lookup(cpu_idx); |
1836 | if (newci == NULL) { | | 1836 | if (newci == NULL) { |
1837 | DPRINTF(("invalid cpu index: %u\n", cpu_idx)); | | 1837 | DPRINTF(("invalid cpu index: %u\n", cpu_idx)); |
1838 | return EINVAL; | | 1838 | return EINVAL; |
1839 | } | | 1839 | } |
1840 | if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { | | 1840 | if ((newci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) { |
1841 | DPRINTF(("the cpu is shielded from interrupts (nointr): index %u\n", cpu_idx)); | | 1841 | DPRINTF(("the cpu is shielded from interrupts (nointr): index %u\n", cpu_idx)); |
1842 | return EINVAL; | | 1842 | return EINVAL; |
1843 | } | | 1843 | } |
1844 | | | 1844 | |
1845 | if (isp == NULL) { | | 1845 | if (isp == NULL) { |
1846 | DPRINTF(("invalid intrctl handler\n")); | | 1846 | DPRINTF(("invalid intrctl handler\n")); |
1847 | return EINVAL; | | 1847 | return EINVAL; |
1848 | } | | 1848 | } |
1849 | | | 1849 | |
1850 | /* i8259_pic supports only the primary CPU; see i8259.c. */ | | 1850 | /* i8259_pic supports only the primary CPU; see i8259.c. */ |
1851 | pic = isp->is_pic; | | 1851 | pic = isp->is_pic; |
1852 | if (pic == &i8259_pic) { | | 1852 | if (pic == &i8259_pic) { |
1853 | DPRINTF(("i8259 pic does not support set_affinity\n")); | | 1853 | DPRINTF(("i8259 pic does not support set_affinity\n")); |
1854 | return ENOTSUP; | | 1854 | return ENOTSUP; |
1855 | } | | 1855 | } |
1856 | | | 1856 | |
1857 | ih = isp->is_handlers; | | 1857 | ih = isp->is_handlers; |
1858 | KASSERTMSG(ih != NULL, | | 1858 | KASSERTMSG(ih != NULL, |
1859 | "Don't set affinity for a device that is not established."); | | 1859 | "Don't set affinity for a device that is not established."); |
1860 | | | 1860 | |
1861 | oldci = ih->ih_cpu; | | 1861 | oldci = ih->ih_cpu; |
1862 | if (newci == oldci) /* nothing to do */ | | 1862 | if (newci == oldci) /* nothing to do */ |
1863 | return 0; | | 1863 | return 0; |
1864 | | | 1864 | |
1865 | oldslot = ih->ih_slot; | | 1865 | oldslot = ih->ih_slot; |
1866 | idt_vec = isp->is_idtvec; | | 1866 | idt_vec = isp->is_idtvec; |
1867 | | | 1867 | |
1868 | err = intr_find_unused_slot(newci, &newslot); | | 1868 | err = intr_find_unused_slot(newci, &newslot); |
1869 | if (err) { | | 1869 | if (err) { |
1870 | DPRINTF(("failed to allocate interrupt slot for PIC %s intrid " | | 1870 | DPRINTF(("failed to allocate interrupt slot for PIC %s intrid " |
1871 | "%s\n", isp->is_pic->pic_name, isp->is_intrid)); | | 1871 | "%s\n", isp->is_pic->pic_name, isp->is_intrid)); |
1872 | return err; | | 1872 | return err; |
1873 | } | | 1873 | } |
1874 | | | 1874 | |
1875 | /* Prevent intr_unmask() from reenabling the source at the hw. */ | | 1875 | /* Prevent intr_unmask() from reenabling the source at the hw. */ |
1876 | isp->is_distribute_pending = true; | | 1876 | isp->is_distribute_pending = true; |
1877 | | | 1877 | |
1878 | pin = isp->is_pin; | | 1878 | pin = isp->is_pin; |
1879 | (*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */ | | 1879 | (*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */ |
1880 | while (oldci->ci_ipending & (1 << oldslot)) { | | 1880 | while (oldci->ci_ipending & (1 << oldslot)) { |
1881 | (void)kpause("intrdist", false, 1, &cpu_lock); | | 1881 | (void)kpause("intrdist", false, 1, &cpu_lock); |
1882 | } | | 1882 | } |
1883 | | | 1883 | |
1884 | kpreempt_disable(); | | 1884 | kpreempt_disable(); |
1885 | | | 1885 | |
1886 | /* deactivate old interrupt setting */ | | 1886 | /* deactivate old interrupt setting */ |
1887 | if (oldci == curcpu() || !mp_online) { | | 1887 | if (oldci == curcpu() || !mp_online) { |
1888 | intr_deactivate_xcall(ih, NULL); | | 1888 | intr_deactivate_xcall(ih, NULL); |
1889 | } else { | | 1889 | } else { |
1890 | uint64_t where; | | 1890 | uint64_t where; |
1891 | where = xc_unicast(0, intr_deactivate_xcall, ih, | | 1891 | where = xc_unicast(0, intr_deactivate_xcall, ih, |
1892 | NULL, oldci); | | 1892 | NULL, oldci); |
1893 | xc_wait(where); | | 1893 | xc_wait(where); |
1894 | } | | 1894 | } |
1895 | intr_save_evcnt(isp, oldci->ci_cpuid); | | 1895 | intr_save_evcnt(isp, oldci->ci_cpuid); |
1896 | (*pic->pic_delroute)(pic, oldci, pin, idt_vec, isp->is_type); | | 1896 | (*pic->pic_delroute)(pic, oldci, pin, idt_vec, isp->is_type); |
1897 | | | 1897 | |
1898 | /* activate new interrupt setting */ | | 1898 | /* activate new interrupt setting */ |
1899 | newci->ci_isources[newslot] = isp; | | 1899 | newci->ci_isources[newslot] = isp; |
1900 | for (lih = ih; lih != NULL; lih = lih->ih_next) { | | 1900 | for (lih = ih; lih != NULL; lih = lih->ih_next) { |
1901 | newci->ci_nintrhand++; | | 1901 | newci->ci_nintrhand++; |
1902 | lih->ih_cpu = newci; | | 1902 | lih->ih_cpu = newci; |
1903 | lih->ih_slot = newslot; | | 1903 | lih->ih_slot = newslot; |
1904 | } | | 1904 | } |
1905 | if (newci == curcpu() || !mp_online) { | | 1905 | if (newci == curcpu() || !mp_online) { |
1906 | intr_activate_xcall(ih, NULL); | | 1906 | intr_activate_xcall(ih, NULL); |
1907 | } else { | | 1907 | } else { |
1908 | uint64_t where; | | 1908 | uint64_t where; |
1909 | where = xc_unicast(0, intr_activate_xcall, ih, | | 1909 | where = xc_unicast(0, intr_activate_xcall, ih, |
1910 | NULL, newci); | | 1910 | NULL, newci); |
1911 | xc_wait(where); | | 1911 | xc_wait(where); |
1912 | } | | 1912 | } |
1913 | intr_restore_evcnt(isp, newci->ci_cpuid); | | 1913 | intr_restore_evcnt(isp, newci->ci_cpuid); |
1914 | isp->is_active_cpu = newci->ci_cpuid; | | 1914 | isp->is_active_cpu = newci->ci_cpuid; |
1915 | (*pic->pic_addroute)(pic, newci, pin, idt_vec, isp->is_type); | | 1915 | (*pic->pic_addroute)(pic, newci, pin, idt_vec, isp->is_type); |
1916 | | | 1916 | |
1917 | isp->is_distribute_pending = false; | | 1917 | isp->is_distribute_pending = false; |
1918 | if (newci == curcpu() || !mp_online) { | | 1918 | if (newci == curcpu() || !mp_online) { |
1919 | intr_hwunmask_xcall(ih, NULL); | | 1919 | intr_hwunmask_xcall(ih, NULL); |
1920 | } else { | | 1920 | } else { |
1921 | uint64_t where; | | 1921 | uint64_t where; |
1922 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, newci); | | 1922 | where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, newci); |
1923 | xc_wait(where); | | 1923 | xc_wait(where); |
1924 | } | | 1924 | } |
1925 | | | 1925 | |
1926 | kpreempt_enable(); | | 1926 | kpreempt_enable(); |
1927 | | | 1927 | |
1928 | return err; | | 1928 | return err; |
1929 | } | | 1929 | } |
1930 | | | 1930 | |
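| /* | | | /* |
| * Return true if the interrupt source is currently handled by a CPU | | | * Return true if the interrupt source is currently handled by a CPU |
| * that is a member of cpuset. | | | * that is a member of cpuset. |
| */ | | | */ |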
1931 | static bool | | 1931 | static bool |
1932 | intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset) | | 1932 | intr_is_affinity_intrsource(struct intrsource *isp, const kcpuset_t *cpuset) |
1933 | { | | 1933 | { |
1934 | struct cpu_info *ci; | | 1934 | struct cpu_info *ci; |
1935 | | | 1935 | |
1936 | KASSERT(mutex_owned(&cpu_lock)); | | 1936 | KASSERT(mutex_owned(&cpu_lock)); |
1937 | | | 1937 | |
1938 | /* | | 1938 | /* |
1939 | * The device has already been pci_intr_alloc'ed, but it is not | | 1939 | * The device has already been pci_intr_alloc'ed, but it is not |
1940 | * established yet. | | 1940 | * established yet. |
1941 | */ | | 1941 | */ |
1942 | if (isp->is_handlers == NULL) | | 1942 | if (isp->is_handlers == NULL) |
1943 | return false; | | 1943 | return false; |
1944 | | | 1944 | |
1945 | ci = isp->is_handlers->ih_cpu; | | 1945 | ci = isp->is_handlers->ih_cpu; |
1946 | KASSERT(ci != NULL); | | 1946 | KASSERT(ci != NULL); |
1947 | | | 1947 | |
1948 | return kcpuset_isset(cpuset, cpu_index(ci)); | | 1948 | return kcpuset_isset(cpuset, cpu_index(ci)); |
1949 | } | | 1949 | } |
1950 | | | 1950 | |
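| /* | | | /* |
| * Look up the interrupt source by id and return its first handler, | | | * Look up the interrupt source by id and return its first handler, |
| * or NULL if the id is unknown. | | | * or NULL if the id is unknown. |
| */ | | | */ |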
1951 | static struct intrhand * | | 1951 | static struct intrhand * |
1952 | intr_get_handler(const char *intrid) | | 1952 | intr_get_handler(const char *intrid) |
1953 | { | | 1953 | { |
1954 | struct intrsource *isp; | | 1954 | struct intrsource *isp; |
1955 | | | 1955 | |
1956 | KASSERT(mutex_owned(&cpu_lock)); | | 1956 | KASSERT(mutex_owned(&cpu_lock)); |
1957 | | | 1957 | |
1958 | isp = intr_get_io_intrsource(intrid); | | 1958 | isp = intr_get_io_intrsource(intrid); |
1959 | if (isp == NULL) | | 1959 | if (isp == NULL) |
1960 | return NULL; | | 1960 | return NULL; |
1961 | | | 1961 | |
1962 | return isp->is_handlers; | | 1962 | return isp->is_handlers; |
1963 | } | | 1963 | } |
1964 | | | 1964 | |
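| /* | | | /* |
| * Return the interrupt count for intrid on the CPU with index cpu_idx, | | | * Return the interrupt count for intrid on the CPU with index cpu_idx, |
| * using the saved per-CPU counters for CPUs that are not currently | | | * using the saved per-CPU counters for CPUs that are not currently |
| * handling the source. | | | * handling the source. |
| */ | | | */ |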
1965 | uint64_t | | 1965 | uint64_t |
1966 | x86_intr_get_count(const char *intrid, u_int cpu_idx) | | 1966 | x86_intr_get_count(const char *intrid, u_int cpu_idx) |
1967 | { | | 1967 | { |
1968 | struct cpu_info *ci; | | 1968 | struct cpu_info *ci; |
1969 | struct intrsource *isp; | | 1969 | struct intrsource *isp; |
1970 | struct intrhand *ih; | | 1970 | struct intrhand *ih; |
1971 | struct percpu_evcnt pep; | | 1971 | struct percpu_evcnt pep; |
1972 | cpuid_t cpuid; | | 1972 | cpuid_t cpuid; |
1973 | int i, slot; | | 1973 | int i, slot; |
1974 | uint64_t count = 0; | | 1974 | uint64_t count = 0; |
1975 | | | 1975 | |
1976 | KASSERT(mutex_owned(&cpu_lock)); | | 1976 | KASSERT(mutex_owned(&cpu_lock)); |
1977 | ci = cpu_lookup(cpu_idx); | | 1977 | ci = cpu_lookup(cpu_idx); |
1978 | cpuid = ci->ci_cpuid; | | 1978 | cpuid = ci->ci_cpuid; |
1979 | | | 1979 | |
1980 | ih = intr_get_handler(intrid); | | 1980 | ih = intr_get_handler(intrid); |
1981 | if (ih == NULL) { | | 1981 | if (ih == NULL) { |
1982 | count = 0; | | 1982 | count = 0; |
1983 | goto out; | | 1983 | goto out; |
1984 | } | | 1984 | } |
1985 | slot = ih->ih_slot; | | 1985 | slot = ih->ih_slot; |
1986 | isp = ih->ih_cpu->ci_isources[slot]; | | 1986 | isp = ih->ih_cpu->ci_isources[slot]; |
1987 | | | 1987 | |
1988 | for (i = 0; i < ncpu; i++) { | | 1988 | for (i = 0; i < ncpu; i++) { |
1989 | pep = isp->is_saved_evcnt[i]; | | 1989 | pep = isp->is_saved_evcnt[i]; |
1990 | if (cpuid == pep.cpuid) { | | 1990 | if (cpuid == pep.cpuid) { |
1991 | if (isp->is_active_cpu == pep.cpuid) { | | 1991 | if (isp->is_active_cpu == pep.cpuid) { |
1992 | count = isp->is_evcnt.ev_count; | | 1992 | count = isp->is_evcnt.ev_count; |
1993 | goto out; | | 1993 | goto out; |
1994 | } else { | | 1994 | } else { |
1995 | count = pep.count; | | 1995 | count = pep.count; |
1996 | goto out; | | 1996 | goto out; |
1997 | } | | 1997 | } |
1998 | } | | 1998 | } |
1999 | } | | 1999 | } |
2000 | | | 2000 | |
2001 | out: | | 2001 | out: |
2002 | return count; | | 2002 | return count; |
2003 | } | | 2003 | } |
2004 | | | 2004 | |
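| /* | | | /* |
| * Set in cpuset the CPU to which intrid is currently assigned; the set | | | * Set in cpuset the CPU to which intrid is currently assigned; the set |
| * is left empty if the id is unknown. | | | * is left empty if the id is unknown. |
| */ | | | */ |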
2005 | void | | 2005 | void |
2006 | x86_intr_get_assigned(const char *intrid, kcpuset_t *cpuset) | | 2006 | x86_intr_get_assigned(const char *intrid, kcpuset_t *cpuset) |
2007 | { | | 2007 | { |
2008 | struct cpu_info *ci; | | 2008 | struct cpu_info *ci; |
2009 | struct intrhand *ih; | | 2009 | struct intrhand *ih; |
2010 | | | 2010 | |
2011 | KASSERT(mutex_owned(&cpu_lock)); | | 2011 | KASSERT(mutex_owned(&cpu_lock)); |
2012 | kcpuset_zero(cpuset); | | 2012 | kcpuset_zero(cpuset); |
2013 | | | 2013 | |
2014 | ih = intr_get_handler(intrid); | | 2014 | ih = intr_get_handler(intrid); |
2015 | if (ih == NULL) | | 2015 | if (ih == NULL) |
2016 | return; | | 2016 | return; |
2017 | | | 2017 | |
2018 | ci = ih->ih_cpu; | | 2018 | ci = ih->ih_cpu; |
2019 | kcpuset_set(cpuset, cpu_index(ci)); | | 2019 | kcpuset_set(cpuset, cpu_index(ci)); |
2020 | } | | 2020 | } |
2021 | | | 2021 | |
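| /* | | | /* |
| * Copy the device name(s) attached to intrid into buf, or an empty | | | * Copy the device name(s) attached to intrid into buf, or an empty |
| * string if the id is unknown. | | | * string if the id is unknown. |
| */ | | | */ |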
2022 | void | | 2022 | void |
2023 | x86_intr_get_devname(const char *intrid, char *buf, size_t len) | | 2023 | x86_intr_get_devname(const char *intrid, char *buf, size_t len) |
2024 | { | | 2024 | { |
2025 | struct intrsource *isp; | | 2025 | struct intrsource *isp; |
2026 | struct intrhand *ih; | | 2026 | struct intrhand *ih; |
2027 | int slot; | | 2027 | int slot; |
2028 | | | 2028 | |
2029 | KASSERT(mutex_owned(&cpu_lock)); | | 2029 | KASSERT(mutex_owned(&cpu_lock)); |
2030 | | | 2030 | |
2031 | ih = intr_get_handler(intrid); | | 2031 | ih = intr_get_handler(intrid); |
2032 | if (ih == NULL) { | | 2032 | if (ih == NULL) { |
2033 | buf[0] = '\0'; | | 2033 | buf[0] = '\0'; |
2034 | return; | | 2034 | return; |
2035 | } | | 2035 | } |
2036 | slot = ih->ih_slot; | | 2036 | slot = ih->ih_slot; |
2037 | isp = ih->ih_cpu->ci_isources[slot]; | | 2037 | isp = ih->ih_cpu->ci_isources[slot]; |
2038 | strlcpy(buf, isp->is_xname, len); | | 2038 | strlcpy(buf, isp->is_xname, len); |
2039 | | | 2039 | |
2040 | } | | 2040 | } |
2041 | | | 2041 | |
2042 | /* | | 2042 | /* |
2043 | * MI interface for subr_interrupt.c | | 2043 | * MI interface for subr_interrupt.c |
2044 | */ | | 2044 | */ |
2045 | uint64_t | | 2045 | uint64_t |
2046 | interrupt_get_count(const char *intrid, u_int cpu_idx) | | 2046 | interrupt_get_count(const char *intrid, u_int cpu_idx) |
2047 | { | | 2047 | { |
2048 | struct intrsource *isp; | | 2048 | struct intrsource *isp; |
2049 | uint64_t count = 0; | | 2049 | uint64_t count = 0; |
2050 | | | 2050 | |
2051 | mutex_enter(&cpu_lock); | | 2051 | mutex_enter(&cpu_lock); |
2052 | isp = intr_get_io_intrsource(intrid); | | 2052 | isp = intr_get_io_intrsource(intrid); |
2053 | if (isp != NULL) | | 2053 | if (isp != NULL) |
2054 | count = isp->is_pic->pic_intr_get_count(intrid, cpu_idx); | | 2054 | count = isp->is_pic->pic_intr_get_count(intrid, cpu_idx); |
2055 | mutex_exit(&cpu_lock); | | 2055 | mutex_exit(&cpu_lock); |
2056 | return count; | | 2056 | return count; |
2057 | } | | 2057 | } |
2058 | | | 2058 | |
2059 | /* | | 2059 | /* |
2060 | * MI interface for subr_interrupt.c | | 2060 | * MI interface for subr_interrupt.c |
2061 | */ | | 2061 | */ |
2062 | void | | 2062 | void |
2063 | interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) | | 2063 | interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) |
2064 | { | | 2064 | { |
2065 | struct intrsource *isp; | | 2065 | struct intrsource *isp; |
2066 | | | 2066 | |
2067 | mutex_enter(&cpu_lock); | | 2067 | mutex_enter(&cpu_lock); |
2068 | isp = intr_get_io_intrsource(intrid); | | 2068 | isp = intr_get_io_intrsource(intrid); |
2069 | if (isp != NULL) | | 2069 | if (isp != NULL) |
2070 | isp->is_pic->pic_intr_get_assigned(intrid, cpuset); | | 2070 | isp->is_pic->pic_intr_get_assigned(intrid, cpuset); |
2071 | mutex_exit(&cpu_lock); | | 2071 | mutex_exit(&cpu_lock); |
2072 | } | | 2072 | } |
2073 | | | 2073 | |
2074 | /* | | 2074 | /* |
2075 | * MI interface for subr_interrupt.c | | 2075 | * MI interface for subr_interrupt.c |
2076 | */ | | 2076 | */ |
2077 | void | | 2077 | void |
2078 | interrupt_get_available(kcpuset_t *cpuset) | | 2078 | interrupt_get_available(kcpuset_t *cpuset) |
2079 | { | | 2079 | { |
2080 | CPU_INFO_ITERATOR cii; | | 2080 | CPU_INFO_ITERATOR cii; |
2081 | struct cpu_info *ci; | | 2081 | struct cpu_info *ci; |
2082 | | | 2082 | |
2083 | kcpuset_zero(cpuset); | | 2083 | kcpuset_zero(cpuset); |
2084 | | | 2084 | |
2085 | mutex_enter(&cpu_lock); | | 2085 | mutex_enter(&cpu_lock); |
2086 | for (CPU_INFO_FOREACH(cii, ci)) { | | 2086 | for (CPU_INFO_FOREACH(cii, ci)) { |
2087 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { | | 2087 | if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { |
2088 | kcpuset_set(cpuset, cpu_index(ci)); | | 2088 | kcpuset_set(cpuset, cpu_index(ci)); |
2089 | } | | 2089 | } |
2090 | } | | 2090 | } |
2091 | mutex_exit(&cpu_lock); | | 2091 | mutex_exit(&cpu_lock); |
2092 | } | | 2092 | } |
2093 | | | 2093 | |
2094 | /* | | 2094 | /* |
2095 | * MI interface for subr_interrupt.c | | 2095 | * MI interface for subr_interrupt.c |
2096 | */ | | 2096 | */ |
2097 | void | | 2097 | void |
2098 | interrupt_get_devname(const char *intrid, char *buf, size_t len) | | 2098 | interrupt_get_devname(const char *intrid, char *buf, size_t len) |
2099 | { | | 2099 | { |
2100 | struct intrsource *isp; | | 2100 | struct intrsource *isp; |
2101 | | | 2101 | |
2102 | mutex_enter(&cpu_lock); | | 2102 | mutex_enter(&cpu_lock); |
2103 | isp = intr_get_io_intrsource(intrid); | | 2103 | isp = intr_get_io_intrsource(intrid); |
2104 | if (isp != NULL) { | | 2104 | if (isp != NULL) { |
2105 | if (isp->is_pic->pic_intr_get_devname == NULL) { | | 2105 | if (isp->is_pic->pic_intr_get_devname == NULL) { |
2106 | printf("NULL get_devname intrid %s pic %s\n", | | 2106 | printf("NULL get_devname intrid %s pic %s\n", |
2107 | intrid, isp->is_pic->pic_name); | | 2107 | intrid, isp->is_pic->pic_name); |
2108 | } else { | | 2108 | } else { |
2109 | isp->is_pic->pic_intr_get_devname(intrid, buf, len); | | 2109 | isp->is_pic->pic_intr_get_devname(intrid, buf, len); |
2110 | } | | 2110 | } |
2111 | } | | 2111 | } |
2112 | mutex_exit(&cpu_lock); | | 2112 | mutex_exit(&cpu_lock); |
2113 | } | | 2113 | } |
2114 | | | 2114 | |
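| /* | | | /* |
| * Common code for interrupt_distribute() and | | | * Common code for interrupt_distribute() and |
| * interrupt_distribute_handler(): report the current affinity in | | | * interrupt_distribute_handler(): report the current affinity in |
| * oldset (if not NULL) and then move the handler to newset. | | | * oldset (if not NULL) and then move the handler to newset. |
| */ | | | */ |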
2115 | static int | | 2115 | static int |
2116 | intr_distribute_locked(struct intrhand *ih, const kcpuset_t *newset, | | 2116 | intr_distribute_locked(struct intrhand *ih, const kcpuset_t *newset, |
2117 | kcpuset_t *oldset) | | 2117 | kcpuset_t *oldset) |
2118 | { | | 2118 | { |
2119 | struct intrsource *isp; | | 2119 | struct intrsource *isp; |
2120 | int slot; | | 2120 | int slot; |
2121 | | | 2121 | |
2122 | KASSERT(mutex_owned(&intr_distribute_lock)); | | 2122 | KASSERT(mutex_owned(&intr_distribute_lock)); |
2123 | KASSERT(mutex_owned(&cpu_lock)); | | 2123 | KASSERT(mutex_owned(&cpu_lock)); |
2124 | | | 2124 | |
2125 | if (ih == NULL) | | 2125 | if (ih == NULL) |
2126 | return EINVAL; | | 2126 | return EINVAL; |
2127 | | | 2127 | |
2128 | slot = ih->ih_slot; | | 2128 | slot = ih->ih_slot; |
2129 | isp = ih->ih_cpu->ci_isources[slot]; | | 2129 | isp = ih->ih_cpu->ci_isources[slot]; |
2130 | KASSERT(isp != NULL); | | 2130 | KASSERT(isp != NULL); |
2131 | | | 2131 | |
2132 | if (oldset != NULL) | | 2132 | if (oldset != NULL) |
2133 | intr_get_affinity(isp, oldset); | | 2133 | intr_get_affinity(isp, oldset); |
2134 | | | 2134 | |
2135 | return intr_set_affinity(isp, newset); | | 2135 | return intr_set_affinity(isp, newset); |
2136 | } | | 2136 | } |
2137 | | | 2137 | |
2138 | /* | | 2138 | /* |
2139 | * MI interface for subr_interrupt.c | | 2139 | * MI interface for subr_interrupt.c |
2140 | */ | | 2140 | */ |
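| /* | | | /* |
| * For example (an illustrative sketch, not code from this file), a | | | * For example (an illustrative sketch, not code from this file), a |
| * caller holding the cookie returned by intr_establish_xname() could | | | * caller holding the cookie returned by intr_establish_xname() could |
| * bind its handler to CPU index 1 with: | | | * bind its handler to CPU index 1 with: |
| * | | | * |
| *	kcpuset_t *kcp; | | | *	kcpuset_t *kcp; |
| *	int error; | | | *	int error; |
| * | | | * |
| *	kcpuset_create(&kcp, true); | | | *	kcpuset_create(&kcp, true); |
| *	kcpuset_set(kcp, 1); | | | *	kcpuset_set(kcp, 1); |
| *	error = interrupt_distribute(cookie, kcp, NULL); | | | *	error = interrupt_distribute(cookie, kcp, NULL); |
| *	kcpuset_destroy(kcp); | | | *	kcpuset_destroy(kcp); |
| */ | | | */ |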
2141 | int | | 2141 | int |
2142 | interrupt_distribute(void *cookie, const kcpuset_t *newset, kcpuset_t *oldset) | | 2142 | interrupt_distribute(void *cookie, const kcpuset_t *newset, kcpuset_t *oldset) |
2143 | { | | 2143 | { |
2144 | int error; | | 2144 | int error; |
2145 | struct intrhand *ih = cookie; | | 2145 | struct intrhand *ih = cookie; |
2146 | | | 2146 | |
2147 | mutex_enter(&intr_distribute_lock); | | 2147 | mutex_enter(&intr_distribute_lock); |
2148 | mutex_enter(&cpu_lock); | | 2148 | mutex_enter(&cpu_lock); |
2149 | error = intr_distribute_locked(ih, newset, oldset); | | 2149 | error = intr_distribute_locked(ih, newset, oldset); |
2150 | mutex_exit(&cpu_lock); | | 2150 | mutex_exit(&cpu_lock); |
2151 | mutex_exit(&intr_distribute_lock); | | 2151 | mutex_exit(&intr_distribute_lock); |
2152 | | | 2152 | |
2153 | return error; | | 2153 | return error; |
2154 | } | | 2154 | } |
2155 | | | 2155 | |
2156 | /* | | 2156 | /* |
2157 | * MI interface for subr_interrupt.c | | 2157 | * MI interface for subr_interrupt.c |
2158 | */ | | 2158 | */ |
2159 | int | | 2159 | int |
2160 | interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset, | | 2160 | interrupt_distribute_handler(const char *intrid, const kcpuset_t *newset, |
2161 | kcpuset_t *oldset) | | 2161 | kcpuset_t *oldset) |
2162 | { | | 2162 | { |
2163 | int error; | | 2163 | int error; |
2164 | struct intrhand *ih; | | 2164 | struct intrhand *ih; |
2165 | | | 2165 | |
2166 | mutex_enter(&intr_distribute_lock); | | 2166 | mutex_enter(&intr_distribute_lock); |
2167 | mutex_enter(&cpu_lock); | | 2167 | mutex_enter(&cpu_lock); |
2168 | | | 2168 | |
2169 | ih = intr_get_handler(intrid); | | 2169 | ih = intr_get_handler(intrid); |
2170 | if (ih == NULL) { | | 2170 | if (ih == NULL) { |
2171 | error = ENOENT; | | 2171 | error = ENOENT; |
2172 | goto out; | | 2172 | goto out; |
2173 | } | | 2173 | } |
2174 | error = intr_distribute_locked(ih, newset, oldset); | | 2174 | error = intr_distribute_locked(ih, newset, oldset); |
2175 | | | 2175 | |
2176 | out: | | 2176 | out: |
2177 | mutex_exit(&cpu_lock); | | 2177 | mutex_exit(&cpu_lock); |
2178 | mutex_exit(&intr_distribute_lock); | | 2178 | mutex_exit(&intr_distribute_lock); |
2179 | return error; | | 2179 | return error; |
2180 | } | | 2180 | } |
2181 | | | 2181 | |
2182 | /* | | 2182 | /* |
2183 | * MI interface for subr_interrupt.c | | 2183 | * MI interface for subr_interrupt.c |
2184 | */ | | 2184 | */ |
2185 | struct intrids_handler * | | 2185 | struct intrids_handler * |
2186 | interrupt_construct_intrids(const kcpuset_t *cpuset) | | 2186 | interrupt_construct_intrids(const kcpuset_t *cpuset) |
2187 | { | | 2187 | { |
2188 | struct intrsource *isp; | | 2188 | struct intrsource *isp; |
2189 | struct intrids_handler *ii_handler; | | 2189 | struct intrids_handler *ii_handler; |
2190 | intrid_t *ids; | | 2190 | intrid_t *ids; |
2191 | int i, count; | | 2191 | int i, count; |