| @@ -1,1185 +1,1184 @@ | | | @@ -1,1185 +1,1184 @@ |
1 | /* $NetBSD: evtchn.c,v 1.88.2.5 2020/04/18 15:06:18 bouyer Exp $ */ | | 1 | /* $NetBSD: evtchn.c,v 1.88.2.6 2020/04/18 20:36:31 bouyer Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2006 Manuel Bouyer. | | 4 | * Copyright (c) 2006 Manuel Bouyer. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | * | | 25 | * |
26 | */ | | 26 | */ |
27 | | | 27 | |
28 | /* | | 28 | /* |
29 | * | | 29 | * |
30 | * Copyright (c) 2004 Christian Limpach. | | 30 | * Copyright (c) 2004 Christian Limpach. |
31 | * Copyright (c) 2004, K A Fraser. | | 31 | * Copyright (c) 2004, K A Fraser. |
32 | * All rights reserved. | | 32 | * All rights reserved. |
33 | * | | 33 | * |
34 | * Redistribution and use in source and binary forms, with or without | | 34 | * Redistribution and use in source and binary forms, with or without |
35 | * modification, are permitted provided that the following conditions | | 35 | * modification, are permitted provided that the following conditions |
36 | * are met: | | 36 | * are met: |
37 | * 1. Redistributions of source code must retain the above copyright | | 37 | * 1. Redistributions of source code must retain the above copyright |
38 | * notice, this list of conditions and the following disclaimer. | | 38 | * notice, this list of conditions and the following disclaimer. |
39 | * 2. Redistributions in binary form must reproduce the above copyright | | 39 | * 2. Redistributions in binary form must reproduce the above copyright |
40 | * notice, this list of conditions and the following disclaimer in the | | 40 | * notice, this list of conditions and the following disclaimer in the |
41 | * documentation and/or other materials provided with the distribution. | | 41 | * documentation and/or other materials provided with the distribution. |
42 | * | | 42 | * |
43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
44 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 44 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
45 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 45 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
46 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 46 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
47 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 47 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
48 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 48 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
49 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 49 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
50 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 50 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
51 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 51 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
52 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 52 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
53 | */ | | 53 | */ |
54 | | | 54 | |
55 | | | 55 | |
56 | #include <sys/cdefs.h> | | 56 | #include <sys/cdefs.h> |
57 | __KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.5 2020/04/18 15:06:18 bouyer Exp $"); | | 57 | __KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.6 2020/04/18 20:36:31 bouyer Exp $"); |
58 | | | 58 | |
59 | #include "opt_xen.h" | | 59 | #include "opt_xen.h" |
60 | #include "isa.h" | | 60 | #include "isa.h" |
61 | #include "pci.h" | | 61 | #include "pci.h" |
62 | | | 62 | |
63 | #include <sys/param.h> | | 63 | #include <sys/param.h> |
64 | #include <sys/cpu.h> | | 64 | #include <sys/cpu.h> |
65 | #include <sys/kernel.h> | | 65 | #include <sys/kernel.h> |
66 | #include <sys/systm.h> | | 66 | #include <sys/systm.h> |
67 | #include <sys/device.h> | | 67 | #include <sys/device.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/kmem.h> | | 69 | #include <sys/kmem.h> |
70 | #include <sys/reboot.h> | | 70 | #include <sys/reboot.h> |
71 | #include <sys/mutex.h> | | 71 | #include <sys/mutex.h> |
72 | #include <sys/interrupt.h> | | 72 | #include <sys/interrupt.h> |
73 | | | 73 | |
74 | #include <uvm/uvm.h> | | 74 | #include <uvm/uvm.h> |
75 | | | 75 | |
76 | #include <xen/intr.h> | | 76 | #include <xen/intr.h> |
77 | | | 77 | |
78 | #include <xen/xen.h> | | 78 | #include <xen/xen.h> |
79 | #include <xen/hypervisor.h> | | 79 | #include <xen/hypervisor.h> |
80 | #include <xen/evtchn.h> | | 80 | #include <xen/evtchn.h> |
81 | #include <xen/xenfunc.h> | | 81 | #include <xen/xenfunc.h> |
82 | | | 82 | |
83 | /* | | 83 | /* |
84 | * This lock protects updates to the following mapping and reference-count | | 84 | * This lock protects updates to the following mapping and reference-count |
85 | * arrays. The lock does not need to be acquired to read the mapping tables. | | 85 | * arrays. The lock does not need to be acquired to read the mapping tables. |
86 | */ | | 86 | */ |
87 | static kmutex_t evtchn_lock; | | 87 | static kmutex_t evtchn_lock; |
88 | | | 88 | |
89 | /* event handlers */ | | 89 | /* event handlers */ |
90 | struct evtsource *evtsource[NR_EVENT_CHANNELS]; | | 90 | struct evtsource *evtsource[NR_EVENT_CHANNELS]; |
91 | | | 91 | |
92 | /* channel locks */ | | 92 | /* channel locks */ |
93 | static kmutex_t evtlock[NR_EVENT_CHANNELS]; | | 93 | static kmutex_t evtlock[NR_EVENT_CHANNELS]; |
94 | | | 94 | |
95 | /* Reference counts for bindings to event channels XXX: redo for SMP */ | | 95 | /* Reference counts for bindings to event channels XXX: redo for SMP */ |
96 | static uint8_t evtch_bindcount[NR_EVENT_CHANNELS]; | | 96 | static uint8_t evtch_bindcount[NR_EVENT_CHANNELS]; |
97 | | | 97 | |
98 | /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */ | | 98 | /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */ |
99 | static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS]; | | 99 | static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS]; |
100 | | | 100 | |
101 | /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */ | | 101 | /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */ |
102 | static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS]; | | 102 | static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS]; |
103 | | | 103 | |
104 | /* event-channel <-> VIRQ mapping. */ | | 104 | /* event-channel <-> VIRQ mapping. */ |
105 | static int virq_to_evtch[NR_VIRQS]; | | 105 | static int virq_to_evtch[NR_VIRQS]; |
106 | | | 106 | |
107 | | | 107 | |
108 | #if NPCI > 0 || NISA > 0 | | 108 | #if NPCI > 0 || NISA > 0 |
109 | /* event-channel <-> PIRQ mapping */ | | 109 | /* event-channel <-> PIRQ mapping */ |
110 | static int pirq_to_evtch[NR_PIRQS]; | | 110 | static int pirq_to_evtch[NR_PIRQS]; |
111 | /* PIRQ needing notify */ | | 111 | /* PIRQ needing notify */ |
112 | static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32]; | | 112 | static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32]; |
113 | int pirq_interrupt(void *); | | 113 | int pirq_interrupt(void *); |
114 | physdev_op_t physdev_op_notify = { | | 114 | physdev_op_t physdev_op_notify = { |
115 | .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY, | | 115 | .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY, |
116 | }; | | 116 | }; |
117 | #endif | | 117 | #endif |
118 | | | 118 | |
119 | static void xen_evtchn_mask(struct pic *, int); | | 119 | static void xen_evtchn_mask(struct pic *, int); |
120 | static void xen_evtchn_unmask(struct pic *, int); | | 120 | static void xen_evtchn_unmask(struct pic *, int); |
121 | static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int); | | 121 | static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int); |
122 | static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int); | | 122 | static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int); |
123 | static bool xen_evtchn_trymask(struct pic *, int); | | 123 | static bool xen_evtchn_trymask(struct pic *, int); |
124 | | | 124 | |
125 | | | 125 | |
126 | struct pic xen_pic = { | | 126 | struct pic xen_pic = { |
127 | .pic_name = "xenev0", | | 127 | .pic_name = "xenev0", |
128 | .pic_type = PIC_XEN, | | 128 | .pic_type = PIC_XEN, |
129 | .pic_vecbase = 0, | | 129 | .pic_vecbase = 0, |
130 | .pic_apicid = 0, | | 130 | .pic_apicid = 0, |
131 | .pic_lock = __SIMPLELOCK_UNLOCKED, | | 131 | .pic_lock = __SIMPLELOCK_UNLOCKED, |
132 | .pic_hwmask = xen_evtchn_mask, | | 132 | .pic_hwmask = xen_evtchn_mask, |
133 | .pic_hwunmask = xen_evtchn_unmask, | | 133 | .pic_hwunmask = xen_evtchn_unmask, |
134 | .pic_addroute = xen_evtchn_addroute, | | 134 | .pic_addroute = xen_evtchn_addroute, |
135 | .pic_delroute = xen_evtchn_delroute, | | 135 | .pic_delroute = xen_evtchn_delroute, |
136 | .pic_trymask = xen_evtchn_trymask, | | 136 | .pic_trymask = xen_evtchn_trymask, |
137 | .pic_level_stubs = xenev_stubs, | | 137 | .pic_level_stubs = xenev_stubs, |
138 | .pic_edge_stubs = xenev_stubs, | | 138 | .pic_edge_stubs = xenev_stubs, |
139 | }; | | 139 | }; |
140 | | | 140 | |
141 | /* | | 141 | /* |
142 | * We try to stick to the traditional x86 PIC semantics wrt Xen | | 142 | * We try to stick to the traditional x86 PIC semantics wrt Xen |
143 | * events. | | 143 | * events. |
144 | * | | 144 | * |
145 | * PIC pins exist in a global namespace which may be hierarchical, and | | 145 | * PIC pins exist in a global namespace which may be hierarchical, and |
146 | * are mapped to a cpu bus concept called 'IRQ' numbers, which are | | 146 | * are mapped to a cpu bus concept called 'IRQ' numbers, which are |
147 | * also global, but linear. Thus a PIC, pin tuple will always map to | | 147 | * also global, but linear. Thus a PIC, pin tuple will always map to |
148 | * an IRQ number. These tuples can alias to the same IRQ number, thus | | 148 | * an IRQ number. These tuples can alias to the same IRQ number, thus |
149 | * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs, | | 149 | * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs, |
150 | * and to specific callback vector indices on the CPU called idt_vec, | | 150 | * and to specific callback vector indices on the CPU called idt_vec, |
151 | * which are aliases to handlers meant to run on destination | | 151 | * which are aliases to handlers meant to run on destination |
152 | * CPUs. This binding can also happen at interrupt time and resolved | | 152 | * CPUs. This binding can also happen at interrupt time and resolved |
153 | * 'round-robin' between all CPUs, depending on the lapic setup. In | | 153 | * 'round-robin' between all CPUs, depending on the lapic setup. In |
154 | * this case, all CPUs need to have identical idt_vec->handler | | 154 | * this case, all CPUs need to have identical idt_vec->handler |
155 | * mappings. | | 155 | * mappings. |
156 | * | | 156 | * |
157 | * The job of pic_addroute() is to setup the 'wiring' between the | | 157 | * The job of pic_addroute() is to setup the 'wiring' between the |
158 | * source pin, and the destination CPU handler, ideally on a specific | | 158 | * source pin, and the destination CPU handler, ideally on a specific |
159 | * CPU in MP systems (or 'round-robin'). | | 159 | * CPU in MP systems (or 'round-robin'). |
160 | * | | 160 | * |
161 | * On Xen, a global namespace of 'events' exist, which are initially | | 161 | * On Xen, a global namespace of 'events' exist, which are initially |
162 | * bound to nothing. This is similar to the relationship between | | 162 | * bound to nothing. This is similar to the relationship between |
163 | * realworld realworld IRQ numbers wrt PIC pins, since before routing, | | 163 | * realworld realworld IRQ numbers wrt PIC pins, since before routing, |
164 | * IRQ numbers by themselves have no causal connection setup with the | | 164 | * IRQ numbers by themselves have no causal connection setup with the |
165 | * real world. (Except for the hardwired cases on the PC Architecture, | | 165 | * real world. (Except for the hardwired cases on the PC Architecture, |
166 | * which we ignore for the purpose of this description). However the | | 166 | * which we ignore for the purpose of this description). However the |
167 | * really important routing is from pin to idt_vec. On PIC_XEN, all | | 167 | * really important routing is from pin to idt_vec. On PIC_XEN, all |
168 | * three (pic, irq, idt_vec) belong to the same namespace and are | | 168 | * three (pic, irq, idt_vec) belong to the same namespace and are |
169 | * identical. Further, the mapping between idt_vec and the actual | | 169 | * identical. Further, the mapping between idt_vec and the actual |
170 | * callback handler is setup via calls to the evtchn.h api - this | | 170 | * callback handler is setup via calls to the evtchn.h api - this |
171 | * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w | | 171 | * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w |
172 | * | | 172 | * |
173 | * For now we handle two cases: | | 173 | * For now we handle two cases: |
174 | * - IPC style events - eg: timer, PV devices, etc. | | 174 | * - IPC style events - eg: timer, PV devices, etc. |
175 | * - dom0 physical irq bound events. | | 175 | * - dom0 physical irq bound events. |
176 | * | | 176 | * |
177 | * In the case of IPC style events, we currently externalise the | | 177 | * In the case of IPC style events, we currently externalise the |
178 | * event binding by using evtchn.h functions. From the POV of | | 178 | * event binding by using evtchn.h functions. From the POV of |
179 | * PIC_XEN , 'pin' , 'irq' and 'idt_vec' are all identical to the | | 179 | * PIC_XEN , 'pin' , 'irq' and 'idt_vec' are all identical to the |
180 | * port number of the event. | | 180 | * port number of the event. |
181 | * | | 181 | * |
182 | * In the case of dom0 physical irq bound events, we currently | | 182 | * In the case of dom0 physical irq bound events, we currently |
183 | * event binding by exporting evtchn.h functions. From the POV of | | 183 | * event binding by exporting evtchn.h functions. From the POV of |
184 | * PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the 'irq' is | | 184 | * PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the 'irq' is |
185 | * the x86 global irq number - the port number is extracted out of a | | 185 | * the x86 global irq number - the port number is extracted out of a |
186 | * global array (this is currently kludgy and breaks API abstraction) | | 186 | * global array (this is currently kludgy and breaks API abstraction) |
187 | * and the binding happens during pic_addroute() of the ioapic. | | 187 | * and the binding happens during pic_addroute() of the ioapic. |
188 | * | | 188 | * |
189 | * Later when we integrate more tightly with x86/intr.c, we will be | | 189 | * Later when we integrate more tightly with x86/intr.c, we will be |
190 | * able to conform better to (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN | | 190 | * able to conform better to (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN |
191 | * cascading model. | | 191 | * cascading model. |
192 | */ | | 192 | */ |
193 | | | 193 | |
194 | int debug_port = -1; | | 194 | int debug_port = -1; |
195 | | | 195 | |
196 | // #define IRQ_DEBUG 4 | | 196 | // #define IRQ_DEBUG 4 |
197 | | | 197 | |
198 | /* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */ | | 198 | /* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */ |
199 | #ifdef MULTIPROCESSOR | | 199 | #ifdef MULTIPROCESSOR |
200 | | | 200 | |
201 | /* | | 201 | /* |
202 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. | | 202 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. |
203 | */ | | 203 | */ |
204 | | | 204 | |
205 | int | | 205 | int |
206 | xen_intr_biglock_wrapper(void *vp) | | 206 | xen_intr_biglock_wrapper(void *vp) |
207 | { | | 207 | { |
208 | struct intrhand *ih = vp; | | 208 | struct intrhand *ih = vp; |
209 | int ret; | | 209 | int ret; |
210 | | | 210 | |
211 | KERNEL_LOCK(1, NULL); | | 211 | KERNEL_LOCK(1, NULL); |
212 | | | 212 | |
213 | ret = (*ih->ih_realfun)(ih->ih_realarg); | | 213 | ret = (*ih->ih_realfun)(ih->ih_realarg); |
214 | | | 214 | |
215 | KERNEL_UNLOCK_ONE(NULL); | | 215 | KERNEL_UNLOCK_ONE(NULL); |
216 | | | 216 | |
217 | return ret; | | 217 | return ret; |
218 | } | | 218 | } |
219 | #endif /* MULTIPROCESSOR */ | | 219 | #endif /* MULTIPROCESSOR */ |
220 | | | 220 | |
221 | void | | 221 | void |
222 | events_default_setup(void) | | 222 | events_default_setup(void) |
223 | { | | 223 | { |
224 | int i; | | 224 | int i; |
225 | | | 225 | |
226 | /* No VCPU -> event mappings. */ | | 226 | /* No VCPU -> event mappings. */ |
227 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) | | 227 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) |
228 | vcpu_ipi_to_evtch[i] = -1; | | 228 | vcpu_ipi_to_evtch[i] = -1; |
229 | | | 229 | |
230 | /* No VIRQ_TIMER -> event mappings. */ | | 230 | /* No VIRQ_TIMER -> event mappings. */ |
231 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) | | 231 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) |
232 | virq_timer_to_evtch[i] = -1; | | 232 | virq_timer_to_evtch[i] = -1; |
233 | | | 233 | |
234 | /* No VIRQ -> event mappings. */ | | 234 | /* No VIRQ -> event mappings. */ |
235 | for (i = 0; i < NR_VIRQS; i++) | | 235 | for (i = 0; i < NR_VIRQS; i++) |
236 | virq_to_evtch[i] = -1; | | 236 | virq_to_evtch[i] = -1; |
237 | | | 237 | |
238 | #if NPCI > 0 || NISA > 0 | | 238 | #if NPCI > 0 || NISA > 0 |
239 | /* No PIRQ -> event mappings. */ | | 239 | /* No PIRQ -> event mappings. */ |
240 | for (i = 0; i < NR_PIRQS; i++) | | 240 | for (i = 0; i < NR_PIRQS; i++) |
241 | pirq_to_evtch[i] = -1; | | 241 | pirq_to_evtch[i] = -1; |
242 | for (i = 0; i < NR_EVENT_CHANNELS / 32; i++) | | 242 | for (i = 0; i < NR_EVENT_CHANNELS / 32; i++) |
243 | pirq_needs_unmask_notify[i] = 0; | | 243 | pirq_needs_unmask_notify[i] = 0; |
244 | #endif | | 244 | #endif |
245 | | | 245 | |
246 | /* No event-channel are 'live' right now. */ | | 246 | /* No event-channel are 'live' right now. */ |
247 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | | 247 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
248 | evtsource[i] = NULL; | | 248 | evtsource[i] = NULL; |
249 | evtch_bindcount[i] = 0; | | 249 | evtch_bindcount[i] = 0; |
250 | hypervisor_mask_event(i); | | 250 | hypervisor_mask_event(i); |
251 | } | | 251 | } |
252 | | | 252 | |
253 | } | | 253 | } |
254 | | | 254 | |
255 | void | | 255 | void |
256 | events_init(void) | | 256 | events_init(void) |
257 | { | | 257 | { |
258 | mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE); | | 258 | mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE); |
259 | #ifdef XENPV | | 259 | #ifdef XENPV |
260 | debug_port = bind_virq_to_evtch(VIRQ_DEBUG); | | 260 | debug_port = bind_virq_to_evtch(VIRQ_DEBUG); |
261 | | | 261 | |
262 | KASSERT(debug_port != -1); | | 262 | KASSERT(debug_port != -1); |
263 | | | 263 | |
264 | aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n", | | 264 | aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n", |
265 | debug_port); | | 265 | debug_port); |
266 | /* | | 266 | /* |
267 | * Don't call event_set_handler(), we'll use a shortcut. Just set | | 267 | * Don't call event_set_handler(), we'll use a shortcut. Just set |
268 | * evtsource[] to a non-NULL value so that evtchn_do_event will | | 268 | * evtsource[] to a non-NULL value so that evtchn_do_event will |
269 | * be called. | | 269 | * be called. |
270 | */ | | 270 | */ |
271 | evtsource[debug_port] = (void *)-1; | | 271 | evtsource[debug_port] = (void *)-1; |
272 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port); | | 272 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port); |
273 | hypervisor_unmask_event(debug_port); | | 273 | hypervisor_unmask_event(debug_port); |
274 | #if NPCI > 0 || NISA > 0 | | 274 | #if NPCI > 0 || NISA > 0 |
275 | hypervisor_ack_pirq_event(debug_port); | | 275 | hypervisor_ack_pirq_event(debug_port); |
276 | #endif /* NPCI > 0 || NISA > 0 */ | | 276 | #endif /* NPCI > 0 || NISA > 0 */ |
277 | #endif /* XENPV */ | | 277 | #endif /* XENPV */ |
278 | x86_enable_intr(); /* at long last... */ | | 278 | x86_enable_intr(); /* at long last... */ |
279 | } | | 279 | } |
280 | | | 280 | |
281 | bool | | 281 | bool |
282 | events_suspend(void) | | 282 | events_suspend(void) |
283 | { | | 283 | { |
284 | int evtch; | | 284 | int evtch; |
285 | | | 285 | |
286 | x86_disable_intr(); | | 286 | x86_disable_intr(); |
287 | | | 287 | |
288 | /* VIRQ_DEBUG is the last interrupt to remove */ | | 288 | /* VIRQ_DEBUG is the last interrupt to remove */ |
289 | evtch = unbind_virq_from_evtch(VIRQ_DEBUG); | | 289 | evtch = unbind_virq_from_evtch(VIRQ_DEBUG); |
290 | | | 290 | |
291 | KASSERT(evtch != -1); | | 291 | KASSERT(evtch != -1); |
292 | | | 292 | |
293 | hypervisor_mask_event(evtch); | | 293 | hypervisor_mask_event(evtch); |
294 | /* Remove the non-NULL value set in events_init() */ | | 294 | /* Remove the non-NULL value set in events_init() */ |
295 | evtsource[evtch] = NULL; | | 295 | evtsource[evtch] = NULL; |
296 | aprint_verbose("VIRQ_DEBUG interrupt disabled, " | | 296 | aprint_verbose("VIRQ_DEBUG interrupt disabled, " |
297 | "event channel %d removed\n", evtch); | | 297 | "event channel %d removed\n", evtch); |
298 | | | 298 | |
299 | return true; | | 299 | return true; |
300 | } | | 300 | } |
301 | | | 301 | |
302 | bool | | 302 | bool |
303 | events_resume (void) | | 303 | events_resume (void) |
304 | { | | 304 | { |
305 | events_init(); | | 305 | events_init(); |
306 | | | 306 | |
307 | return true; | | 307 | return true; |
308 | } | | 308 | } |
309 | | | 309 | |
310 | | | 310 | |
311 | unsigned int | | 311 | unsigned int |
312 | evtchn_do_event(int evtch, struct intrframe *regs) | | 312 | evtchn_do_event(int evtch, struct intrframe *regs) |
313 | { | | 313 | { |
314 | struct cpu_info *ci; | | 314 | struct cpu_info *ci; |
315 | int ilevel; | | 315 | int ilevel; |
316 | struct intrhand *ih; | | 316 | struct intrhand *ih; |
317 | int (*ih_fun)(void *, void *); | | 317 | int (*ih_fun)(void *, void *); |
318 | uint32_t iplmask; | | 318 | uint32_t iplmask; |
319 | | | 319 | |
320 | KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch); | | 320 | KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch); |
321 | KASSERTMSG(evtch < NR_EVENT_CHANNELS, | | 321 | KASSERTMSG(evtch < NR_EVENT_CHANNELS, |
322 | "evtch number %d > NR_EVENT_CHANNELS", evtch); | | 322 | "evtch number %d > NR_EVENT_CHANNELS", evtch); |
323 | | | 323 | |
324 | #ifdef IRQ_DEBUG | | 324 | #ifdef IRQ_DEBUG |
325 | if (evtch == IRQ_DEBUG) | | 325 | if (evtch == IRQ_DEBUG) |
326 | printf("evtchn_do_event: evtch %d\n", evtch); | | 326 | printf("evtchn_do_event: evtch %d\n", evtch); |
327 | #endif | | 327 | #endif |
328 | ci = curcpu(); | | 328 | ci = curcpu(); |
329 | | | 329 | |
330 | /* | | 330 | /* |
331 | * Shortcut for the debug handler, we want it to always run, | | 331 | * Shortcut for the debug handler, we want it to always run, |
332 | * regardless of the IPL level. | | 332 | * regardless of the IPL level. |
333 | */ | | 333 | */ |
334 | if (__predict_false(evtch == debug_port)) { | | 334 | if (__predict_false(evtch == debug_port)) { |
335 | xen_debug_handler(NULL); | | 335 | xen_debug_handler(NULL); |
336 | hypervisor_unmask_event(debug_port); | | 336 | hypervisor_unmask_event(debug_port); |
337 | #if NPCI > 0 || NISA > 0 | | 337 | #if NPCI > 0 || NISA > 0 |
338 | hypervisor_ack_pirq_event(debug_port); | | 338 | hypervisor_ack_pirq_event(debug_port); |
339 | #endif /* NPCI > 0 || NISA > 0 */ | | 339 | #endif /* NPCI > 0 || NISA > 0 */ |
340 | return 0; | | 340 | return 0; |
341 | } | | 341 | } |
342 | | | 342 | |
343 | KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch); | | 343 | KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch); |
| | | 344 | |
| | | 345 | if (evtsource[evtch]->ev_cpu != ci) |
| | | 346 | return 0; |
| | | 347 | |
344 | ci->ci_data.cpu_nintr++; | | 348 | ci->ci_data.cpu_nintr++; |
345 | evtsource[evtch]->ev_evcnt.ev_count++; | | 349 | evtsource[evtch]->ev_evcnt.ev_count++; |
346 | ilevel = ci->ci_ilevel; | | 350 | ilevel = ci->ci_ilevel; |
347 | | | 351 | |
348 | if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) { | | | |
349 | hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch); | | | |
350 | return 0; | | | |
351 | } | | | |
352 | | | | |
353 | if (evtsource[evtch]->ev_maxlevel <= ilevel) { | | 352 | if (evtsource[evtch]->ev_maxlevel <= ilevel) { |
354 | #ifdef IRQ_DEBUG | | 353 | #ifdef IRQ_DEBUG |
355 | if (evtch == IRQ_DEBUG) | | 354 | if (evtch == IRQ_DEBUG) |
356 | printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n", | | 355 | printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n", |
357 | evtch, evtsource[evtch]->ev_maxlevel, ilevel); | | 356 | evtch, evtsource[evtch]->ev_maxlevel, ilevel); |
358 | #endif | | 357 | #endif |
359 | hypervisor_set_ipending(evtsource[evtch]->ev_imask, | | 358 | hypervisor_set_ipending(evtsource[evtch]->ev_imask, |
360 | evtch >> LONG_SHIFT, | | 359 | evtch >> LONG_SHIFT, |
361 | evtch & LONG_MASK); | | 360 | evtch & LONG_MASK); |
362 | | | 361 | |
363 | /* leave masked */ | | 362 | /* leave masked */ |
364 | | | 363 | |
365 | return 0; | | 364 | return 0; |
366 | } | | 365 | } |
367 | ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; | | 366 | ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; |
368 | iplmask = evtsource[evtch]->ev_imask; | | 367 | iplmask = evtsource[evtch]->ev_imask; |
369 | x86_enable_intr(); | | 368 | x86_enable_intr(); |
370 | mutex_spin_enter(&evtlock[evtch]); | | 369 | mutex_spin_enter(&evtlock[evtch]); |
371 | ih = evtsource[evtch]->ev_handlers; | | 370 | ih = evtsource[evtch]->ev_handlers; |
372 | while (ih != NULL) { | | 371 | while (ih != NULL) { |
373 | if (ih->ih_cpu != ci) { | | 372 | if (ih->ih_cpu != ci) { |
374 | hypervisor_send_event(ih->ih_cpu, evtch); | | 373 | hypervisor_send_event(ih->ih_cpu, evtch); |
375 | iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); | | 374 | iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); |
376 | ih = ih->ih_evt_next; | | 375 | ih = ih->ih_evt_next; |
377 | continue; | | 376 | continue; |
378 | } | | 377 | } |
379 | if (ih->ih_level <= ilevel) { | | 378 | if (ih->ih_level <= ilevel) { |
380 | #ifdef IRQ_DEBUG | | 379 | #ifdef IRQ_DEBUG |
381 | if (evtch == IRQ_DEBUG) | | 380 | if (evtch == IRQ_DEBUG) |
382 | printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); | | 381 | printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); |
383 | #endif | | 382 | #endif |
384 | x86_disable_intr(); | | 383 | x86_disable_intr(); |
385 | hypervisor_set_ipending(iplmask, | | 384 | hypervisor_set_ipending(iplmask, |
386 | evtch >> LONG_SHIFT, evtch & LONG_MASK); | | 385 | evtch >> LONG_SHIFT, evtch & LONG_MASK); |
387 | /* leave masked */ | | 386 | /* leave masked */ |
388 | mutex_spin_exit(&evtlock[evtch]); | | 387 | mutex_spin_exit(&evtlock[evtch]); |
389 | goto splx; | | 388 | goto splx; |
390 | } | | 389 | } |
391 | iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); | | 390 | iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); |
392 | ci->ci_ilevel = ih->ih_level; | | 391 | ci->ci_ilevel = ih->ih_level; |
393 | ih_fun = (void *)ih->ih_fun; | | 392 | ih_fun = (void *)ih->ih_fun; |
394 | ih_fun(ih->ih_arg, regs); | | 393 | ih_fun(ih->ih_arg, regs); |
395 | ih = ih->ih_evt_next; | | 394 | ih = ih->ih_evt_next; |
396 | } | | 395 | } |
397 | mutex_spin_exit(&evtlock[evtch]); | | 396 | mutex_spin_exit(&evtlock[evtch]); |
398 | x86_disable_intr(); | | 397 | x86_disable_intr(); |
399 | hypervisor_unmask_event(evtch); | | 398 | hypervisor_unmask_event(evtch); |
400 | #if NPCI > 0 || NISA > 0 | | 399 | #if NPCI > 0 || NISA > 0 |
401 | hypervisor_ack_pirq_event(evtch); | | 400 | hypervisor_ack_pirq_event(evtch); |
402 | #endif /* NPCI > 0 || NISA > 0 */ | | 401 | #endif /* NPCI > 0 || NISA > 0 */ |
403 | | | 402 | |
404 | splx: | | 403 | splx: |
405 | ci->ci_ilevel = ilevel; | | 404 | ci->ci_ilevel = ilevel; |
406 | return 0; | | 405 | return 0; |
407 | } | | 406 | } |
408 | | | 407 | |
409 | #define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ | | 408 | #define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ |
410 | | | 409 | |
411 | /* PIC callbacks */ | | 410 | /* PIC callbacks */ |
412 | /* pic "pin"s are conceptually mapped to event port numbers */ | | 411 | /* pic "pin"s are conceptually mapped to event port numbers */ |
413 | static void | | 412 | static void |
414 | xen_evtchn_mask(struct pic *pic, int pin) | | 413 | xen_evtchn_mask(struct pic *pic, int pin) |
415 | { | | 414 | { |
416 | evtchn_port_t evtchn = pin; | | 415 | evtchn_port_t evtchn = pin; |
417 | | | 416 | |
418 | KASSERT(pic->pic_type == PIC_XEN); | | 417 | KASSERT(pic->pic_type == PIC_XEN); |
419 | KASSERT(evtchn < NR_EVENT_CHANNELS); | | 418 | KASSERT(evtchn < NR_EVENT_CHANNELS); |
420 | | | 419 | |
421 | hypervisor_mask_event(evtchn); | | 420 | hypervisor_mask_event(evtchn); |
422 | | | 421 | |
423 | } | | 422 | } |
424 | | | 423 | |
425 | static void | | 424 | static void |
426 | xen_evtchn_unmask(struct pic *pic, int pin) | | 425 | xen_evtchn_unmask(struct pic *pic, int pin) |
427 | { | | 426 | { |
428 | evtchn_port_t evtchn = pin; | | 427 | evtchn_port_t evtchn = pin; |
429 | | | 428 | |
430 | KASSERT(pic->pic_type == PIC_XEN); | | 429 | KASSERT(pic->pic_type == PIC_XEN); |
431 | KASSERT(evtchn < NR_EVENT_CHANNELS); | | 430 | KASSERT(evtchn < NR_EVENT_CHANNELS); |
432 | | | 431 | |
433 | hypervisor_unmask_event(evtchn); | | 432 | hypervisor_unmask_event(evtchn); |
434 | | | 433 | |
435 | } | | 434 | } |
436 | | | 435 | |
437 | | | 436 | |
438 | static void | | 437 | static void |
439 | xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) | | 438 | xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) |
440 | { | | 439 | { |
441 | | | 440 | |
442 | evtchn_port_t evtchn = pin; | | 441 | evtchn_port_t evtchn = pin; |
443 | | | 442 | |
444 | /* Events are simulated as level triggered interrupts */ | | 443 | /* Events are simulated as level triggered interrupts */ |
445 | KASSERT(type == IST_LEVEL); | | 444 | KASSERT(type == IST_LEVEL); |
446 | | | 445 | |
447 | KASSERT(evtchn < NR_EVENT_CHANNELS); | | 446 | KASSERT(evtchn < NR_EVENT_CHANNELS); |
448 | #if notyet | | 447 | #if notyet |
449 | evtchn_port_t boundport = idt_vec; | | 448 | evtchn_port_t boundport = idt_vec; |
450 | #endif | | 449 | #endif |
451 | | | 450 | |
452 | KASSERT(pic->pic_type == PIC_XEN); | | 451 | KASSERT(pic->pic_type == PIC_XEN); |
453 | | | 452 | |
454 | xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn); | | 453 | xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn); |
455 | | | 454 | |
456 | } | | 455 | } |
457 | | | 456 | |
458 | static void | | 457 | static void |
459 | xen_evtchn_delroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) | | 458 | xen_evtchn_delroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) |
460 | { | | 459 | { |
461 | /* | | 460 | /* |
462 | * XXX: In the future, this is a great place to | | 461 | * XXX: In the future, this is a great place to |
463 | * 'unbind' events to underlying events and cpus. | | 462 | * 'unbind' events to underlying events and cpus. |
464 | * For now, just disable interrupt servicing on this cpu for | | 463 | * For now, just disable interrupt servicing on this cpu for |
465 | * this pin aka cpu. | | 464 | * this pin aka cpu. |
466 | */ | | 465 | */ |
467 | evtchn_port_t evtchn = pin; | | 466 | evtchn_port_t evtchn = pin; |
468 | | | 467 | |
469 | /* Events are simulated as level triggered interrupts */ | | 468 | /* Events are simulated as level triggered interrupts */ |
470 | KASSERT(type == IST_LEVEL); | | 469 | KASSERT(type == IST_LEVEL); |
471 | | | 470 | |
472 | KASSERT(evtchn < NR_EVENT_CHANNELS); | | 471 | KASSERT(evtchn < NR_EVENT_CHANNELS); |
473 | #if notyet | | 472 | #if notyet |
474 | evtchn_port_t boundport = idt_vec; | | 473 | evtchn_port_t boundport = idt_vec; |
475 | #endif | | 474 | #endif |
476 | | | 475 | |
477 | KASSERT(pic->pic_type == PIC_XEN); | | 476 | KASSERT(pic->pic_type == PIC_XEN); |
478 | | | 477 | |
479 | xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn); | | 478 | xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn); |
480 | } | | 479 | } |
481 | | | 480 | |
/*
 * xen_evtchn_trymask(pic, pin)
 *
 *	If there are interrupts pending on the bus-shared pic, return
 *	false.  Otherwise, mask interrupts on the bus-shared pic and
 *	return true.
 */
static bool
xen_evtchn_trymask(struct pic *pic, int pin)
{
	volatile struct shared_info *s = HYPERVISOR_shared_info;
	unsigned long masked __diagused;

	/* Mask it. */
	masked = xen_atomic_test_and_set_bit(&s->evtchn_mask[0], pin);

	/*
	 * Caller is responsible for calling trymask only when the
	 * interrupt pin is not masked, and for serializing calls to
	 * trymask.
	 */
	KASSERT(!masked);

	/*
	 * Check whether there were any interrupts pending when we
	 * masked it. If there were, unmask and abort.
	 *
	 * Order matters here: the pending bit is deliberately tested
	 * only after the mask bit has been set, so an event arriving
	 * in between is not lost.
	 */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], pin)) {
		xen_atomic_clear_bit(&s->evtchn_mask[0], pin);
		return false;
	}

	/* Success: masked, not pending. */
	return true;
}
517 | | | 516 | |
518 | evtchn_port_t | | 517 | evtchn_port_t |
519 | bind_vcpu_to_evtch(cpuid_t vcpu) | | 518 | bind_vcpu_to_evtch(cpuid_t vcpu) |
520 | { | | 519 | { |
521 | evtchn_op_t op; | | 520 | evtchn_op_t op; |
522 | evtchn_port_t evtchn; | | 521 | evtchn_port_t evtchn; |
523 | | | 522 | |
524 | mutex_spin_enter(&evtchn_lock); | | 523 | mutex_spin_enter(&evtchn_lock); |
525 | | | 524 | |
526 | evtchn = vcpu_ipi_to_evtch[vcpu]; | | 525 | evtchn = vcpu_ipi_to_evtch[vcpu]; |
527 | if (evtchn == -1) { | | 526 | if (evtchn == -1) { |
528 | op.cmd = EVTCHNOP_bind_ipi; | | 527 | op.cmd = EVTCHNOP_bind_ipi; |
529 | op.u.bind_ipi.vcpu = (uint32_t) vcpu; | | 528 | op.u.bind_ipi.vcpu = (uint32_t) vcpu; |
530 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 529 | if (HYPERVISOR_event_channel_op(&op) != 0) |
531 | panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); | | 530 | panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); |
532 | evtchn = op.u.bind_ipi.port; | | 531 | evtchn = op.u.bind_ipi.port; |
533 | | | 532 | |
534 | vcpu_ipi_to_evtch[vcpu] = evtchn; | | 533 | vcpu_ipi_to_evtch[vcpu] = evtchn; |
535 | } | | 534 | } |
536 | | | 535 | |
537 | evtch_bindcount[evtchn]++; | | 536 | evtch_bindcount[evtchn]++; |
538 | | | 537 | |
539 | mutex_spin_exit(&evtchn_lock); | | 538 | mutex_spin_exit(&evtchn_lock); |
540 | | | 539 | |
541 | return evtchn; | | 540 | return evtchn; |
542 | } | | 541 | } |
543 | | | 542 | |
/*
 * Bind virtual IRQ "virq" to an event channel on the current cpu,
 * allocating a channel from the hypervisor on first use and taking a
 * reference on it.  Returns the event channel number, or -1 when
 * VIRQ_DEBUG is requested on an AP (it is only bound on the primary).
 */
int
bind_virq_to_evtch(int virq)
{
	evtchn_op_t op;
	int evtchn;

	mutex_spin_enter(&evtchn_lock);

	/*
	 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
	 * Please re-visit this implementation when others are used.
	 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
	 * XXX: event->virq/ipi can be unified in a linked-list
	 * implementation.
	 */
	struct cpu_info *ci = curcpu();

	if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
		mutex_spin_exit(&evtchn_lock);
		return -1;
	}

	/* VIRQ_TIMER is tracked per-vcpu; other virqs are global. */
	if (virq == VIRQ_TIMER) {
		evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
	} else {
		evtchn = virq_to_evtch[virq];
	}

	/* Allocate a channel if there is none already allocated */
	if (evtchn == -1) {
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = virq;
		op.u.bind_virq.vcpu = ci->ci_vcpuid;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind virtual IRQ %d\n", virq);
		evtchn = op.u.bind_virq.port;
	}

	/* Set event channel */
	if (virq == VIRQ_TIMER) {
		virq_timer_to_evtch[ci->ci_vcpuid] = evtchn;
	} else {
		virq_to_evtch[virq] = evtchn;
	}

	/* Increase ref counter */
	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}
596 | | | 595 | |
597 | int | | 596 | int |
598 | unbind_virq_from_evtch(int virq) | | 597 | unbind_virq_from_evtch(int virq) |
599 | { | | 598 | { |
600 | evtchn_op_t op; | | 599 | evtchn_op_t op; |
601 | int evtchn; | | 600 | int evtchn; |
602 | | | 601 | |
603 | struct cpu_info *ci = curcpu(); | | 602 | struct cpu_info *ci = curcpu(); |
604 | | | 603 | |
605 | if (virq == VIRQ_TIMER) { | | 604 | if (virq == VIRQ_TIMER) { |
606 | evtchn = virq_timer_to_evtch[ci->ci_vcpuid]; | | 605 | evtchn = virq_timer_to_evtch[ci->ci_vcpuid]; |
607 | } | | 606 | } |
608 | else { | | 607 | else { |
609 | evtchn = virq_to_evtch[virq]; | | 608 | evtchn = virq_to_evtch[virq]; |
610 | } | | 609 | } |
611 | | | 610 | |
612 | if (evtchn == -1) { | | 611 | if (evtchn == -1) { |
613 | return -1; | | 612 | return -1; |
614 | } | | 613 | } |
615 | | | 614 | |
616 | mutex_spin_enter(&evtchn_lock); | | 615 | mutex_spin_enter(&evtchn_lock); |
617 | | | 616 | |
618 | evtch_bindcount[evtchn]--; | | 617 | evtch_bindcount[evtchn]--; |
619 | if (evtch_bindcount[evtchn] == 0) { | | 618 | if (evtch_bindcount[evtchn] == 0) { |
620 | op.cmd = EVTCHNOP_close; | | 619 | op.cmd = EVTCHNOP_close; |
621 | op.u.close.port = evtchn; | | 620 | op.u.close.port = evtchn; |
622 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 621 | if (HYPERVISOR_event_channel_op(&op) != 0) |
623 | panic("Failed to unbind virtual IRQ %d\n", virq); | | 622 | panic("Failed to unbind virtual IRQ %d\n", virq); |
624 | | | 623 | |
625 | if (virq == VIRQ_TIMER) { | | 624 | if (virq == VIRQ_TIMER) { |
626 | virq_timer_to_evtch[ci->ci_vcpuid] = -1; | | 625 | virq_timer_to_evtch[ci->ci_vcpuid] = -1; |
627 | } else { | | 626 | } else { |
628 | virq_to_evtch[virq] = -1; | | 627 | virq_to_evtch[virq] = -1; |
629 | } | | 628 | } |
630 | } | | 629 | } |
631 | | | 630 | |
632 | mutex_spin_exit(&evtchn_lock); | | 631 | mutex_spin_exit(&evtchn_lock); |
633 | | | 632 | |
634 | return evtchn; | | 633 | return evtchn; |
635 | } | | 634 | } |
636 | | | 635 | |
637 | #if NPCI > 0 || NISA > 0 | | 636 | #if NPCI > 0 || NISA > 0 |
638 | int | | 637 | int |
639 | get_pirq_to_evtch(int pirq) | | 638 | get_pirq_to_evtch(int pirq) |
640 | { | | 639 | { |
641 | int evtchn; | | 640 | int evtchn; |
642 | | | 641 | |
643 | if (pirq == -1) /* Match previous behaviour */ | | 642 | if (pirq == -1) /* Match previous behaviour */ |
644 | return -1; | | 643 | return -1; |
645 | | | 644 | |
646 | if (pirq >= NR_PIRQS) { | | 645 | if (pirq >= NR_PIRQS) { |
647 | panic("pirq %d out of bound, increase NR_PIRQS", pirq); | | 646 | panic("pirq %d out of bound, increase NR_PIRQS", pirq); |
648 | } | | 647 | } |
649 | mutex_spin_enter(&evtchn_lock); | | 648 | mutex_spin_enter(&evtchn_lock); |
650 | | | 649 | |
651 | evtchn = pirq_to_evtch[pirq]; | | 650 | evtchn = pirq_to_evtch[pirq]; |
652 | | | 651 | |
653 | mutex_spin_exit(&evtchn_lock); | | 652 | mutex_spin_exit(&evtchn_lock); |
654 | | | 653 | |
655 | return evtchn; | | 654 | return evtchn; |
656 | } | | 655 | } |
657 | | | 656 | |
/*
 * Bind physical IRQ "pirq" to an event channel, allocating a channel
 * from the hypervisor on first use, and take a reference on it.
 * Returns the event channel number.
 */
int
bind_pirq_to_evtch(int pirq)
{
	evtchn_op_t op;
	int evtchn;

	if (pirq >= NR_PIRQS) {
		panic("pirq %d out of bound, increase NR_PIRQS", pirq);
	}

	mutex_spin_enter(&evtchn_lock);

	evtchn = pirq_to_evtch[pirq];
	if (evtchn == -1) {
		/* First binding for this pirq: ask Xen for a port. */
		op.cmd = EVTCHNOP_bind_pirq;
		op.u.bind_pirq.pirq = pirq;
		op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
		if (HYPERVISOR_event_channel_op(&op) != 0)
			panic("Failed to bind physical IRQ %d\n", pirq);
		evtchn = op.u.bind_pirq.port;

#ifdef IRQ_DEBUG
		printf("pirq %d evtchn %d\n", pirq, evtchn);
#endif
		pirq_to_evtch[pirq] = evtchn;
	}

	evtch_bindcount[evtchn]++;

	mutex_spin_exit(&evtchn_lock);

	return evtchn;
}
691 | | | 690 | |
692 | int | | 691 | int |
693 | unbind_pirq_from_evtch(int pirq) | | 692 | unbind_pirq_from_evtch(int pirq) |
694 | { | | 693 | { |
695 | evtchn_op_t op; | | 694 | evtchn_op_t op; |
696 | int evtchn = pirq_to_evtch[pirq]; | | 695 | int evtchn = pirq_to_evtch[pirq]; |
697 | | | 696 | |
698 | mutex_spin_enter(&evtchn_lock); | | 697 | mutex_spin_enter(&evtchn_lock); |
699 | | | 698 | |
700 | evtch_bindcount[evtchn]--; | | 699 | evtch_bindcount[evtchn]--; |
701 | if (evtch_bindcount[evtchn] == 0) { | | 700 | if (evtch_bindcount[evtchn] == 0) { |
702 | op.cmd = EVTCHNOP_close; | | 701 | op.cmd = EVTCHNOP_close; |
703 | op.u.close.port = evtchn; | | 702 | op.u.close.port = evtchn; |
704 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 703 | if (HYPERVISOR_event_channel_op(&op) != 0) |
705 | panic("Failed to unbind physical IRQ %d\n", pirq); | | 704 | panic("Failed to unbind physical IRQ %d\n", pirq); |
706 | | | 705 | |
707 | pirq_to_evtch[pirq] = -1; | | 706 | pirq_to_evtch[pirq] = -1; |
708 | } | | 707 | } |
709 | | | 708 | |
710 | mutex_spin_exit(&evtchn_lock); | | 709 | mutex_spin_exit(&evtchn_lock); |
711 | | | 710 | |
712 | return evtchn; | | 711 | return evtchn; |
713 | } | | 712 | } |
714 | | | 713 | |
/*
 * Establish an interrupt handler func(arg) at priority "level" for
 * physical IRQ "pirq", delivered via event channel "evtch".
 * Allocates a pintrhand (owned by the caller, freed via
 * pirq_disestablish()), registers pirq_interrupt() as the event
 * handler, then primes, unmasks and acks the pirq event.
 * Returns NULL on allocation or registration failure.
 */
struct pintrhand *
pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
    const char *intrname, const char *xname, bool known_mpsafe)
{
	struct pintrhand *ih;

	/* KM_NOSLEEP while cold: sleeping is not allowed that early. */
	ih = kmem_zalloc(sizeof(struct pintrhand),
	    cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL) {
		printf("pirq_establish: can't allocate handler info\n");
		return NULL;
	}

	KASSERT(evtch > 0);

	ih->pirq = pirq;
	ih->evtch = evtch;
	ih->func = func;
	ih->arg = arg;

	if (event_set_handler(evtch, pirq_interrupt, ih, level, intrname,
	    xname, known_mpsafe) != 0) {
		/* Registration failed: release the handler we allocated. */
		kmem_free(ih, sizeof(struct pintrhand));
		return NULL;
	}

	hypervisor_prime_pirq_event(pirq, evtch);
	hypervisor_unmask_event(evtch);
	hypervisor_ack_pirq_event(evtch);
	return ih;
}
746 | | | 745 | |
747 | void | | 746 | void |
748 | pirq_disestablish(struct pintrhand *ih) | | 747 | pirq_disestablish(struct pintrhand *ih) |
749 | { | | 748 | { |
750 | int error = event_remove_handler(ih->evtch, pirq_interrupt, ih); | | 749 | int error = event_remove_handler(ih->evtch, pirq_interrupt, ih); |
751 | if (error) { | | 750 | if (error) { |
752 | printf("pirq_disestablish(%p): %d\n", ih, error); | | 751 | printf("pirq_disestablish(%p): %d\n", ih, error); |
753 | return; | | 752 | return; |
754 | } | | 753 | } |
755 | kmem_free(ih, sizeof(struct pintrhand)); | | 754 | kmem_free(ih, sizeof(struct pintrhand)); |
756 | } | | 755 | } |
757 | | | 756 | |
758 | int | | 757 | int |
759 | pirq_interrupt(void *arg) | | 758 | pirq_interrupt(void *arg) |
760 | { | | 759 | { |
761 | struct pintrhand *ih = arg; | | 760 | struct pintrhand *ih = arg; |
762 | int ret; | | 761 | int ret; |
763 | | | 762 | |
764 | ret = ih->func(ih->arg); | | 763 | ret = ih->func(ih->arg); |
765 | #ifdef IRQ_DEBUG | | 764 | #ifdef IRQ_DEBUG |
766 | if (ih->evtch == IRQ_DEBUG) | | 765 | if (ih->evtch == IRQ_DEBUG) |
767 | printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret); | | 766 | printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret); |
768 | #endif | | 767 | #endif |
769 | return ret; | | 768 | return ret; |
770 | } | | 769 | } |
771 | | | 770 | |
772 | #endif /* NPCI > 0 || NISA > 0 */ | | 771 | #endif /* NPCI > 0 || NISA > 0 */ |
773 | | | 772 | |
774 | | | 773 | |
/*
 * Recalculate the interrupt from scratch for an event source:
 * recompute the source's maximum IPL and its SIR interrupt mask from
 * the handler list, and enable/disable delivery of this channel on
 * the current cpu depending on whether any handler runs on "ci".
 */
static void
intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
{
	struct intrhand *ih;
	int cpu_receive = 0;

#ifdef MULTIPROCESSOR
	/* Lock-order check: we must not already hold the event lock. */
	KASSERT(!mutex_owned(&evtlock[evtch]));
#endif
	mutex_spin_enter(&evtlock[evtch]);
	evts->ev_maxlevel = IPL_NONE;
	evts->ev_imask = 0;
	for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
		if (ih->ih_level > evts->ev_maxlevel)
			evts->ev_maxlevel = ih->ih_level;
		evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level));
		if (ih->ih_cpu == ci)
			cpu_receive = 1;
	}
	/*
	 * NOTE(review): the receive decision above is made against the
	 * "ci" argument, but the mask bit is toggled on curcpu() --
	 * confirm callers always pass ci == curcpu().
	 */
	if (cpu_receive)
		xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
	else
		xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
	mutex_spin_exit(&evtlock[evtch]);
}
803 | | | 802 | |
/*
 * Register handler func(arg) at priority "level" for event channel
 * "evtch" on the current cpu.  On first use of the channel this
 * creates the evtsource, its spin lock and its event counter;
 * otherwise the handler is inserted into the existing list, which is
 * kept sorted by IPL (highest first).  "xname" is appended to the
 * source's device-name string; non-mpsafe handlers are wrapped to
 * take the big lock.  Always returns 0 (allocation failure panics).
 */
int
event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
    const char *intrname, const char *xname, bool mpsafe)
{
	struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */
	struct evtsource *evts;
	struct intrhand *ih, **ihp;
	int s;

#ifdef IRQ_DEBUG
	printf("event_set_handler IRQ %d handler %p\n", evtch, func);
#endif

	KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
	KASSERTMSG(evtch < NR_EVENT_CHANNELS,
	    "evtch number %d > NR_EVENT_CHANNELS", evtch);
	KASSERT(intrname != NULL && xname != NULL);

#if 0
	printf("event_set_handler evtch %d handler %p level %d\n", evtch,
	    handler, level);
#endif
	ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP);
	if (ih == NULL)
		panic("can't allocate fixed interrupt source");


	ih->ih_level = level;
	ih->ih_fun = ih->ih_realfun = func;
	ih->ih_arg = ih->ih_realarg = arg;
	ih->ih_evt_next = NULL;
	ih->ih_next = NULL;
	ih->ih_cpu = ci;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		/* Route through the biglock wrapper; real fun/arg are
		 * preserved in ih_realfun/ih_realarg above. */
		ih->ih_fun = xen_intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	s = splhigh();

	/* register per-cpu handler for spllower() */
	event_set_iplhandler(ci, ih, level);

	/* register handler for event channel */
	if (evtsource[evtch] == NULL) {
		evts = kmem_zalloc(sizeof (struct evtsource),
		    KM_NOSLEEP);
		if (evts == NULL)
			panic("can't allocate fixed interrupt source");

		evts->ev_handlers = ih;
		/*
		 * XXX: We're assuming here that ci is the same cpu as
		 * the one on which this event/port is bound on. The
		 * api needs to be reshuffled so that this assumption
		 * is more explicitly implemented.
		 */
		evts->ev_cpu = ci;
		mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH);
		evtsource[evtch] = evts;
		strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname));

		evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
		    device_xname(ci->ci_dev), evts->ev_intrname);
	} else {
		evts = evtsource[evtch];
		/* sort by IPL order, higher first */
		mutex_spin_enter(&evtlock[evtch]);
		for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
			if ((*ihp)->ih_level < ih->ih_level) {
				/* insert before *ihp */
				ih->ih_evt_next = *ihp;
				*ihp = ih;
				break;
			}
			if ((*ihp)->ih_evt_next == NULL) {
				/* lowest priority: append at the tail */
				(*ihp)->ih_evt_next = ih;
				break;
			}
		}
		mutex_spin_exit(&evtlock[evtch]);
	}


	// append device name
	if (evts->ev_xname[0] != '\0')
		strlcat(evts->ev_xname, ", ", sizeof(evts->ev_xname));
	strlcat(evts->ev_xname, xname, sizeof(evts->ev_xname));

	intr_calculatemasks(evts, evtch, ci);
	splx(s);

	return 0;
}
900 | | | 899 | |
/*
 * Register "ih" on cpu "ci"'s per-IPL handler chain (used by
 * spllower() replay -- see the call site in event_set_handler()).
 * On first use of this SIR level the per-cpu intrsource is created
 * and wired to the xenev stubs; afterwards the handler is simply
 * prepended to the existing chain.  "ih" must already be assigned to
 * "ci".
 */
void
event_set_iplhandler(struct cpu_info *ci,
    struct intrhand *ih,
    int level)
{
	struct intrsource *ipls;
	int sir = XEN_IPL2SIR(level);
	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);

	KASSERT(ci == ih->ih_cpu);
	if (ci->ci_isources[sir] == NULL) {
		/* First handler at this level: build the intrsource. */
		ipls = kmem_zalloc(sizeof (struct intrsource),
		    KM_NOSLEEP);
		if (ipls == NULL)
			panic("can't allocate fixed interrupt source");
		ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse;
		ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume;
		ipls->is_handlers = ih;
		ipls->is_maxlevel = level;
		ipls->is_pic = &xen_pic;
		ci->ci_isources[sir] = ipls;
		x86_intr_calculatemasks(ci);
	} else {
		/* Prepend to the existing per-level chain. */
		ipls = ci->ci_isources[sir];
		ih->ih_next = ipls->is_handlers;
		ipls->is_handlers = ih;
	}
}
929 | | | 928 | |
/*
 * Remove the handler matching (func, arg) from event channel "evtch".
 * The handler is unlinked both from the event source's handler list and
 * from the per-CPU interrupt source for its soft-interrupt level; if it
 * was the last handler, the whole event source is torn down.
 * Returns ENOENT when the channel has no event source or no matching
 * handler; panics if the two handler lists disagree.
 */
int
event_remove_handler(int evtch, int (*func)(void *), void *arg)
{
	struct intrsource *ipls;
	struct evtsource *evts;
	struct intrhand *ih;
	struct intrhand **ihp;
	struct cpu_info *ci;

	evts = evtsource[evtch];
	if (evts == NULL)
		return ENOENT;

	mutex_spin_enter(&evtlock[evtch]);
	/*
	 * Walk the event source's list, keeping ihp as the address of
	 * the link pointing at ih so the entry can be unlinked in place.
	 */
	for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
	    ih != NULL;
	    ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL) {
		mutex_spin_exit(&evtlock[evtch]);
		return ENOENT;
	}
	ci = ih->ih_cpu;
	/* Unlink from the event source's handler list. */
	*ihp = ih->ih_evt_next;

	/*
	 * The same intrhand is also linked on the per-CPU interrupt
	 * source for its soft-interrupt level; unlink it there too.
	 */
	int sir = XEN_IPL2SIR(ih->ih_level);
	KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
	ipls = ci->ci_isources[sir];
	for (ihp = &ipls->is_handlers, ih = ipls->is_handlers;
	    ih != NULL;
	    ihp = &ih->ih_next, ih = ih->ih_next) {
		if (ih->ih_realfun == func && ih->ih_realarg == arg)
			break;
	}
	if (ih == NULL)
		panic("event_remove_handler");
	*ihp = ih->ih_next;
	mutex_spin_exit(&evtlock[evtch]);
	kmem_free(ih, sizeof (struct intrhand));
	if (evts->ev_handlers == NULL) {
		/*
		 * Last handler gone: stop delivering this channel to the
		 * CPU, detach the event counter and free the source.
		 */
		xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
		evcnt_detach(&evts->ev_evcnt);
		kmem_free(evts, sizeof (struct evtsource));
		evtsource[evtch] = NULL;
	} else {
		/* Recompute masks/levels for the remaining handlers. */
		intr_calculatemasks(evts, evtch, ci);
	}
	return 0;
}
981 | | | 980 | |
982 | #if NPCI > 0 || NISA > 0 | | 981 | #if NPCI > 0 || NISA > 0 |
983 | void | | 982 | void |
984 | hypervisor_prime_pirq_event(int pirq, unsigned int evtch) | | 983 | hypervisor_prime_pirq_event(int pirq, unsigned int evtch) |
985 | { | | 984 | { |
986 | physdev_op_t physdev_op; | | 985 | physdev_op_t physdev_op; |
987 | physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY; | | 986 | physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY; |
988 | physdev_op.u.irq_status_query.irq = pirq; | | 987 | physdev_op.u.irq_status_query.irq = pirq; |
989 | if (HYPERVISOR_physdev_op(&physdev_op) < 0) | | 988 | if (HYPERVISOR_physdev_op(&physdev_op) < 0) |
990 | panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); | | 989 | panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); |
991 | if (physdev_op.u.irq_status_query.flags & | | 990 | if (physdev_op.u.irq_status_query.flags & |
992 | PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) { | | 991 | PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) { |
993 | pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f)); | | 992 | pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f)); |
994 | #ifdef IRQ_DEBUG | | 993 | #ifdef IRQ_DEBUG |
995 | printf("pirq %d needs notify\n", pirq); | | 994 | printf("pirq %d needs notify\n", pirq); |
996 | #endif | | 995 | #endif |
997 | } | | 996 | } |
998 | } | | 997 | } |
999 | | | 998 | |
1000 | void | | 999 | void |
1001 | hypervisor_ack_pirq_event(unsigned int evtch) | | 1000 | hypervisor_ack_pirq_event(unsigned int evtch) |
1002 | { | | 1001 | { |
1003 | #ifdef IRQ_DEBUG | | 1002 | #ifdef IRQ_DEBUG |
1004 | if (evtch == IRQ_DEBUG) | | 1003 | if (evtch == IRQ_DEBUG) |
1005 | printf("%s: evtch %d\n", __func__, evtch); | | 1004 | printf("%s: evtch %d\n", __func__, evtch); |
1006 | #endif | | 1005 | #endif |
1007 | | | 1006 | |
1008 | if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) { | | 1007 | if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) { |
1009 | #ifdef IRQ_DEBUG | | 1008 | #ifdef IRQ_DEBUG |
1010 | if (evtch == IRQ_DEBUG) | | 1009 | if (evtch == IRQ_DEBUG) |
1011 | printf("pirq_notify(%d)\n", evtch); | | 1010 | printf("pirq_notify(%d)\n", evtch); |
1012 | #endif | | 1011 | #endif |
1013 | (void)HYPERVISOR_physdev_op(&physdev_op_notify); | | 1012 | (void)HYPERVISOR_physdev_op(&physdev_op_notify); |
1014 | } | | 1013 | } |
1015 | } | | 1014 | } |
1016 | #endif /* NPCI > 0 || NISA > 0 */ | | 1015 | #endif /* NPCI > 0 || NISA > 0 */ |
1017 | | | 1016 | |
/*
 * Handler for the Xen debug event channel: snapshot the current CPU's
 * interrupt state and the shared-info event bitmaps, then print them.
 * Always returns 0.
 */
int
xen_debug_handler(void *arg)
{
	struct cpu_info *ci = curcpu();
	int i;
	/* Snapshot scalar state first so the printout is consistent. */
	int xci_ilevel = ci->ci_ilevel;
	int xci_ipending = ci->ci_ipending;
	int xci_idepth = ci->ci_idepth;
	u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
	u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
	u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
	unsigned long evtchn_mask[sizeof(unsigned long) * 8];
	unsigned long evtchn_pending[sizeof(unsigned long) * 8];

	u_long p;

	/*
	 * Copy the shared-info bitmaps through an integer-cast pointer,
	 * presumably to strip qualifiers on the shared page — verify.
	 */
	p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
	memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
	p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
	memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));

	/* Keep the compiler from moving the reads past this point. */
	__insn_barrier();
	printf("debug event\n");
	printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
	    xci_ilevel, xci_ipending, xci_idepth);
	printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
	    " evtchn_pending_sel 0x%lx\n",
		upcall_pending, upcall_mask, pending_sel);
	printf("evtchn_mask");
	for (i = 0 ; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_mask[i]);
	printf("\n");
	printf("evtchn_pending");
	for (i = 0 ; i <= LONG_MASK; i++)
		printf(" %lx", (u_long)evtchn_pending[i]);
	printf("\n");
	return 0;
}
1056 | | | 1055 | |
1057 | #ifdef XENPV | | 1056 | #ifdef XENPV |
1058 | static struct evtsource * | | 1057 | static struct evtsource * |
1059 | event_get_handler(const char *intrid) | | 1058 | event_get_handler(const char *intrid) |
1060 | { | | 1059 | { |
1061 | for (int i = 0; i < NR_EVENT_CHANNELS; i++) { | | 1060 | for (int i = 0; i < NR_EVENT_CHANNELS; i++) { |
1062 | if (evtsource[i] == NULL || i == debug_port) | | 1061 | if (evtsource[i] == NULL || i == debug_port) |
1063 | continue; | | 1062 | continue; |
1064 | | | 1063 | |
1065 | struct evtsource *evp = evtsource[i]; | | 1064 | struct evtsource *evp = evtsource[i]; |
1066 | | | 1065 | |
1067 | if (strcmp(evp->ev_intrname, intrid) == 0) | | 1066 | if (strcmp(evp->ev_intrname, intrid) == 0) |
1068 | return evp; | | 1067 | return evp; |
1069 | } | | 1068 | } |
1070 | | | 1069 | |
1071 | return NULL; | | 1070 | return NULL; |
1072 | } | | 1071 | } |
1073 | | | 1072 | |
1074 | /* | | 1073 | /* |
1075 | * MI interface for subr_interrupt.c | | 1074 | * MI interface for subr_interrupt.c |
1076 | */ | | 1075 | */ |
1077 | uint64_t | | 1076 | uint64_t |
1078 | interrupt_get_count(const char *intrid, u_int cpu_idx) | | 1077 | interrupt_get_count(const char *intrid, u_int cpu_idx) |
1079 | { | | 1078 | { |
1080 | int count = 0; | | 1079 | int count = 0; |
1081 | struct evtsource *evp; | | 1080 | struct evtsource *evp; |
1082 | | | 1081 | |
1083 | mutex_spin_enter(&evtchn_lock); | | 1082 | mutex_spin_enter(&evtchn_lock); |
1084 | | | 1083 | |
1085 | evp = event_get_handler(intrid); | | 1084 | evp = event_get_handler(intrid); |
1086 | if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu)) | | 1085 | if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu)) |
1087 | count = evp->ev_evcnt.ev_count; | | 1086 | count = evp->ev_evcnt.ev_count; |
1088 | | | 1087 | |
1089 | mutex_spin_exit(&evtchn_lock); | | 1088 | mutex_spin_exit(&evtchn_lock); |
1090 | | | 1089 | |
1091 | return count; | | 1090 | return count; |
1092 | } | | 1091 | } |
1093 | | | 1092 | |
1094 | /* | | 1093 | /* |
1095 | * MI interface for subr_interrupt.c | | 1094 | * MI interface for subr_interrupt.c |
1096 | */ | | 1095 | */ |
1097 | void | | 1096 | void |
1098 | interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) | | 1097 | interrupt_get_assigned(const char *intrid, kcpuset_t *cpuset) |
1099 | { | | 1098 | { |
1100 | struct evtsource *evp; | | 1099 | struct evtsource *evp; |
1101 | | | 1100 | |
1102 | kcpuset_zero(cpuset); | | 1101 | kcpuset_zero(cpuset); |
1103 | | | 1102 | |
1104 | mutex_spin_enter(&evtchn_lock); | | 1103 | mutex_spin_enter(&evtchn_lock); |
1105 | | | 1104 | |
1106 | evp = event_get_handler(intrid); | | 1105 | evp = event_get_handler(intrid); |
1107 | if (evp != NULL) | | 1106 | if (evp != NULL) |
1108 | kcpuset_set(cpuset, cpu_index(evp->ev_cpu)); | | 1107 | kcpuset_set(cpuset, cpu_index(evp->ev_cpu)); |
1109 | | | 1108 | |
1110 | mutex_spin_exit(&evtchn_lock); | | 1109 | mutex_spin_exit(&evtchn_lock); |
1111 | } | | 1110 | } |
1112 | | | 1111 | |
1113 | /* | | 1112 | /* |
1114 | * MI interface for subr_interrupt.c | | 1113 | * MI interface for subr_interrupt.c |
1115 | */ | | 1114 | */ |
1116 | void | | 1115 | void |
1117 | interrupt_get_devname(const char *intrid, char *buf, size_t len) | | 1116 | interrupt_get_devname(const char *intrid, char *buf, size_t len) |
1118 | { | | 1117 | { |
1119 | struct evtsource *evp; | | 1118 | struct evtsource *evp; |
1120 | | | 1119 | |
1121 | mutex_spin_enter(&evtchn_lock); | | 1120 | mutex_spin_enter(&evtchn_lock); |
1122 | | | 1121 | |
1123 | evp = event_get_handler(intrid); | | 1122 | evp = event_get_handler(intrid); |
1124 | strlcpy(buf, evp ? evp->ev_xname : "unknown", len); | | 1123 | strlcpy(buf, evp ? evp->ev_xname : "unknown", len); |
1125 | | | 1124 | |
1126 | mutex_spin_exit(&evtchn_lock); | | 1125 | mutex_spin_exit(&evtchn_lock); |
1127 | } | | 1126 | } |
1128 | | | 1127 | |
1129 | /* | | 1128 | /* |
1130 | * MI interface for subr_interrupt. | | 1129 | * MI interface for subr_interrupt. |
1131 | */ | | 1130 | */ |
1132 | struct intrids_handler * | | 1131 | struct intrids_handler * |
1133 | interrupt_construct_intrids(const kcpuset_t *cpuset) | | 1132 | interrupt_construct_intrids(const kcpuset_t *cpuset) |
1134 | { | | 1133 | { |
1135 | struct intrids_handler *ii_handler; | | 1134 | struct intrids_handler *ii_handler; |
1136 | intrid_t *ids; | | 1135 | intrid_t *ids; |
1137 | int i, count, off; | | 1136 | int i, count, off; |
1138 | struct evtsource *evp; | | 1137 | struct evtsource *evp; |
1139 | | | 1138 | |
1140 | if (kcpuset_iszero(cpuset)) | | 1139 | if (kcpuset_iszero(cpuset)) |
1141 | return 0; | | 1140 | return 0; |
1142 | | | 1141 | |
1143 | /* | | 1142 | /* |
1144 | * Count the number of interrupts which affinity to any cpu of "cpuset". | | 1143 | * Count the number of interrupts which affinity to any cpu of "cpuset". |
1145 | */ | | 1144 | */ |
1146 | count = 0; | | 1145 | count = 0; |
1147 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | | 1146 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
1148 | evp = evtsource[i]; | | 1147 | evp = evtsource[i]; |
1149 | | | 1148 | |
1150 | if (evp == NULL || i == debug_port) | | 1149 | if (evp == NULL || i == debug_port) |
1151 | continue; | | 1150 | continue; |
1152 | | | 1151 | |
1153 | if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) | | 1152 | if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) |
1154 | continue; | | 1153 | continue; |
1155 | | | 1154 | |
1156 | count++; | | 1155 | count++; |
1157 | } | | 1156 | } |
1158 | | | 1157 | |
1159 | ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, | | 1158 | ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, |
1160 | KM_SLEEP); | | 1159 | KM_SLEEP); |
1161 | if (ii_handler == NULL) | | 1160 | if (ii_handler == NULL) |
1162 | return NULL; | | 1161 | return NULL; |
1163 | ii_handler->iih_nids = count; | | 1162 | ii_handler->iih_nids = count; |
1164 | if (count == 0) | | 1163 | if (count == 0) |
1165 | return ii_handler; | | 1164 | return ii_handler; |
1166 | | | 1165 | |
1167 | ids = ii_handler->iih_intrids; | | 1166 | ids = ii_handler->iih_intrids; |
1168 | mutex_spin_enter(&evtchn_lock); | | 1167 | mutex_spin_enter(&evtchn_lock); |
1169 | for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) { | | 1168 | for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) { |
1170 | evp = evtsource[i]; | | 1169 | evp = evtsource[i]; |
1171 | | | 1170 | |
1172 | if (evp == NULL || i == debug_port) | | 1171 | if (evp == NULL || i == debug_port) |
1173 | continue; | | 1172 | continue; |
1174 | | | 1173 | |
1175 | if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) | | 1174 | if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) |
1176 | continue; | | 1175 | continue; |
1177 | | | 1176 | |
1178 | snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname); | | 1177 | snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname); |
1179 | off++; | | 1178 | off++; |
1180 | } | | 1179 | } |
1181 | mutex_spin_exit(&evtchn_lock); | | 1180 | mutex_spin_exit(&evtchn_lock); |
1182 | | | 1181 | |
1183 | return ii_handler; | | 1182 | return ii_handler; |
1184 | } | | 1183 | } |
1185 | #endif /* XENPV */ | | 1184 | #endif /* XENPV */ |