| @@ -1,867 +1,867 @@ | | | @@ -1,867 +1,867 @@ |
1 | /* $NetBSD: evtchn.c,v 1.60 2011/12/07 16:26:23 cegger Exp $ */ | | 1 | /* $NetBSD: evtchn.c,v 1.61 2011/12/08 03:34:44 cherry Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2006 Manuel Bouyer. | | 4 | * Copyright (c) 2006 Manuel Bouyer. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | * | | 25 | * |
26 | */ | | 26 | */ |
27 | | | 27 | |
28 | /* | | 28 | /* |
29 | * | | 29 | * |
30 | * Copyright (c) 2004 Christian Limpach. | | 30 | * Copyright (c) 2004 Christian Limpach. |
31 | * Copyright (c) 2004, K A Fraser. | | 31 | * Copyright (c) 2004, K A Fraser. |
32 | * All rights reserved. | | 32 | * All rights reserved. |
33 | * | | 33 | * |
34 | * Redistribution and use in source and binary forms, with or without | | 34 | * Redistribution and use in source and binary forms, with or without |
35 | * modification, are permitted provided that the following conditions | | 35 | * modification, are permitted provided that the following conditions |
36 | * are met: | | 36 | * are met: |
37 | * 1. Redistributions of source code must retain the above copyright | | 37 | * 1. Redistributions of source code must retain the above copyright |
38 | * notice, this list of conditions and the following disclaimer. | | 38 | * notice, this list of conditions and the following disclaimer. |
39 | * 2. Redistributions in binary form must reproduce the above copyright | | 39 | * 2. Redistributions in binary form must reproduce the above copyright |
40 | * notice, this list of conditions and the following disclaimer in the | | 40 | * notice, this list of conditions and the following disclaimer in the |
41 | * documentation and/or other materials provided with the distribution. | | 41 | * documentation and/or other materials provided with the distribution. |
42 | * | | 42 | * |
43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
44 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 44 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
45 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 45 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
46 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 46 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
47 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 47 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
48 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 48 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
49 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 49 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
50 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 50 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
51 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 51 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
52 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 52 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
53 | */ | | 53 | */ |
54 | | | 54 | |
55 | | | 55 | |
56 | #include <sys/cdefs.h> | | 56 | #include <sys/cdefs.h> |
57 | __KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.60 2011/12/07 16:26:23 cegger Exp $"); | | 57 | __KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.61 2011/12/08 03:34:44 cherry Exp $"); |
58 | | | 58 | |
59 | #include "opt_xen.h" | | 59 | #include "opt_xen.h" |
60 | #include "isa.h" | | 60 | #include "isa.h" |
61 | #include "pci.h" | | 61 | #include "pci.h" |
62 | | | 62 | |
63 | #include <sys/param.h> | | 63 | #include <sys/param.h> |
64 | #include <sys/cpu.h> | | 64 | #include <sys/cpu.h> |
65 | #include <sys/kernel.h> | | 65 | #include <sys/kernel.h> |
66 | #include <sys/systm.h> | | 66 | #include <sys/systm.h> |
67 | #include <sys/device.h> | | 67 | #include <sys/device.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/kmem.h> | | 69 | #include <sys/kmem.h> |
70 | #include <sys/reboot.h> | | 70 | #include <sys/reboot.h> |
71 | #include <sys/mutex.h> | | 71 | #include <sys/mutex.h> |
72 | | | 72 | |
73 | #include <uvm/uvm.h> | | 73 | #include <uvm/uvm.h> |
74 | | | 74 | |
75 | #include <machine/intrdefs.h> | | 75 | #include <machine/intrdefs.h> |
76 | | | 76 | |
77 | #include <xen/xen.h> | | 77 | #include <xen/xen.h> |
78 | #include <xen/hypervisor.h> | | 78 | #include <xen/hypervisor.h> |
79 | #include <xen/evtchn.h> | | 79 | #include <xen/evtchn.h> |
80 | #include <xen/xenfunc.h> | | 80 | #include <xen/xenfunc.h> |
81 | | | 81 | |
82 | /* | | 82 | /* |
83 | * This lock protects updates to the following mapping and reference-count | | 83 | * This lock protects updates to the following mapping and reference-count |
84 | * arrays. The lock does not need to be acquired to read the mapping tables. | | 84 | * arrays. The lock does not need to be acquired to read the mapping tables. |
85 | */ | | 85 | */ |
86 | static kmutex_t evtchn_lock; | | 86 | static kmutex_t evtchn_lock; |
87 | | | 87 | |
88 | /* event handlers */ | | 88 | /* event handlers */ |
89 | struct evtsource *evtsource[NR_EVENT_CHANNELS]; | | 89 | struct evtsource *evtsource[NR_EVENT_CHANNELS]; |
90 | | | 90 | |
91 | /* channel locks */ | | 91 | /* channel locks */ |
92 | static kmutex_t evtlock[NR_EVENT_CHANNELS]; | | 92 | static kmutex_t evtlock[NR_EVENT_CHANNELS]; |
93 | | | 93 | |
94 | /* Reference counts for bindings to event channels XXX: redo for SMP */ | | 94 | /* Reference counts for bindings to event channels XXX: redo for SMP */ |
95 | static uint8_t evtch_bindcount[NR_EVENT_CHANNELS]; | | 95 | static uint8_t evtch_bindcount[NR_EVENT_CHANNELS]; |
96 | | | 96 | |
97 | /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */ | | 97 | /* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */ |
98 | static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS]; | | 98 | static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS]; |
99 | | | 99 | |
100 | /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */ | | 100 | /* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */ |
101 | static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS]; | | 101 | static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS]; |
102 | | | 102 | |
103 | /* event-channel <-> VIRQ mapping. */ | | 103 | /* event-channel <-> VIRQ mapping. */ |
104 | static int virq_to_evtch[NR_VIRQS]; | | 104 | static int virq_to_evtch[NR_VIRQS]; |
105 | | | 105 | |
106 | | | 106 | |
107 | #if NPCI > 0 || NISA > 0 | | 107 | #if NPCI > 0 || NISA > 0 |
108 | /* event-channel <-> PIRQ mapping */ | | 108 | /* event-channel <-> PIRQ mapping */ |
109 | static int pirq_to_evtch[NR_PIRQS]; | | 109 | static int pirq_to_evtch[NR_PIRQS]; |
110 | /* PIRQ needing notify */ | | 110 | /* PIRQ needing notify */ |
111 | static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32]; | | 111 | static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32]; |
112 | int pirq_interrupt(void *); | | 112 | int pirq_interrupt(void *); |
113 | physdev_op_t physdev_op_notify = { | | 113 | physdev_op_t physdev_op_notify = { |
114 | .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY, | | 114 | .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY, |
115 | }; | | 115 | }; |
116 | #endif | | 116 | #endif |
117 | | | 117 | |
118 | int debug_port = -1; | | 118 | int debug_port = -1; |
119 | | | 119 | |
120 | // #define IRQ_DEBUG 4 | | 120 | // #define IRQ_DEBUG 4 |
121 | | | 121 | |
122 | /* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */ | | 122 | /* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */ |
123 | #ifdef MULTIPROCESSOR | | 123 | #ifdef MULTIPROCESSOR |
124 | | | 124 | |
125 | /* | | 125 | /* |
126 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. | | 126 | * intr_biglock_wrapper: grab biglock and call a real interrupt handler. |
127 | */ | | 127 | */ |
128 | | | 128 | |
129 | int | | 129 | int |
130 | intr_biglock_wrapper(void *vp) | | 130 | intr_biglock_wrapper(void *vp) |
131 | { | | 131 | { |
132 | struct intrhand *ih = vp; | | 132 | struct intrhand *ih = vp; |
133 | int ret; | | 133 | int ret; |
134 | | | 134 | |
135 | KERNEL_LOCK(1, NULL); | | 135 | KERNEL_LOCK(1, NULL); |
136 | | | 136 | |
137 | ret = (*ih->ih_realfun)(ih->ih_realarg); | | 137 | ret = (*ih->ih_realfun)(ih->ih_realarg); |
138 | | | 138 | |
139 | KERNEL_UNLOCK_ONE(NULL); | | 139 | KERNEL_UNLOCK_ONE(NULL); |
140 | | | 140 | |
141 | return ret; | | 141 | return ret; |
142 | } | | 142 | } |
143 | #endif /* MULTIPROCESSOR */ | | 143 | #endif /* MULTIPROCESSOR */ |
144 | | | 144 | |
145 | void | | 145 | void |
146 | events_default_setup(void) | | 146 | events_default_setup(void) |
147 | { | | 147 | { |
148 | int i; | | 148 | int i; |
149 | | | 149 | |
150 | /* No VCPU -> event mappings. */ | | 150 | /* No VCPU -> event mappings. */ |
151 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) | | 151 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) |
152 | vcpu_ipi_to_evtch[i] = -1; | | 152 | vcpu_ipi_to_evtch[i] = -1; |
153 | | | 153 | |
154 | /* No VIRQ_TIMER -> event mappings. */ | | 154 | /* No VIRQ_TIMER -> event mappings. */ |
155 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) | | 155 | for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) |
156 | virq_timer_to_evtch[i] = -1; | | 156 | virq_timer_to_evtch[i] = -1; |
157 | | | 157 | |
158 | /* No VIRQ -> event mappings. */ | | 158 | /* No VIRQ -> event mappings. */ |
159 | for (i = 0; i < NR_VIRQS; i++) | | 159 | for (i = 0; i < NR_VIRQS; i++) |
160 | virq_to_evtch[i] = -1; | | 160 | virq_to_evtch[i] = -1; |
161 | | | 161 | |
162 | #if NPCI > 0 || NISA > 0 | | 162 | #if NPCI > 0 || NISA > 0 |
163 | /* No PIRQ -> event mappings. */ | | 163 | /* No PIRQ -> event mappings. */ |
164 | for (i = 0; i < NR_PIRQS; i++) | | 164 | for (i = 0; i < NR_PIRQS; i++) |
165 | pirq_to_evtch[i] = -1; | | 165 | pirq_to_evtch[i] = -1; |
166 | for (i = 0; i < NR_EVENT_CHANNELS / 32; i++) | | 166 | for (i = 0; i < NR_EVENT_CHANNELS / 32; i++) |
167 | pirq_needs_unmask_notify[i] = 0; | | 167 | pirq_needs_unmask_notify[i] = 0; |
168 | #endif | | 168 | #endif |
169 | | | 169 | |
170 | /* No event-channels are 'live' right now. */ | | 170 | /* No event-channels are 'live' right now. */ |
171 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { | | 171 | for (i = 0; i < NR_EVENT_CHANNELS; i++) { |
172 | evtsource[i] = NULL; | | 172 | evtsource[i] = NULL; |
173 | evtch_bindcount[i] = 0; | | 173 | evtch_bindcount[i] = 0; |
174 | hypervisor_mask_event(i); | | 174 | hypervisor_mask_event(i); |
175 | } | | 175 | } |
176 | | | 176 | |
177 | } | | 177 | } |
178 | | | 178 | |
179 | void | | 179 | void |
180 | events_init(void) | | 180 | events_init(void) |
181 | { | | 181 | { |
182 | mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE); | | 182 | mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE); |
183 | debug_port = bind_virq_to_evtch(VIRQ_DEBUG); | | 183 | debug_port = bind_virq_to_evtch(VIRQ_DEBUG); |
184 | | | 184 | |
185 | KASSERT(debug_port != -1); | | 185 | KASSERT(debug_port != -1); |
186 | | | 186 | |
187 | aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n", | | 187 | aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n", |
188 | debug_port); | | 188 | debug_port); |
189 | /* | | 189 | /* |
190 | * Don't call event_set_handler(), we'll use a shortcut. Just set | | 190 | * Don't call event_set_handler(), we'll use a shortcut. Just set |
191 | * evtsource[] to a non-NULL value so that evtchn_do_event will | | 191 | * evtsource[] to a non-NULL value so that evtchn_do_event will |
192 | * be called. | | 192 | * be called. |
193 | */ | | 193 | */ |
194 | evtsource[debug_port] = (void *)-1; | | 194 | evtsource[debug_port] = (void *)-1; |
195 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port); | | 195 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port); |
196 | hypervisor_enable_event(debug_port); | | 196 | hypervisor_enable_event(debug_port); |
197 | | | 197 | |
198 | x86_enable_intr(); /* at long last... */ | | 198 | x86_enable_intr(); /* at long last... */ |
199 | } | | 199 | } |
200 | | | 200 | |
201 | bool | | 201 | bool |
202 | events_suspend(void) | | 202 | events_suspend(void) |
203 | { | | 203 | { |
204 | int evtch; | | 204 | int evtch; |
205 | | | 205 | |
206 | x86_disable_intr(); | | 206 | x86_disable_intr(); |
207 | | | 207 | |
208 | /* VIRQ_DEBUG is the last interrupt to remove */ | | 208 | /* VIRQ_DEBUG is the last interrupt to remove */ |
209 | evtch = unbind_virq_from_evtch(VIRQ_DEBUG); | | 209 | evtch = unbind_virq_from_evtch(VIRQ_DEBUG); |
210 | | | 210 | |
211 | KASSERT(evtch != -1); | | 211 | KASSERT(evtch != -1); |
212 | | | 212 | |
213 | hypervisor_mask_event(evtch); | | 213 | hypervisor_mask_event(evtch); |
214 | /* Remove the non-NULL value set in events_init() */ | | 214 | /* Remove the non-NULL value set in events_init() */ |
215 | evtsource[evtch] = NULL; | | 215 | evtsource[evtch] = NULL; |
216 | aprint_verbose("VIRQ_DEBUG interrupt disabled, " | | 216 | aprint_verbose("VIRQ_DEBUG interrupt disabled, " |
217 | "event channel %d removed\n", evtch); | | 217 | "event channel %d removed\n", evtch); |
218 | | | 218 | |
219 | return true; | | 219 | return true; |
220 | } | | 220 | } |
221 | | | 221 | |
222 | bool | | 222 | bool |
223 | events_resume (void) | | 223 | events_resume (void) |
224 | { | | 224 | { |
225 | events_init(); | | 225 | events_init(); |
226 | | | 226 | |
227 | return true; | | 227 | return true; |
228 | } | | 228 | } |
229 | | | 229 | |
230 | | | 230 | |
231 | unsigned int | | 231 | unsigned int |
232 | evtchn_do_event(int evtch, struct intrframe *regs) | | 232 | evtchn_do_event(int evtch, struct intrframe *regs) |
233 | { | | 233 | { |
234 | struct cpu_info *ci; | | 234 | struct cpu_info *ci; |
235 | int ilevel; | | 235 | int ilevel; |
236 | struct intrhand *ih; | | 236 | struct intrhand *ih; |
237 | int (*ih_fun)(void *, void *); | | 237 | int (*ih_fun)(void *, void *); |
238 | uint32_t iplmask; | | 238 | uint32_t iplmask; |
239 | int i; | | 239 | int i; |
240 | uint32_t iplbit; | | 240 | uint32_t iplbit; |
241 | | | 241 | |
242 | #ifdef DIAGNOSTIC | | 242 | #ifdef DIAGNOSTIC |
243 | if (evtch >= NR_EVENT_CHANNELS) { | | 243 | if (evtch >= NR_EVENT_CHANNELS) { |
244 | printf("event number %d > NR_IRQS\n", evtch); | | 244 | printf("event number %d > NR_IRQS\n", evtch); |
245 | panic("evtchn_do_event"); | | 245 | panic("evtchn_do_event"); |
246 | } | | 246 | } |
247 | #endif | | 247 | #endif |
248 | | | 248 | |
249 | #ifdef IRQ_DEBUG | | 249 | #ifdef IRQ_DEBUG |
250 | if (evtch == IRQ_DEBUG) | | 250 | if (evtch == IRQ_DEBUG) |
251 | printf("evtchn_do_event: evtch %d\n", evtch); | | 251 | printf("evtchn_do_event: evtch %d\n", evtch); |
252 | #endif | | 252 | #endif |
253 | ci = curcpu(); | | 253 | ci = curcpu(); |
254 | | | 254 | |
255 | /* | | 255 | /* |
256 | * Shortcut for the debug handler, we want it to always run, | | 256 | * Shortcut for the debug handler, we want it to always run, |
257 | * regardless of the IPL level. | | 257 | * regardless of the IPL level. |
258 | */ | | 258 | */ |
259 | if (__predict_false(evtch == debug_port)) { | | 259 | if (__predict_false(evtch == debug_port)) { |
260 | xen_debug_handler(NULL); | | 260 | xen_debug_handler(NULL); |
261 | hypervisor_enable_event(evtch); | | 261 | hypervisor_enable_event(evtch); |
262 | return 0; | | 262 | return 0; |
263 | } | | 263 | } |
264 | | | 264 | |
265 | #ifdef DIAGNOSTIC | | 265 | #ifdef DIAGNOSTIC |
266 | if (evtsource[evtch] == NULL) { | | 266 | if (evtsource[evtch] == NULL) { |
267 | panic("evtchn_do_event: unknown event"); | | 267 | panic("evtchn_do_event: unknown event"); |
268 | } | | 268 | } |
269 | #endif | | 269 | #endif |
270 | ci->ci_data.cpu_nintr++; | | 270 | ci->ci_data.cpu_nintr++; |
271 | evtsource[evtch]->ev_evcnt.ev_count++; | | 271 | evtsource[evtch]->ev_evcnt.ev_count++; |
272 | ilevel = ci->ci_ilevel; | | 272 | ilevel = ci->ci_ilevel; |
273 | | | 273 | |
274 | if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) { | | 274 | if (evtsource[evtch]->ev_cpu != ci /* XXX: get stats */) { |
275 | hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch); | | 275 | hypervisor_send_event(evtsource[evtch]->ev_cpu, evtch); |
276 | return 0; | | 276 | return 0; |
277 | } | | 277 | } |
278 | | | 278 | |
279 | if (evtsource[evtch]->ev_maxlevel <= ilevel) { | | 279 | if (evtsource[evtch]->ev_maxlevel <= ilevel) { |
280 | #ifdef IRQ_DEBUG | | 280 | #ifdef IRQ_DEBUG |
281 | if (evtch == IRQ_DEBUG) | | 281 | if (evtch == IRQ_DEBUG) |
282 | printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n", | | 282 | printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n", |
283 | evtch, evtsource[evtch]->ev_maxlevel, ilevel); | | 283 | evtch, evtsource[evtch]->ev_maxlevel, ilevel); |
284 | #endif | | 284 | #endif |
285 | hypervisor_set_ipending(evtsource[evtch]->ev_cpu, | | 285 | hypervisor_set_ipending(evtsource[evtch]->ev_cpu, |
286 | evtsource[evtch]->ev_imask, | | 286 | evtsource[evtch]->ev_imask, |
287 | evtch >> LONG_SHIFT, | | 287 | evtch >> LONG_SHIFT, |
288 | evtch & LONG_MASK); | | 288 | evtch & LONG_MASK); |
289 | | | 289 | |
290 | /* leave masked */ | | 290 | /* leave masked */ |
291 | | | 291 | |
292 | return 0; | | 292 | return 0; |
293 | } | | 293 | } |
294 | ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; | | 294 | ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; |
295 | iplmask = evtsource[evtch]->ev_imask; | | 295 | iplmask = evtsource[evtch]->ev_imask; |
296 | sti(); | | 296 | sti(); |
297 | mutex_spin_enter(&evtlock[evtch]); | | 297 | mutex_spin_enter(&evtlock[evtch]); |
298 | ih = evtsource[evtch]->ev_handlers; | | 298 | ih = evtsource[evtch]->ev_handlers; |
299 | while (ih != NULL) { | | 299 | while (ih != NULL) { |
300 | if (ih->ih_cpu != ci) { | | 300 | if (ih->ih_cpu != ci) { |
301 | hypervisor_set_ipending(ih->ih_cpu, 1 << ih->ih_level, | | 301 | hypervisor_set_ipending(ih->ih_cpu, 1 << ih->ih_level, |
302 | evtch >> LONG_SHIFT, evtch & LONG_MASK); | | 302 | evtch >> LONG_SHIFT, evtch & LONG_MASK); |
303 | iplmask &= ~IUNMASK(ci, ih->ih_level); | | 303 | iplmask &= ~IUNMASK(ci, ih->ih_level); |
304 | ih = ih->ih_evt_next; | | 304 | ih = ih->ih_evt_next; |
305 | continue; | | 305 | continue; |
306 | } | | 306 | } |
307 | if (ih->ih_level <= ilevel) { | | 307 | if (ih->ih_level <= ilevel) { |
310 | #ifdef IRQ_DEBUG | | 310 | #ifdef IRQ_DEBUG |
311 | if (evtch == IRQ_DEBUG) | | 311 | if (evtch == IRQ_DEBUG) |
312 | printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); | | 312 | printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); |
313 | #endif | | 313 | #endif |
314 | cli(); | | 314 | cli(); |
315 | hypervisor_set_ipending(ih->ih_cpu, iplmask, | | 315 | hypervisor_set_ipending(ih->ih_cpu, iplmask, |
316 | evtch >> LONG_SHIFT, evtch & LONG_MASK); | | 316 | evtch >> LONG_SHIFT, evtch & LONG_MASK); |
317 | /* leave masked */ | | 317 | /* leave masked */ |
318 | mutex_spin_exit(&evtlock[evtch]); | | 318 | mutex_spin_exit(&evtlock[evtch]); |
319 | goto splx; | | 319 | goto splx; |
320 | } | | 320 | } |
321 | iplmask &= ~IUNMASK(ci, ih->ih_level); | | 321 | iplmask &= ~IUNMASK(ci, ih->ih_level); |
322 | ci->ci_ilevel = ih->ih_level; | | 322 | ci->ci_ilevel = ih->ih_level; |
323 | ih_fun = (void *)ih->ih_fun; | | 323 | ih_fun = (void *)ih->ih_fun; |
324 | ih_fun(ih->ih_arg, regs); | | 324 | ih_fun(ih->ih_arg, regs); |
325 | ih = ih->ih_evt_next; | | 325 | ih = ih->ih_evt_next; |
326 | } | | 326 | } |
327 | mutex_spin_exit(&evtlock[evtch]); | | 327 | mutex_spin_exit(&evtlock[evtch]); |
328 | cli(); | | 328 | cli(); |
329 | hypervisor_enable_event(evtch); | | 329 | hypervisor_enable_event(evtch); |
330 | splx: | | 330 | splx: |
331 | /* | | 331 | /* |
332 | * C version of spllower(). ASTs will be checked when | | 332 | * C version of spllower(). ASTs will be checked when |
333 | * hypervisor_callback() exits, so no need to check here. | | 333 | * hypervisor_callback() exits, so no need to check here. |
334 | */ | | 334 | */ |
335 | iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending); | | 335 | iplmask = (IUNMASK(ci, ilevel) & ci->ci_ipending); |
336 | while (iplmask != 0) { | | 336 | while (iplmask != 0) { |
337 | iplbit = 1 << (NIPL - 1); | | 337 | iplbit = 1 << (NIPL - 1); |
338 | i = (NIPL - 1); | | 338 | i = (NIPL - 1); |
339 | while (iplmask != 0 && i > ilevel) { | | 339 | while (iplmask != 0 && i > ilevel) { |
340 | while (iplmask & iplbit) { | | 340 | while (iplmask & iplbit) { |
341 | ci->ci_ipending &= ~iplbit; | | 341 | ci->ci_ipending &= ~iplbit; |
342 | ci->ci_ilevel = i; | | 342 | ci->ci_ilevel = i; |
343 | for (ih = ci->ci_isources[i]->ipl_handlers; | | 343 | for (ih = ci->ci_isources[i]->ipl_handlers; |
344 | ih != NULL; ih = ih->ih_ipl_next) { | | 344 | ih != NULL; ih = ih->ih_ipl_next) { |
345 | KASSERT(ih->ih_cpu == ci); | | 345 | KASSERT(ih->ih_cpu == ci); |
346 | sti(); | | 346 | sti(); |
347 | ih_fun = (void *)ih->ih_fun; | | 347 | ih_fun = (void *)ih->ih_fun; |
348 | ih_fun(ih->ih_arg, regs); | | 348 | ih_fun(ih->ih_arg, regs); |
349 | cli(); | | 349 | cli(); |
350 | if (ci->ci_ilevel != i) { | | 350 | if (ci->ci_ilevel != i) { |
351 | printf("evtchn_do_event: " | | 351 | printf("evtchn_do_event: " |
352 | "handler %p didn't lower " | | 352 | "handler %p didn't lower " |
353 | "ipl %d %d\n", | | 353 | "ipl %d %d\n", |
354 | ih_fun, ci->ci_ilevel, i); | | 354 | ih_fun, ci->ci_ilevel, i); |
355 | ci->ci_ilevel = i; | | 355 | ci->ci_ilevel = i; |
356 | } | | 356 | } |
357 | } | | 357 | } |
358 | hypervisor_enable_ipl(i); | | 358 | hypervisor_enable_ipl(i); |
359 | /* more pending IPLs may have been registered */ | | 359 | /* more pending IPLs may have been registered */ |
360 | iplmask = | | 360 | iplmask = |
361 | (IUNMASK(ci, ilevel) & ci->ci_ipending); | | 361 | (IUNMASK(ci, ilevel) & ci->ci_ipending); |
362 | } | | 362 | } |
363 | i--; | | 363 | i--; |
364 | iplbit >>= 1; | | 364 | iplbit >>= 1; |
365 | } | | 365 | } |
366 | } | | 366 | } |
367 | ci->ci_ilevel = ilevel; | | 367 | ci->ci_ilevel = ilevel; |
368 | return 0; | | 368 | return 0; |
369 | } | | 369 | } |
370 | | | 370 | |
371 | #define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ | | 371 | #define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ |
372 | | | 372 | |
373 | evtchn_port_t | | 373 | evtchn_port_t |
374 | bind_vcpu_to_evtch(cpuid_t vcpu) | | 374 | bind_vcpu_to_evtch(cpuid_t vcpu) |
375 | { | | 375 | { |
376 | evtchn_op_t op; | | 376 | evtchn_op_t op; |
377 | evtchn_port_t evtchn; | | 377 | evtchn_port_t evtchn; |
378 | | | 378 | |
379 | mutex_spin_enter(&evtchn_lock); | | 379 | mutex_spin_enter(&evtchn_lock); |
380 | | | 380 | |
381 | evtchn = vcpu_ipi_to_evtch[vcpu]; | | 381 | evtchn = vcpu_ipi_to_evtch[vcpu]; |
382 | if (evtchn == -1) { | | 382 | if (evtchn == -1) { |
383 | op.cmd = EVTCHNOP_bind_ipi; | | 383 | op.cmd = EVTCHNOP_bind_ipi; |
384 | op.u.bind_ipi.vcpu = (uint32_t) vcpu; | | 384 | op.u.bind_ipi.vcpu = (uint32_t) vcpu; |
385 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 385 | if (HYPERVISOR_event_channel_op(&op) != 0) |
386 | panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); | | 386 | panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); |
387 | evtchn = op.u.bind_ipi.port; | | 387 | evtchn = op.u.bind_ipi.port; |
388 | | | 388 | |
389 | vcpu_ipi_to_evtch[vcpu] = evtchn; | | 389 | vcpu_ipi_to_evtch[vcpu] = evtchn; |
390 | } | | 390 | } |
391 | | | 391 | |
392 | evtch_bindcount[evtchn]++; | | 392 | evtch_bindcount[evtchn]++; |
393 | | | 393 | |
394 | mutex_spin_exit(&evtchn_lock); | | 394 | mutex_spin_exit(&evtchn_lock); |
395 | | | 395 | |
396 | return evtchn; | | 396 | return evtchn; |
397 | } | | 397 | } |
398 | | | 398 | |
399 | int | | 399 | int |
400 | bind_virq_to_evtch(int virq) | | 400 | bind_virq_to_evtch(int virq) |
401 | { | | 401 | { |
402 | evtchn_op_t op; | | 402 | evtchn_op_t op; |
403 | int evtchn; | | 403 | int evtchn; |
404 | | | 404 | |
405 | mutex_spin_enter(&evtchn_lock); | | 405 | mutex_spin_enter(&evtchn_lock); |
406 | | | 406 | |
407 | /* | | 407 | /* |
408 | * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER. | | 408 | * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER. |
409 | * Please re-visit this implementation when others are used. | | 409 | * Please re-visit this implementation when others are used. |
410 | * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs. | | 410 | * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs. |
411 | * XXX: event->virq/ipi can be unified in a linked-list | | 411 | * XXX: event->virq/ipi can be unified in a linked-list |
412 | * implementation. | | 412 | * implementation. |
413 | */ | | 413 | */ |
414 | struct cpu_info *ci = curcpu(); | | 414 | struct cpu_info *ci = curcpu(); |
415 | | | 415 | |
416 | if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) { | | 416 | if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) { |
417 | mutex_spin_exit(&evtchn_lock); | | 417 | mutex_spin_exit(&evtchn_lock); |
418 | return -1; | | 418 | return -1; |
419 | } | | 419 | } |
420 | | | 420 | |
421 | if (virq == VIRQ_TIMER) { | | 421 | if (virq == VIRQ_TIMER) { |
422 | evtchn = virq_timer_to_evtch[ci->ci_cpuid]; | | 422 | evtchn = virq_timer_to_evtch[ci->ci_cpuid]; |
423 | } else { | | 423 | } else { |
424 | evtchn = virq_to_evtch[virq]; | | 424 | evtchn = virq_to_evtch[virq]; |
425 | } | | 425 | } |
426 | | | 426 | |
427 | /* Allocate a channel if there is none already allocated */ | | 427 | /* Allocate a channel if there is none already allocated */ |
428 | if (evtchn == -1) { | | 428 | if (evtchn == -1) { |
429 | op.cmd = EVTCHNOP_bind_virq; | | 429 | op.cmd = EVTCHNOP_bind_virq; |
430 | op.u.bind_virq.virq = virq; | | 430 | op.u.bind_virq.virq = virq; |
431 | op.u.bind_virq.vcpu = ci->ci_cpuid; | | 431 | op.u.bind_virq.vcpu = ci->ci_cpuid; |
432 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 432 | if (HYPERVISOR_event_channel_op(&op) != 0) |
433 | panic("Failed to bind virtual IRQ %d\n", virq); | | 433 | panic("Failed to bind virtual IRQ %d\n", virq); |
434 | evtchn = op.u.bind_virq.port; | | 434 | evtchn = op.u.bind_virq.port; |
435 | } | | 435 | } |
436 | | | 436 | |
437 | /* Set event channel */ | | 437 | /* Set event channel */ |
438 | if (virq == VIRQ_TIMER) { | | 438 | if (virq == VIRQ_TIMER) { |
439 | virq_timer_to_evtch[ci->ci_cpuid] = evtchn; | | 439 | virq_timer_to_evtch[ci->ci_cpuid] = evtchn; |
440 | } else { | | 440 | } else { |
441 | virq_to_evtch[virq] = evtchn; | | 441 | virq_to_evtch[virq] = evtchn; |
442 | } | | 442 | } |
443 | | | 443 | |
444 | /* Increase ref counter */ | | 444 | /* Increase ref counter */ |
445 | evtch_bindcount[evtchn]++; | | 445 | evtch_bindcount[evtchn]++; |
446 | | | 446 | |
447 | mutex_spin_exit(&evtchn_lock); | | 447 | mutex_spin_exit(&evtchn_lock); |
448 | | | 448 | |
449 | return evtchn; | | 449 | return evtchn; |
450 | } | | 450 | } |
451 | | | 451 | |
452 | int | | 452 | int |
453 | unbind_virq_from_evtch(int virq) | | 453 | unbind_virq_from_evtch(int virq) |
454 | { | | 454 | { |
455 | evtchn_op_t op; | | 455 | evtchn_op_t op; |
456 | int evtchn; | | 456 | int evtchn; |
457 | | | 457 | |
458 | struct cpu_info *ci = curcpu(); | | 458 | struct cpu_info *ci = curcpu(); |
459 | | | 459 | |
460 | if (virq == VIRQ_TIMER) { | | 460 | if (virq == VIRQ_TIMER) { |
461 | evtchn = virq_timer_to_evtch[ci->ci_cpuid]; | | 461 | evtchn = virq_timer_to_evtch[ci->ci_cpuid]; |
462 | } | | 462 | } |
463 | else { | | 463 | else { |
464 | evtchn = virq_to_evtch[virq]; | | 464 | evtchn = virq_to_evtch[virq]; |
465 | } | | 465 | } |
466 | | | 466 | |
467 | if (evtchn == -1) { | | 467 | if (evtchn == -1) { |
468 | return -1; | | 468 | return -1; |
469 | } | | 469 | } |
470 | | | 470 | |
471 | mutex_spin_enter(&evtchn_lock); | | 471 | mutex_spin_enter(&evtchn_lock); |
472 | | | 472 | |
473 | evtch_bindcount[evtchn]--; | | 473 | evtch_bindcount[evtchn]--; |
474 | if (evtch_bindcount[evtchn] == 0) { | | 474 | if (evtch_bindcount[evtchn] == 0) { |
475 | op.cmd = EVTCHNOP_close; | | 475 | op.cmd = EVTCHNOP_close; |
476 | op.u.close.port = evtchn; | | 476 | op.u.close.port = evtchn; |
477 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 477 | if (HYPERVISOR_event_channel_op(&op) != 0) |
478 | panic("Failed to unbind virtual IRQ %d\n", virq); | | 478 | panic("Failed to unbind virtual IRQ %d\n", virq); |
479 | | | 479 | |
480 | if (virq == VIRQ_TIMER) { | | 480 | if (virq == VIRQ_TIMER) { |
481 | virq_timer_to_evtch[ci->ci_cpuid] = -1; | | 481 | virq_timer_to_evtch[ci->ci_cpuid] = -1; |
482 | } else { | | 482 | } else { |
483 | virq_to_evtch[virq] = -1; | | 483 | virq_to_evtch[virq] = -1; |
484 | } | | 484 | } |
485 | } | | 485 | } |
486 | | | 486 | |
487 | mutex_spin_exit(&evtchn_lock); | | 487 | mutex_spin_exit(&evtchn_lock); |
488 | | | 488 | |
489 | return evtchn; | | 489 | return evtchn; |
490 | } | | 490 | } |
491 | | | 491 | |
492 | #if NPCI > 0 || NISA > 0 | | 492 | #if NPCI > 0 || NISA > 0 |
493 | int | | 493 | int |
494 | bind_pirq_to_evtch(int pirq) | | 494 | bind_pirq_to_evtch(int pirq) |
495 | { | | 495 | { |
496 | evtchn_op_t op; | | 496 | evtchn_op_t op; |
497 | int evtchn; | | 497 | int evtchn; |
498 | | | 498 | |
499 | if (pirq >= NR_PIRQS) { | | 499 | if (pirq >= NR_PIRQS) { |
500 | panic("pirq %d out of bound, increase NR_PIRQS", pirq); | | 500 | panic("pirq %d out of bound, increase NR_PIRQS", pirq); |
501 | } | | 501 | } |
502 | | | 502 | |
503 | mutex_spin_enter(&evtchn_lock); | | 503 | mutex_spin_enter(&evtchn_lock); |
504 | | | 504 | |
505 | evtchn = pirq_to_evtch[pirq]; | | 505 | evtchn = pirq_to_evtch[pirq]; |
506 | if (evtchn == -1) { | | 506 | if (evtchn == -1) { |
507 | op.cmd = EVTCHNOP_bind_pirq; | | 507 | op.cmd = EVTCHNOP_bind_pirq; |
508 | op.u.bind_pirq.pirq = pirq; | | 508 | op.u.bind_pirq.pirq = pirq; |
509 | op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE; | | 509 | op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE; |
510 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 510 | if (HYPERVISOR_event_channel_op(&op) != 0) |
511 | panic("Failed to bind physical IRQ %d\n", pirq); | | 511 | panic("Failed to bind physical IRQ %d\n", pirq); |
512 | evtchn = op.u.bind_pirq.port; | | 512 | evtchn = op.u.bind_pirq.port; |
513 | | | 513 | |
514 | #ifdef IRQ_DEBUG | | 514 | #ifdef IRQ_DEBUG |
515 | printf("pirq %d evtchn %d\n", pirq, evtchn); | | 515 | printf("pirq %d evtchn %d\n", pirq, evtchn); |
516 | #endif | | 516 | #endif |
517 | pirq_to_evtch[pirq] = evtchn; | | 517 | pirq_to_evtch[pirq] = evtchn; |
518 | } | | 518 | } |
519 | | | 519 | |
520 | evtch_bindcount[evtchn]++; | | 520 | evtch_bindcount[evtchn]++; |
521 | | | 521 | |
522 | mutex_spin_exit(&evtchn_lock); | | 522 | mutex_spin_exit(&evtchn_lock); |
523 | | | 523 | |
524 | return evtchn; | | 524 | return evtchn; |
525 | } | | 525 | } |
526 | | | 526 | |
527 | int | | 527 | int |
528 | unbind_pirq_from_evtch(int pirq) | | 528 | unbind_pirq_from_evtch(int pirq) |
529 | { | | 529 | { |
530 | evtchn_op_t op; | | 530 | evtchn_op_t op; |
531 | int evtchn = pirq_to_evtch[pirq]; | | 531 | int evtchn = pirq_to_evtch[pirq]; |
532 | | | 532 | |
533 | mutex_spin_enter(&evtchn_lock); | | 533 | mutex_spin_enter(&evtchn_lock); |
534 | | | 534 | |
535 | evtch_bindcount[evtchn]--; | | 535 | evtch_bindcount[evtchn]--; |
536 | if (evtch_bindcount[evtchn] == 0) { | | 536 | if (evtch_bindcount[evtchn] == 0) { |
537 | op.cmd = EVTCHNOP_close; | | 537 | op.cmd = EVTCHNOP_close; |
538 | op.u.close.port = evtchn; | | 538 | op.u.close.port = evtchn; |
539 | if (HYPERVISOR_event_channel_op(&op) != 0) | | 539 | if (HYPERVISOR_event_channel_op(&op) != 0) |
540 | panic("Failed to unbind physical IRQ %d\n", pirq); | | 540 | panic("Failed to unbind physical IRQ %d\n", pirq); |
541 | | | 541 | |
542 | pirq_to_evtch[pirq] = -1; | | 542 | pirq_to_evtch[pirq] = -1; |
543 | } | | 543 | } |
544 | | | 544 | |
545 | mutex_spin_exit(&evtchn_lock); | | 545 | mutex_spin_exit(&evtchn_lock); |
546 | | | 546 | |
547 | return evtchn; | | 547 | return evtchn; |
548 | } | | 548 | } |
549 | | | 549 | |
550 | struct pintrhand * | | 550 | struct pintrhand * |
551 | pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level, | | 551 | pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level, |
552 | const char *evname) | | 552 | const char *evname) |
553 | { | | 553 | { |
554 | struct pintrhand *ih; | | 554 | struct pintrhand *ih; |
555 | physdev_op_t physdev_op; | | 555 | physdev_op_t physdev_op; |
556 | | | 556 | |
557 | ih = kmem_zalloc(sizeof(struct pintrhand), | | 557 | ih = kmem_zalloc(sizeof(struct pintrhand), |
558 | cold ? KM_NOSLEEP : KM_SLEEP); | | 558 | cold ? KM_NOSLEEP : KM_SLEEP); |
559 | if (ih == NULL) { | | 559 | if (ih == NULL) { |
560 | printf("pirq_establish: can't allocate handler info\n"); | | 560 | printf("pirq_establish: can't allocate handler info\n"); |
561 | return NULL; | | 561 | return NULL; |
562 | } | | 562 | } |
563 | | | 563 | |
564 | if (event_set_handler(evtch, pirq_interrupt, ih, level, evname) != 0) { | | 564 | if (event_set_handler(evtch, pirq_interrupt, ih, level, evname) != 0) { |
565 | kmem_free(ih, sizeof(struct pintrhand)); | | 565 | kmem_free(ih, sizeof(struct pintrhand)); |
566 | return NULL; | | 566 | return NULL; |
567 | } | | 567 | } |
568 | | | 568 | |
569 | ih->pirq = pirq; | | 569 | ih->pirq = pirq; |
570 | ih->evtch = evtch; | | 570 | ih->evtch = evtch; |
571 | ih->func = func; | | 571 | ih->func = func; |
572 | ih->arg = arg; | | 572 | ih->arg = arg; |
573 | | | 573 | |
574 | physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY; | | 574 | physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY; |
575 | physdev_op.u.irq_status_query.irq = pirq; | | 575 | physdev_op.u.irq_status_query.irq = pirq; |
576 | if (HYPERVISOR_physdev_op(&physdev_op) < 0) | | 576 | if (HYPERVISOR_physdev_op(&physdev_op) < 0) |
577 | panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); | | 577 | panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); |
578 | if (physdev_op.u.irq_status_query.flags & | | 578 | if (physdev_op.u.irq_status_query.flags & |
579 | PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) { | | 579 | PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) { |
580 | pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f)); | | 580 | pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f)); |
581 | #ifdef IRQ_DEBUG | | 581 | #ifdef IRQ_DEBUG |
582 | printf("pirq %d needs notify\n", pirq); | | 582 | printf("pirq %d needs notify\n", pirq); |
583 | #endif | | 583 | #endif |
584 | } | | 584 | } |
585 | hypervisor_enable_event(evtch); | | 585 | hypervisor_enable_event(evtch); |
586 | return ih; | | 586 | return ih; |
587 | } | | 587 | } |
588 | | | 588 | |
589 | int | | 589 | int |
590 | pirq_interrupt(void *arg) | | 590 | pirq_interrupt(void *arg) |
591 | { | | 591 | { |
592 | struct pintrhand *ih = arg; | | 592 | struct pintrhand *ih = arg; |
593 | int ret; | | 593 | int ret; |
594 | | | 594 | |
595 | | | 595 | |
596 | ret = ih->func(ih->arg); | | 596 | ret = ih->func(ih->arg); |
597 | #ifdef IRQ_DEBUG | | 597 | #ifdef IRQ_DEBUG |
598 | if (ih->evtch == IRQ_DEBUG) | | 598 | if (ih->evtch == IRQ_DEBUG) |
599 | printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret); | | 599 | printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret); |
600 | #endif | | 600 | #endif |
601 | return ret; | | 601 | return ret; |
602 | } | | 602 | } |
603 | | | 603 | |
604 | #endif /* NPCI > 0 || NISA > 0 */ | | 604 | #endif /* NPCI > 0 || NISA > 0 */ |
605 | | | 605 | |
606 | | | 606 | |
607 | /* | | 607 | /* |
608 | * Recalculate the interrupt from scratch for an event source. | | 608 | * Recalculate the interrupt from scratch for an event source. |
609 | */ | | 609 | */ |
610 | static void | | 610 | static void |
611 | intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci) | | 611 | intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci) |
612 | { | | 612 | { |
613 | struct intrhand *ih; | | 613 | struct intrhand *ih; |
614 | int cpu_receive = 0; | | 614 | int cpu_receive = 0; |
615 | | | 615 | |
616 | #ifdef MULTIPROCESSOR | | 616 | #ifdef MULTIPROCESSOR |
617 | KASSERT(!mutex_owned(&evtlock[evtch])); | | 617 | KASSERT(!mutex_owned(&evtlock[evtch])); |
618 | #endif | | 618 | #endif |
619 | mutex_spin_enter(&evtlock[evtch]); | | 619 | mutex_spin_enter(&evtlock[evtch]); |
620 | evts->ev_maxlevel = IPL_NONE; | | 620 | evts->ev_maxlevel = IPL_NONE; |
621 | evts->ev_imask = 0; | | 621 | evts->ev_imask = 0; |
622 | for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) { | | 622 | for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) { |
623 | if (ih->ih_level > evts->ev_maxlevel) | | 623 | if (ih->ih_level > evts->ev_maxlevel) |
624 | evts->ev_maxlevel = ih->ih_level; | | 624 | evts->ev_maxlevel = ih->ih_level; |
625 | evts->ev_imask |= (1 << ih->ih_level); | | 625 | evts->ev_imask |= (1 << ih->ih_level); |
626 | if (ih->ih_cpu == ci) | | 626 | if (ih->ih_cpu == ci) |
627 | cpu_receive = 1; | | 627 | cpu_receive = 1; |
628 | } | | 628 | } |
629 | if (cpu_receive) | | 629 | if (cpu_receive) |
630 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch); | | 630 | xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch); |
631 | else | | 631 | else |
632 | xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch); | | 632 | xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch); |
633 | mutex_spin_exit(&evtlock[evtch]); | | 633 | mutex_spin_exit(&evtlock[evtch]); |
634 | } | | 634 | } |
635 | | | 635 | |
636 | int | | 636 | int |
637 | event_set_handler(int evtch, int (*func)(void *), void *arg, int level, | | 637 | event_set_handler(int evtch, int (*func)(void *), void *arg, int level, |
638 | const char *evname) | | 638 | const char *evname) |
639 | { | | 639 | { |
640 | struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */ | | 640 | struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */ |
641 | struct evtsource *evts; | | 641 | struct evtsource *evts; |
642 | struct intrhand *ih, **ihp; | | 642 | struct intrhand *ih, **ihp; |
643 | int s; | | 643 | int s; |
644 | #ifdef MULTIPROCESSOR | | 644 | #ifdef MULTIPROCESSOR |
645 | bool mpsafe = (level != IPL_VM); | | 645 | bool mpsafe = (level != IPL_VM); |
646 | #endif /* MULTIPROCESSOR */ | | 646 | #endif /* MULTIPROCESSOR */ |
647 | | | 647 | |
648 | #ifdef IRQ_DEBUG | | 648 | #ifdef IRQ_DEBUG |
649 | printf("event_set_handler IRQ %d handler %p\n", evtch, func); | | 649 | printf("event_set_handler IRQ %d handler %p\n", evtch, func); |
650 | #endif | | 650 | #endif |
651 | | | 651 | |
652 | #ifdef DIAGNOSTIC | | 652 | #ifdef DIAGNOSTIC |
653 | if (evtch >= NR_EVENT_CHANNELS) { | | 653 | if (evtch >= NR_EVENT_CHANNELS) { |
654 | printf("evtch number %d > NR_EVENT_CHANNELS\n", evtch); | | 654 | printf("evtch number %d > NR_EVENT_CHANNELS\n", evtch); |
655 | panic("event_set_handler"); | | 655 | panic("event_set_handler"); |
656 | } | | 656 | } |
657 | #endif | | 657 | #endif |
658 | | | 658 | |
659 | #if 0 | | 659 | #if 0 |
660 | printf("event_set_handler evtch %d handler %p level %d\n", evtch, | | 660 | printf("event_set_handler evtch %d handler %p level %d\n", evtch, |
661 | func, level); | | 661 | func, level); |
662 | #endif | | 662 | #endif |
663 | ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP); | | 663 | ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP); |
664 | if (ih == NULL) | | 664 | if (ih == NULL) |
665 | panic("can't allocate fixed interrupt source"); | | 665 | panic("can't allocate fixed interrupt source"); |
666 | | | 666 | |
667 | | | 667 | |
668 | ih->ih_level = level; | | 668 | ih->ih_level = level; |
669 | ih->ih_fun = ih->ih_realfun = func; | | 669 | ih->ih_fun = ih->ih_realfun = func; |
670 | ih->ih_arg = ih->ih_realarg = arg; | | 670 | ih->ih_arg = ih->ih_realarg = arg; |
671 | ih->ih_evt_next = NULL; | | 671 | ih->ih_evt_next = NULL; |
672 | ih->ih_ipl_next = NULL; | | 672 | ih->ih_ipl_next = NULL; |
673 | ih->ih_cpu = ci; | | 673 | ih->ih_cpu = ci; |
674 | #ifdef MULTIPROCESSOR | | 674 | #ifdef MULTIPROCESSOR |
675 | if (!mpsafe) { | | 675 | if (!mpsafe) { |
676 | ih->ih_fun = intr_biglock_wrapper; | | 676 | ih->ih_fun = intr_biglock_wrapper; |
677 | ih->ih_arg = ih; | | 677 | ih->ih_arg = ih; |
678 | } | | 678 | } |
679 | #endif /* MULTIPROCESSOR */ | | 679 | #endif /* MULTIPROCESSOR */ |
680 | | | 680 | |
681 | s = splhigh(); | | 681 | s = splhigh(); |
682 | | | 682 | |
683 | /* register per-cpu handler for spllower() */ | | 683 | /* register per-cpu handler for spllower() */ |
684 | event_set_iplhandler(ci, ih, level); | | 684 | event_set_iplhandler(ci, ih, level); |
685 | | | 685 | |
686 | /* register handler for event channel */ | | 686 | /* register handler for event channel */ |
687 | if (evtsource[evtch] == NULL) { | | 687 | if (evtsource[evtch] == NULL) { |
688 | evts = kmem_zalloc(sizeof (struct evtsource), | | 688 | evts = kmem_zalloc(sizeof (struct evtsource), |
689 | KM_NOSLEEP); | | 689 | KM_NOSLEEP); |
690 | if (evts == NULL) | | 690 | if (evts == NULL) |
691 | panic("can't allocate fixed interrupt source"); | | 691 | panic("can't allocate fixed interrupt source"); |
692 | | | 692 | |
693 | evts->ev_handlers = ih; | | 693 | evts->ev_handlers = ih; |
694 | /* | | 694 | /* |
695 | * XXX: We're assuming here that ci is the same cpu as | | 695 | * XXX: We're assuming here that ci is the same cpu as |
696 | * the one to which this event/port is bound. The | | 696 | * the one to which this event/port is bound. The |
697 | * api needs to be reshuffled so that this assumption | | 697 | * api needs to be reshuffled so that this assumption |
698 | * is more explicitly implemented. | | 698 | * is more explicitly implemented. |
699 | */ | | 699 | */ |
700 | evts->ev_cpu = ci; | | 700 | evts->ev_cpu = ci; |
701 | mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH); | | 701 | mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH); |
702 | evtsource[evtch] = evts; | | 702 | evtsource[evtch] = evts; |
703 | if (evname) | | 703 | if (evname) |
704 | strncpy(evts->ev_evname, evname, | | 704 | strncpy(evts->ev_evname, evname, |
705 | sizeof(evts->ev_evname)); | | 705 | sizeof(evts->ev_evname)); |
706 | else | | 706 | else |
707 | snprintf(evts->ev_evname, sizeof(evts->ev_evname), | | 707 | snprintf(evts->ev_evname, sizeof(evts->ev_evname), |
708 | "evt%d", evtch); | | 708 | "evt%d", evtch); |
709 | evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL, | | 709 | evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL, |
710 | device_xname(ci->ci_dev), evts->ev_evname); | | 710 | device_xname(ci->ci_dev), evts->ev_evname); |
711 | } else { | | 711 | } else { |
712 | evts = evtsource[evtch]; | | 712 | evts = evtsource[evtch]; |
713 | /* sort by IPL order, higher first */ | | 713 | /* sort by IPL order, higher first */ |
714 | mutex_spin_enter(&evtlock[evtch]); | | 714 | mutex_spin_enter(&evtlock[evtch]); |
715 | for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) { | | 715 | for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) { |
716 | if ((*ihp)->ih_level < ih->ih_level) { | | 716 | if ((*ihp)->ih_level < ih->ih_level) { |
717 | /* insert before *ihp */ | | 717 | /* insert before *ihp */ |
718 | ih->ih_evt_next = *ihp; | | 718 | ih->ih_evt_next = *ihp; |
719 | *ihp = ih; | | 719 | *ihp = ih; |
720 | break; | | 720 | break; |
721 | } | | 721 | } |
722 | if ((*ihp)->ih_evt_next == NULL) { | | 722 | if ((*ihp)->ih_evt_next == NULL) { |
723 | (*ihp)->ih_evt_next = ih; | | 723 | (*ihp)->ih_evt_next = ih; |
724 | break; | | 724 | break; |
725 | } | | 725 | } |
726 | } | | 726 | } |
727 | mutex_spin_exit(&evtlock[evtch]); | | 727 | mutex_spin_exit(&evtlock[evtch]); |
728 | } | | 728 | } |
729 | | | 729 | |
730 | intr_calculatemasks(evts, evtch, ci); | | 730 | intr_calculatemasks(evts, evtch, ci); |
731 | splx(s); | | 731 | splx(s); |
732 | | | 732 | |
733 | return 0; | | 733 | return 0; |
734 | } | | 734 | } |
735 | | | 735 | |
736 | void | | 736 | void |
737 | event_set_iplhandler(struct cpu_info *ci, | | 737 | event_set_iplhandler(struct cpu_info *ci, |
738 | struct intrhand *ih, | | 738 | struct intrhand *ih, |
739 | int level) | | 739 | int level) |
740 | { | | 740 | { |
741 | struct iplsource *ipls; | | 741 | struct iplsource *ipls; |
742 | | | 742 | |
743 | KASSERT(ci == ih->ih_cpu); | | 743 | KASSERT(ci == ih->ih_cpu); |
744 | if (ci->ci_isources[level] == NULL) { | | 744 | if (ci->ci_isources[level] == NULL) { |
745 | ipls = kmem_zalloc(sizeof (struct iplsource), | | 745 | ipls = kmem_zalloc(sizeof (struct iplsource), |
746 | KM_NOSLEEP); | | 746 | KM_NOSLEEP); |
747 | if (ipls == NULL) | | 747 | if (ipls == NULL) |
748 | panic("can't allocate fixed interrupt source"); | | 748 | panic("can't allocate fixed interrupt source"); |
749 | ipls->ipl_recurse = xenev_stubs[level].ist_recurse; | | 749 | ipls->ipl_recurse = xenev_stubs[level].ist_recurse; |
750 | ipls->ipl_resume = xenev_stubs[level].ist_resume; | | 750 | ipls->ipl_resume = xenev_stubs[level].ist_resume; |
751 | ipls->ipl_handlers = ih; | | 751 | ipls->ipl_handlers = ih; |
752 | ci->ci_isources[level] = ipls; | | 752 | ci->ci_isources[level] = ipls; |
753 | } else { | | 753 | } else { |
754 | ipls = ci->ci_isources[level]; | | 754 | ipls = ci->ci_isources[level]; |
755 | ih->ih_ipl_next = ipls->ipl_handlers; | | 755 | ih->ih_ipl_next = ipls->ipl_handlers; |
756 | ipls->ipl_handlers = ih; | | 756 | ipls->ipl_handlers = ih; |
757 | } | | 757 | } |
758 | } | | 758 | } |
759 | | | 759 | |
760 | int | | 760 | int |
761 | event_remove_handler(int evtch, int (*func)(void *), void *arg) | | 761 | event_remove_handler(int evtch, int (*func)(void *), void *arg) |
762 | { | | 762 | { |
763 | struct iplsource *ipls; | | 763 | struct iplsource *ipls; |
764 | struct evtsource *evts; | | 764 | struct evtsource *evts; |
765 | struct intrhand *ih; | | 765 | struct intrhand *ih; |
766 | struct intrhand **ihp; | | 766 | struct intrhand **ihp; |
767 | struct cpu_info *ci; | | 767 | struct cpu_info *ci; |
768 | | | 768 | |
769 | evts = evtsource[evtch]; | | 769 | evts = evtsource[evtch]; |
770 | if (evts == NULL) | | 770 | if (evts == NULL) |
771 | return ENOENT; | | 771 | return ENOENT; |
772 | | | 772 | |
773 | mutex_spin_enter(&evtlock[evtch]); | | 773 | mutex_spin_enter(&evtlock[evtch]); |
774 | for (ihp = &evts->ev_handlers, ih = evts->ev_handlers; | | 774 | for (ihp = &evts->ev_handlers, ih = evts->ev_handlers; |
775 | ih != NULL; | | 775 | ih != NULL; |
776 | ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) { | | 776 | ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) { |
777 | if (ih->ih_fun == func && ih->ih_arg == arg) | | 777 | if (ih->ih_fun == func && ih->ih_arg == arg) |
778 | break; | | 778 | break; |
779 | } | | 779 | } |
780 | if (ih == NULL) { | | 780 | if (ih == NULL) { |
781 | mutex_spin_exit(&evtlock[evtch]); | | 781 | mutex_spin_exit(&evtlock[evtch]); |
782 | return ENOENT; | | 782 | return ENOENT; |
783 | } | | 783 | } |
784 | ci = ih->ih_cpu; | | 784 | ci = ih->ih_cpu; |
785 | *ihp = ih->ih_evt_next; | | 785 | *ihp = ih->ih_evt_next; |
786 | mutex_spin_exit(&evtlock[evtch]); | | 786 | mutex_spin_exit(&evtlock[evtch]); |
787 | | | 787 | |
788 | ipls = ci->ci_isources[ih->ih_level]; | | 788 | ipls = ci->ci_isources[ih->ih_level]; |
789 | for (ihp = &ipls->ipl_handlers, ih = ipls->ipl_handlers; | | 789 | for (ihp = &ipls->ipl_handlers, ih = ipls->ipl_handlers; |
790 | ih != NULL; | | 790 | ih != NULL; |
791 | ihp = &ih->ih_ipl_next, ih = ih->ih_ipl_next) { | | 791 | ihp = &ih->ih_ipl_next, ih = ih->ih_ipl_next) { |
792 | if (ih->ih_fun == func && ih->ih_arg == arg) | | 792 | if (ih->ih_fun == func && ih->ih_arg == arg) |
793 | break; | | 793 | break; |
794 | } | | 794 | } |
795 | if (ih == NULL) | | 795 | if (ih == NULL) |
796 | panic("event_remove_handler"); | | 796 | panic("event_remove_handler"); |
797 | *ihp = ih->ih_ipl_next; | | 797 | *ihp = ih->ih_ipl_next; |
798 | kmem_free(ih, sizeof (struct iplsource)); | | 798 | kmem_free(ih, sizeof (struct intrhand)); |
799 | if (evts->ev_handlers == NULL) { | | 799 | if (evts->ev_handlers == NULL) { |
800 | xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch); | | 800 | xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch); |
801 | evcnt_detach(&evts->ev_evcnt); | | 801 | evcnt_detach(&evts->ev_evcnt); |
802 | kmem_free(evts, sizeof (struct evtsource)); | | 802 | kmem_free(evts, sizeof (struct evtsource)); |
803 | evtsource[evtch] = NULL; | | 803 | evtsource[evtch] = NULL; |
804 | } else { | | 804 | } else { |
805 | intr_calculatemasks(evts, evtch, ci); | | 805 | intr_calculatemasks(evts, evtch, ci); |
806 | } | | 806 | } |
807 | return 0; | | 807 | return 0; |
808 | } | | 808 | } |
809 | | | 809 | |
810 | void | | 810 | void |
811 | hypervisor_enable_event(unsigned int evtch) | | 811 | hypervisor_enable_event(unsigned int evtch) |
812 | { | | 812 | { |
813 | #ifdef IRQ_DEBUG | | 813 | #ifdef IRQ_DEBUG |
814 | if (evtch == IRQ_DEBUG) | | 814 | if (evtch == IRQ_DEBUG) |
815 | printf("hypervisor_enable_evtch: evtch %d\n", evtch); | | 815 | printf("hypervisor_enable_evtch: evtch %d\n", evtch); |
816 | #endif | | 816 | #endif |
817 | | | 817 | |
818 | hypervisor_unmask_event(evtch); | | 818 | hypervisor_unmask_event(evtch); |
819 | #if NPCI > 0 || NISA > 0 | | 819 | #if NPCI > 0 || NISA > 0 |
820 | if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) { | | 820 | if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) { |
821 | #ifdef IRQ_DEBUG | | 821 | #ifdef IRQ_DEBUG |
822 | if (evtch == IRQ_DEBUG) | | 822 | if (evtch == IRQ_DEBUG) |
823 | printf("pirq_notify(%d)\n", evtch); | | 823 | printf("pirq_notify(%d)\n", evtch); |
824 | #endif | | 824 | #endif |
825 | (void)HYPERVISOR_physdev_op(&physdev_op_notify); | | 825 | (void)HYPERVISOR_physdev_op(&physdev_op_notify); |
826 | } | | 826 | } |
827 | #endif /* NPCI > 0 || NISA > 0 */ | | 827 | #endif /* NPCI > 0 || NISA > 0 */ |
828 | } | | 828 | } |
829 | | | 829 | |
830 | int | | 830 | int |
831 | xen_debug_handler(void *arg) | | 831 | xen_debug_handler(void *arg) |
832 | { | | 832 | { |
833 | struct cpu_info *ci = curcpu(); | | 833 | struct cpu_info *ci = curcpu(); |
834 | int i; | | 834 | int i; |
835 | int xci_ilevel = ci->ci_ilevel; | | 835 | int xci_ilevel = ci->ci_ilevel; |
836 | int xci_ipending = ci->ci_ipending; | | 836 | int xci_ipending = ci->ci_ipending; |
837 | int xci_idepth = ci->ci_idepth; | | 837 | int xci_idepth = ci->ci_idepth; |
838 | u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; | | 838 | u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; |
839 | u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; | | 839 | u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; |
840 | u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel; | | 840 | u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel; |
841 | unsigned long evtchn_mask[sizeof(unsigned long) * 8]; | | 841 | unsigned long evtchn_mask[sizeof(unsigned long) * 8]; |
842 | unsigned long evtchn_pending[sizeof(unsigned long) * 8]; | | 842 | unsigned long evtchn_pending[sizeof(unsigned long) * 8]; |
843 | | | 843 | |
844 | u_long p; | | 844 | u_long p; |
845 | | | 845 | |
846 | p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0]; | | 846 | p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0]; |
847 | memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask)); | | 847 | memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask)); |
848 | p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0]; | | 848 | p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0]; |
849 | memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending)); | | 849 | memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending)); |
850 | | | 850 | |
851 | __insn_barrier(); | | 851 | __insn_barrier(); |
852 | printf("debug event\n"); | | 852 | printf("debug event\n"); |
853 | printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", | | 853 | printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", |
854 | xci_ilevel, xci_ipending, xci_idepth); | | 854 | xci_ilevel, xci_ipending, xci_idepth); |
855 | printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" | | 855 | printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" |
856 | " evtchn_pending_sel 0x%lx\n", | | 856 | " evtchn_pending_sel 0x%lx\n", |
857 | upcall_pending, upcall_mask, pending_sel); | | 857 | upcall_pending, upcall_mask, pending_sel); |
858 | printf("evtchn_mask"); | | 858 | printf("evtchn_mask"); |
859 | for (i = 0 ; i <= LONG_MASK; i++) | | 859 | for (i = 0 ; i <= LONG_MASK; i++) |
860 | printf(" %lx", (u_long)evtchn_mask[i]); | | 860 | printf(" %lx", (u_long)evtchn_mask[i]); |
861 | printf("\n"); | | 861 | printf("\n"); |
862 | printf("evtchn_pending"); | | 862 | printf("evtchn_pending"); |
863 | for (i = 0 ; i <= LONG_MASK; i++) | | 863 | for (i = 0 ; i <= LONG_MASK; i++) |
864 | printf(" %lx", (u_long)evtchn_pending[i]); | | 864 | printf(" %lx", (u_long)evtchn_pending[i]); |
865 | printf("\n"); | | 865 | printf("\n"); |
866 | return 0; | | 866 | return 0; |
867 | } | | 867 | } |
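
The one substantive change in this revision is at line 798: event_remove_handler() now frees the handler with sizeof(struct intrhand), matching the kmem_zalloc(sizeof (struct intrhand), ...) in event_set_handler(). kmem(9) requires that the size passed to kmem_free() equal the size used at allocation time, so freeing with sizeof(struct iplsource) was only safe by accident, if the two structures happened to share a size; otherwise it can trip a DIAGNOSTIC assertion or corrupt allocator state. A minimal sketch of the invariant (illustrative only, not part of the diff):

	#include <sys/kmem.h>

	/* kmem(9): the free size must equal the allocation size. */
	struct intrhand *ih = kmem_zalloc(sizeof(struct intrhand), KM_NOSLEEP);
	if (ih != NULL) {
		/* ... link into evtsource[evtch]->ev_handlers, use it ... */
		kmem_free(ih, sizeof(struct intrhand));	/* same size as above */
	}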