Mon Apr 20 19:46:44 2020 UTC ()
Misc fixes after merge


(bouyer)
diff -r1.28.2.2 -r1.28.2.3 src/sys/arch/xen/include/evtchn.h
diff -r1.35.6.5 -r1.35.6.6 src/sys/arch/xen/x86/xen_ipi.c
diff -r1.88.2.10 -r1.88.2.11 src/sys/arch/xen/xen/evtchn.c

cvs diff -r1.28.2.2 -r1.28.2.3 src/sys/arch/xen/include/evtchn.h

--- src/sys/arch/xen/include/evtchn.h 2020/04/20 11:29:00 1.28.2.2
+++ src/sys/arch/xen/include/evtchn.h 2020/04/20 19:46:44 1.28.2.3
@@ -1,77 +1,77 @@
1/* $NetBSD: evtchn.h,v 1.28.2.2 2020/04/20 11:29:00 bouyer Exp $ */ 1/* $NetBSD: evtchn.h,v 1.28.2.3 2020/04/20 19:46:44 bouyer Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 2004 Christian Limpach. 5 * Copyright (c) 2004 Christian Limpach.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#ifndef _XEN_EVENTS_H_ 29#ifndef _XEN_EVENTS_H_
30#define _XEN_EVENTS_H_ 30#define _XEN_EVENTS_H_
31 31
32#define NR_PIRQS 256 32#define NR_PIRQS 256
33 33
34extern struct evtsource *evtsource[]; 34extern struct evtsource *evtsource[];
35 35
36void events_default_setup(void); 36void events_default_setup(void);
37void events_init(void); 37void events_init(void);
38bool events_suspend(void); 38bool events_suspend(void);
39bool events_resume(void); 39bool events_resume(void);
40 40
41unsigned int evtchn_do_event(int, struct intrframe *); 41unsigned int evtchn_do_event(int, struct intrframe *);
42void call_evtchn_do_event(int, struct intrframe *); 42void call_evtchn_do_event(int, struct intrframe *);
43void call_xenevt_event(int); 43void call_xenevt_event(int);
44int event_set_handler(int, int (*func)(void *), void *, int, const char *, 44struct intrhand *event_set_handler(int, int (*func)(void *), void *,
45 const char *, bool, bool); 45 int, const char *, const char *, bool, bool);
46int event_remove_handler(int, int (*func)(void *), void *); 46int event_remove_handler(int, int (*func)(void *), void *);
47 47
48struct cpu_info; 48struct cpu_info;
49struct intrhand; 49struct intrhand;
50void event_set_iplhandler(struct cpu_info *, struct intrhand *, int); 50void event_set_iplhandler(struct cpu_info *, struct intrhand *, int);
51 51
52extern int debug_port; 52extern int debug_port;
53extern int xen_debug_handler(void *); 53extern int xen_debug_handler(void *);
54 54
55int bind_virq_to_evtch(int); 55int bind_virq_to_evtch(int);
56int bind_pirq_to_evtch(int); 56int bind_pirq_to_evtch(int);
57int get_pirq_to_evtch(int); 57int get_pirq_to_evtch(int);
58int unbind_pirq_from_evtch(int); 58int unbind_pirq_from_evtch(int);
59int unbind_virq_from_evtch(int); 59int unbind_virq_from_evtch(int);
60 60
61evtchn_port_t bind_vcpu_to_evtch(cpuid_t); 61evtchn_port_t bind_vcpu_to_evtch(cpuid_t);
62 62
63struct pintrhand { 63struct pintrhand {
64 /* See comments in x86/include/intr.h:struct intrhand {} */ 64 /* See comments in x86/include/intr.h:struct intrhand {} */
65 struct pic *pic; 65 struct pic *pic;
66 struct intrhand *ih; 66 struct intrhand *ih;
67 int pirq; 67 int pirq;
68 int evtch; 68 int evtch;
69 int (*func)(void *); 69 int (*func)(void *);
70 void *arg; 70 void *arg;
71}; 71};
72 72
73struct pintrhand *pirq_establish(int, int, int (*)(void *), void *, int, 73struct pintrhand *pirq_establish(int, int, int (*)(void *), void *, int,
74 const char *, const char *, bool); 74 const char *, const char *, bool);
75void pirq_disestablish(struct pintrhand *); 75void pirq_disestablish(struct pintrhand *);
76 76
77#endif /* _XEN_EVENTS_H_ */ 77#endif /* _XEN_EVENTS_H_ */
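The visible interface change in this header is the return type of event_set_handler(): it now returns the established struct intrhand * (NULL on failure) instead of an errno-style int, so callers switch from "!= 0" checks to NULL checks. A minimal caller-side sketch under that assumption, modeled on the updated xen_ipi.c call further down (handler, argument and IPL values are taken from that call; the panic message is illustrative):

	struct intrhand *ih;

	ih = event_set_handler(evtchn, xen_ipi_handler, ci, IPL_HIGH,
	    NULL, intr_xname, true, false);
	if (ih == NULL) {
		/* Previously signalled by a non-zero int return. */
		panic("unable to register event handler");
	}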

cvs diff -r1.35.6.5 -r1.35.6.6 src/sys/arch/xen/x86/xen_ipi.c

--- src/sys/arch/xen/x86/xen_ipi.c 2020/04/20 11:29:01 1.35.6.5
+++ src/sys/arch/xen/x86/xen_ipi.c 2020/04/20 19:46:44 1.35.6.6
@@ -1,354 +1,354 @@
1/* $NetBSD: xen_ipi.c,v 1.35.6.5 2020/04/20 11:29:01 bouyer Exp $ */ 1/* $NetBSD: xen_ipi.c,v 1.35.6.6 2020/04/20 19:46:44 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Cherry G. Mathew <cherry@zyx.in> 8 * by Cherry G. Mathew <cherry@zyx.in>
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> /* RCS ID macro */ 32#include <sys/cdefs.h> /* RCS ID macro */
33 33
34/*  34/*
35 * Based on: x86/ipi.c 35 * Based on: x86/ipi.c
36 */ 36 */
37 37
38__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.5 2020/04/20 11:29:01 bouyer Exp $"); 38__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.6 2020/04/20 19:46:44 bouyer Exp $");
39 39
40#include "opt_ddb.h" 40#include "opt_ddb.h"
41 41
42#include <sys/types.h> 42#include <sys/types.h>
43 43
44#include <sys/atomic.h> 44#include <sys/atomic.h>
45#include <sys/cpu.h> 45#include <sys/cpu.h>
46#include <sys/mutex.h> 46#include <sys/mutex.h>
47#include <sys/device.h> 47#include <sys/device.h>
48#include <sys/xcall.h> 48#include <sys/xcall.h>
49#include <sys/ipi.h> 49#include <sys/ipi.h>
50#include <sys/errno.h> 50#include <sys/errno.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52 52
53#include <x86/fpu.h> 53#include <x86/fpu.h>
54#include <machine/frame.h> 54#include <machine/frame.h>
55#include <machine/segments.h> 55#include <machine/segments.h>
56 56
57#include <xen/evtchn.h> 57#include <xen/evtchn.h>
58#include <xen/intr.h> 58#include <xen/intr.h>
59#include <xen/intrdefs.h> 59#include <xen/intrdefs.h>
60#include <xen/hypervisor.h> 60#include <xen/hypervisor.h>
61#include <xen/include/public/vcpu.h> 61#include <xen/include/public/vcpu.h>
62 62
63#ifdef DDB 63#ifdef DDB
64extern void ddb_ipi(struct trapframe); 64extern void ddb_ipi(struct trapframe);
65static void xen_ipi_ddb(struct cpu_info *, struct intrframe *); 65static void xen_ipi_ddb(struct cpu_info *, struct intrframe *);
66#endif 66#endif
67 67
68static void xen_ipi_halt(struct cpu_info *, struct intrframe *); 68static void xen_ipi_halt(struct cpu_info *, struct intrframe *);
69static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *); 69static void xen_ipi_synch_fpu(struct cpu_info *, struct intrframe *);
70static void xen_ipi_xcall(struct cpu_info *, struct intrframe *); 70static void xen_ipi_xcall(struct cpu_info *, struct intrframe *);
71static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *); 71static void xen_ipi_hvcb(struct cpu_info *, struct intrframe *);
72static void xen_ipi_generic(struct cpu_info *, struct intrframe *); 72static void xen_ipi_generic(struct cpu_info *, struct intrframe *);
73static void xen_ipi_ast(struct cpu_info *, struct intrframe *); 73static void xen_ipi_ast(struct cpu_info *, struct intrframe *);
74static void xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe *); 74static void xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe *);
75 75
76static void (*xen_ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) = 76static void (*xen_ipifunc[XEN_NIPIS])(struct cpu_info *, struct intrframe *) =
77{ /* In order of priority (see: xen/include/intrdefs.h */ 77{ /* In order of priority (see: xen/include/intrdefs.h */
78 xen_ipi_halt, 78 xen_ipi_halt,
79 xen_ipi_synch_fpu, 79 xen_ipi_synch_fpu,
80#ifdef DDB 80#ifdef DDB
81 xen_ipi_ddb, 81 xen_ipi_ddb,
82#else 82#else
83 NULL, 83 NULL,
84#endif 84#endif
85 xen_ipi_xcall, 85 xen_ipi_xcall,
86 xen_ipi_hvcb, 86 xen_ipi_hvcb,
87 xen_ipi_generic, 87 xen_ipi_generic,
88 xen_ipi_ast, 88 xen_ipi_ast,
89 xen_ipi_kpreempt 89 xen_ipi_kpreempt
90}; 90};
91 91
92static int 92static int
93xen_ipi_handler(void *arg) 93xen_ipi_handler(void *arg)
94{ 94{
95 uint32_t pending; 95 uint32_t pending;
96 int bit; 96 int bit;
97 struct cpu_info *ci; 97 struct cpu_info *ci;
98 struct intrframe *regs; 98 struct intrframe *regs;
99 99
100 ci = curcpu(); 100 ci = curcpu();
101 regs = arg; 101 regs = arg;
102 102
103 KASSERT(ci == arg); 103 KASSERT(ci == arg);
104  104
105 pending = atomic_swap_32(&ci->ci_ipis, 0); 105 pending = atomic_swap_32(&ci->ci_ipis, 0);
106 106
107 KDASSERT((pending >> XEN_NIPIS) == 0); 107 KDASSERT((pending >> XEN_NIPIS) == 0);
108 while ((bit = ffs(pending)) != 0) { 108 while ((bit = ffs(pending)) != 0) {
109 bit--; 109 bit--;
110 pending &= ~(1 << bit); 110 pending &= ~(1 << bit);
111 ci->ci_ipi_events[bit].ev_count++; 111 ci->ci_ipi_events[bit].ev_count++;
112 if (xen_ipifunc[bit] != NULL) { 112 if (xen_ipifunc[bit] != NULL) {
113 (*xen_ipifunc[bit])(ci, regs); 113 (*xen_ipifunc[bit])(ci, regs);
114 } else { 114 } else {
115 panic("xen_ipifunc[%d] unsupported!\n", bit); 115 panic("xen_ipifunc[%d] unsupported!\n", bit);
116 /* NOTREACHED */ 116 /* NOTREACHED */
117 } 117 }
118 } 118 }
119 119
120 return 0; 120 return 0;
121} 121}
122 122
123/* Must be called once for every cpu that expects to send/recv ipis */ 123/* Must be called once for every cpu that expects to send/recv ipis */
124void 124void
125xen_ipi_init(void) 125xen_ipi_init(void)
126{ 126{
127 cpuid_t vcpu; 127 cpuid_t vcpu;
128 evtchn_port_t evtchn; 128 evtchn_port_t evtchn;
129 struct cpu_info *ci; 129 struct cpu_info *ci;
130 char intr_xname[INTRDEVNAMEBUF]; 130 char intr_xname[INTRDEVNAMEBUF];
131 131
132 ci = curcpu(); 132 ci = curcpu();
133 133
134 vcpu = ci->ci_vcpuid; 134 vcpu = ci->ci_vcpuid;
135 KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS); 135 KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);
136 136
137 evtchn = bind_vcpu_to_evtch(vcpu); 137 evtchn = bind_vcpu_to_evtch(vcpu);
138 ci->ci_ipi_evtchn = evtchn; 138 ci->ci_ipi_evtchn = evtchn;
139 139
140 KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS); 140 KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);
141 141
142 snprintf(intr_xname, sizeof(intr_xname), "%s ipi", 142 snprintf(intr_xname, sizeof(intr_xname), "%s ipi",
143 device_xname(ci->ci_dev)); 143 device_xname(ci->ci_dev));
144 144
145 if (event_set_handler(evtchn, xen_ipi_handler, ci, IPL_HIGH, NULL, 145 if (event_set_handler(evtchn, xen_ipi_handler, ci, IPL_HIGH, NULL,
146 intr_xname, true, false) != 0) { 146 intr_xname, true, false) == NULL) {
147 panic("%s: unable to register ipi handler\n", __func__); 147 panic("%s: unable to register ipi handler\n", __func__);
148 /* NOTREACHED */ 148 /* NOTREACHED */
149 } 149 }
150 150
151 hypervisor_unmask_event(evtchn); 151 hypervisor_unmask_event(evtchn);
152} 152}
153 153
154#ifdef DIAGNOSTIC 154#ifdef DIAGNOSTIC
155static inline bool /* helper */ 155static inline bool /* helper */
156valid_ipimask(uint32_t ipimask) 156valid_ipimask(uint32_t ipimask)
157{ 157{
158 uint32_t masks = XEN_IPI_GENERIC | XEN_IPI_HVCB | XEN_IPI_XCALL | 158 uint32_t masks = XEN_IPI_GENERIC | XEN_IPI_HVCB | XEN_IPI_XCALL |
159 XEN_IPI_DDB | XEN_IPI_SYNCH_FPU | 159 XEN_IPI_DDB | XEN_IPI_SYNCH_FPU |
160 XEN_IPI_HALT | XEN_IPI_KICK | XEN_IPI_AST | XEN_IPI_KPREEMPT; 160 XEN_IPI_HALT | XEN_IPI_KICK | XEN_IPI_AST | XEN_IPI_KPREEMPT;
161 161
162 if (ipimask & ~masks) { 162 if (ipimask & ~masks) {
163 return false; 163 return false;
164 } else { 164 } else {
165 return true; 165 return true;
166 } 166 }
167 167
168} 168}
169#endif 169#endif
170 170
171int 171int
172xen_send_ipi(struct cpu_info *ci, uint32_t ipimask) 172xen_send_ipi(struct cpu_info *ci, uint32_t ipimask)
173{ 173{
174 evtchn_port_t evtchn; 174 evtchn_port_t evtchn;
175 175
176 KASSERT(ci != NULL && ci != curcpu()); 176 KASSERT(ci != NULL && ci != curcpu());
177 177
178 if ((ci->ci_flags & CPUF_RUNNING) == 0) { 178 if ((ci->ci_flags & CPUF_RUNNING) == 0) {
179 return ENOENT; 179 return ENOENT;
180 } 180 }
181 181
182 evtchn = ci->ci_ipi_evtchn; 182 evtchn = ci->ci_ipi_evtchn;
183 183
184 KASSERTMSG(valid_ipimask(ipimask) == true,  184 KASSERTMSG(valid_ipimask(ipimask) == true,
185 "xen_send_ipi() called with invalid ipimask\n"); 185 "xen_send_ipi() called with invalid ipimask\n");
186 186
187 atomic_or_32(&ci->ci_ipis, ipimask); 187 atomic_or_32(&ci->ci_ipis, ipimask);
188 hypervisor_notify_via_evtchn(evtchn); 188 hypervisor_notify_via_evtchn(evtchn);
189 189
190 return 0; 190 return 0;
191} 191}
192 192
193void 193void
194xen_broadcast_ipi(uint32_t ipimask) 194xen_broadcast_ipi(uint32_t ipimask)
195{ 195{
196 struct cpu_info *ci, *self = curcpu(); 196 struct cpu_info *ci, *self = curcpu();
197 CPU_INFO_ITERATOR cii; 197 CPU_INFO_ITERATOR cii;
198 198
199 KASSERTMSG(valid_ipimask(ipimask) == true,  199 KASSERTMSG(valid_ipimask(ipimask) == true,
200 "xen_broadcast_ipi() called with invalid ipimask\n"); 200 "xen_broadcast_ipi() called with invalid ipimask\n");
201 201
202 /*  202 /*
203 * XXX-cherry: there's an implicit broadcast sending order 203 * XXX-cherry: there's an implicit broadcast sending order
204 * which I dislike. Randomise this ? :-) 204 * which I dislike. Randomise this ? :-)
205 */ 205 */
206 206
207 for (CPU_INFO_FOREACH(cii, ci)) { 207 for (CPU_INFO_FOREACH(cii, ci)) {
208 if (ci == NULL) 208 if (ci == NULL)
209 continue; 209 continue;
210 if (ci == self) 210 if (ci == self)
211 continue; 211 continue;
212 if (ci->ci_data.cpu_idlelwp == NULL) 212 if (ci->ci_data.cpu_idlelwp == NULL)
213 continue; 213 continue;
214 if ((ci->ci_flags & CPUF_PRESENT) == 0) 214 if ((ci->ci_flags & CPUF_PRESENT) == 0)
215 continue; 215 continue;
216 if (ci->ci_flags & (CPUF_RUNNING)) { 216 if (ci->ci_flags & (CPUF_RUNNING)) {
217 if (0 != xen_send_ipi(ci, ipimask)) { 217 if (0 != xen_send_ipi(ci, ipimask)) {
218 panic("xen_ipi of %x from %s to %s failed\n", 218 panic("xen_ipi of %x from %s to %s failed\n",
219 ipimask, cpu_name(curcpu()), 219 ipimask, cpu_name(curcpu()),
220 cpu_name(ci)); 220 cpu_name(ci));
221 } 221 }
222 } 222 }
223 } 223 }
224} 224}
225 225
226/* MD wrapper for the xcall(9) callback. */ 226/* MD wrapper for the xcall(9) callback. */
227 227
228static void 228static void
229xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf) 229xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
230{ 230{
231 KASSERT(ci == curcpu()); 231 KASSERT(ci == curcpu());
232 KASSERT(ci != NULL); 232 KASSERT(ci != NULL);
233 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL)) { 233 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL)) {
234 panic("%s shutdown failed.\n", device_xname(ci->ci_dev)); 234 panic("%s shutdown failed.\n", device_xname(ci->ci_dev));
235 } 235 }
236 236
237} 237}
238 238
239static void 239static void
240xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf) 240xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
241{ 241{
242 KASSERT(ci != NULL); 242 KASSERT(ci != NULL);
243 KASSERT(intrf != NULL); 243 KASSERT(intrf != NULL);
244 244
245 panic("%s: impossible", __func__); 245 panic("%s: impossible", __func__);
246} 246}
247 247
248#ifdef DDB 248#ifdef DDB
249static void 249static void
250xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf) 250xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
251{ 251{
252 KASSERT(ci != NULL); 252 KASSERT(ci != NULL);
253 KASSERT(intrf != NULL); 253 KASSERT(intrf != NULL);
254 254
255#ifdef __x86_64__ 255#ifdef __x86_64__
256 ddb_ipi(intrf->if_tf); 256 ddb_ipi(intrf->if_tf);
257#else 257#else
258 struct trapframe tf; 258 struct trapframe tf;
259 tf.tf_gs = intrf->if_gs; 259 tf.tf_gs = intrf->if_gs;
260 tf.tf_fs = intrf->if_fs; 260 tf.tf_fs = intrf->if_fs;
261 tf.tf_es = intrf->if_es; 261 tf.tf_es = intrf->if_es;
262 tf.tf_ds = intrf->if_ds; 262 tf.tf_ds = intrf->if_ds;
263 tf.tf_edi = intrf->if_edi; 263 tf.tf_edi = intrf->if_edi;
264 tf.tf_esi = intrf->if_esi; 264 tf.tf_esi = intrf->if_esi;
265 tf.tf_ebp = intrf->if_ebp; 265 tf.tf_ebp = intrf->if_ebp;
266 tf.tf_ebx = intrf->if_ebx; 266 tf.tf_ebx = intrf->if_ebx;
267 tf.tf_ecx = intrf->if_ecx; 267 tf.tf_ecx = intrf->if_ecx;
268 tf.tf_eax = intrf->if_eax; 268 tf.tf_eax = intrf->if_eax;
269 tf.tf_trapno = intrf->__if_trapno; 269 tf.tf_trapno = intrf->__if_trapno;
270 tf.tf_err = intrf->__if_err; 270 tf.tf_err = intrf->__if_err;
271 tf.tf_eip = intrf->if_eip; 271 tf.tf_eip = intrf->if_eip;
272 tf.tf_cs = intrf->if_cs; 272 tf.tf_cs = intrf->if_cs;
273 tf.tf_eflags = intrf->if_eflags; 273 tf.tf_eflags = intrf->if_eflags;
274 tf.tf_esp = intrf->if_esp; 274 tf.tf_esp = intrf->if_esp;
275 tf.tf_ss = intrf->if_ss; 275 tf.tf_ss = intrf->if_ss;
276 276
277 ddb_ipi(tf); 277 ddb_ipi(tf);
278#endif 278#endif
279} 279}
280#endif /* DDB */ 280#endif /* DDB */
281 281
282static void 282static void
283xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf) 283xen_ipi_xcall(struct cpu_info *ci, struct intrframe *intrf)
284{ 284{
285 KASSERT(ci != NULL); 285 KASSERT(ci != NULL);
286 KASSERT(intrf != NULL); 286 KASSERT(intrf != NULL);
287 287
288 xc_ipi_handler(); 288 xc_ipi_handler();
289} 289}
290 290
291static void 291static void
292xen_ipi_ast(struct cpu_info *ci, struct intrframe *intrf) 292xen_ipi_ast(struct cpu_info *ci, struct intrframe *intrf)
293{ 293{
294 KASSERT(ci != NULL); 294 KASSERT(ci != NULL);
295 KASSERT(intrf != NULL); 295 KASSERT(intrf != NULL);
296 296
297 aston(ci->ci_onproc); 297 aston(ci->ci_onproc);
298} 298}
299 299
300static void 300static void
301xen_ipi_generic(struct cpu_info *ci, struct intrframe *intrf) 301xen_ipi_generic(struct cpu_info *ci, struct intrframe *intrf)
302{ 302{
303 KASSERT(ci != NULL); 303 KASSERT(ci != NULL);
304 KASSERT(intrf != NULL); 304 KASSERT(intrf != NULL);
305 ipi_cpu_handler(); 305 ipi_cpu_handler();
306} 306}
307 307
308static void 308static void
309xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf) 309xen_ipi_hvcb(struct cpu_info *ci, struct intrframe *intrf)
310{ 310{
311 KASSERT(ci != NULL); 311 KASSERT(ci != NULL);
312 KASSERT(intrf != NULL); 312 KASSERT(intrf != NULL);
313 KASSERT(ci == curcpu()); 313 KASSERT(ci == curcpu());
314 KASSERT(!ci->ci_vcpu->evtchn_upcall_mask); 314 KASSERT(!ci->ci_vcpu->evtchn_upcall_mask);
315 315
316 hypervisor_force_callback(); 316 hypervisor_force_callback();
317} 317}
318 318
319static void 319static void
320xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe * intrf) 320xen_ipi_kpreempt(struct cpu_info *ci, struct intrframe * intrf)
321{ 321{
322 softint_trigger(1 << SIR_PREEMPT); 322 softint_trigger(1 << SIR_PREEMPT);
323} 323}
324 324
325#ifdef XENPV 325#ifdef XENPV
326void 326void
327xc_send_ipi(struct cpu_info *ci) 327xc_send_ipi(struct cpu_info *ci)
328{ 328{
329 329
330 KASSERT(kpreempt_disabled()); 330 KASSERT(kpreempt_disabled());
331 KASSERT(curcpu() != ci); 331 KASSERT(curcpu() != ci);
332 if (ci) { 332 if (ci) {
333 if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) { 333 if (0 != xen_send_ipi(ci, XEN_IPI_XCALL)) {
334 panic("xen_send_ipi(XEN_IPI_XCALL) failed\n"); 334 panic("xen_send_ipi(XEN_IPI_XCALL) failed\n");
335 } 335 }
336 } else { 336 } else {
337 xen_broadcast_ipi(XEN_IPI_XCALL); 337 xen_broadcast_ipi(XEN_IPI_XCALL);
338 } 338 }
339} 339}
340 340
341void 341void
342cpu_ipi(struct cpu_info *ci) 342cpu_ipi(struct cpu_info *ci)
343{ 343{
344 KASSERT(kpreempt_disabled()); 344 KASSERT(kpreempt_disabled());
345 KASSERT(curcpu() != ci); 345 KASSERT(curcpu() != ci);
346 if (ci) { 346 if (ci) {
347 if (0 != xen_send_ipi(ci, XEN_IPI_GENERIC)) { 347 if (0 != xen_send_ipi(ci, XEN_IPI_GENERIC)) {
348 panic("xen_send_ipi(XEN_IPI_GENERIC) failed\n"); 348 panic("xen_send_ipi(XEN_IPI_GENERIC) failed\n");
349 } 349 }
350 } else { 350 } else {
351 xen_broadcast_ipi(XEN_IPI_GENERIC); 351 xen_broadcast_ipi(XEN_IPI_GENERIC);
352 } 352 }
353} 353}
354#endif /* XENPV */ 354#endif /* XENPV */
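In xen_ipi_handler() above, the per-CPU IPI word is drained with atomic_swap_32() and each pending bit is then dispatched through an ffs() loop. A small standalone sketch of that same bit-walk idiom (the mask value and the printf() are purely illustrative stand-ins for the kernel's xen_ipifunc[] dispatch):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int
	main(void)
	{
		unsigned int pending = 0x15;	/* example: bits 0, 2 and 4 pending */
		int bit;

		while ((bit = ffs(pending)) != 0) {
			bit--;			/* ffs() returns a 1-based index */
			pending &= ~(1U << bit);
			printf("dispatch IPI bit %d\n", bit);
		}
		return 0;
	}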

cvs diff -r1.88.2.10 -r1.88.2.11 src/sys/arch/xen/xen/evtchn.c

--- src/sys/arch/xen/xen/evtchn.c 2020/04/20 11:29:01 1.88.2.10
+++ src/sys/arch/xen/xen/evtchn.c 2020/04/20 19:46:44 1.88.2.11
@@ -1,1231 +1,1231 @@
1/* $NetBSD: evtchn.c,v 1.88.2.10 2020/04/20 11:29:01 bouyer Exp $ */ 1/* $NetBSD: evtchn.c,v 1.88.2.11 2020/04/20 19:46:44 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28/* 28/*
29 * 29 *
30 * Copyright (c) 2004 Christian Limpach. 30 * Copyright (c) 2004 Christian Limpach.
31 * Copyright (c) 2004, K A Fraser. 31 * Copyright (c) 2004, K A Fraser.
32 * All rights reserved. 32 * All rights reserved.
33 * 33 *
34 * Redistribution and use in source and binary forms, with or without 34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions 35 * modification, are permitted provided that the following conditions
36 * are met: 36 * are met:
37 * 1. Redistributions of source code must retain the above copyright 37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright 39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the 40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution. 41 * documentation and/or other materials provided with the distribution.
42 * 42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */ 53 */
54 54
55 55
56#include <sys/cdefs.h> 56#include <sys/cdefs.h>
57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.10 2020/04/20 11:29:01 bouyer Exp $"); 57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.11 2020/04/20 19:46:44 bouyer Exp $");
58 58
59#include "opt_xen.h" 59#include "opt_xen.h"
60#include "isa.h" 60#include "isa.h"
61#include "pci.h" 61#include "pci.h"
62 62
63#include <sys/param.h> 63#include <sys/param.h>
64#include <sys/cpu.h> 64#include <sys/cpu.h>
65#include <sys/kernel.h> 65#include <sys/kernel.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/kmem.h> 69#include <sys/kmem.h>
70#include <sys/reboot.h> 70#include <sys/reboot.h>
71#include <sys/mutex.h> 71#include <sys/mutex.h>
72#include <sys/interrupt.h> 72#include <sys/interrupt.h>
73 73
74#include <uvm/uvm.h> 74#include <uvm/uvm.h>
75 75
76#include <xen/intr.h> 76#include <xen/intr.h>
77 77
78#include <xen/xen.h> 78#include <xen/xen.h>
79#include <xen/hypervisor.h> 79#include <xen/hypervisor.h>
80#include <xen/evtchn.h> 80#include <xen/evtchn.h>
81#include <xen/xenfunc.h> 81#include <xen/xenfunc.h>
82 82
83/* 83/*
84 * This lock protects updates to the following mapping and reference-count 84 * This lock protects updates to the following mapping and reference-count
85 * arrays. The lock does not need to be acquired to read the mapping tables. 85 * arrays. The lock does not need to be acquired to read the mapping tables.
86 */ 86 */
87static kmutex_t evtchn_lock; 87static kmutex_t evtchn_lock;
88 88
89/* event handlers */ 89/* event handlers */
90struct evtsource *evtsource[NR_EVENT_CHANNELS]; 90struct evtsource *evtsource[NR_EVENT_CHANNELS];
91 91
92/* channel locks */ 92/* channel locks */
93static kmutex_t evtlock[NR_EVENT_CHANNELS]; 93static kmutex_t evtlock[NR_EVENT_CHANNELS];
94 94
95/* Reference counts for bindings to event channels XXX: redo for SMP */ 95/* Reference counts for bindings to event channels XXX: redo for SMP */
96static uint8_t evtch_bindcount[NR_EVENT_CHANNELS]; 96static uint8_t evtch_bindcount[NR_EVENT_CHANNELS];
97 97
98/* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */ 98/* event-channel <-> VCPU mapping for IPIs. XXX: redo for SMP. */
99static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS]; 99static evtchn_port_t vcpu_ipi_to_evtch[XEN_LEGACY_MAX_VCPUS];
100 100
101/* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */ 101/* event-channel <-> VCPU mapping for VIRQ_TIMER. XXX: redo for SMP. */
102static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS]; 102static int virq_timer_to_evtch[XEN_LEGACY_MAX_VCPUS];
103 103
104/* event-channel <-> VIRQ mapping. */ 104/* event-channel <-> VIRQ mapping. */
105static int virq_to_evtch[NR_VIRQS]; 105static int virq_to_evtch[NR_VIRQS];
106 106
107 107
108#if NPCI > 0 || NISA > 0 108#if NPCI > 0 || NISA > 0
109/* event-channel <-> PIRQ mapping */ 109/* event-channel <-> PIRQ mapping */
110static int pirq_to_evtch[NR_PIRQS]; 110static int pirq_to_evtch[NR_PIRQS];
111/* PIRQ needing notify */ 111/* PIRQ needing notify */
112static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32]; 112static uint32_t pirq_needs_unmask_notify[NR_EVENT_CHANNELS / 32];
113int pirq_interrupt(void *); 113int pirq_interrupt(void *);
114physdev_op_t physdev_op_notify = { 114physdev_op_t physdev_op_notify = {
115 .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY, 115 .cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY,
116}; 116};
117#endif 117#endif
118 118
119static void xen_evtchn_mask(struct pic *, int); 119static void xen_evtchn_mask(struct pic *, int);
120static void xen_evtchn_unmask(struct pic *, int); 120static void xen_evtchn_unmask(struct pic *, int);
121static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int); 121static void xen_evtchn_addroute(struct pic *, struct cpu_info *, int, int, int);
122static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int); 122static void xen_evtchn_delroute(struct pic *, struct cpu_info *, int, int, int);
123static bool xen_evtchn_trymask(struct pic *, int); 123static bool xen_evtchn_trymask(struct pic *, int);
124static void xen_intr_get_devname(const char *, char *, size_t); 124static void xen_intr_get_devname(const char *, char *, size_t);
125static void xen_intr_get_assigned(const char *, kcpuset_t *); 125static void xen_intr_get_assigned(const char *, kcpuset_t *);
126static uint64_t xen_intr_get_count(const char *, u_int); 126static uint64_t xen_intr_get_count(const char *, u_int);
127 127
128struct pic xen_pic = { 128struct pic xen_pic = {
129 .pic_name = "xenev0", 129 .pic_name = "xenev0",
130 .pic_type = PIC_XEN, 130 .pic_type = PIC_XEN,
131 .pic_vecbase = 0, 131 .pic_vecbase = 0,
132 .pic_apicid = 0, 132 .pic_apicid = 0,
133 .pic_lock = __SIMPLELOCK_UNLOCKED, 133 .pic_lock = __SIMPLELOCK_UNLOCKED,
134 .pic_hwmask = xen_evtchn_mask, 134 .pic_hwmask = xen_evtchn_mask,
135 .pic_hwunmask = xen_evtchn_unmask, 135 .pic_hwunmask = xen_evtchn_unmask,
136 .pic_addroute = xen_evtchn_addroute, 136 .pic_addroute = xen_evtchn_addroute,
137 .pic_delroute = xen_evtchn_delroute, 137 .pic_delroute = xen_evtchn_delroute,
138 .pic_trymask = xen_evtchn_trymask, 138 .pic_trymask = xen_evtchn_trymask,
139 .pic_level_stubs = xenev_stubs, 139 .pic_level_stubs = xenev_stubs,
140 .pic_edge_stubs = xenev_stubs, 140 .pic_edge_stubs = xenev_stubs,
141 .pic_intr_get_devname = xen_intr_get_devname, 141 .pic_intr_get_devname = xen_intr_get_devname,
142 .pic_intr_get_assigned = xen_intr_get_assigned, 142 .pic_intr_get_assigned = xen_intr_get_assigned,
143 .pic_intr_get_count = xen_intr_get_count, 143 .pic_intr_get_count = xen_intr_get_count,
144}; 144};
145  145
146/* 146/*
147 * We try to stick to the traditional x86 PIC semantics wrt Xen 147 * We try to stick to the traditional x86 PIC semantics wrt Xen
148 * events. 148 * events.
149 * 149 *
150 * PIC pins exist in a global namespace which may be hierarchical, and 150 * PIC pins exist in a global namespace which may be hierarchical, and
151 * are mapped to a cpu bus concept called 'IRQ' numbers, which are 151 * are mapped to a cpu bus concept called 'IRQ' numbers, which are
152 * also global, but linear. Thus a PIC, pin tuple will always map to 152 * also global, but linear. Thus a PIC, pin tuple will always map to
153 * an IRQ number. These tuples can alias to the same IRQ number, thus 153 * an IRQ number. These tuples can alias to the same IRQ number, thus
154 * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs, 154 * causing IRQ "sharing". IRQ numbers can be bound to specific CPUs,
155 * and to specific callback vector indices on the CPU called idt_vec, 155 * and to specific callback vector indices on the CPU called idt_vec,
156 * which are aliases to handlers meant to run on destination 156 * which are aliases to handlers meant to run on destination
157 * CPUs. This binding can also happen at interrupt time and resolved 157 * CPUs. This binding can also happen at interrupt time and resolved
158 * 'round-robin' between all CPUs, depending on the lapic setup. In 158 * 'round-robin' between all CPUs, depending on the lapic setup. In
159 * this case, all CPUs need to have identical idt_vec->handler 159 * this case, all CPUs need to have identical idt_vec->handler
160 * mappings. 160 * mappings.
161 * 161 *
162 * The job of pic_addroute() is to setup the 'wiring' between the 162 * The job of pic_addroute() is to setup the 'wiring' between the
163 * source pin, and the destination CPU handler, ideally on a specific 163 * source pin, and the destination CPU handler, ideally on a specific
164 * CPU in MP systems (or 'round-robin'). 164 * CPU in MP systems (or 'round-robin').
165 * 165 *
166 * On Xen, a global namespace of 'events' exist, which are initially 166 * On Xen, a global namespace of 'events' exist, which are initially
167 * bound to nothing. This is similar to the relationship between 167 * bound to nothing. This is similar to the relationship between
168 * realworld realworld IRQ numbers wrt PIC pins, since before routing, 168 * realworld realworld IRQ numbers wrt PIC pins, since before routing,
169 * IRQ numbers by themselves have no causal connection setup with the 169 * IRQ numbers by themselves have no causal connection setup with the
170 * real world. (Except for the hardwired cases on the PC Architecture, 170 * real world. (Except for the hardwired cases on the PC Architecture,
171 * which we ignore for the purpose of this description). However the 171 * which we ignore for the purpose of this description). However the
172 * really important routing is from pin to idt_vec. On PIC_XEN, all 172 * really important routing is from pin to idt_vec. On PIC_XEN, all
173 * three (pic, irq, idt_vec) belong to the same namespace and are 173 * three (pic, irq, idt_vec) belong to the same namespace and are
174 * identical. Further, the mapping between idt_vec and the actual 174 * identical. Further, the mapping between idt_vec and the actual
175 * callback handler is setup via calls to the evtchn.h api - this 175 * callback handler is setup via calls to the evtchn.h api - this
176 * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w 176 * last bit is analogous to x86/idt.c:idt_vec_set() on real h/w
177 * 177 *
178 * For now we handle two cases: 178 * For now we handle two cases:
179 * - IPC style events - eg: timer, PV devices, etc. 179 * - IPC style events - eg: timer, PV devices, etc.
180 * - dom0 physical irq bound events. 180 * - dom0 physical irq bound events.
181 * 181 *
182 * In the case of IPC style events, we currently externalise the 182 * In the case of IPC style events, we currently externalise the
183 * event binding by using evtchn.h functions. From the POV of 183 * event binding by using evtchn.h functions. From the POV of
184 * PIC_XEN , 'pin' , 'irq' and 'idt_vec' are all identical to the 184 * PIC_XEN , 'pin' , 'irq' and 'idt_vec' are all identical to the
185 * port number of the event. 185 * port number of the event.
186 * 186 *
187 * In the case of dom0 physical irq bound events, we currently 187 * In the case of dom0 physical irq bound events, we currently
188 * event binding by exporting evtchn.h functions. From the POV of 188 * event binding by exporting evtchn.h functions. From the POV of
189 * PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the 'irq' is 189 * PIC_LAPIC/PIC_IOAPIC, the 'pin' is the hardware pin, the 'irq' is
190 * the x86 global irq number - the port number is extracted out of a 190 * the x86 global irq number - the port number is extracted out of a
191 * global array (this is currently kludgy and breaks API abstraction) 191 * global array (this is currently kludgy and breaks API abstraction)
192 * and the binding happens during pic_addroute() of the ioapic. 192 * and the binding happens during pic_addroute() of the ioapic.
193 * 193 *
194 * Later when we integrate more tightly with x86/intr.c, we will be 194 * Later when we integrate more tightly with x86/intr.c, we will be
195 * able to conform better to (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN 195 * able to conform better to (PIC_LAPIC/PIC_IOAPIC)->PIC_XEN
196 * cascading model. 196 * cascading model.
197 */ 197 */
198 198
199int debug_port = -1; 199int debug_port = -1;
200 200
201// #define IRQ_DEBUG 4 201// #define IRQ_DEBUG 4
202 202
203/* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */ 203/* http://mail-index.netbsd.org/port-amd64/2004/02/22/0000.html */
204#ifdef MULTIPROCESSOR 204#ifdef MULTIPROCESSOR
205 205
206/* 206/*
207 * intr_biglock_wrapper: grab biglock and call a real interrupt handler. 207 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
208 */ 208 */
209 209
210int 210int
211xen_intr_biglock_wrapper(void *vp) 211xen_intr_biglock_wrapper(void *vp)
212{ 212{
213 struct intrhand *ih = vp; 213 struct intrhand *ih = vp;
214 int ret; 214 int ret;
215 215
216 KERNEL_LOCK(1, NULL); 216 KERNEL_LOCK(1, NULL);
217 217
218 ret = (*ih->ih_realfun)(ih->ih_realarg); 218 ret = (*ih->ih_realfun)(ih->ih_realarg);
219 219
220 KERNEL_UNLOCK_ONE(NULL); 220 KERNEL_UNLOCK_ONE(NULL);
221 221
222 return ret; 222 return ret;
223} 223}
224#endif /* MULTIPROCESSOR */ 224#endif /* MULTIPROCESSOR */
225 225
226void 226void
227events_default_setup(void) 227events_default_setup(void)
228{ 228{
229 int i; 229 int i;
230 230
231 /* No VCPU -> event mappings. */ 231 /* No VCPU -> event mappings. */
232 for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) 232 for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
233 vcpu_ipi_to_evtch[i] = -1; 233 vcpu_ipi_to_evtch[i] = -1;
234 234
235 /* No VIRQ_TIMER -> event mappings. */ 235 /* No VIRQ_TIMER -> event mappings. */
236 for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++) 236 for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
237 virq_timer_to_evtch[i] = -1; 237 virq_timer_to_evtch[i] = -1;
238 238
239 /* No VIRQ -> event mappings. */ 239 /* No VIRQ -> event mappings. */
240 for (i = 0; i < NR_VIRQS; i++) 240 for (i = 0; i < NR_VIRQS; i++)
241 virq_to_evtch[i] = -1; 241 virq_to_evtch[i] = -1;
242 242
243#if NPCI > 0 || NISA > 0 243#if NPCI > 0 || NISA > 0
244 /* No PIRQ -> event mappings. */ 244 /* No PIRQ -> event mappings. */
245 for (i = 0; i < NR_PIRQS; i++) 245 for (i = 0; i < NR_PIRQS; i++)
246 pirq_to_evtch[i] = -1; 246 pirq_to_evtch[i] = -1;
247 for (i = 0; i < NR_EVENT_CHANNELS / 32; i++) 247 for (i = 0; i < NR_EVENT_CHANNELS / 32; i++)
248 pirq_needs_unmask_notify[i] = 0; 248 pirq_needs_unmask_notify[i] = 0;
249#endif 249#endif
250 250
251 /* No event-channel are 'live' right now. */ 251 /* No event-channel are 'live' right now. */
252 for (i = 0; i < NR_EVENT_CHANNELS; i++) { 252 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
253 evtsource[i] = NULL; 253 evtsource[i] = NULL;
254 evtch_bindcount[i] = 0; 254 evtch_bindcount[i] = 0;
255 hypervisor_mask_event(i); 255 hypervisor_mask_event(i);
256 } 256 }
257 257
258} 258}
259 259
260void 260void
261events_init(void) 261events_init(void)
262{ 262{
263 mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE); 263 mutex_init(&evtchn_lock, MUTEX_DEFAULT, IPL_NONE);
264#ifdef XENPV 264#ifdef XENPV
265 debug_port = bind_virq_to_evtch(VIRQ_DEBUG); 265 debug_port = bind_virq_to_evtch(VIRQ_DEBUG);
266 266
267 KASSERT(debug_port != -1); 267 KASSERT(debug_port != -1);
268 268
269 aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n", 269 aprint_verbose("VIRQ_DEBUG interrupt using event channel %d\n",
270 debug_port); 270 debug_port);
271 /* 271 /*
272 * Don't call event_set_handler(), we'll use a shortcut. Just set 272 * Don't call event_set_handler(), we'll use a shortcut. Just set
273 * evtsource[] to a non-NULL value so that evtchn_do_event will 273 * evtsource[] to a non-NULL value so that evtchn_do_event will
274 * be called. 274 * be called.
275 */ 275 */
276 evtsource[debug_port] = (void *)-1; 276 evtsource[debug_port] = (void *)-1;
277 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port); 277 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], debug_port);
278 hypervisor_unmask_event(debug_port); 278 hypervisor_unmask_event(debug_port);
279#if NPCI > 0 || NISA > 0 279#if NPCI > 0 || NISA > 0
280 hypervisor_ack_pirq_event(debug_port); 280 hypervisor_ack_pirq_event(debug_port);
281#endif /* NPCI > 0 || NISA > 0 */ 281#endif /* NPCI > 0 || NISA > 0 */
282#endif /* XENPV */ 282#endif /* XENPV */
283 x86_enable_intr(); /* at long last... */ 283 x86_enable_intr(); /* at long last... */
284} 284}
285 285
286bool 286bool
287events_suspend(void) 287events_suspend(void)
288{ 288{
289 int evtch; 289 int evtch;
290 290
291 x86_disable_intr(); 291 x86_disable_intr();
292 292
293 /* VIRQ_DEBUG is the last interrupt to remove */ 293 /* VIRQ_DEBUG is the last interrupt to remove */
294 evtch = unbind_virq_from_evtch(VIRQ_DEBUG); 294 evtch = unbind_virq_from_evtch(VIRQ_DEBUG);
295 295
296 KASSERT(evtch != -1); 296 KASSERT(evtch != -1);
297 297
298 hypervisor_mask_event(evtch); 298 hypervisor_mask_event(evtch);
299 /* Remove the non-NULL value set in events_init() */ 299 /* Remove the non-NULL value set in events_init() */
300 evtsource[evtch] = NULL; 300 evtsource[evtch] = NULL;
301 aprint_verbose("VIRQ_DEBUG interrupt disabled, " 301 aprint_verbose("VIRQ_DEBUG interrupt disabled, "
302 "event channel %d removed\n", evtch); 302 "event channel %d removed\n", evtch);
303 303
304 return true; 304 return true;
305} 305}
306 306
307bool 307bool
308events_resume (void) 308events_resume (void)
309{ 309{
310 events_init(); 310 events_init();
311 311
312 return true; 312 return true;
313} 313}
314 314
315 315
316unsigned int 316unsigned int
317evtchn_do_event(int evtch, struct intrframe *regs) 317evtchn_do_event(int evtch, struct intrframe *regs)
318{ 318{
319 struct cpu_info *ci; 319 struct cpu_info *ci;
320 int ilevel; 320 int ilevel;
321 struct intrhand *ih; 321 struct intrhand *ih;
322 int (*ih_fun)(void *, void *); 322 int (*ih_fun)(void *, void *);
323 uint32_t iplmask; 323 uint32_t iplmask;
324 324
325 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch); 325 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
326 KASSERTMSG(evtch < NR_EVENT_CHANNELS, 326 KASSERTMSG(evtch < NR_EVENT_CHANNELS,
327 "evtch number %d > NR_EVENT_CHANNELS", evtch); 327 "evtch number %d > NR_EVENT_CHANNELS", evtch);
328 328
329#ifdef IRQ_DEBUG 329#ifdef IRQ_DEBUG
330 if (evtch == IRQ_DEBUG) 330 if (evtch == IRQ_DEBUG)
331 printf("evtchn_do_event: evtch %d\n", evtch); 331 printf("evtchn_do_event: evtch %d\n", evtch);
332#endif 332#endif
333 ci = curcpu(); 333 ci = curcpu();
334 334
335 /* 335 /*
336 * Shortcut for the debug handler, we want it to always run, 336 * Shortcut for the debug handler, we want it to always run,
337 * regardless of the IPL level. 337 * regardless of the IPL level.
338 */ 338 */
339 if (__predict_false(evtch == debug_port)) { 339 if (__predict_false(evtch == debug_port)) {
340 xen_debug_handler(NULL); 340 xen_debug_handler(NULL);
341 hypervisor_unmask_event(debug_port); 341 hypervisor_unmask_event(debug_port);
342#if NPCI > 0 || NISA > 0 342#if NPCI > 0 || NISA > 0
343 hypervisor_ack_pirq_event(debug_port); 343 hypervisor_ack_pirq_event(debug_port);
344#endif /* NPCI > 0 || NISA > 0 */  344#endif /* NPCI > 0 || NISA > 0 */
345 return 0; 345 return 0;
346 } 346 }
347 347
348 KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch); 348 KASSERTMSG(evtsource[evtch] != NULL, "unknown event %d", evtch);
349 349
350 if (evtsource[evtch]->ev_cpu != ci) 350 if (evtsource[evtch]->ev_cpu != ci)
351 return 0; 351 return 0;
352 352
353 ci->ci_data.cpu_nintr++; 353 ci->ci_data.cpu_nintr++;
354 evtsource[evtch]->ev_evcnt.ev_count++; 354 evtsource[evtch]->ev_evcnt.ev_count++;
355 ilevel = ci->ci_ilevel; 355 ilevel = ci->ci_ilevel;
356 356
357 if (evtsource[evtch]->ev_maxlevel <= ilevel) { 357 if (evtsource[evtch]->ev_maxlevel <= ilevel) {
358#ifdef IRQ_DEBUG 358#ifdef IRQ_DEBUG
359 if (evtch == IRQ_DEBUG) 359 if (evtch == IRQ_DEBUG)
360 printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n", 360 printf("evtsource[%d]->ev_maxlevel %d <= ilevel %d\n",
361 evtch, evtsource[evtch]->ev_maxlevel, ilevel); 361 evtch, evtsource[evtch]->ev_maxlevel, ilevel);
362#endif 362#endif
363 hypervisor_set_ipending(evtsource[evtch]->ev_imask, 363 hypervisor_set_ipending(evtsource[evtch]->ev_imask,
364 evtch >> LONG_SHIFT, 364 evtch >> LONG_SHIFT,
365 evtch & LONG_MASK); 365 evtch & LONG_MASK);
366 366
367 /* leave masked */ 367 /* leave masked */
368 368
369 return 0; 369 return 0;
370 } 370 }
371 ci->ci_ilevel = evtsource[evtch]->ev_maxlevel; 371 ci->ci_ilevel = evtsource[evtch]->ev_maxlevel;
372 iplmask = evtsource[evtch]->ev_imask; 372 iplmask = evtsource[evtch]->ev_imask;
373 KASSERT(ci->ci_ilevel >= IPL_VM); 373 KASSERT(ci->ci_ilevel >= IPL_VM);
374 KASSERT(cpu_intr_p()); 374 KASSERT(cpu_intr_p());
375 x86_enable_intr(); 375 x86_enable_intr();
376 mutex_spin_enter(&evtlock[evtch]); 376 mutex_spin_enter(&evtlock[evtch]);
377 ih = evtsource[evtch]->ev_handlers; 377 ih = evtsource[evtch]->ev_handlers;
378 while (ih != NULL) { 378 while (ih != NULL) {
379 KASSERT(ih->ih_cpu == ci); 379 KASSERT(ih->ih_cpu == ci);
380#if 0 380#if 0
381 if (ih->ih_cpu != ci) { 381 if (ih->ih_cpu != ci) {
382 hypervisor_send_event(ih->ih_cpu, evtch); 382 hypervisor_send_event(ih->ih_cpu, evtch);
383 iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); 383 iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level));
384 ih = ih->ih_evt_next; 384 ih = ih->ih_evt_next;
385 continue; 385 continue;
386 } 386 }
387#endif 387#endif
388 if (ih->ih_level <= ilevel) { 388 if (ih->ih_level <= ilevel) {
389#ifdef IRQ_DEBUG 389#ifdef IRQ_DEBUG
390 if (evtch == IRQ_DEBUG) 390 if (evtch == IRQ_DEBUG)
391 printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel); 391 printf("ih->ih_level %d <= ilevel %d\n", ih->ih_level, ilevel);
392#endif 392#endif
393 x86_disable_intr(); 393 x86_disable_intr();
394 hypervisor_set_ipending(iplmask, 394 hypervisor_set_ipending(iplmask,
395 evtch >> LONG_SHIFT, evtch & LONG_MASK); 395 evtch >> LONG_SHIFT, evtch & LONG_MASK);
396 /* leave masked */ 396 /* leave masked */
397 mutex_spin_exit(&evtlock[evtch]); 397 mutex_spin_exit(&evtlock[evtch]);
398 goto splx; 398 goto splx;
399 } 399 }
400 iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level)); 400 iplmask &= ~(1 << XEN_IPL2SIR(ih->ih_level));
401 ci->ci_ilevel = ih->ih_level; 401 ci->ci_ilevel = ih->ih_level;
402 ih_fun = (void *)ih->ih_fun; 402 ih_fun = (void *)ih->ih_fun;
403 ih_fun(ih->ih_arg, regs); 403 ih_fun(ih->ih_arg, regs);
404 ih = ih->ih_evt_next; 404 ih = ih->ih_evt_next;
405 } 405 }
406 mutex_spin_exit(&evtlock[evtch]); 406 mutex_spin_exit(&evtlock[evtch]);
407 x86_disable_intr(); 407 x86_disable_intr();
408 hypervisor_unmask_event(evtch); 408 hypervisor_unmask_event(evtch);
409#if NPCI > 0 || NISA > 0 409#if NPCI > 0 || NISA > 0
410 hypervisor_ack_pirq_event(evtch); 410 hypervisor_ack_pirq_event(evtch);
411#endif /* NPCI > 0 || NISA > 0 */  411#endif /* NPCI > 0 || NISA > 0 */
412 412
413splx: 413splx:
414 ci->ci_ilevel = ilevel; 414 ci->ci_ilevel = ilevel;
415 return 0; 415 return 0;
416} 416}
417 417
418#define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */ 418#define PRIuCPUID "lu" /* XXX: move this somewhere more appropriate */
419 419
420/* PIC callbacks */ 420/* PIC callbacks */
421/* pic "pin"s are conceptually mapped to event port numbers */ 421/* pic "pin"s are conceptually mapped to event port numbers */
422static void 422static void
423xen_evtchn_mask(struct pic *pic, int pin) 423xen_evtchn_mask(struct pic *pic, int pin)
424{ 424{
425 evtchn_port_t evtchn = pin; 425 evtchn_port_t evtchn = pin;
426 426
427 KASSERT(pic->pic_type == PIC_XEN); 427 KASSERT(pic->pic_type == PIC_XEN);
428 KASSERT(evtchn < NR_EVENT_CHANNELS); 428 KASSERT(evtchn < NR_EVENT_CHANNELS);
429 429
430 hypervisor_mask_event(evtchn); 430 hypervisor_mask_event(evtchn);
431} 431}
432 432
433static void 433static void
434xen_evtchn_unmask(struct pic *pic, int pin) 434xen_evtchn_unmask(struct pic *pic, int pin)
435{ 435{
436 evtchn_port_t evtchn = pin; 436 evtchn_port_t evtchn = pin;
437 437
438 KASSERT(pic->pic_type == PIC_XEN); 438 KASSERT(pic->pic_type == PIC_XEN);
439 KASSERT(evtchn < NR_EVENT_CHANNELS); 439 KASSERT(evtchn < NR_EVENT_CHANNELS);
440 440
441 hypervisor_unmask_event(evtchn); 441 hypervisor_unmask_event(evtchn);
442  442
443} 443}
444 444
445 445
446static void 446static void
447xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) 447xen_evtchn_addroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type)
448{ 448{
449 449
450 evtchn_port_t evtchn = pin; 450 evtchn_port_t evtchn = pin;
451 451
452 /* Events are simulated as level triggered interrupts */ 452 /* Events are simulated as level triggered interrupts */
453 KASSERT(type == IST_LEVEL);  453 KASSERT(type == IST_LEVEL);
454 454
455 KASSERT(evtchn < NR_EVENT_CHANNELS); 455 KASSERT(evtchn < NR_EVENT_CHANNELS);
456#if notyet 456#if notyet
457 evtchn_port_t boundport = idt_vec; 457 evtchn_port_t boundport = idt_vec;
458#endif 458#endif
459  459
460 KASSERT(pic->pic_type == PIC_XEN); 460 KASSERT(pic->pic_type == PIC_XEN);
461 461
462 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn); 462 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
463 463
464} 464}
465 465
466static void 466static void
467xen_evtchn_delroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type) 467xen_evtchn_delroute(struct pic *pic, struct cpu_info *ci, int pin, int idt_vec, int type)
468{ 468{
469 /* 469 /*
470 * XXX: In the future, this is a great place to 470 * XXX: In the future, this is a great place to
471 * 'unbind' events to underlying events and cpus. 471 * 'unbind' events to underlying events and cpus.
472 * For now, just disable interrupt servicing on this cpu for 472 * For now, just disable interrupt servicing on this cpu for
473 * this pin aka cpu. 473 * this pin aka cpu.
474 */ 474 */
475 evtchn_port_t evtchn = pin; 475 evtchn_port_t evtchn = pin;
476 476
477 /* Events are simulated as level triggered interrupts */ 477 /* Events are simulated as level triggered interrupts */
478 KASSERT(type == IST_LEVEL);  478 KASSERT(type == IST_LEVEL);
479 479
480 KASSERT(evtchn < NR_EVENT_CHANNELS); 480 KASSERT(evtchn < NR_EVENT_CHANNELS);
481#if notyet 481#if notyet
482 evtchn_port_t boundport = idt_vec; 482 evtchn_port_t boundport = idt_vec;
483#endif 483#endif
484  484
485 KASSERT(pic->pic_type == PIC_XEN); 485 KASSERT(pic->pic_type == PIC_XEN);
486 486
487 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn); 487 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtchn);
488} 488}
489 489
490/* 490/*
491 * xen_evtchn_trymask(pic, pin) 491 * xen_evtchn_trymask(pic, pin)
492 * 492 *
493 * If there are interrupts pending on the bus-shared pic, return 493 * If there are interrupts pending on the bus-shared pic, return
494 * false. Otherwise, mask interrupts on the bus-shared pic and 494 * false. Otherwise, mask interrupts on the bus-shared pic and
495 * return true. 495 * return true.
496 */ 496 */
497static bool 497static bool
498xen_evtchn_trymask(struct pic *pic, int pin) 498xen_evtchn_trymask(struct pic *pic, int pin)
499{ 499{
500 volatile struct shared_info *s = HYPERVISOR_shared_info; 500 volatile struct shared_info *s = HYPERVISOR_shared_info;
501 unsigned long masked __diagused; 501 unsigned long masked __diagused;
502 502
503 /* Mask it. */ 503 /* Mask it. */
504 masked = xen_atomic_test_and_set_bit(&s->evtchn_mask[0], pin); 504 masked = xen_atomic_test_and_set_bit(&s->evtchn_mask[0], pin);
505 505
506 /* 506 /*
507 * Caller is responsible for calling trymask only when the 507 * Caller is responsible for calling trymask only when the
508 * interrupt pin is not masked, and for serializing calls to 508 * interrupt pin is not masked, and for serializing calls to
509 * trymask. 509 * trymask.
510 */ 510 */
511 KASSERT(!masked); 511 KASSERT(!masked);
512 512
513 /* 513 /*
514 * Check whether there were any interrupts pending when we 514 * Check whether there were any interrupts pending when we
515 * masked it. If there were, unmask and abort. 515 * masked it. If there were, unmask and abort.
516 */ 516 */
517 if (xen_atomic_test_bit(&s->evtchn_pending[0], pin)) { 517 if (xen_atomic_test_bit(&s->evtchn_pending[0], pin)) {
518 xen_atomic_clear_bit(&s->evtchn_mask[0], pin); 518 xen_atomic_clear_bit(&s->evtchn_mask[0], pin);
519 return false; 519 return false;
520 } 520 }
521 521
522 /* Success: masked, not pending. */ 522 /* Success: masked, not pending. */
523 return true; 523 return true;
524} 524}
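
The mask-then-recheck dance above exists so a caller can mask a channel without losing an event that fired just before the mask took effect. A minimal sketch of the caller-side contract follows; the helper name and the busy-wait retry policy are purely illustrative and are not how the kernel actually drives this hook, they only show what the true/false return means to a caller.

	/*
	 * Illustrative only: mask 'pin', but back off if an event is
	 * already pending so it can be serviced first.  Per the comment
	 * in xen_evtchn_trymask(), calls must be serialized and made
	 * only while the channel is unmasked.
	 */
	static void
	example_mask_when_idle(struct pic *pic, int pin)
	{
		while (!xen_evtchn_trymask(pic, pin)) {
			/*
			 * trymask left the channel unmasked because an
			 * event was pending; wait briefly and retry.
			 */
			delay(1);
		}
		/* Channel is now masked with nothing pending. */
	}
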
525 525
526evtchn_port_t 526evtchn_port_t
527bind_vcpu_to_evtch(cpuid_t vcpu) 527bind_vcpu_to_evtch(cpuid_t vcpu)
528{ 528{
529 evtchn_op_t op; 529 evtchn_op_t op;
530 evtchn_port_t evtchn; 530 evtchn_port_t evtchn;
531 531
532 mutex_spin_enter(&evtchn_lock); 532 mutex_spin_enter(&evtchn_lock);
533 533
534 evtchn = vcpu_ipi_to_evtch[vcpu]; 534 evtchn = vcpu_ipi_to_evtch[vcpu];
535 if (evtchn == -1) { 535 if (evtchn == -1) {
536 op.cmd = EVTCHNOP_bind_ipi; 536 op.cmd = EVTCHNOP_bind_ipi;
537 op.u.bind_ipi.vcpu = (uint32_t) vcpu; 537 op.u.bind_ipi.vcpu = (uint32_t) vcpu;
538 if (HYPERVISOR_event_channel_op(&op) != 0) 538 if (HYPERVISOR_event_channel_op(&op) != 0)
539 panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu); 539 panic("Failed to bind ipi to VCPU %"PRIuCPUID"\n", vcpu);
540 evtchn = op.u.bind_ipi.port; 540 evtchn = op.u.bind_ipi.port;
541 541
542 vcpu_ipi_to_evtch[vcpu] = evtchn; 542 vcpu_ipi_to_evtch[vcpu] = evtchn;
543 } 543 }
544 544
545 evtch_bindcount[evtchn]++; 545 evtch_bindcount[evtchn]++;
546 546
547 mutex_spin_exit(&evtchn_lock); 547 mutex_spin_exit(&evtchn_lock);
548 548
549 return evtchn; 549 return evtchn;
550} 550}
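
bind_vcpu_to_evtch() only allocates (or reuses) the per-VCPU IPI channel; actually raising the IPI is a separate EVTCHNOP_send hypercall on that port. A hedged sketch of the sending side is below — the function name is hypothetical and locking/error handling are elided; the real send path lives in the Xen IPI code, not in this file.

	/*
	 * Hypothetical sender: notify the IPI channel that
	 * bind_vcpu_to_evtch() set up for the target VCPU.
	 */
	static void
	example_send_ipi(cpuid_t vcpu)
	{
		evtchn_op_t op;

		op.cmd = EVTCHNOP_send;
		op.u.send.port = vcpu_ipi_to_evtch[vcpu];
		if (HYPERVISOR_event_channel_op(&op) != 0)
			printf("EVTCHNOP_send to VCPU %"PRIuCPUID" failed\n",
			    vcpu);
	}
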
551 551
552int 552int
553bind_virq_to_evtch(int virq) 553bind_virq_to_evtch(int virq)
554{ 554{
555 evtchn_op_t op; 555 evtchn_op_t op;
556 int evtchn; 556 int evtchn;
557 557
558 mutex_spin_enter(&evtchn_lock); 558 mutex_spin_enter(&evtchn_lock);
559 559
560 /* 560 /*
561 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER. 561 * XXX: The only per-cpu VIRQ we currently use is VIRQ_TIMER.
562 * Please re-visit this implementation when others are used. 562 * Please re-visit this implementation when others are used.
563 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs. 563 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
564 * XXX: event->virq/ipi can be unified in a linked-list 564 * XXX: event->virq/ipi can be unified in a linked-list
565 * implementation. 565 * implementation.
566 */ 566 */
567 struct cpu_info *ci = curcpu(); 567 struct cpu_info *ci = curcpu();
568 568
569 if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) { 569 if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
570 mutex_spin_exit(&evtchn_lock); 570 mutex_spin_exit(&evtchn_lock);
571 return -1; 571 return -1;
572 } 572 }
573 573
574 if (virq == VIRQ_TIMER) { 574 if (virq == VIRQ_TIMER) {
575 evtchn = virq_timer_to_evtch[ci->ci_vcpuid]; 575 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
576 } else { 576 } else {
577 evtchn = virq_to_evtch[virq]; 577 evtchn = virq_to_evtch[virq];
578 } 578 }
579 579
580 /* Allocate a channel if there is none already allocated */ 580 /* Allocate a channel if there is none already allocated */
581 if (evtchn == -1) { 581 if (evtchn == -1) {
582 op.cmd = EVTCHNOP_bind_virq; 582 op.cmd = EVTCHNOP_bind_virq;
583 op.u.bind_virq.virq = virq; 583 op.u.bind_virq.virq = virq;
584 op.u.bind_virq.vcpu = ci->ci_vcpuid; 584 op.u.bind_virq.vcpu = ci->ci_vcpuid;
585 if (HYPERVISOR_event_channel_op(&op) != 0) 585 if (HYPERVISOR_event_channel_op(&op) != 0)
586 panic("Failed to bind virtual IRQ %d\n", virq); 586 panic("Failed to bind virtual IRQ %d\n", virq);
587 evtchn = op.u.bind_virq.port; 587 evtchn = op.u.bind_virq.port;
588 } 588 }
589 589
590 /* Set event channel */ 590 /* Set event channel */
591 if (virq == VIRQ_TIMER) { 591 if (virq == VIRQ_TIMER) {
592 virq_timer_to_evtch[ci->ci_vcpuid] = evtchn; 592 virq_timer_to_evtch[ci->ci_vcpuid] = evtchn;
593 } else { 593 } else {
594 virq_to_evtch[virq] = evtchn; 594 virq_to_evtch[virq] = evtchn;
595 } 595 }
596 596
597 /* Increase ref counter */ 597 /* Increase ref counter */
598 evtch_bindcount[evtchn]++; 598 evtch_bindcount[evtchn]++;
599 599
600 mutex_spin_exit(&evtchn_lock); 600 mutex_spin_exit(&evtchn_lock);
601 601
602 return evtchn; 602 return evtchn;
603} 603}
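
A typical consumer pairs the channel returned here with event_set_handler(), defined later in this file. The fragment below is only a sketch of that pairing — the handler name is made up and the IPL/xname choices are illustrative, not taken from the real clock code.

	/* Illustrative fragment, e.g. from a per-CPU timer attach path. */
	struct cpu_info *ci = curcpu();
	int evtch = bind_virq_to_evtch(VIRQ_TIMER);

	KASSERT(evtch != -1);
	/* evtch, handler, arg, level, intrname, xname, mpsafe, bind */
	event_set_handler(evtch, example_timer_intr, ci, IPL_CLOCK,
	    NULL, "clock", true, false);
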
604 604
605int 605int
606unbind_virq_from_evtch(int virq) 606unbind_virq_from_evtch(int virq)
607{ 607{
608 evtchn_op_t op; 608 evtchn_op_t op;
609 int evtchn; 609 int evtchn;
610 610
611 struct cpu_info *ci = curcpu(); 611 struct cpu_info *ci = curcpu();
612 612
613 if (virq == VIRQ_TIMER) { 613 if (virq == VIRQ_TIMER) {
614 evtchn = virq_timer_to_evtch[ci->ci_vcpuid]; 614 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
615 } 615 }
616 else { 616 else {
617 evtchn = virq_to_evtch[virq]; 617 evtchn = virq_to_evtch[virq];
618 } 618 }
619 619
620 if (evtchn == -1) { 620 if (evtchn == -1) {
621 return -1; 621 return -1;
622 } 622 }
623 623
624 mutex_spin_enter(&evtchn_lock); 624 mutex_spin_enter(&evtchn_lock);
625 625
626 evtch_bindcount[evtchn]--; 626 evtch_bindcount[evtchn]--;
627 if (evtch_bindcount[evtchn] == 0) { 627 if (evtch_bindcount[evtchn] == 0) {
628 op.cmd = EVTCHNOP_close; 628 op.cmd = EVTCHNOP_close;
629 op.u.close.port = evtchn; 629 op.u.close.port = evtchn;
630 if (HYPERVISOR_event_channel_op(&op) != 0) 630 if (HYPERVISOR_event_channel_op(&op) != 0)
631 panic("Failed to unbind virtual IRQ %d\n", virq); 631 panic("Failed to unbind virtual IRQ %d\n", virq);
632 632
633 if (virq == VIRQ_TIMER) { 633 if (virq == VIRQ_TIMER) {
634 virq_timer_to_evtch[ci->ci_vcpuid] = -1; 634 virq_timer_to_evtch[ci->ci_vcpuid] = -1;
635 } else { 635 } else {
636 virq_to_evtch[virq] = -1; 636 virq_to_evtch[virq] = -1;
637 } 637 }
638 } 638 }
639 639
640 mutex_spin_exit(&evtchn_lock); 640 mutex_spin_exit(&evtchn_lock);
641 641
642 return evtchn; 642 return evtchn;
643} 643}
644 644
645#if NPCI > 0 || NISA > 0 645#if NPCI > 0 || NISA > 0
646int 646int
647get_pirq_to_evtch(int pirq) 647get_pirq_to_evtch(int pirq)
648{ 648{
649 int evtchn; 649 int evtchn;
650 650
651 if (pirq == -1) /* Match previous behaviour */ 651 if (pirq == -1) /* Match previous behaviour */
652 return -1; 652 return -1;
653  653
654 if (pirq >= NR_PIRQS) { 654 if (pirq >= NR_PIRQS) {
655 panic("pirq %d out of bounds, increase NR_PIRQS", pirq); 655 panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
656 } 656 }
657 mutex_spin_enter(&evtchn_lock); 657 mutex_spin_enter(&evtchn_lock);
658 658
659 evtchn = pirq_to_evtch[pirq]; 659 evtchn = pirq_to_evtch[pirq];
660 660
661 mutex_spin_exit(&evtchn_lock); 661 mutex_spin_exit(&evtchn_lock);
662 662
663 return evtchn; 663 return evtchn;
664} 664}
665 665
666int 666int
667bind_pirq_to_evtch(int pirq) 667bind_pirq_to_evtch(int pirq)
668{ 668{
669 evtchn_op_t op; 669 evtchn_op_t op;
670 int evtchn; 670 int evtchn;
671 671
672 if (pirq >= NR_PIRQS) { 672 if (pirq >= NR_PIRQS) {
673 panic("pirq %d out of bounds, increase NR_PIRQS", pirq); 673 panic("pirq %d out of bounds, increase NR_PIRQS", pirq);
674 } 674 }
675 675
676 mutex_spin_enter(&evtchn_lock); 676 mutex_spin_enter(&evtchn_lock);
677 677
678 evtchn = pirq_to_evtch[pirq]; 678 evtchn = pirq_to_evtch[pirq];
679 if (evtchn == -1) { 679 if (evtchn == -1) {
680 op.cmd = EVTCHNOP_bind_pirq; 680 op.cmd = EVTCHNOP_bind_pirq;
681 op.u.bind_pirq.pirq = pirq; 681 op.u.bind_pirq.pirq = pirq;
682 op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE; 682 op.u.bind_pirq.flags = BIND_PIRQ__WILL_SHARE;
683 if (HYPERVISOR_event_channel_op(&op) != 0) 683 if (HYPERVISOR_event_channel_op(&op) != 0)
684 panic("Failed to bind physical IRQ %d\n", pirq); 684 panic("Failed to bind physical IRQ %d\n", pirq);
685 evtchn = op.u.bind_pirq.port; 685 evtchn = op.u.bind_pirq.port;
686 686
687#ifdef IRQ_DEBUG 687#ifdef IRQ_DEBUG
688 printf("pirq %d evtchn %d\n", pirq, evtchn); 688 printf("pirq %d evtchn %d\n", pirq, evtchn);
689#endif 689#endif
690 pirq_to_evtch[pirq] = evtchn; 690 pirq_to_evtch[pirq] = evtchn;
691 } 691 }
692 692
693 evtch_bindcount[evtchn]++; 693 evtch_bindcount[evtchn]++;
694 694
695 mutex_spin_exit(&evtchn_lock); 695 mutex_spin_exit(&evtchn_lock);
696 696
697 return evtchn; 697 return evtchn;
698} 698}
699 699
700int 700int
701unbind_pirq_from_evtch(int pirq) 701unbind_pirq_from_evtch(int pirq)
702{ 702{
703 evtchn_op_t op; 703 evtchn_op_t op;
704 int evtchn = pirq_to_evtch[pirq]; 704 int evtchn = pirq_to_evtch[pirq];
705 705
706 mutex_spin_enter(&evtchn_lock); 706 mutex_spin_enter(&evtchn_lock);
707 707
708 evtch_bindcount[evtchn]--; 708 evtch_bindcount[evtchn]--;
709 if (evtch_bindcount[evtchn] == 0) { 709 if (evtch_bindcount[evtchn] == 0) {
710 op.cmd = EVTCHNOP_close; 710 op.cmd = EVTCHNOP_close;
711 op.u.close.port = evtchn; 711 op.u.close.port = evtchn;
712 if (HYPERVISOR_event_channel_op(&op) != 0) 712 if (HYPERVISOR_event_channel_op(&op) != 0)
713 panic("Failed to unbind physical IRQ %d\n", pirq); 713 panic("Failed to unbind physical IRQ %d\n", pirq);
714 714
715 pirq_to_evtch[pirq] = -1; 715 pirq_to_evtch[pirq] = -1;
716 } 716 }
717 717
718 mutex_spin_exit(&evtchn_lock); 718 mutex_spin_exit(&evtchn_lock);
719 719
720 return evtchn; 720 return evtchn;
721} 721}
722 722
723struct pintrhand * 723struct pintrhand *
724pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level, 724pirq_establish(int pirq, int evtch, int (*func)(void *), void *arg, int level,
725 const char *intrname, const char *xname, bool known_mpsafe) 725 const char *intrname, const char *xname, bool known_mpsafe)
726{ 726{
727 struct pintrhand *ih; 727 struct pintrhand *ih;
728 728
729 ih = kmem_zalloc(sizeof(struct pintrhand), 729 ih = kmem_zalloc(sizeof(struct pintrhand),
730 cold ? KM_NOSLEEP : KM_SLEEP); 730 cold ? KM_NOSLEEP : KM_SLEEP);
731 if (ih == NULL) { 731 if (ih == NULL) {
732 printf("pirq_establish: can't allocate handler info\n"); 732 printf("pirq_establish: can't allocate handler info\n");
733 return NULL; 733 return NULL;
734 } 734 }
735 735
736 KASSERT(evtch > 0); 736 KASSERT(evtch > 0);
737 737
738 ih->pirq = pirq; 738 ih->pirq = pirq;
739 ih->evtch = evtch; 739 ih->evtch = evtch;
740 ih->func = func; 740 ih->func = func;
741 ih->arg = arg; 741 ih->arg = arg;
742 742
743 if (event_set_handler(evtch, pirq_interrupt, ih, level, intrname, 743 if (event_set_handler(evtch, pirq_interrupt, ih, level, intrname,
744 xname, known_mpsafe, true) == NULL) { 744 xname, known_mpsafe, true) == NULL) {
745 kmem_free(ih, sizeof(struct pintrhand)); 745 kmem_free(ih, sizeof(struct pintrhand));
746 return NULL; 746 return NULL;
747 } 747 }
748 748
749 hypervisor_prime_pirq_event(pirq, evtch); 749 hypervisor_prime_pirq_event(pirq, evtch);
750 hypervisor_unmask_event(evtch); 750 hypervisor_unmask_event(evtch);
751 hypervisor_ack_pirq_event(evtch); 751 hypervisor_ack_pirq_event(evtch);
752 return ih; 752 return ih;
753} 753}
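
From a driver's point of view the PIRQ steps above are usually taken together: first translate the legacy IRQ into an event channel, then establish the handler on it. A hedged attach-time sketch, with made-up device and handler names:

	/* Illustrative driver attach fragment (names are hypothetical). */
	int evtch = bind_pirq_to_evtch(irq);
	struct pintrhand *pih = pirq_establish(irq, evtch, example_intr,
	    sc, IPL_BIO, "example intr", "example0", false);

	if (pih == NULL)
		printf("example0: couldn't establish interrupt\n");
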
754 754
755void 755void
756pirq_disestablish(struct pintrhand *ih) 756pirq_disestablish(struct pintrhand *ih)
757{ 757{
758 int error = event_remove_handler(ih->evtch, pirq_interrupt, ih); 758 int error = event_remove_handler(ih->evtch, pirq_interrupt, ih);
759 if (error) { 759 if (error) {
760 printf("pirq_disestablish(%p): %d\n", ih, error); 760 printf("pirq_disestablish(%p): %d\n", ih, error);
761 return; 761 return;
762 } 762 }
763 kmem_free(ih, sizeof(struct pintrhand)); 763 kmem_free(ih, sizeof(struct pintrhand));
764} 764}
765 765
766int 766int
767pirq_interrupt(void *arg) 767pirq_interrupt(void *arg)
768{ 768{
769 struct pintrhand *ih = arg; 769 struct pintrhand *ih = arg;
770 int ret; 770 int ret;
771 771
772 ret = ih->func(ih->arg); 772 ret = ih->func(ih->arg);
773#ifdef IRQ_DEBUG 773#ifdef IRQ_DEBUG
774 if (ih->evtch == IRQ_DEBUG) 774 if (ih->evtch == IRQ_DEBUG)
775 printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret); 775 printf("pirq_interrupt irq %d ret %d\n", ih->pirq, ret);
776#endif 776#endif
777 return ret; 777 return ret;
778} 778}
779 779
780#endif /* NPCI > 0 || NISA > 0 */ 780#endif /* NPCI > 0 || NISA > 0 */
781 781
782 782
783/* 783/*
784 * Recalculate the interrupt from scratch for an event source. 784 * Recalculate the interrupt from scratch for an event source.
785 */ 785 */
786static void 786static void
787intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci) 787intr_calculatemasks(struct evtsource *evts, int evtch, struct cpu_info *ci)
788{ 788{
789 struct intrhand *ih; 789 struct intrhand *ih;
790 int cpu_receive = 0; 790 int cpu_receive = 0;
791 791
792#ifdef MULTIPROCESSOR 792#ifdef MULTIPROCESSOR
793 KASSERT(!mutex_owned(&evtlock[evtch])); 793 KASSERT(!mutex_owned(&evtlock[evtch]));
794#endif 794#endif
795 mutex_spin_enter(&evtlock[evtch]); 795 mutex_spin_enter(&evtlock[evtch]);
796 evts->ev_maxlevel = IPL_NONE; 796 evts->ev_maxlevel = IPL_NONE;
797 evts->ev_imask = 0; 797 evts->ev_imask = 0;
798 for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) { 798 for (ih = evts->ev_handlers; ih != NULL; ih = ih->ih_evt_next) {
799 if (ih->ih_level > evts->ev_maxlevel) 799 if (ih->ih_level > evts->ev_maxlevel)
800 evts->ev_maxlevel = ih->ih_level; 800 evts->ev_maxlevel = ih->ih_level;
801 evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level)); 801 evts->ev_imask |= (1 << XEN_IPL2SIR(ih->ih_level));
802 if (ih->ih_cpu == ci) 802 if (ih->ih_cpu == ci)
803 cpu_receive = 1; 803 cpu_receive = 1;
804 } 804 }
805 if (cpu_receive) 805 if (cpu_receive)
806 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch); 806 xen_atomic_set_bit(&curcpu()->ci_evtmask[0], evtch);
807 else 807 else
808 xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch); 808 xen_atomic_clear_bit(&curcpu()->ci_evtmask[0], evtch);
809 mutex_spin_exit(&evtlock[evtch]); 809 mutex_spin_exit(&evtlock[evtch]);
810} 810}
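
As a worked example of the recomputation above: an event source with one handler at IPL_VM and another at IPL_SCHED ends up with ev_maxlevel = IPL_SCHED and ev_imask containing both (1 << XEN_IPL2SIR(IPL_VM)) and (1 << XEN_IPL2SIR(IPL_SCHED)); the calling CPU's ci_evtmask bit for the channel is set only if at least one handler is bound to that CPU, so a CPU with no handler simply ignores the channel.
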
811 811
812struct intrhand * 812struct intrhand *
813event_set_handler(int evtch, int (*func)(void *), void *arg, int level, 813event_set_handler(int evtch, int (*func)(void *), void *arg, int level,
814 const char *intrname, const char *xname, bool mpsafe, bool bind) 814 const char *intrname, const char *xname, bool mpsafe, bool bind)
815{ 815{
816 struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */ 816 struct cpu_info *ci = curcpu(); /* XXX: pass in ci ? */
817 struct evtsource *evts; 817 struct evtsource *evts;
818 struct intrhand *ih, **ihp; 818 struct intrhand *ih, **ihp;
819 int s; 819 int s;
820 char intrstr_buf[INTRIDBUF]; 820 char intrstr_buf[INTRIDBUF];
821 821
822#ifdef IRQ_DEBUG 822#ifdef IRQ_DEBUG
823 printf("event_set_handler IRQ %d handler %p\n", evtch, func); 823 printf("event_set_handler IRQ %d handler %p\n", evtch, func);
824#endif 824#endif
825 825
826 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch); 826 KASSERTMSG(evtch >= 0, "negative evtch: %d", evtch);
827 KASSERTMSG(evtch < NR_EVENT_CHANNELS, 827 KASSERTMSG(evtch < NR_EVENT_CHANNELS,
828 "evtch number %d >= NR_EVENT_CHANNELS", evtch); 828 "evtch number %d >= NR_EVENT_CHANNELS", evtch);
829 KASSERT(xname != NULL); 829 KASSERT(xname != NULL);
830 830
831#if 0 831#if 0
832 printf("event_set_handler evtch %d handler %p level %d\n", evtch, 832 printf("event_set_handler evtch %d handler %p level %d\n", evtch,
833 func, level); 833 func, level);
834#endif 834#endif
835 ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP); 835 ih = kmem_zalloc(sizeof (struct intrhand), KM_NOSLEEP);
836 if (ih == NULL) 836 if (ih == NULL)
837 panic("can't allocate fixed interrupt source"); 837 panic("can't allocate fixed interrupt source");
838 838
839 839
840 ih->ih_pic = &xen_pic; 840 ih->ih_pic = &xen_pic;
841 ih->ih_level = level; 841 ih->ih_level = level;
842 ih->ih_fun = ih->ih_realfun = func; 842 ih->ih_fun = ih->ih_realfun = func;
843 ih->ih_arg = ih->ih_realarg = arg; 843 ih->ih_arg = ih->ih_realarg = arg;
844 ih->ih_evt_next = NULL; 844 ih->ih_evt_next = NULL;
845 ih->ih_next = NULL; 845 ih->ih_next = NULL;
846 ih->ih_cpu = ci; 846 ih->ih_cpu = ci;
847 ih->ih_pin = evtch; 847 ih->ih_pin = evtch;
848#ifdef MULTIPROCESSOR 848#ifdef MULTIPROCESSOR
849 if (!mpsafe) { 849 if (!mpsafe) {
850 ih->ih_fun = xen_intr_biglock_wrapper; 850 ih->ih_fun = xen_intr_biglock_wrapper;
851 ih->ih_arg = ih; 851 ih->ih_arg = ih;
852 } 852 }
853#endif /* MULTIPROCESSOR */ 853#endif /* MULTIPROCESSOR */
854 854
855 s = splhigh(); 855 s = splhigh();
856 856
857 /* register per-cpu handler for spllower() */ 857 /* register per-cpu handler for spllower() */
858 event_set_iplhandler(ci, ih, level); 858 event_set_iplhandler(ci, ih, level);
859 859
860 /* register handler for event channel */ 860 /* register handler for event channel */
861 if (evtsource[evtch] == NULL) { 861 if (evtsource[evtch] == NULL) {
862 evtchn_op_t op; 862 evtchn_op_t op;
863 if (intrname == NULL) 863 if (intrname == NULL)
864 intrname = intr_create_intrid(-1, &xen_pic, evtch, 864 intrname = intr_create_intrid(-1, &xen_pic, evtch,
865 intrstr_buf, sizeof(intrstr_buf)); 865 intrstr_buf, sizeof(intrstr_buf));
866 evts = kmem_zalloc(sizeof (struct evtsource), 866 evts = kmem_zalloc(sizeof (struct evtsource),
867 KM_NOSLEEP); 867 KM_NOSLEEP);
868 if (evts == NULL) 868 if (evts == NULL)
869 panic("can't allocate fixed interrupt source"); 869 panic("can't allocate fixed interrupt source");
870 870
871 evts->ev_handlers = ih; 871 evts->ev_handlers = ih;
872 /* 872 /*
873 * XXX: We're assuming here that ci is the same cpu as 873 * XXX: We're assuming here that ci is the same cpu as
874 * the one on which this event/port is bound on. The 874 * the one on which this event/port is bound on. The
875 * api needs to be reshuffled so that this assumption 875 * api needs to be reshuffled so that this assumption
876 * is more explicitly implemented. 876 * is more explicitly implemented.
877 */ 877 */
878 evts->ev_cpu = ci; 878 evts->ev_cpu = ci;
879 mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH); 879 mutex_init(&evtlock[evtch], MUTEX_DEFAULT, IPL_HIGH);
880 evtsource[evtch] = evts; 880 evtsource[evtch] = evts;
881 strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname)); 881 strlcpy(evts->ev_intrname, intrname, sizeof(evts->ev_intrname));
882 882
883 evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL, 883 evcnt_attach_dynamic(&evts->ev_evcnt, EVCNT_TYPE_INTR, NULL,
884 device_xname(ci->ci_dev), evts->ev_intrname); 884 device_xname(ci->ci_dev), evts->ev_intrname);
885 if (bind) { 885 if (bind) {
886 op.cmd = EVTCHNOP_bind_vcpu; 886 op.cmd = EVTCHNOP_bind_vcpu;
887 op.u.bind_vcpu.port = evtch; 887 op.u.bind_vcpu.port = evtch;
888 op.u.bind_vcpu.vcpu = ci->ci_cpuid; 888 op.u.bind_vcpu.vcpu = ci->ci_vcpuid;
889 if (HYPERVISOR_event_channel_op(&op) != 0) { 889 if (HYPERVISOR_event_channel_op(&op) != 0) {
890 panic("Failed to bind event %d to " 890 panic("Failed to bind event %d to VCPU %s %d",
891 "VCPU %"PRIuCPUID, evtch, ci->ci_cpuid); 891 evtch, device_xname(ci->ci_dev),
 892 ci->ci_vcpuid);
892 } 893 }
893 } 894 }
894 } else { 895 } else {
895 evts = evtsource[evtch]; 896 evts = evtsource[evtch];
896 /* sort by IPL order, higher first */ 897 /* sort by IPL order, higher first */
897 mutex_spin_enter(&evtlock[evtch]); 898 mutex_spin_enter(&evtlock[evtch]);
898 for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) { 899 for (ihp = &evts->ev_handlers; ; ihp = &((*ihp)->ih_evt_next)) {
899 if ((*ihp)->ih_level < ih->ih_level) { 900 if ((*ihp)->ih_level < ih->ih_level) {
900 /* insert before *ihp */ 901 /* insert before *ihp */
901 ih->ih_evt_next = *ihp; 902 ih->ih_evt_next = *ihp;
902 *ihp = ih; 903 *ihp = ih;
903 break; 904 break;
904 } 905 }
905 if ((*ihp)->ih_evt_next == NULL) { 906 if ((*ihp)->ih_evt_next == NULL) {
906 (*ihp)->ih_evt_next = ih; 907 (*ihp)->ih_evt_next = ih;
907 break; 908 break;
908 } 909 }
909 } 910 }
910 mutex_spin_exit(&evtlock[evtch]); 911 mutex_spin_exit(&evtlock[evtch]);
911#ifndef XENPV 912#ifndef XENPV
912 mutex_enter(&cpu_lock); 913 mutex_enter(&cpu_lock);
913 evts->ev_isl->is_handlers = evts->ev_handlers; 914 evts->ev_isl->is_handlers = evts->ev_handlers;
914 mutex_exit(&cpu_lock); 915 mutex_exit(&cpu_lock);
915#endif 916#endif
916 } 917 }
917 918
918 919
919 // append device name 920 // append device name
920 if (evts->ev_xname[0] != '\0') 921 if (evts->ev_xname[0] != '\0')
921 strlcat(evts->ev_xname, ", ", sizeof(evts->ev_xname)); 922 strlcat(evts->ev_xname, ", ", sizeof(evts->ev_xname));
922 strlcat(evts->ev_xname, xname, sizeof(evts->ev_xname)); 923 strlcat(evts->ev_xname, xname, sizeof(evts->ev_xname));
923 924
924 intr_calculatemasks(evts, evtch, ci); 925 intr_calculatemasks(evts, evtch, ci);
925 splx(s); 926 splx(s);
926#ifndef XENPV 927#ifndef XENPV
927 mutex_enter(&cpu_lock); 928 mutex_enter(&cpu_lock);
928 if (evts->ev_isl == NULL) { 929 if (evts->ev_isl == NULL) {
929 evts->ev_isl = intr_allocate_io_intrsource(intrname); 930 evts->ev_isl = intr_allocate_io_intrsource(intrname);
930 evts->ev_isl->is_pic = &xen_pic; 931 evts->ev_isl->is_pic = &xen_pic;
931 } 932 }
932 evts->ev_isl->is_handlers = evts->ev_handlers; 933 evts->ev_isl->is_handlers = evts->ev_handlers;
933 mutex_exit(&cpu_lock); 934 mutex_exit(&cpu_lock);
934#endif 935#endif
935 936
936 937
937 return ih; 938 return ih;
938} 939}
939 940
940void 941void
941event_set_iplhandler(struct cpu_info *ci, 942event_set_iplhandler(struct cpu_info *ci,
942 struct intrhand *ih, 943 struct intrhand *ih,
943 int level) 944 int level)
944{ 945{
945 struct intrsource *ipls; 946 struct intrsource *ipls;
946 int sir = XEN_IPL2SIR(level); 947 int sir = XEN_IPL2SIR(level);
947 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH); 948 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
948 949
949 KASSERT(ci == ih->ih_cpu); 950 KASSERT(ci == ih->ih_cpu);
950 if (ci->ci_isources[sir] == NULL) { 951 if (ci->ci_isources[sir] == NULL) {
951 ipls = kmem_zalloc(sizeof (struct intrsource), 952 ipls = kmem_zalloc(sizeof (struct intrsource),
952 KM_NOSLEEP); 953 KM_NOSLEEP);
953 if (ipls == NULL) 954 if (ipls == NULL)
954 panic("can't allocate fixed interrupt source"); 955 panic("can't allocate fixed interrupt source");
955 ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse; 956 ipls->is_recurse = xenev_stubs[level - IPL_VM].ist_recurse;
956 ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume; 957 ipls->is_resume = xenev_stubs[level - IPL_VM].ist_resume;
957 ipls->is_handlers = ih; 958 ipls->is_handlers = ih;
958 ipls->is_maxlevel = level; 
959 ipls->is_pic = &xen_pic; 959 ipls->is_pic = &xen_pic;
960 ci->ci_isources[sir] = ipls; 960 ci->ci_isources[sir] = ipls;
961 x86_intr_calculatemasks(ci); 
962 } else { 961 } else {
963 ipls = ci->ci_isources[sir]; 962 ipls = ci->ci_isources[sir];
964 ih->ih_next = ipls->is_handlers; 963 ih->ih_next = ipls->is_handlers;
965 ipls->is_handlers = ih; 964 ipls->is_handlers = ih;
966 } 965 }
 966 x86_intr_calculatemasks(ci);
967} 967}
968 968
969int 969int
970event_remove_handler(int evtch, int (*func)(void *), void *arg) 970event_remove_handler(int evtch, int (*func)(void *), void *arg)
971{ 971{
972 struct intrsource *ipls; 972 struct intrsource *ipls;
973 struct evtsource *evts; 973 struct evtsource *evts;
974 struct intrhand *ih; 974 struct intrhand *ih;
975 struct intrhand **ihp; 975 struct intrhand **ihp;
976 struct cpu_info *ci; 976 struct cpu_info *ci;
977 977
978 evts = evtsource[evtch]; 978 evts = evtsource[evtch];
979 if (evts == NULL) 979 if (evts == NULL)
980 return ENOENT; 980 return ENOENT;
981 981
982 mutex_spin_enter(&evtlock[evtch]); 982 mutex_spin_enter(&evtlock[evtch]);
983 for (ihp = &evts->ev_handlers, ih = evts->ev_handlers; 983 for (ihp = &evts->ev_handlers, ih = evts->ev_handlers;
984 ih != NULL; 984 ih != NULL;
985 ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) { 985 ihp = &ih->ih_evt_next, ih = ih->ih_evt_next) {
986 if (ih->ih_realfun == func && ih->ih_realarg == arg) 986 if (ih->ih_realfun == func && ih->ih_realarg == arg)
987 break; 987 break;
988 } 988 }
989 if (ih == NULL) { 989 if (ih == NULL) {
990 mutex_spin_exit(&evtlock[evtch]); 990 mutex_spin_exit(&evtlock[evtch]);
991 return ENOENT; 991 return ENOENT;
992 } 992 }
993 ci = ih->ih_cpu; 993 ci = ih->ih_cpu;
994 *ihp = ih->ih_evt_next; 994 *ihp = ih->ih_evt_next;
995 995
996 int sir = XEN_IPL2SIR(ih->ih_level); 996 int sir = XEN_IPL2SIR(ih->ih_level);
997 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH); 997 KASSERT(sir >= SIR_XENIPL_VM && sir <= SIR_XENIPL_HIGH);
998 ipls = ci->ci_isources[sir]; 998 ipls = ci->ci_isources[sir];
999 for (ihp = &ipls->is_handlers, ih = ipls->is_handlers; 999 for (ihp = &ipls->is_handlers, ih = ipls->is_handlers;
1000 ih != NULL; 1000 ih != NULL;
1001 ihp = &ih->ih_next, ih = ih->ih_next) { 1001 ihp = &ih->ih_next, ih = ih->ih_next) {
1002 if (ih->ih_realfun == func && ih->ih_realarg == arg) 1002 if (ih->ih_realfun == func && ih->ih_realarg == arg)
1003 break; 1003 break;
1004 } 1004 }
1005 if (ih == NULL) 1005 if (ih == NULL)
1006 panic("event_remove_handler"); 1006 panic("event_remove_handler");
1007 *ihp = ih->ih_next; 1007 *ihp = ih->ih_next;
1008 mutex_spin_exit(&evtlock[evtch]); 1008 mutex_spin_exit(&evtlock[evtch]);
1009#ifndef XENPV 1009#ifndef XENPV
1010 mutex_enter(&cpu_lock); 1010 mutex_enter(&cpu_lock);
1011 evts->ev_isl->is_handlers = evts->ev_handlers; 1011 evts->ev_isl->is_handlers = evts->ev_handlers;
1012 mutex_exit(&cpu_lock); 1012 mutex_exit(&cpu_lock);
1013#endif 1013#endif
1014 kmem_free(ih, sizeof (struct intrhand)); 1014 kmem_free(ih, sizeof (struct intrhand));
1015 if (evts->ev_handlers == NULL) { 1015 if (evts->ev_handlers == NULL) {
1016#ifndef XENPV 1016#ifndef XENPV
1017 KASSERT(evts->ev_isl->is_handlers == NULL); 1017 KASSERT(evts->ev_isl->is_handlers == NULL);
1018 mutex_enter(&cpu_lock); 1018 mutex_enter(&cpu_lock);
1019 intr_free_io_intrsource(evts->ev_intrname); 1019 intr_free_io_intrsource(evts->ev_intrname);
1020 mutex_exit(&cpu_lock); 1020 mutex_exit(&cpu_lock);
1021#endif 1021#endif
1022 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch); 1022 xen_atomic_clear_bit(&ci->ci_evtmask[0], evtch);
1023 evcnt_detach(&evts->ev_evcnt); 1023 evcnt_detach(&evts->ev_evcnt);
1024 kmem_free(evts, sizeof (struct evtsource)); 1024 kmem_free(evts, sizeof (struct evtsource));
1025 evtsource[evtch] = NULL; 1025 evtsource[evtch] = NULL;
1026 } else { 1026 } else {
1027 intr_calculatemasks(evts, evtch, ci); 1027 intr_calculatemasks(evts, evtch, ci);
1028 } 1028 }
1029 return 0; 1029 return 0;
1030} 1030}
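
Removal is keyed on the same (func, arg) pair that was passed to event_set_handler(), so a teardown path mirrors the setup call. A short sketch, again with hypothetical names:

	/*
	 * Illustrative teardown: the (func, arg) pair identifies the
	 * handler to remove; ENOENT means it was never registered on
	 * this channel.
	 */
	int error = event_remove_handler(evtch, example_intr, sc);

	if (error != 0)
		printf("example0: event_remove_handler failed (%d)\n", error);
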
1031 1031
1032#if NPCI > 0 || NISA > 0 1032#if NPCI > 0 || NISA > 0
1033void 1033void
1034hypervisor_prime_pirq_event(int pirq, unsigned int evtch) 1034hypervisor_prime_pirq_event(int pirq, unsigned int evtch)
1035{ 1035{
1036 physdev_op_t physdev_op; 1036 physdev_op_t physdev_op;
1037 physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY; 1037 physdev_op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
1038 physdev_op.u.irq_status_query.irq = pirq; 1038 physdev_op.u.irq_status_query.irq = pirq;
1039 if (HYPERVISOR_physdev_op(&physdev_op) < 0) 1039 if (HYPERVISOR_physdev_op(&physdev_op) < 0)
1040 panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)"); 1040 panic("HYPERVISOR_physdev_op(PHYSDEVOP_IRQ_STATUS_QUERY)");
1041 if (physdev_op.u.irq_status_query.flags & 1041 if (physdev_op.u.irq_status_query.flags &
1042 PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) { 1042 PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY) {
1043 pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f)); 1043 pirq_needs_unmask_notify[evtch >> 5] |= (1 << (evtch & 0x1f));
1044#ifdef IRQ_DEBUG 1044#ifdef IRQ_DEBUG
1045 printf("pirq %d needs notify\n", pirq); 1045 printf("pirq %d needs notify\n", pirq);
1046#endif 1046#endif
1047 } 1047 }
1048} 1048}
1049 1049
1050void 1050void
1051hypervisor_ack_pirq_event(unsigned int evtch) 1051hypervisor_ack_pirq_event(unsigned int evtch)
1052{ 1052{
1053#ifdef IRQ_DEBUG 1053#ifdef IRQ_DEBUG
1054 if (evtch == IRQ_DEBUG) 1054 if (evtch == IRQ_DEBUG)
1055 printf("%s: evtch %d\n", __func__, evtch); 1055 printf("%s: evtch %d\n", __func__, evtch);
1056#endif 1056#endif
1057 1057
1058 if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) { 1058 if (pirq_needs_unmask_notify[evtch >> 5] & (1 << (evtch & 0x1f))) {
1059#ifdef IRQ_DEBUG 1059#ifdef IRQ_DEBUG
1060 if (evtch == IRQ_DEBUG) 1060 if (evtch == IRQ_DEBUG)
1061 printf("pirq_notify(%d)\n", evtch); 1061 printf("pirq_notify(%d)\n", evtch);
1062#endif 1062#endif
1063 (void)HYPERVISOR_physdev_op(&physdev_op_notify); 1063 (void)HYPERVISOR_physdev_op(&physdev_op_notify);
1064 } 1064 }
1065} 1065}
1066#endif /* NPCI > 0 || NISA > 0 */ 1066#endif /* NPCI > 0 || NISA > 0 */
1067 1067
1068int 1068int
1069xen_debug_handler(void *arg) 1069xen_debug_handler(void *arg)
1070{ 1070{
1071 struct cpu_info *ci = curcpu(); 1071 struct cpu_info *ci = curcpu();
1072 int i; 1072 int i;
1073 int xci_ilevel = ci->ci_ilevel; 1073 int xci_ilevel = ci->ci_ilevel;
1074 int xci_ipending = ci->ci_ipending; 1074 int xci_ipending = ci->ci_ipending;
1075 int xci_idepth = ci->ci_idepth; 1075 int xci_idepth = ci->ci_idepth;
1076 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending; 1076 u_long upcall_pending = ci->ci_vcpu->evtchn_upcall_pending;
1077 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask; 1077 u_long upcall_mask = ci->ci_vcpu->evtchn_upcall_mask;
1078 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel; 1078 u_long pending_sel = ci->ci_vcpu->evtchn_pending_sel;
1079 unsigned long evtchn_mask[sizeof(unsigned long) * 8]; 1079 unsigned long evtchn_mask[sizeof(unsigned long) * 8];
1080 unsigned long evtchn_pending[sizeof(unsigned long) * 8]; 1080 unsigned long evtchn_pending[sizeof(unsigned long) * 8];
1081 1081
1082 u_long p; 1082 u_long p;
1083 1083
1084 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0]; 1084 p = (u_long)&HYPERVISOR_shared_info->evtchn_mask[0];
1085 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask)); 1085 memcpy(evtchn_mask, (void *)p, sizeof(evtchn_mask));
1086 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0]; 1086 p = (u_long)&HYPERVISOR_shared_info->evtchn_pending[0];
1087 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending)); 1087 memcpy(evtchn_pending, (void *)p, sizeof(evtchn_pending));
1088 1088
1089 __insn_barrier(); 1089 __insn_barrier();
1090 printf("debug event\n"); 1090 printf("debug event\n");
1091 printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n", 1091 printf("ci_ilevel 0x%x ci_ipending 0x%x ci_idepth %d\n",
1092 xci_ilevel, xci_ipending, xci_idepth); 1092 xci_ilevel, xci_ipending, xci_idepth);
1093 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld" 1093 printf("evtchn_upcall_pending %ld evtchn_upcall_mask %ld"
1094 " evtchn_pending_sel 0x%lx\n", 1094 " evtchn_pending_sel 0x%lx\n",
1095 upcall_pending, upcall_mask, pending_sel); 1095 upcall_pending, upcall_mask, pending_sel);
1096 printf("evtchn_mask"); 1096 printf("evtchn_mask");
1097 for (i = 0 ; i <= LONG_MASK; i++) 1097 for (i = 0 ; i <= LONG_MASK; i++)
1098 printf(" %lx", (u_long)evtchn_mask[i]); 1098 printf(" %lx", (u_long)evtchn_mask[i]);
1099 printf("\n"); 1099 printf("\n");
1100 printf("evtchn_pending"); 1100 printf("evtchn_pending");
1101 for (i = 0 ; i <= LONG_MASK; i++) 1101 for (i = 0 ; i <= LONG_MASK; i++)
1102 printf(" %lx", (u_long)evtchn_pending[i]); 1102 printf(" %lx", (u_long)evtchn_pending[i]);
1103 printf("\n"); 1103 printf("\n");
1104 return 0; 1104 return 0;
1105} 1105}
1106 1106
1107static struct evtsource * 1107static struct evtsource *
1108event_get_handler(const char *intrid) 1108event_get_handler(const char *intrid)
1109{ 1109{
1110 for (int i = 0; i < NR_EVENT_CHANNELS; i++) { 1110 for (int i = 0; i < NR_EVENT_CHANNELS; i++) {
1111 if (evtsource[i] == NULL || i == debug_port) 1111 if (evtsource[i] == NULL || i == debug_port)
1112 continue; 1112 continue;
1113 1113
1114 struct evtsource *evp = evtsource[i]; 1114 struct evtsource *evp = evtsource[i];
1115 1115
1116 if (strcmp(evp->ev_intrname, intrid) == 0) 1116 if (strcmp(evp->ev_intrname, intrid) == 0)
1117 return evp; 1117 return evp;
1118 } 1118 }
1119 1119
1120 return NULL; 1120 return NULL;
1121} 1121}
1122 1122
1123static uint64_t 1123static uint64_t
1124xen_intr_get_count(const char *intrid, u_int cpu_idx) 1124xen_intr_get_count(const char *intrid, u_int cpu_idx)
1125{ 1125{
1126 int count = 0; 1126 int count = 0;
1127 struct evtsource *evp; 1127 struct evtsource *evp;
1128 1128
1129 mutex_spin_enter(&evtchn_lock); 1129 mutex_spin_enter(&evtchn_lock);
1130 1130
1131 evp = event_get_handler(intrid); 1131 evp = event_get_handler(intrid);
1132 if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu)) 1132 if (evp != NULL && cpu_idx == cpu_index(evp->ev_cpu))
1133 count = evp->ev_evcnt.ev_count; 1133 count = evp->ev_evcnt.ev_count;
1134 1134
1135 mutex_spin_exit(&evtchn_lock); 1135 mutex_spin_exit(&evtchn_lock);
1136 1136
1137 return count; 1137 return count;
1138} 1138}
1139 1139
1140static void 1140static void
1141xen_intr_get_assigned(const char *intrid, kcpuset_t *cpuset) 1141xen_intr_get_assigned(const char *intrid, kcpuset_t *cpuset)
1142{ 1142{
1143 struct evtsource *evp; 1143 struct evtsource *evp;
1144 1144
1145 kcpuset_zero(cpuset); 1145 kcpuset_zero(cpuset);
1146 1146
1147 mutex_spin_enter(&evtchn_lock); 1147 mutex_spin_enter(&evtchn_lock);
1148 1148
1149 evp = event_get_handler(intrid); 1149 evp = event_get_handler(intrid);
1150 if (evp != NULL) 1150 if (evp != NULL)
1151 kcpuset_set(cpuset, cpu_index(evp->ev_cpu)); 1151 kcpuset_set(cpuset, cpu_index(evp->ev_cpu));
1152 1152
1153 mutex_spin_exit(&evtchn_lock); 1153 mutex_spin_exit(&evtchn_lock);
1154} 1154}
1155 1155
1156static void 1156static void
1157xen_intr_get_devname(const char *intrid, char *buf, size_t len) 1157xen_intr_get_devname(const char *intrid, char *buf, size_t len)
1158{ 1158{
1159 struct evtsource *evp; 1159 struct evtsource *evp;
1160 1160
1161 mutex_spin_enter(&evtchn_lock); 1161 mutex_spin_enter(&evtchn_lock);
1162 1162
1163 evp = event_get_handler(intrid); 1163 evp = event_get_handler(intrid);
1164 strlcpy(buf, evp ? evp->ev_xname : "unknown", len); 1164 strlcpy(buf, evp ? evp->ev_xname : "unknown", len);
1165 1165
1166 mutex_spin_exit(&evtchn_lock); 1166 mutex_spin_exit(&evtchn_lock);
1167} 1167}
1168 1168
1169#ifdef XENPV 1169#ifdef XENPV
1170/* 1170/*
1171 * MI interface for subr_interrupt. 1171 * MI interface for subr_interrupt.
1172 */ 1172 */
1173struct intrids_handler * 1173struct intrids_handler *
1174interrupt_construct_intrids(const kcpuset_t *cpuset) 1174interrupt_construct_intrids(const kcpuset_t *cpuset)
1175{ 1175{
1176 struct intrids_handler *ii_handler; 1176 struct intrids_handler *ii_handler;
1177 intrid_t *ids; 1177 intrid_t *ids;
1178 int i, count, off; 1178 int i, count, off;
1179 struct evtsource *evp; 1179 struct evtsource *evp;
1180 1180
1181 if (kcpuset_iszero(cpuset)) 1181 if (kcpuset_iszero(cpuset))
1182 return 0; 1182 return 0;
1183 1183
1184 /* 1184 /*
1185 * Count the number of interrupts whose affinity is set to any cpu of "cpuset". 1185 * Count the number of interrupts whose affinity is set to any cpu of "cpuset".
1186 */ 1186 */
1187 count = 0; 1187 count = 0;
1188 for (i = 0; i < NR_EVENT_CHANNELS; i++) { 1188 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1189 evp = evtsource[i]; 1189 evp = evtsource[i];
1190 1190
1191 if (evp == NULL || i == debug_port) 1191 if (evp == NULL || i == debug_port)
1192 continue; 1192 continue;
1193 1193
1194 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) 1194 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
1195 continue; 1195 continue;
1196 1196
1197 count++; 1197 count++;
1198 } 1198 }
1199 1199
1200 ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count, 1200 ii_handler = kmem_zalloc(sizeof(int) + sizeof(intrid_t) * count,
1201 KM_SLEEP); 1201 KM_SLEEP);
1202 if (ii_handler == NULL) 1202 if (ii_handler == NULL)
1203 return NULL; 1203 return NULL;
1204 ii_handler->iih_nids = count; 1204 ii_handler->iih_nids = count;
1205 if (count == 0) 1205 if (count == 0)
1206 return ii_handler; 1206 return ii_handler;
1207 1207
1208 ids = ii_handler->iih_intrids; 1208 ids = ii_handler->iih_intrids;
1209 mutex_spin_enter(&evtchn_lock); 1209 mutex_spin_enter(&evtchn_lock);
1210 for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) { 1210 for (i = 0, off = 0; i < NR_EVENT_CHANNELS && off < count; i++) {
1211 evp = evtsource[i]; 1211 evp = evtsource[i];
1212 1212
1213 if (evp == NULL || i == debug_port) 1213 if (evp == NULL || i == debug_port)
1214 continue; 1214 continue;
1215 1215
1216 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu))) 1216 if (!kcpuset_isset(cpuset, cpu_index(evp->ev_cpu)))
1217 continue; 1217 continue;
1218 1218
1219 snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname); 1219 snprintf(ids[off], sizeof(intrid_t), "%s", evp->ev_intrname);
1220 off++; 1220 off++;
1221 } 1221 }
1222 mutex_spin_exit(&evtchn_lock); 1222 mutex_spin_exit(&evtchn_lock);
1223 return ii_handler; 1223 return ii_handler;
1224} 1224}
1225__strong_alias(interrupt_get_count, xen_intr_get_count); 1225__strong_alias(interrupt_get_count, xen_intr_get_count);
1226__strong_alias(interrupt_get_assigned, xen_intr_get_assigned); 1226__strong_alias(interrupt_get_assigned, xen_intr_get_assigned);
1227__strong_alias(interrupt_get_devname, xen_intr_get_devname); 1227__strong_alias(interrupt_get_devname, xen_intr_get_devname);
1228__strong_alias(x86_intr_get_count, xen_intr_get_count); 1228__strong_alias(x86_intr_get_count, xen_intr_get_count);
1229__strong_alias(x86_intr_get_assigned, xen_intr_get_assigned); 1229__strong_alias(x86_intr_get_assigned, xen_intr_get_assigned);
1230__strong_alias(x86_intr_get_devname, xen_intr_get_devname); 1230__strong_alias(x86_intr_get_devname, xen_intr_get_devname);
1231#endif /* XENPV */ 1231#endif /* XENPV */