Tue Apr 21 19:03:51 2020 UTC ()
adjust so that this at least compiles and links with __HAVE_PCI_MSI_MSIX


(jdolecek)
diff -r1.53 -r1.54 src/sys/arch/xen/include/intr.h
diff -r1.22 -r1.23 src/sys/arch/xen/x86/xen_intr.c

cvs diff -r1.53 -r1.54 src/sys/arch/xen/include/intr.h (switch to unified diff)

--- src/sys/arch/xen/include/intr.h 2019/12/23 13:35:37 1.53
+++ src/sys/arch/xen/include/intr.h 2020/04/21 19:03:51 1.54
@@ -1,100 +1,103 @@ @@ -1,100 +1,103 @@
1/* $NetBSD: intr.h,v 1.53 2019/12/23 13:35:37 thorpej Exp $ */ 1/* $NetBSD: intr.h,v 1.54 2020/04/21 19:03:51 jdolecek Exp $ */
2/* NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp */ 2/* NetBSD intr.h,v 1.15 2004/10/31 10:39:34 yamt Exp */
3 3
4/*- 4/*-
5 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 5 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum, and by Jason R. Thorpe. 9 * by Charles M. Hannum, and by Jason R. Thorpe.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#ifndef _XEN_INTR_H_ 33#ifndef _XEN_INTR_H_
34#define _XEN_INTR_H_ 34#define _XEN_INTR_H_
35 35
36#include <machine/intrdefs.h> 36#include <machine/intrdefs.h>
37 37
38#ifndef _LOCORE 38#ifndef _LOCORE
39#include <xen/include/public/xen.h> 39#include <xen/include/public/xen.h>
40#include <xen/include/public/event_channel.h> 40#include <xen/include/public/event_channel.h>
41#include <x86/intr.h> 41#include <x86/intr.h>
42#include <xen/xen.h> 42#include <xen/xen.h>
43#include <xen/hypervisor.h> 43#include <xen/hypervisor.h>
44#include <machine/pic.h> 44#include <machine/pic.h>
45#include <sys/evcnt.h> 45#include <sys/evcnt.h>
46 46
47#include "opt_xen.h" 47#include "opt_xen.h"
48 48
49 49
50struct cpu_info; 50struct cpu_info;
51/* 51/*
52 * Struct describing an event channel.  52 * Struct describing an event channel.
53 */ 53 */
54 54
55struct evtsource { 55struct evtsource {
56 int ev_maxlevel; /* max. IPL for this source */ 56 int ev_maxlevel; /* max. IPL for this source */
57 uint32_t ev_imask; /* interrupt mask */ 57 uint32_t ev_imask; /* interrupt mask */
58 struct intrhand *ev_handlers; /* handler chain */ 58 struct intrhand *ev_handlers; /* handler chain */
59 struct evcnt ev_evcnt; /* interrupt counter */ 59 struct evcnt ev_evcnt; /* interrupt counter */
60 struct cpu_info *ev_cpu; /* cpu on which this event is bound */ 60 struct cpu_info *ev_cpu; /* cpu on which this event is bound */
61 char ev_intrname[32]; /* interrupt string */ 61 char ev_intrname[32]; /* interrupt string */
62 char ev_xname[64]; /* handler device list */ 62 char ev_xname[64]; /* handler device list */
63}; 63};
64 64
65#define XMASK(ci,level) (ci)->ci_xmask[(level)] 65#define XMASK(ci,level) (ci)->ci_xmask[(level)]
66#define XUNMASK(ci,level) (ci)->ci_xunmask[(level)] 66#define XUNMASK(ci,level) (ci)->ci_xunmask[(level)]
67 67
68extern struct intrstub xenev_stubs[]; 68extern struct intrstub xenev_stubs[];
69extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invaid */ 69extern int irq2port[NR_EVENT_CHANNELS]; /* actually port + 1, so that 0 is invaid */
70 70
71#ifdef MULTIPROCESSOR 71#ifdef MULTIPROCESSOR
72int xen_intr_biglock_wrapper(void *); 72int xen_intr_biglock_wrapper(void *);
73#endif 73#endif
74 74
75#if defined(DOM0OPS) || NPCI > 0 75#if defined(DOM0OPS) || NPCI > 0
76int xen_vec_alloc(int); 76int xen_vec_alloc(int);
77int xen_pic_to_gsi(struct pic *, int); 77int xen_pic_to_gsi(struct pic *, int);
78#endif /* defined(DOM0OPS) || NPCI > 0 */ 78#endif /* defined(DOM0OPS) || NPCI > 0 */
79 79
80#ifdef MULTIPROCESSOR 80#ifdef MULTIPROCESSOR
81void xen_ipi_init(void); 81void xen_ipi_init(void);
82int xen_send_ipi(struct cpu_info *, uint32_t); 82int xen_send_ipi(struct cpu_info *, uint32_t);
83void xen_broadcast_ipi(uint32_t); 83void xen_broadcast_ipi(uint32_t);
84#else 84#else
85#define xen_ipi_init(_1) ((void) 0) /* nothing */ 85#define xen_ipi_init(_1) ((void) 0) /* nothing */
86#define xen_send_ipi(_i1, _i2) (0) /* nothing */ 86#define xen_send_ipi(_i1, _i2) (0) /* nothing */
87#define xen_broadcast_ipi(_i1) ((void) 0) /* nothing */ 87#define xen_broadcast_ipi(_i1) ((void) 0) /* nothing */
88#endif /* MULTIPROCESSOR */ 88#endif /* MULTIPROCESSOR */
89 89
90void *xen_intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *), 90void *xen_intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *),
91 void *, bool, const char *); 91 void *, bool, const char *);
92void *xen_intr_establish(int, struct pic *, int, int, int, int (*)(void *), 92void *xen_intr_establish(int, struct pic *, int, int, int, int (*)(void *),
93 void *, bool); 93 void *, bool);
94void xen_intr_mask(struct intrhand *); 94void xen_intr_mask(struct intrhand *);
95void xen_intr_unmask(struct intrhand *); 95void xen_intr_unmask(struct intrhand *);
96void xen_intr_disestablish(struct intrhand *); 96void xen_intr_disestablish(struct intrhand *);
97 97
 98struct intrsource *xen_intr_allocate_io_intrsource(const char *);
 99void xen_intr_free_io_intrsource(const char *);
 100
98#endif /* !_LOCORE */ 101#endif /* !_LOCORE */
99 102
100#endif /* _XEN_INTR_H_ */ 103#endif /* _XEN_INTR_H_ */

cvs diff -r1.22 -r1.23 src/sys/arch/xen/x86/xen_intr.c (switch to unified diff)

--- src/sys/arch/xen/x86/xen_intr.c 2020/04/13 22:54:12 1.22
+++ src/sys/arch/xen/x86/xen_intr.c 2020/04/21 19:03:51 1.23
@@ -1,545 +1,566 @@ @@ -1,545 +1,566 @@
1/* $NetBSD: xen_intr.c,v 1.22 2020/04/13 22:54:12 bouyer Exp $ */ 1/* $NetBSD: xen_intr.c,v 1.23 2020/04/21 19:03:51 jdolecek Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Jason R. Thorpe. 8 * by Charles M. Hannum, and by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.22 2020/04/13 22:54:12 bouyer Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: xen_intr.c,v 1.23 2020/04/21 19:03:51 jdolecek Exp $");
34 34
35#include "opt_multiprocessor.h" 35#include "opt_multiprocessor.h"
36 36
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/kernel.h> 38#include <sys/kernel.h>
39#include <sys/kmem.h> 39#include <sys/kmem.h>
40#include <sys/cpu.h> 40#include <sys/cpu.h>
41#include <sys/device.h> 41#include <sys/device.h>
42 42
43#include <xen/evtchn.h> 43#include <xen/evtchn.h>
44#include <xen/xenfunc.h> 44#include <xen/xenfunc.h>
45 45
46#include <uvm/uvm.h> 46#include <uvm/uvm.h>
47 47
48#include <machine/cpu.h> 48#include <machine/cpu.h>
49#include <machine/intr.h> 49#include <machine/intr.h>
50 50
51#include "acpica.h" 51#include "acpica.h"
52#include "ioapic.h" 52#include "ioapic.h"
53#include "lapic.h" 53#include "lapic.h"
54#include "pci.h" 54#include "pci.h"
55 55
56#if NACPICA > 0 56#if NACPICA > 0
57#include <dev/acpi/acpivar.h> 57#include <dev/acpi/acpivar.h>
58#endif 58#endif
59 59
60#if NIOAPIC > 0 || NACPICA > 0 60#if NIOAPIC > 0 || NACPICA > 0
61#include <machine/i82093var.h> 61#include <machine/i82093var.h>
62#endif 62#endif
63 63
64#if NLAPIC > 0 64#if NLAPIC > 0
65#include <machine/i82489var.h> 65#include <machine/i82489var.h>
66#endif 66#endif
67 67
68#if NPCI > 0 68#if NPCI > 0
69#include <dev/pci/ppbreg.h> 69#include <dev/pci/ppbreg.h>
 70#ifdef __HAVE_PCI_MSI_MSIX
 71#include <x86/pci/msipic.h>
 72#include <x86/pci/pci_msi_machdep.h>
 73#endif
70#endif 74#endif
71 75
72#if defined(MULTIPROCESSOR) 76#if defined(MULTIPROCESSOR)
73static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES; 77static const char *xen_ipi_names[XEN_NIPIS] = XEN_IPI_NAMES;
74#endif 78#endif
75 79
76/* 80/*
77 * Restore a value to cpl (unmasking interrupts). If any unmasked 81 * Restore a value to cpl (unmasking interrupts). If any unmasked
78 * interrupts are pending, call Xspllower() to process them. 82 * interrupts are pending, call Xspllower() to process them.
79 */ 83 */
80void xen_spllower(int nlevel); 84void xen_spllower(int nlevel);
81 85
82void 86void
83xen_spllower(int nlevel) 87xen_spllower(int nlevel)
84{ 88{
85 struct cpu_info *ci = curcpu(); 89 struct cpu_info *ci = curcpu();
86 uint32_t xmask; 90 uint32_t xmask;
87 u_long psl; 91 u_long psl;
88 92
89 if (ci->ci_ilevel <= nlevel) 93 if (ci->ci_ilevel <= nlevel)
90 return; 94 return;
91 95
92 __insn_barrier(); 96 __insn_barrier();
93 97
94 xmask = XUNMASK(ci, nlevel); 98 xmask = XUNMASK(ci, nlevel);
95 psl = xen_read_psl(); 99 psl = xen_read_psl();
96 x86_disable_intr(); 100 x86_disable_intr();
97 if (ci->ci_xpending & xmask) { 101 if (ci->ci_xpending & xmask) {
98 KASSERT(psl == 0); 102 KASSERT(psl == 0);
99 Xspllower(nlevel); 103 Xspllower(nlevel);
100 /* Xspllower does enable_intr() */ 104 /* Xspllower does enable_intr() */
101 } else { 105 } else {
102 ci->ci_ilevel = nlevel; 106 ci->ci_ilevel = nlevel;
103 xen_write_psl(psl); 107 xen_write_psl(psl);
104 } 108 }
105} 109}
106 110
107 111
108#if !defined(XENPVHVM) 112#if !defined(XENPVHVM)
109void 113void
110x86_disable_intr(void) 114x86_disable_intr(void)
111{ 115{
112 curcpu()->ci_vcpu->evtchn_upcall_mask = 1; 116 curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
113 x86_lfence(); 117 x86_lfence();
114} 118}
115 119
116void 120void
117x86_enable_intr(void) 121x86_enable_intr(void)
118{ 122{
119 volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; 123 volatile struct vcpu_info *_vci = curcpu()->ci_vcpu;
120 __insn_barrier(); 124 __insn_barrier();
121 _vci->evtchn_upcall_mask = 0; 125 _vci->evtchn_upcall_mask = 0;
122 x86_lfence(); /* unmask then check (avoid races) */ 126 x86_lfence(); /* unmask then check (avoid races) */
123 if (__predict_false(_vci->evtchn_upcall_pending)) 127 if (__predict_false(_vci->evtchn_upcall_pending))
124 hypervisor_force_callback(); 128 hypervisor_force_callback();
125} 129}
126 130
127#endif /* !XENPVHVM */ 131#endif /* !XENPVHVM */
128 132
129u_long 133u_long
130xen_read_psl(void) 134xen_read_psl(void)
131{ 135{
132 136
133 return (curcpu()->ci_vcpu->evtchn_upcall_mask); 137 return (curcpu()->ci_vcpu->evtchn_upcall_mask);
134} 138}
135 139
136void 140void
137xen_write_psl(u_long psl) 141xen_write_psl(u_long psl)
138{ 142{
139 struct cpu_info *ci = curcpu(); 143 struct cpu_info *ci = curcpu();
140 144
141 ci->ci_vcpu->evtchn_upcall_mask = psl; 145 ci->ci_vcpu->evtchn_upcall_mask = psl;
142 xen_rmb(); 146 xen_rmb();
143 if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) { 147 if (ci->ci_vcpu->evtchn_upcall_pending && psl == 0) {
144 hypervisor_force_callback(); 148 hypervisor_force_callback();
145 } 149 }
146} 150}
147 151
148void * 152void *
149xen_intr_establish(int legacy_irq, struct pic *pic, int pin, 153xen_intr_establish(int legacy_irq, struct pic *pic, int pin,
150 int type, int level, int (*handler)(void *), void *arg, 154 int type, int level, int (*handler)(void *), void *arg,
151 bool known_mpsafe) 155 bool known_mpsafe)
152{ 156{
153 157
154 return xen_intr_establish_xname(legacy_irq, pic, pin, type, level, 158 return xen_intr_establish_xname(legacy_irq, pic, pin, type, level,
155 handler, arg, known_mpsafe, "XEN"); 159 handler, arg, known_mpsafe, "XEN");
156} 160}
157 161
158void * 162void *
159xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin, 163xen_intr_establish_xname(int legacy_irq, struct pic *pic, int pin,
160 int type, int level, int (*handler)(void *), void *arg, 164 int type, int level, int (*handler)(void *), void *arg,
161 bool known_mpsafe, const char *xname) 165 bool known_mpsafe, const char *xname)
162{ 166{
163 const char *intrstr; 167 const char *intrstr;
164 char intrstr_buf[INTRIDBUF]; 168 char intrstr_buf[INTRIDBUF];
165 169
166 if (pic->pic_type == PIC_XEN) { 170 if (pic->pic_type == PIC_XEN) {
167 struct intrhand *rih; 171 struct intrhand *rih;
168 172
169 intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf, 173 intrstr = intr_create_intrid(legacy_irq, pic, pin, intrstr_buf,
170 sizeof(intrstr_buf)); 174 sizeof(intrstr_buf));
171 175
172 event_set_handler(pin, handler, arg, level, intrstr, xname, 176 event_set_handler(pin, handler, arg, level, intrstr, xname,
173 known_mpsafe, true); 177 known_mpsafe, true);
174 178
175 rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP); 179 rih = kmem_zalloc(sizeof(*rih), cold ? KM_NOSLEEP : KM_SLEEP);
176 if (rih == NULL) { 180 if (rih == NULL) {
177 printf("%s: can't allocate handler info\n", __func__); 181 printf("%s: can't allocate handler info\n", __func__);
178 return NULL; 182 return NULL;
179 } 183 }
180 184
181 /* 185 /*
182 * XXX: 186 * XXX:
183 * This is just a copy for API conformance. 187 * This is just a copy for API conformance.
184 * The real ih is lost in the innards of 188 * The real ih is lost in the innards of
185 * event_set_handler(); where the details of 189 * event_set_handler(); where the details of
186 * biglock_wrapper etc are taken care of. 190 * biglock_wrapper etc are taken care of.
187 * All that goes away when we nuke event_set_handler() 191 * All that goes away when we nuke event_set_handler()
188 * et. al. and unify with x86/intr.c 192 * et. al. and unify with x86/intr.c
189 */ 193 */
190 rih->ih_pin = pin; /* port */ 194 rih->ih_pin = pin; /* port */
191 rih->ih_fun = rih->ih_realfun = handler; 195 rih->ih_fun = rih->ih_realfun = handler;
192 rih->ih_arg = rih->ih_realarg = arg; 196 rih->ih_arg = rih->ih_realarg = arg;
193 rih->pic_type = pic->pic_type; 197 rih->pic_type = pic->pic_type;
194 return rih; 198 return rih;
195 } /* Else we assume pintr */ 199 } /* Else we assume pintr */
196 200
197#if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */ 201#if (NPCI > 0 || NISA > 0) && defined(XENPV) /* XXX: support PVHVM pirq */
198 struct pintrhand *pih; 202 struct pintrhand *pih;
199 int gsi; 203 int gsi;
200 int vector, evtchn; 204 int vector, evtchn;
201 205
202 KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS), 206 KASSERTMSG(legacy_irq == -1 || (0 <= legacy_irq && legacy_irq < NUM_XEN_IRQS),
203 "bad legacy IRQ value: %d", legacy_irq); 207 "bad legacy IRQ value: %d", legacy_irq);
204 KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic), 208 KASSERTMSG(!(legacy_irq == -1 && pic == &i8259_pic),
205 "non-legacy IRQon i8259 "); 209 "non-legacy IRQon i8259 ");
206 210
207 gsi = xen_pic_to_gsi(pic, pin); 211 gsi = xen_pic_to_gsi(pic, pin);
208 212
209 intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf, 213 intrstr = intr_create_intrid(gsi, pic, pin, intrstr_buf,
210 sizeof(intrstr_buf)); 214 sizeof(intrstr_buf));
211 215
212 vector = xen_vec_alloc(gsi); 216 vector = xen_vec_alloc(gsi);
213 217
214 if (irq2port[gsi] == 0) { 218 if (irq2port[gsi] == 0) {
215 extern struct cpu_info phycpu_info_primary; /* XXX */ 219 extern struct cpu_info phycpu_info_primary; /* XXX */
216 struct cpu_info *ci = &phycpu_info_primary; 220 struct cpu_info *ci = &phycpu_info_primary;
217 221
218 pic->pic_addroute(pic, ci, pin, vector, type); 222 pic->pic_addroute(pic, ci, pin, vector, type);
219 223
220 evtchn = bind_pirq_to_evtch(gsi); 224 evtchn = bind_pirq_to_evtch(gsi);
221 KASSERT(evtchn > 0); 225 KASSERT(evtchn > 0);
222 KASSERT(evtchn < NR_EVENT_CHANNELS); 226 KASSERT(evtchn < NR_EVENT_CHANNELS);
223 irq2port[gsi] = evtchn + 1; 227 irq2port[gsi] = evtchn + 1;
224 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn); 228 xen_atomic_set_bit(&ci->ci_evtmask[0], evtchn);
225 } else { 229 } else {
226 /* 230 /*
227 * Shared interrupt - we can't rebind. 231 * Shared interrupt - we can't rebind.
228 * The port is shared instead. 232 * The port is shared instead.
229 */ 233 */
230 evtchn = irq2port[gsi] - 1; 234 evtchn = irq2port[gsi] - 1;
231 } 235 }
232 236
233 pih = pirq_establish(gsi, evtchn, handler, arg, level, 237 pih = pirq_establish(gsi, evtchn, handler, arg, level,
234 intrstr, xname, known_mpsafe); 238 intrstr, xname, known_mpsafe);
235 pih->pic_type = pic->pic_type; 239 pih->pic_type = pic->pic_type;
236 return pih; 240 return pih;
237#endif /* NPCI > 0 || NISA > 0 */ 241#endif /* NPCI > 0 || NISA > 0 */
238 242
239 /* FALLTHROUGH */ 243 /* FALLTHROUGH */
240 return NULL; 244 return NULL;
241} 245}
242 246
243/* 247/*
244 * Mask an interrupt source. 248 * Mask an interrupt source.
245 */ 249 */
246void 250void
247xen_intr_mask(struct intrhand *ih) 251xen_intr_mask(struct intrhand *ih)
248{ 252{
249 /* XXX */ 253 /* XXX */
250 panic("xen_intr_mask: not yet implemented."); 254 panic("xen_intr_mask: not yet implemented.");
251} 255}
252 256
253/* 257/*
254 * Unmask an interrupt source. 258 * Unmask an interrupt source.
255 */ 259 */
256void 260void
257xen_intr_unmask(struct intrhand *ih) 261xen_intr_unmask(struct intrhand *ih)
258{ 262{
259 /* XXX */ 263 /* XXX */
260 panic("xen_intr_unmask: not yet implemented."); 264 panic("xen_intr_unmask: not yet implemented.");
261} 265}
262 266
263/* 267/*
264 * Deregister an interrupt handler. 268 * Deregister an interrupt handler.
265 */ 269 */
266void 270void
267xen_intr_disestablish(struct intrhand *ih) 271xen_intr_disestablish(struct intrhand *ih)
268{ 272{
269 273
270 if (ih->pic_type == PIC_XEN) { 274 if (ih->pic_type == PIC_XEN) {
271 event_remove_handler(ih->ih_pin, ih->ih_realfun, 275 event_remove_handler(ih->ih_pin, ih->ih_realfun,
272 ih->ih_realarg); 276 ih->ih_realarg);
273 kmem_free(ih, sizeof(*ih)); 277 kmem_free(ih, sizeof(*ih));
274 return; 278 return;
275 } 279 }
276#if defined(DOM0OPS) 280#if defined(DOM0OPS)
277 /*  281 /*
278 * Cache state, to prevent a use after free situation with 282 * Cache state, to prevent a use after free situation with
279 * ih. 283 * ih.
280 */ 284 */
281 285
282 struct pintrhand *pih = (struct pintrhand *)ih; 286 struct pintrhand *pih = (struct pintrhand *)ih;
283 287
284 int pirq = pih->pirq; 288 int pirq = pih->pirq;
285 int port = pih->evtch; 289 int port = pih->evtch;
286 KASSERT(irq2port[pirq] != 0); 290 KASSERT(irq2port[pirq] != 0);
287 291
288 pirq_disestablish(pih); 292 pirq_disestablish(pih);
289 293
290 if (evtsource[port] == NULL) { 294 if (evtsource[port] == NULL) {
291 /* 295 /*
292 * Last handler was removed by 296 * Last handler was removed by
293 * event_remove_handler(). 297 * event_remove_handler().
294 * 298 *
295 * We can safely unbind the pirq now. 299 * We can safely unbind the pirq now.
296 */ 300 */
297 301
298 port = unbind_pirq_from_evtch(pirq); 302 port = unbind_pirq_from_evtch(pirq);
299 KASSERT(port == pih->evtch); 303 KASSERT(port == pih->evtch);
300 irq2port[pirq] = 0; 304 irq2port[pirq] = 0;
301 } 305 }
302#endif 306#endif
303 return; 307 return;
304} 308}
305 309
306/* MI interface for kern_cpu.c */ 310/* MI interface for kern_cpu.c */
307void xen_cpu_intr_redistribute(void); 311void xen_cpu_intr_redistribute(void);
308 312
309void 313void
310xen_cpu_intr_redistribute(void) 314xen_cpu_intr_redistribute(void)
311{ 315{
312 KASSERT(mutex_owned(&cpu_lock)); 316 KASSERT(mutex_owned(&cpu_lock));
313 KASSERT(mp_online); 317 KASSERT(mp_online);
314 318
315 return; 319 return;
316} 320}
317 321
318/* MD - called by x86/cpu.c */ 322/* MD - called by x86/cpu.c */
319#if defined(INTRSTACKSIZE) 323#if defined(INTRSTACKSIZE)
320static inline bool 324static inline bool
321redzone_const_or_false(bool x) 325redzone_const_or_false(bool x)
322{ 326{
323#ifdef DIAGNOSTIC 327#ifdef DIAGNOSTIC
324 return x; 328 return x;
325#else 329#else
326 return false; 330 return false;
327#endif /* !DIAGNOSTIC */ 331#endif /* !DIAGNOSTIC */
328} 332}
329 333
330static inline int 334static inline int
331redzone_const_or_zero(int x) 335redzone_const_or_zero(int x)
332{ 336{
333 return redzone_const_or_false(true) ? x : 0; 337 return redzone_const_or_false(true) ? x : 0;
334} 338}
335#endif 339#endif
336 340
337void xen_cpu_intr_init(struct cpu_info *); 341void xen_cpu_intr_init(struct cpu_info *);
338void 342void
339xen_cpu_intr_init(struct cpu_info *ci) 343xen_cpu_intr_init(struct cpu_info *ci)
340{ 344{
341 int i; /* XXX: duplicate */ 345 int i; /* XXX: duplicate */
342 346
343 ci->ci_xunmask[0] = 0xfffffffe; 347 ci->ci_xunmask[0] = 0xfffffffe;
344 for (i = 1; i < NIPL; i++) 348 for (i = 1; i < NIPL; i++)
345 ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i); 349 ci->ci_xunmask[i] = ci->ci_xunmask[i - 1] & ~(1 << i);
346 350
347#if defined(INTRSTACKSIZE) 351#if defined(INTRSTACKSIZE)
348 vaddr_t istack; 352 vaddr_t istack;
349 353
350 /* 354 /*
351 * If the red zone is activated, protect both the top and 355 * If the red zone is activated, protect both the top and
352 * the bottom of the stack with an unmapped page. 356 * the bottom of the stack with an unmapped page.
353 */ 357 */
354 istack = uvm_km_alloc(kernel_map, 358 istack = uvm_km_alloc(kernel_map,
355 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, 359 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
356 UVM_KMF_WIRED|UVM_KMF_ZERO); 360 UVM_KMF_WIRED|UVM_KMF_ZERO);
357 if (redzone_const_or_false(true)) { 361 if (redzone_const_or_false(true)) {
358 pmap_kremove(istack, PAGE_SIZE); 362 pmap_kremove(istack, PAGE_SIZE);
359 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); 363 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
360 pmap_update(pmap_kernel()); 364 pmap_update(pmap_kernel());
361 } 365 }
362 366
363 /* 367 /*
364 * 33 used to be 1. Arbitrarily reserve 32 more register_t's 368 * 33 used to be 1. Arbitrarily reserve 32 more register_t's
365 * of space for ddb(4) to examine some subroutine arguments 369 * of space for ddb(4) to examine some subroutine arguments
366 * and to hunt for the next stack frame. 370 * and to hunt for the next stack frame.
367 */ 371 */
368 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) + 372 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
369 INTRSTACKSIZE - 33 * sizeof(register_t); 373 INTRSTACKSIZE - 33 * sizeof(register_t);
370#endif 374#endif
371 375
372#ifdef MULTIPROCESSOR 376#ifdef MULTIPROCESSOR
373 for (i = 0; i < XEN_NIPIS; i++) 377 for (i = 0; i < XEN_NIPIS; i++)
374 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, 378 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
375 NULL, device_xname(ci->ci_dev), xen_ipi_names[i]); 379 NULL, device_xname(ci->ci_dev), xen_ipi_names[i]);
376#endif 380#endif
377 381
378 ci->ci_idepth = -1; 382 ci->ci_idepth = -1;
379} 383}
380 384
381/* 385/*
382 * Everything below from here is duplicated from x86/intr.c 386 * Everything below from here is duplicated from x86/intr.c
383 * When intr.c and xen_intr.c are unified, these will need to be 387 * When intr.c and xen_intr.c are unified, these will need to be
384 * merged. 388 * merged.
385 */ 389 */
386 390
387u_int xen_cpu_intr_count(struct cpu_info *ci); 391u_int xen_cpu_intr_count(struct cpu_info *ci);
388 392
389u_int 393u_int
390xen_cpu_intr_count(struct cpu_info *ci) 394xen_cpu_intr_count(struct cpu_info *ci)
391{ 395{
392 396
393 KASSERT(ci->ci_nintrhand >= 0); 397 KASSERT(ci->ci_nintrhand >= 0);
394 398
395 return ci->ci_nintrhand; 399 return ci->ci_nintrhand;
396} 400}
397 401
398static const char * 402static const char *
399xen_intr_string(int port, char *buf, size_t len, struct pic *pic) 403xen_intr_string(int port, char *buf, size_t len, struct pic *pic)
400{ 404{
401 KASSERT(pic->pic_type == PIC_XEN); 405 KASSERT(pic->pic_type == PIC_XEN);
402 406
403 KASSERT(port >= 0); 407 KASSERT(port >= 0);
404 KASSERT(port < NR_EVENT_CHANNELS); 408 KASSERT(port < NR_EVENT_CHANNELS);
405 409
406 snprintf(buf, len, "%s channel %d", pic->pic_name, port); 410 snprintf(buf, len, "%s channel %d", pic->pic_name, port);
407 411
408 return buf; 412 return buf;
409} 413}
410 414
411static const char * 415static const char *
412legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic) 416legacy_intr_string(int ih, char *buf, size_t len, struct pic *pic)
413{ 417{
414 int legacy_irq; 418 int legacy_irq;
415 419
416 KASSERT(pic->pic_type == PIC_I8259); 420 KASSERT(pic->pic_type == PIC_I8259);
417#if NLAPIC > 0 421#if NLAPIC > 0
418 KASSERT(APIC_IRQ_ISLEGACY(ih)); 422 KASSERT(APIC_IRQ_ISLEGACY(ih));
419 423
420 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih); 424 legacy_irq = APIC_IRQ_LEGACY_IRQ(ih);
421#else 425#else
422 legacy_irq = ih; 426 legacy_irq = ih;
423#endif 427#endif
424 KASSERT(legacy_irq >= 0 && legacy_irq < 16); 428 KASSERT(legacy_irq >= 0 && legacy_irq < 16);
425 429
426 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq); 430 snprintf(buf, len, "%s pin %d", pic->pic_name, legacy_irq);
427 431
428 return buf; 432 return buf;
429} 433}
430 434
431const char * xintr_string(intr_handle_t ih, char *buf, size_t len); 435const char * xintr_string(intr_handle_t ih, char *buf, size_t len);
432 436
433const char * 437const char *
434xintr_string(intr_handle_t ih, char *buf, size_t len) 438xintr_string(intr_handle_t ih, char *buf, size_t len)
435{ 439{
436#if NIOAPIC > 0 440#if NIOAPIC > 0
437 struct ioapic_softc *pic; 441 struct ioapic_softc *pic;
438#endif 442#endif
439 443
440 if (ih == 0) 444 if (ih == 0)
441 panic("%s: bogus handle 0x%" PRIx64, __func__, ih); 445 panic("%s: bogus handle 0x%" PRIx64, __func__, ih);
442 446
443#if NIOAPIC > 0 447#if NIOAPIC > 0
444 if (ih & APIC_INT_VIA_APIC) { 448 if (ih & APIC_INT_VIA_APIC) {
445 pic = ioapic_find(APIC_IRQ_APIC(ih)); 449 pic = ioapic_find(APIC_IRQ_APIC(ih));
446 if (pic != NULL) { 450 if (pic != NULL) {
447 snprintf(buf, len, "%s pin %d", 451 snprintf(buf, len, "%s pin %d",
448 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih)); 452 device_xname(pic->sc_dev), APIC_IRQ_PIN(ih));
449 } else { 453 } else {
450 snprintf(buf, len, 454 snprintf(buf, len,
451 "apic %d int %d (irq %d)", 455 "apic %d int %d (irq %d)",
452 APIC_IRQ_APIC(ih), 456 APIC_IRQ_APIC(ih),
453 APIC_IRQ_PIN(ih), 457 APIC_IRQ_PIN(ih),
454 APIC_IRQ_LEGACY_IRQ(ih)); 458 APIC_IRQ_LEGACY_IRQ(ih));
455 } 459 }
456 } else 460 } else
457 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); 461 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
458 462
459#elif NLAPIC > 0 463#elif NLAPIC > 0
460 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih)); 464 snprintf(buf, len, "irq %d", APIC_IRQ_LEGACY_IRQ(ih));
461#else 465#else
462 snprintf(buf, len, "irq %d", (int) ih); 466 snprintf(buf, len, "irq %d", (int) ih);
463#endif 467#endif
464 return buf; 468 return buf;
465 469
466} 470}
467 471
468/* 472/*
469 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used 473 * Create an interrupt id such as "ioapic0 pin 9". This interrupt id is used
470 * by MI code and intrctl(8). 474 * by MI code and intrctl(8).
471 */ 475 */
472const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic, 476const char * xen_intr_create_intrid(int legacy_irq, struct pic *pic,
473 int pin, char *buf, size_t len); 477 int pin, char *buf, size_t len);
474 478
475const char * 479const char *
476xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len) 480xen_intr_create_intrid(int legacy_irq, struct pic *pic, int pin, char *buf, size_t len)
477{ 481{
478 int ih = 0; 482 int ih = 0;
479 483
480#if NPCI > 0 484#if NPCI > 0
481#if defined(__HAVE_PCI_MSI_MSIX) 485#if defined(__HAVE_PCI_MSI_MSIX)
482 if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) { 486 if ((pic->pic_type == PIC_MSI) || (pic->pic_type == PIC_MSIX)) {
483 uint64_t pih; 487 uint64_t pih;
484 int dev, vec; 488 int dev, vec;
485 489
486 dev = msipic_get_devid(pic); 490 dev = msipic_get_devid(pic);
487 vec = pin; 491 vec = pin;
488 pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK) 492 pih = __SHIFTIN((uint64_t)dev, MSI_INT_DEV_MASK)
489 | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK) 493 | __SHIFTIN((uint64_t)vec, MSI_INT_VEC_MASK)
490 | APIC_INT_VIA_MSI; 494 | APIC_INT_VIA_MSI;
491 if (pic->pic_type == PIC_MSI) 495 if (pic->pic_type == PIC_MSI)
492 MSI_INT_MAKE_MSI(pih); 496 MSI_INT_MAKE_MSI(pih);
493 else if (pic->pic_type == PIC_MSIX) 497 else if (pic->pic_type == PIC_MSIX)
494 MSI_INT_MAKE_MSIX(pih); 498 MSI_INT_MAKE_MSIX(pih);
495 499
496 return x86_pci_msi_string(NULL, pih, buf, len); 500 return x86_pci_msi_string(NULL, pih, buf, len);
497 } 501 }
498#endif /* __HAVE_PCI_MSI_MSIX */ 502#endif /* __HAVE_PCI_MSI_MSIX */
499#endif 503#endif
500 504
501 if (pic->pic_type == PIC_XEN) { 505 if (pic->pic_type == PIC_XEN) {
502 ih = pin; /* Port == pin */ 506 ih = pin; /* Port == pin */
503 return xen_intr_string(pin, buf, len, pic); 507 return xen_intr_string(pin, buf, len, pic);
504 } 508 }
505 509
506 /* 510 /*
507 * If the device is pci, "legacy_irq" is alway -1. Least 8 bit of "ih" 511 * If the device is pci, "legacy_irq" is alway -1. Least 8 bit of "ih"
508 * is only used in intr_string() to show the irq number. 512 * is only used in intr_string() to show the irq number.
509 * If the device is "legacy"(such as floppy), it should not use 513 * If the device is "legacy"(such as floppy), it should not use
510 * intr_string(). 514 * intr_string().
511 */ 515 */
512 if (pic->pic_type == PIC_I8259) { 516 if (pic->pic_type == PIC_I8259) {
513 ih = legacy_irq; 517 ih = legacy_irq;
514 return legacy_intr_string(ih, buf, len, pic); 518 return legacy_intr_string(ih, buf, len, pic);
515 } 519 }
516 520
517#if NIOAPIC > 0 || NACPICA > 0 521#if NIOAPIC > 0 || NACPICA > 0
518 ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK) 522 ih = ((pic->pic_apicid << APIC_INT_APIC_SHIFT) & APIC_INT_APIC_MASK)
519 | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK); 523 | ((pin << APIC_INT_PIN_SHIFT) & APIC_INT_PIN_MASK);
520 if (pic->pic_type == PIC_IOAPIC) { 524 if (pic->pic_type == PIC_IOAPIC) {
521 ih |= APIC_INT_VIA_APIC; 525 ih |= APIC_INT_VIA_APIC;
522 } 526 }
523 ih |= pin; 527 ih |= pin;
524 return intr_string(ih, buf, len); 528 return intr_string(ih, buf, len);
525#endif 529#endif
526 530
527 return NULL; /* No pic found! */ 531 return NULL; /* No pic found! */
528} 532}
529 533
/*
 * Placeholder interrupt source handed out by the stub allocator below;
 * Xen keeps no per-source state here.
 */
static struct intrsource xen_dummy_intrsource;

/*
 * Stub for the MI intr_allocate_io_intrsource() entry point (aliased
 * below).  The MSI code requires the hook to exist; "intrid" is ignored
 * and a shared dummy source is returned so callers always get a
 * non-NULL result.
 */
struct intrsource *
xen_intr_allocate_io_intrsource(const char *intrid)
{
	/* Nothing to do, required by MSI code */
	return &xen_dummy_intrsource;
}
 542
/*
 * Stub for the MI intr_free_io_intrsource() entry point (aliased
 * below).  Nothing was allocated by the matching allocate stub, so
 * there is nothing to release; "intrid" is ignored.
 */
void
xen_intr_free_io_intrsource(const char *intrid)
{
	/* Nothing to do, required by MSI code */
}
 548
#if !defined(XENPVHVM)
/*
 * On pure PV (non-PVHVM) kernels the MI x86 interrupt entry points are
 * provided directly by the Xen implementations in this file via
 * link-time strong aliases.
 */
__strong_alias(spllower, xen_spllower);
__strong_alias(x86_read_psl, xen_read_psl);
__strong_alias(x86_write_psl, xen_write_psl);

__strong_alias(intr_string, xintr_string);
__strong_alias(intr_create_intrid, xen_intr_create_intrid);
__strong_alias(intr_establish, xen_intr_establish);
__strong_alias(intr_establish_xname, xen_intr_establish_xname);
__strong_alias(intr_mask, xen_intr_mask);
__strong_alias(intr_unmask, xen_intr_unmask);
__strong_alias(intr_disestablish, xen_intr_disestablish);
__strong_alias(cpu_intr_redistribute, xen_cpu_intr_redistribute);
__strong_alias(cpu_intr_count, xen_cpu_intr_count);
__strong_alias(cpu_intr_init, xen_cpu_intr_init);
/* Stubs above satisfy the MSI code's io_intrsource hooks. */
__strong_alias(intr_allocate_io_intrsource, xen_intr_allocate_io_intrsource);
__strong_alias(intr_free_io_intrsource, xen_intr_free_io_intrsource);
#endif /* !XENPVHVM */