Mon Aug 1 10:42:24 2011 UTC ()
When checking whether an interrupt is shared, don't compare pin numbers
if the pin is "-1" -- this is a hack to allow MSIs, which don't have a
concept of pin numbers and are generally not shared.
(This doesn't give us sensible event names for statistics display. The
whole abstraction has more exceptions than regular cases; it should
be redesigned, in my opinion.)


(drochner)
diff -r1.42 -r1.43 src/sys/arch/x86/include/intr.h
diff -r1.71 -r1.72 src/sys/arch/x86/x86/intr.c

cvs diff -r1.42 -r1.43 src/sys/arch/x86/include/intr.h (switch to unified diff)

--- src/sys/arch/x86/include/intr.h 2011/04/03 22:29:27 1.42
+++ src/sys/arch/x86/include/intr.h 2011/08/01 10:42:23 1.43
@@ -1,194 +1,195 @@ @@ -1,194 +1,195 @@
1/* $NetBSD: intr.h,v 1.42 2011/04/03 22:29:27 dyoung Exp $ */ 1/* $NetBSD: intr.h,v 1.43 2011/08/01 10:42:23 drochner Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Jason R. Thorpe. 8 * by Charles M. Hannum, and by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#ifndef _X86_INTR_H_ 32#ifndef _X86_INTR_H_
33#define _X86_INTR_H_ 33#define _X86_INTR_H_
34 34
35#define __HAVE_FAST_SOFTINTS 35#define __HAVE_FAST_SOFTINTS
36#define __HAVE_PREEMPTION 36#define __HAVE_PREEMPTION
37 37
38#ifdef _KERNEL 38#ifdef _KERNEL
39#include <sys/types.h> 39#include <sys/types.h>
40#else 40#else
41#include <stdbool.h> 41#include <stdbool.h>
42#endif 42#endif
43 43
44#include <sys/evcnt.h> 44#include <sys/evcnt.h>
45#include <machine/intrdefs.h> 45#include <machine/intrdefs.h>
46 46
47#ifndef _LOCORE 47#ifndef _LOCORE
48#include <machine/pic.h> 48#include <machine/pic.h>
49 49
50/* 50/*
51 * Struct describing an interrupt source for a CPU. struct cpu_info 51 * Struct describing an interrupt source for a CPU. struct cpu_info
52 * has an array of MAX_INTR_SOURCES of these. The index in the array 52 * has an array of MAX_INTR_SOURCES of these. The index in the array
53 * is equal to the stub number of the stubcode as present in vector.s 53 * is equal to the stub number of the stubcode as present in vector.s
54 * 54 *
55 * The primary CPU's array of interrupt sources has its first 16 55 * The primary CPU's array of interrupt sources has its first 16
56 * entries reserved for legacy ISA irq handlers. This means that 56 * entries reserved for legacy ISA irq handlers. This means that
57 * they have a 1:1 mapping for arrayindex:irq_num. This is not 57 * they have a 1:1 mapping for arrayindex:irq_num. This is not
58 * true for interrupts that come in through IO APICs, to find 58 * true for interrupts that come in through IO APICs, to find
59 * their source, go through ci->ci_isources[index].is_pic 59 * their source, go through ci->ci_isources[index].is_pic
60 * 60 *
61 * It's possible to always maintain a 1:1 mapping, but that means 61 * It's possible to always maintain a 1:1 mapping, but that means
62 * limiting the total number of interrupt sources to MAX_INTR_SOURCES 62 * limiting the total number of interrupt sources to MAX_INTR_SOURCES
63 * (32), instead of 32 per CPU. It also would mean that having multiple 63 * (32), instead of 32 per CPU. It also would mean that having multiple
64 * IO APICs which deliver interrupts from an equal pin number would 64 * IO APICs which deliver interrupts from an equal pin number would
65 * overlap if they were to be sent to the same CPU. 65 * overlap if they were to be sent to the same CPU.
66 */ 66 */
67 67
68struct intrstub { 68struct intrstub {
69 void *ist_entry; 69 void *ist_entry;
70 void *ist_recurse;  70 void *ist_recurse;
71 void *ist_resume; 71 void *ist_resume;
72}; 72};
73 73
74struct intrsource { 74struct intrsource {
75 int is_maxlevel; /* max. IPL for this source */ 75 int is_maxlevel; /* max. IPL for this source */
76 int is_pin; /* IRQ for legacy; pin for IO APIC */ 76 int is_pin; /* IRQ for legacy; pin for IO APIC,
 77 -1 for MSI */
77 struct intrhand *is_handlers; /* handler chain */ 78 struct intrhand *is_handlers; /* handler chain */
78 struct pic *is_pic; /* originating PIC */ 79 struct pic *is_pic; /* originating PIC */
79 void *is_recurse; /* entry for spllower */ 80 void *is_recurse; /* entry for spllower */
80 void *is_resume; /* entry for doreti */ 81 void *is_resume; /* entry for doreti */
81 lwp_t *is_lwp; /* for soft interrupts */ 82 lwp_t *is_lwp; /* for soft interrupts */
82 struct evcnt is_evcnt; /* interrupt counter */ 83 struct evcnt is_evcnt; /* interrupt counter */
83 int is_flags; /* see below */ 84 int is_flags; /* see below */
84 int is_type; /* level, edge */ 85 int is_type; /* level, edge */
85 int is_idtvec; 86 int is_idtvec;
86 int is_minlevel; 87 int is_minlevel;
87 char is_evname[32]; /* event counter name */ 88 char is_evname[32]; /* event counter name */
88}; 89};
89 90
90#define IS_LEGACY 0x0001 /* legacy ISA irq source */ 91#define IS_LEGACY 0x0001 /* legacy ISA irq source */
91#define IS_IPI 0x0002 92#define IS_IPI 0x0002
92#define IS_LOG 0x0004 93#define IS_LOG 0x0004
93 94
94/* 95/*
95 * Interrupt handler chains. *_intr_establish() insert a handler into 96 * Interrupt handler chains. *_intr_establish() insert a handler into
96 * the list. The handler is called with its (single) argument. 97 * the list. The handler is called with its (single) argument.
97 */ 98 */
98 99
99struct intrhand { 100struct intrhand {
100 int (*ih_fun)(void *); 101 int (*ih_fun)(void *);
101 void *ih_arg; 102 void *ih_arg;
102 int ih_level; 103 int ih_level;
103 int (*ih_realfun)(void *); 104 int (*ih_realfun)(void *);
104 void *ih_realarg; 105 void *ih_realarg;
105 struct intrhand *ih_next; 106 struct intrhand *ih_next;
106 struct intrhand **ih_prevp; 107 struct intrhand **ih_prevp;
107 int ih_pin; 108 int ih_pin;
108 int ih_slot; 109 int ih_slot;
109 struct cpu_info *ih_cpu; 110 struct cpu_info *ih_cpu;
110}; 111};
111 112
112#define IMASK(ci,level) (ci)->ci_imask[(level)] 113#define IMASK(ci,level) (ci)->ci_imask[(level)]
113#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)] 114#define IUNMASK(ci,level) (ci)->ci_iunmask[(level)]
114 115
115#ifdef _KERNEL 116#ifdef _KERNEL
116 117
117void Xspllower(int); 118void Xspllower(int);
118void spllower(int); 119void spllower(int);
119int splraise(int); 120int splraise(int);
120void softintr(int); 121void softintr(int);
121 122
122/* 123/*
123 * Convert spl level to local APIC level 124 * Convert spl level to local APIC level
124 */ 125 */
125 126
126#define APIC_LEVEL(l) ((l) << 4) 127#define APIC_LEVEL(l) ((l) << 4)
127 128
128/* 129/*
129 * Miscellaneous 130 * Miscellaneous
130 */ 131 */
131 132
132#define SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x)) 133#define SPL_ASSERT_BELOW(x) KDASSERT(curcpu()->ci_ilevel < (x))
133#define spl0() spllower(IPL_NONE) 134#define spl0() spllower(IPL_NONE)
134#define splx(x) spllower(x) 135#define splx(x) spllower(x)
135 136
136typedef uint8_t ipl_t; 137typedef uint8_t ipl_t;
137typedef struct { 138typedef struct {
138 ipl_t _ipl; 139 ipl_t _ipl;
139} ipl_cookie_t; 140} ipl_cookie_t;
140 141
141static inline ipl_cookie_t 142static inline ipl_cookie_t
142makeiplcookie(ipl_t ipl) 143makeiplcookie(ipl_t ipl)
143{ 144{
144 145
145 return (ipl_cookie_t){._ipl = ipl}; 146 return (ipl_cookie_t){._ipl = ipl};
146} 147}
147 148
148static inline int 149static inline int
149splraiseipl(ipl_cookie_t icookie) 150splraiseipl(ipl_cookie_t icookie)
150{ 151{
151 152
152 return splraise(icookie._ipl); 153 return splraise(icookie._ipl);
153} 154}
154 155
155#include <sys/spl.h> 156#include <sys/spl.h>
156 157
157/* 158/*
158 * Stub declarations. 159 * Stub declarations.
159 */ 160 */
160 161
161void Xsoftintr(void); 162void Xsoftintr(void);
162void Xpreemptrecurse(void); 163void Xpreemptrecurse(void);
163void Xpreemptresume(void); 164void Xpreemptresume(void);
164 165
165extern struct intrstub i8259_stubs[]; 166extern struct intrstub i8259_stubs[];
166extern struct intrstub ioapic_edge_stubs[]; 167extern struct intrstub ioapic_edge_stubs[];
167extern struct intrstub ioapic_level_stubs[]; 168extern struct intrstub ioapic_level_stubs[];
168 169
169struct cpu_info; 170struct cpu_info;
170 171
171struct pcibus_attach_args; 172struct pcibus_attach_args;
172 173
173void intr_default_setup(void); 174void intr_default_setup(void);
174void x86_nmi(void); 175void x86_nmi(void);
175void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool); 176void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool);
176void intr_disestablish(struct intrhand *); 177void intr_disestablish(struct intrhand *);
177void intr_add_pcibus(struct pcibus_attach_args *); 178void intr_add_pcibus(struct pcibus_attach_args *);
178const char *intr_string(int); 179const char *intr_string(int);
179void cpu_intr_init(struct cpu_info *); 180void cpu_intr_init(struct cpu_info *);
180int intr_find_mpmapping(int, int, int *); 181int intr_find_mpmapping(int, int, int *);
181struct pic *intr_findpic(int); 182struct pic *intr_findpic(int);
182void intr_printconfig(void); 183void intr_printconfig(void);
183 184
184int x86_send_ipi(struct cpu_info *, int); 185int x86_send_ipi(struct cpu_info *, int);
185void x86_broadcast_ipi(int); 186void x86_broadcast_ipi(int);
186void x86_ipi_handler(void); 187void x86_ipi_handler(void);
187 188
188extern void (*ipifunc[X86_NIPI])(struct cpu_info *); 189extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
189 190
190#endif /* _KERNEL */ 191#endif /* _KERNEL */
191 192
192#endif /* !_LOCORE */ 193#endif /* !_LOCORE */
193 194
194#endif /* !_X86_INTR_H_ */ 195#endif /* !_X86_INTR_H_ */

cvs diff -r1.71 -r1.72 src/sys/arch/x86/x86/intr.c (switch to unified diff)

--- src/sys/arch/x86/x86/intr.c 2011/04/03 22:29:27 1.71
+++ src/sys/arch/x86/x86/intr.c 2011/08/01 10:42:24 1.72
@@ -1,1358 +1,1359 @@ @@ -1,1358 +1,1359 @@
1/* $NetBSD: intr.c,v 1.71 2011/04/03 22:29:27 dyoung Exp $ */ 1/* $NetBSD: intr.c,v 1.72 2011/08/01 10:42:24 drochner Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Copyright 2002 (c) Wasabi Systems, Inc. 33 * Copyright 2002 (c) Wasabi Systems, Inc.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Written by Frank van der Linden for Wasabi Systems, Inc. 36 * Written by Frank van der Linden for Wasabi Systems, Inc.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions 39 * modification, are permitted provided that the following conditions
40 * are met: 40 * are met:
41 * 1. Redistributions of source code must retain the above copyright 41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer. 42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright 43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the 44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution. 45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software 46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement: 47 * must display the following acknowledgement:
48 * This product includes software developed for the NetBSD Project by 48 * This product includes software developed for the NetBSD Project by
49 * Wasabi Systems, Inc. 49 * Wasabi Systems, Inc.
50 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 50 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
51 * or promote products derived from this software without specific prior 51 * or promote products derived from this software without specific prior
52 * written permission. 52 * written permission.
53 * 53 *
54 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 54 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE. 64 * POSSIBILITY OF SUCH DAMAGE.
65 */ 65 */
66 66
67/*- 67/*-
68 * Copyright (c) 1991 The Regents of the University of California. 68 * Copyright (c) 1991 The Regents of the University of California.
69 * All rights reserved. 69 * All rights reserved.
70 * 70 *
71 * This code is derived from software contributed to Berkeley by 71 * This code is derived from software contributed to Berkeley by
72 * William Jolitz. 72 * William Jolitz.
73 * 73 *
74 * Redistribution and use in source and binary forms, with or without 74 * Redistribution and use in source and binary forms, with or without
75 * modification, are permitted provided that the following conditions 75 * modification, are permitted provided that the following conditions
76 * are met: 76 * are met:
77 * 1. Redistributions of source code must retain the above copyright 77 * 1. Redistributions of source code must retain the above copyright
78 * notice, this list of conditions and the following disclaimer. 78 * notice, this list of conditions and the following disclaimer.
79 * 2. Redistributions in binary form must reproduce the above copyright 79 * 2. Redistributions in binary form must reproduce the above copyright
80 * notice, this list of conditions and the following disclaimer in the 80 * notice, this list of conditions and the following disclaimer in the
81 * documentation and/or other materials provided with the distribution. 81 * documentation and/or other materials provided with the distribution.
82 * 3. Neither the name of the University nor the names of its contributors 82 * 3. Neither the name of the University nor the names of its contributors
83 * may be used to endorse or promote products derived from this software 83 * may be used to endorse or promote products derived from this software
84 * without specific prior written permission. 84 * without specific prior written permission.
85 * 85 *
86 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 86 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
87 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 87 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 89 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
90 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 90 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
91 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 91 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
92 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 92 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
93 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 93 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
94 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 94 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
95 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 95 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
96 * SUCH DAMAGE. 96 * SUCH DAMAGE.
97 * 97 *
98 * @(#)isa.c 7.2 (Berkeley) 5/13/91 98 * @(#)isa.c 7.2 (Berkeley) 5/13/91
99 */ 99 */
100 100
101/*- 101/*-
102 * Copyright (c) 1993, 1994 Charles Hannum. 102 * Copyright (c) 1993, 1994 Charles Hannum.
103 * 103 *
104 * Redistribution and use in source and binary forms, with or without 104 * Redistribution and use in source and binary forms, with or without
105 * modification, are permitted provided that the following conditions 105 * modification, are permitted provided that the following conditions
106 * are met: 106 * are met:
107 * 1. Redistributions of source code must retain the above copyright 107 * 1. Redistributions of source code must retain the above copyright
108 * notice, this list of conditions and the following disclaimer. 108 * notice, this list of conditions and the following disclaimer.
109 * 2. Redistributions in binary form must reproduce the above copyright 109 * 2. Redistributions in binary form must reproduce the above copyright
110 * notice, this list of conditions and the following disclaimer in the 110 * notice, this list of conditions and the following disclaimer in the
111 * documentation and/or other materials provided with the distribution. 111 * documentation and/or other materials provided with the distribution.
112 * 3. All advertising materials mentioning features or use of this software 112 * 3. All advertising materials mentioning features or use of this software
113 * must display the following acknowledgement: 113 * must display the following acknowledgement:
114 * This product includes software developed by the University of 114 * This product includes software developed by the University of
115 * California, Berkeley and its contributors. 115 * California, Berkeley and its contributors.
116 * 4. Neither the name of the University nor the names of its contributors 116 * 4. Neither the name of the University nor the names of its contributors
117 * may be used to endorse or promote products derived from this software 117 * may be used to endorse or promote products derived from this software
118 * without specific prior written permission. 118 * without specific prior written permission.
119 * 119 *
120 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 120 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
121 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 121 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
122 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 122 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
123 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 123 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
124 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 124 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
125 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 125 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
126 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 126 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
127 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 127 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
128 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 128 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
129 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 129 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
130 * SUCH DAMAGE. 130 * SUCH DAMAGE.
131 * 131 *
132 * @(#)isa.c 7.2 (Berkeley) 5/13/91 132 * @(#)isa.c 7.2 (Berkeley) 5/13/91
133 */ 133 */
134 134
135#include <sys/cdefs.h> 135#include <sys/cdefs.h>
136__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.71 2011/04/03 22:29:27 dyoung Exp $"); 136__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.72 2011/08/01 10:42:24 drochner Exp $");
137 137
138#include "opt_intrdebug.h" 138#include "opt_intrdebug.h"
139#include "opt_multiprocessor.h" 139#include "opt_multiprocessor.h"
140#include "opt_acpi.h" 140#include "opt_acpi.h"
141 141
142#include <sys/param.h> 142#include <sys/param.h>
143#include <sys/systm.h> 143#include <sys/systm.h>
144#include <sys/kernel.h> 144#include <sys/kernel.h>
145#include <sys/syslog.h> 145#include <sys/syslog.h>
146#include <sys/device.h> 146#include <sys/device.h>
147#include <sys/kmem.h> 147#include <sys/kmem.h>
148#include <sys/proc.h> 148#include <sys/proc.h>
149#include <sys/errno.h> 149#include <sys/errno.h>
150#include <sys/intr.h> 150#include <sys/intr.h>
151#include <sys/cpu.h> 151#include <sys/cpu.h>
152#include <sys/atomic.h> 152#include <sys/atomic.h>
153#include <sys/xcall.h> 153#include <sys/xcall.h>
154 154
155#include <uvm/uvm_extern.h> 155#include <uvm/uvm_extern.h>
156 156
157#include <machine/i8259.h> 157#include <machine/i8259.h>
158#include <machine/pio.h> 158#include <machine/pio.h>
159 159
160#include "ioapic.h" 160#include "ioapic.h"
161#include "lapic.h" 161#include "lapic.h"
162#include "pci.h" 162#include "pci.h"
163#include "acpica.h" 163#include "acpica.h"
164 164
165#if NIOAPIC > 0 || NACPICA > 0 165#if NIOAPIC > 0 || NACPICA > 0
166#include <machine/i82093var.h>  166#include <machine/i82093var.h>
167#include <machine/mpbiosvar.h> 167#include <machine/mpbiosvar.h>
168#include <machine/mpacpi.h> 168#include <machine/mpacpi.h>
169#endif 169#endif
170 170
171#if NLAPIC > 0 171#if NLAPIC > 0
172#include <machine/i82489var.h> 172#include <machine/i82489var.h>
173#endif 173#endif
174 174
175#if NPCI > 0 175#if NPCI > 0
176#include <dev/pci/ppbreg.h> 176#include <dev/pci/ppbreg.h>
177#endif 177#endif
178 178
179#ifdef DDB 179#ifdef DDB
180#include <ddb/db_output.h> 180#include <ddb/db_output.h>
181#endif 181#endif
182 182
183struct pic softintr_pic = { 183struct pic softintr_pic = {
184 .pic_name = "softintr_fakepic", 184 .pic_name = "softintr_fakepic",
185 .pic_type = PIC_SOFT, 185 .pic_type = PIC_SOFT,
186 .pic_vecbase = 0, 186 .pic_vecbase = 0,
187 .pic_apicid = 0, 187 .pic_apicid = 0,
188 .pic_lock = __SIMPLELOCK_UNLOCKED, 188 .pic_lock = __SIMPLELOCK_UNLOCKED,
189}; 189};
190 190
191#if NIOAPIC > 0 || NACPICA > 0 191#if NIOAPIC > 0 || NACPICA > 0
192static int intr_scan_bus(int, int, int *); 192static int intr_scan_bus(int, int, int *);
193#if NPCI > 0 193#if NPCI > 0
194static int intr_find_pcibridge(int, pcitag_t *, pci_chipset_tag_t *); 194static int intr_find_pcibridge(int, pcitag_t *, pci_chipset_tag_t *);
195#endif 195#endif
196#endif 196#endif
197 197
198/* 198/*
199 * Fill in default interrupt table (in case of spurious interrupt 199 * Fill in default interrupt table (in case of spurious interrupt
200 * during configuration of kernel), setup interrupt control unit 200 * during configuration of kernel), setup interrupt control unit
201 */ 201 */
202void 202void
203intr_default_setup(void) 203intr_default_setup(void)
204{ 204{
205 int i; 205 int i;
206 206
207 /* icu vectors */ 207 /* icu vectors */
208 for (i = 0; i < NUM_LEGACY_IRQS; i++) { 208 for (i = 0; i < NUM_LEGACY_IRQS; i++) {
209 idt_vec_reserve(ICU_OFFSET + i); 209 idt_vec_reserve(ICU_OFFSET + i);
210 setgate(&idt[ICU_OFFSET + i], 210 setgate(&idt[ICU_OFFSET + i],
211 i8259_stubs[i].ist_entry, 0, SDT_SYS386IGT, 211 i8259_stubs[i].ist_entry, 0, SDT_SYS386IGT,
212 SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 212 SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
213 } 213 }
214 214
215 /* 215 /*
216 * Eventually might want to check if it's actually there. 216 * Eventually might want to check if it's actually there.
217 */ 217 */
218 i8259_default_setup(); 218 i8259_default_setup();
219} 219}
220 220
221/* 221/*
222 * Handle a NMI, possibly a machine check. 222 * Handle a NMI, possibly a machine check.
223 * return true to panic system, false to ignore. 223 * return true to panic system, false to ignore.
224 */ 224 */
225void 225void
226x86_nmi(void) 226x86_nmi(void)
227{ 227{
228 228
229 log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70)); 229 log(LOG_CRIT, "NMI port 61 %x, port 70 %x\n", inb(0x61), inb(0x70));
230} 230}
231 231
/*
 * Recalculate the interrupt masks from scratch.
 * During early boot, anything goes and we are always called on the BP.
 * When the system is up and running:
 *
 * => called with ci == curcpu()
 * => cpu_lock held by the initiator
 * => interrupts disabled on-chip (PSL_I)
 *
 * Do not call printf(), kmem_free() or other "heavyweight" routines
 * from here.  This routine must be quick and must not block.
 */
static void
intr_calculatemasks(struct cpu_info *ci)
{
	int irq, level, unusedirqs, intrlevel[MAX_INTR_SOURCES];
	struct intrhand *q;

	/* First, figure out which levels each IRQ uses. */
	unusedirqs = 0xffffffff;
	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		int levels = 0;

		if (ci->ci_isources[irq] == NULL) {
			intrlevel[irq] = 0;
			continue;
		}
		for (q = ci->ci_isources[irq]->is_handlers; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		intrlevel[irq] = levels;
		if (levels)
			unusedirqs &= ~(1 << irq);
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		int irqs = 0;
		for (irq = 0; irq < MAX_INTR_SOURCES; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1 << irq;
		/* Unused IRQs stay masked at every level. */
		ci->ci_imask[level] = irqs | unusedirqs;
	}

	/* Masks must nest: a higher IPL blocks everything a lower one does. */
	for (level = 0; level < (NIPL - 1); level++)
		ci->ci_imask[level + 1] |= ci->ci_imask[level];

	/* Record the lowest and highest handler IPL on each source. */
	for (irq = 0; irq < MAX_INTR_SOURCES; irq++) {
		int maxlevel = IPL_NONE;
		int minlevel = IPL_HIGH;

		if (ci->ci_isources[irq] == NULL)
			continue;
		for (q = ci->ci_isources[irq]->is_handlers; q;
		     q = q->ih_next) {
			if (q->ih_level < minlevel)
				minlevel = q->ih_level;
			if (q->ih_level > maxlevel)
				maxlevel = q->ih_level;
		}
		ci->ci_isources[irq]->is_maxlevel = maxlevel;
		ci->ci_isources[irq]->is_minlevel = minlevel;
	}

	for (level = 0; level < NIPL; level++)
		ci->ci_iunmask[level] = ~ci->ci_imask[level];
}
298 298
/*
 * List to keep track of PCI buses that are probed but not known
 * to the firmware.  Used by intr_find_pcibridge() to map such a bus
 * number to its parent PCI bridge.
 *
 * XXX should maintain one list, not an array and a linked list.
 */
#if (NPCI > 0) && ((NIOAPIC > 0) || NACPICA > 0)
struct intr_extra_bus {
	int bus;				/* PCI bus number */
	pcitag_t *pci_bridge_tag;		/* parent bridge tag, if any */
	pci_chipset_tag_t pci_chipset_tag;
	LIST_ENTRY(intr_extra_bus) list;
};

LIST_HEAD(, intr_extra_bus) intr_extra_buses =
    LIST_HEAD_INITIALIZER(intr_extra_buses);
315 315
316 316
/*
 * Record a PCI bus that was probed but is not described by the
 * firmware, so that interrupt routing can later be traced through
 * its parent bridge.
 */
void
intr_add_pcibus(struct pcibus_attach_args *pba)
{
	struct intr_extra_bus *iebp;

	/* KM_SLEEP: may block, never returns NULL. */
	iebp = kmem_alloc(sizeof(*iebp), KM_SLEEP);
	iebp->bus = pba->pba_bus;
	iebp->pci_chipset_tag = pba->pba_pc;
	iebp->pci_bridge_tag = pba->pba_bridgetag;
	LIST_INSERT_HEAD(&intr_extra_buses, iebp, list);
}
328 328
/*
 * Find the PCI bridge behind the given bus number, returning its tag
 * and chipset tag through the output parameters.  Returns 0 on success
 * or ENOENT if the bus is unknown or has no recorded parent bridge.
 */
static int
intr_find_pcibridge(int bus, pcitag_t *pci_bridge_tag,
    pci_chipset_tag_t *pc)
{
	struct intr_extra_bus *iebp;
	struct mp_bus *mpb;

	if (bus < 0)
		return ENOENT;

	/* Buses known to the firmware are described in mp_busses[]. */
	if (bus < mp_nbus) {
		mpb = &mp_busses[bus];
		if (mpb->mb_pci_bridge_tag == NULL)
			return ENOENT;
		*pci_bridge_tag = *mpb->mb_pci_bridge_tag;
		*pc = mpb->mb_pci_chipset_tag;
		return 0;
	}

	/* Otherwise consult the list of extra probed buses. */
	LIST_FOREACH(iebp, &intr_extra_buses, list) {
		if (iebp->bus == bus) {
			if (iebp->pci_bridge_tag == NULL)
				return ENOENT;
			*pci_bridge_tag = *iebp->pci_bridge_tag;
			*pc = iebp->pci_chipset_tag;
			return 0;
		}
	}
	return ENOENT;
}
#endif
360 360
#if NIOAPIC > 0 || NACPICA > 0
/*
 * Translate a (bus, pin) pair into an ioapic interrupt handle.  If the
 * bus itself has no routing information, walk up through PCI-PCI
 * bridges, applying the PPB interrupt swizzle at each level, until a
 * bus with routing information is found.  Returns 0 on success or
 * ENOENT if no mapping exists.
 */
int
intr_find_mpmapping(int bus, int pin, int *handle)
{
#if NPCI > 0
	int dev, func;
	pcitag_t pci_bridge_tag;
	pci_chipset_tag_t pc;
#endif

#if NPCI > 0
	while (intr_scan_bus(bus, pin, handle) != 0) {
		if (intr_find_pcibridge(bus, &pci_bridge_tag,
		    &pc) != 0)
			return ENOENT;
		/* pin encodes (device << 2) | intpin; split it apart. */
		dev = pin >> 2;
		pin = pin & 3;
		/* Swizzle to the pin as seen on the parent bus. */
		pin = PPB_INTERRUPT_SWIZZLE(pin + 1, dev) - 1;
		pci_decompose_tag(pc, pci_bridge_tag, &bus,
		    &dev, &func);
		/* Re-encode with the bridge's device number. */
		pin |= (dev << 2);
	}
	return 0;
#else
	return intr_scan_bus(bus, pin, handle);
#endif
}
388 388
/*
 * Look up the interrupt map entry for (bus, pin) and return its ioapic
 * handle through *handle.  Returns 0 on success, ENOENT otherwise.
 */
static int
intr_scan_bus(int bus, int pin, int *handle)
{
	struct mp_intr_map *mip, *intrs;

	if (bus < 0 || bus >= mp_nbus)
		return ENOENT;

	intrs = mp_busses[bus].mb_intrs;
	if (intrs == NULL)
		return ENOENT;

	for (mip = intrs; mip != NULL; mip = mip->next) {
		if (mip->bus_pin == pin) {
#if NACPICA > 0
			/*
			 * Skip entries whose ACPI link device cannot be
			 * resolved to an interrupt.
			 */
			if (mip->linkdev != NULL)
				if (mpacpi_findintr_linkdev(mip) != 0)
					continue;
#endif
			*handle = mip->ioapic_ih;
			return 0;
		}
	}
	return ENOENT;
}
#endif
415 415
/*
 * Allocate an interrupt source slot on the given CPU for the given
 * pic/pin, creating (and attaching an event counter to) a new
 * intrsource if the slot was empty.  Returns the slot through *index.
 */
static int
intr_allocate_slot_cpu(struct cpu_info *ci, struct pic *pic, int pin,
    int *index)
{
	int slot, i;
	struct intrsource *isp;

	KASSERT(mutex_owned(&cpu_lock));

	if (pic == &i8259_pic) {
		/* Legacy IRQs map 1:1 onto reserved slots of the BP. */
		KASSERT(CPU_IS_PRIMARY(ci));
		slot = pin;
	} else {
		slot = -1;

		/*
		 * intr_allocate_slot has checked for an existing mapping.
		 * Now look for a free slot.
		 */
		for (i = 0; i < MAX_INTR_SOURCES ; i++) {
			if (ci->ci_isources[i] == NULL) {
				slot = i;
				break;
			}
		}
		if (slot == -1) {
			return EBUSY;
		}
	}

	isp = ci->ci_isources[slot];
	if (isp == NULL) {
		isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
		if (isp == NULL) {
			return ENOMEM;
		}
		/*
		 * NOTE(review): MSIs pass pin == -1, so this yields the
		 * not-so-useful event name "pin -1" for statistics.
		 */
		snprintf(isp->is_evname, sizeof (isp->is_evname),
		    "pin %d", pin);
		evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, isp->is_evname);
		ci->ci_isources[slot] = isp;
	}

	*index = slot;
	return 0;
}
462 462
/*
 * A simple round-robin allocator to assign interrupts to CPUs.
 */
static int __noinline
intr_allocate_slot(struct pic *pic, int pin, int level,
    struct cpu_info **cip, int *index, int *idt_slot)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci, *lci;
	struct intrsource *isp;
	int slot = 0, idtvec, error;

	KASSERT(mutex_owned(&cpu_lock));

	/*
	 * First check if this pin is already used by an interrupt vector.
	 * A pin of -1 is used for MSIs, which have no concept of a pin
	 * and are generally not shared, so never match an existing
	 * source in that case.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		for (slot = 0 ; slot < MAX_INTR_SOURCES ; slot++) {
			if ((isp = ci->ci_isources[slot]) == NULL) {
				continue;
			}
			if (isp->is_pic == pic &&
			    pin != -1 && isp->is_pin == pin) {
				*idt_slot = isp->is_idtvec;
				*index = slot;
				*cip = ci;
				return 0;
			}
		}
	}

	/*
	 * The pic/pin combination doesn't have an existing mapping.
	 * Find a slot for a new interrupt source.  For the i8259 case,
	 * we always use reserved slots of the primary CPU.  Otherwise,
	 * we make an attempt to balance the interrupt load.
	 *
	 * PIC and APIC usage are essentially exclusive, so the reservation
	 * of the ISA slots is ignored when assigning IOAPIC slots.
	 */
	if (pic == &i8259_pic) {
		/*
		 * Must be directed to BP.
		 */
		ci = &cpu_info_primary;
		error = intr_allocate_slot_cpu(ci, pic, pin, &slot);
	} else {
		/*
		 * Find least loaded AP/BP and try to allocate there.
		 */
		ci = NULL;
		for (CPU_INFO_FOREACH(cii, lci)) {
			if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
				continue;
			}
#if 0
			if (ci == NULL ||
			    ci->ci_nintrhand > lci->ci_nintrhand) {
				ci = lci;
			}
#else
			/* XXX load balancing disabled; always use the BP. */
			ci = &cpu_info_primary;
#endif
		}
		KASSERT(ci != NULL);
		error = intr_allocate_slot_cpu(ci, pic, pin, &slot);

		/*
		 * If that did not work, allocate anywhere.
		 */
		if (error != 0) {
			for (CPU_INFO_FOREACH(cii, ci)) {
				if ((ci->ci_schedstate.spc_flags &
				    SPCF_NOINTR) != 0) {
					continue;
				}
				error = intr_allocate_slot_cpu(ci, pic,
				    pin, &slot);
				if (error == 0) {
					break;
				}
			}
		}
	}
	if (error != 0) {
		return error;
	}
	KASSERT(ci != NULL);

	/*
	 * Now allocate an IDT vector.
	 * For the 8259 these are reserved up front.
	 */
	if (pic == &i8259_pic) {
		idtvec = ICU_OFFSET + pin;
	} else {
		idtvec = idt_vec_alloc(APIC_LEVEL(level), IDT_INTR_HIGH);
	}
	if (idtvec == 0) {
		/* Roll back the slot we just allocated. */
		evcnt_detach(&ci->ci_isources[slot]->is_evcnt);
		kmem_free(ci->ci_isources[slot],
		    sizeof(*(ci->ci_isources[slot])));
		ci->ci_isources[slot] = NULL;
		return EBUSY;
	}
	ci->ci_isources[slot]->is_idtvec = idtvec;
	*idt_slot = idtvec;
	*index = slot;
	*cip = ci;
	return 0;
}
571 572
572static void 573static void
573intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec) 574intr_source_free(struct cpu_info *ci, int slot, struct pic *pic, int idtvec)
574{ 575{
575 struct intrsource *isp; 576 struct intrsource *isp;
576 577
577 isp = ci->ci_isources[slot]; 578 isp = ci->ci_isources[slot];
578 579
579 if (isp->is_handlers != NULL) 580 if (isp->is_handlers != NULL)
580 return; 581 return;
581 ci->ci_isources[slot] = NULL; 582 ci->ci_isources[slot] = NULL;
582 evcnt_detach(&isp->is_evcnt); 583 evcnt_detach(&isp->is_evcnt);
583 kmem_free(isp, sizeof(*isp)); 584 kmem_free(isp, sizeof(*isp));
584 ci->ci_isources[slot] = NULL; 585 ci->ci_isources[slot] = NULL;
585 if (pic != &i8259_pic) 586 if (pic != &i8259_pic)
586 idt_vec_free(idtvec); 587 idt_vec_free(idtvec);
587} 588}
588 589
#ifdef MULTIPROCESSOR
static int intr_biglock_wrapper(void *);

/*
 * intr_biglock_wrapper: grab biglock and call a real interrupt handler.
 * Installed in place of the real handler for non-MPSAFE interrupts;
 * the real function and argument are kept in ih_realfun/ih_realarg.
 */

static int
intr_biglock_wrapper(void *vp)
{
	struct intrhand *ih = vp;
	int ret;

	KERNEL_LOCK(1, NULL);

	ret = (*ih->ih_realfun)(ih->ih_realarg);

	KERNEL_UNLOCK_ONE(NULL);

	return ret;
}
#endif /* MULTIPROCESSOR */
611 612
612struct pic * 613struct pic *
613intr_findpic(int num) 614intr_findpic(int num)
614{ 615{
615#if NIOAPIC > 0 616#if NIOAPIC > 0
616 struct ioapic_softc *pic; 617 struct ioapic_softc *pic;
617 618
618 pic = ioapic_find_bybase(num); 619 pic = ioapic_find_bybase(num);
619 if (pic != NULL) 620 if (pic != NULL)
620 return &pic->sc_pic; 621 return &pic->sc_pic;
621#endif 622#endif
622 if (num < NUM_LEGACY_IRQS) 623 if (num < NUM_LEGACY_IRQS)
623 return &i8259_pic; 624 return &i8259_pic;
624 625
625 return NULL; 626 return NULL;
626} 627}
627 628
/*
 * Handle per-CPU component of interrupt establish.
 *
 * => caller (on initiating CPU) holds cpu_lock on our behalf
 * => arg1: struct intrhand *ih
 * => arg2: int idt_vec
 */
static void
intr_establish_xcall(void *arg1, void *arg2)
{
	struct intrsource *source;
	struct intrstub *stubp;
	struct intrhand *ih;
	struct cpu_info *ci;
	int idt_vec;
	u_long psl;

	ih = arg1;

	/* Must run on the target CPU once the APs are online. */
	KASSERT(ih->ih_cpu == curcpu() || !mp_online);

	ci = ih->ih_cpu;
	source = ci->ci_isources[ih->ih_slot];
	idt_vec = (int)(intptr_t)arg2;

	/* Disable interrupts locally. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Link in the handler and re-calculate masks. */
	*(ih->ih_prevp) = ih;
	intr_calculatemasks(ci);

	/* Hook in new IDT vector and SPL state. */
	if (source->is_resume == NULL || source->is_idtvec != idt_vec) {
		/* Release a previously used, different vector. */
		if (source->is_idtvec != 0 && source->is_idtvec != idt_vec)
			idt_vec_free(source->is_idtvec);
		source->is_idtvec = idt_vec;
		if (source->is_type == IST_LEVEL) {
			stubp = &source->is_pic->pic_level_stubs[ih->ih_slot];
		} else {
			stubp = &source->is_pic->pic_edge_stubs[ih->ih_slot];
		}
		source->is_resume = stubp->ist_resume;
		source->is_recurse = stubp->ist_recurse;
		setgate(&idt[idt_vec], stubp->ist_entry, 0, SDT_SYS386IGT,
		    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	}

	/* Re-enable interrupts locally. */
	x86_write_psl(psl);
}
680 681
/*
 * Establish an interrupt handler.  Allocates a source slot (and, if
 * needed, an IDT vector), links the handler in on the chosen CPU via
 * cross-call, then routes and unmasks the interrupt in hardware.
 * Returns an opaque cookie for intr_disestablish(), or NULL on failure.
 */
void *
intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
    int (*handler)(void *), void *arg, bool known_mpsafe)
{
	struct intrhand **p, *q, *ih;
	struct cpu_info *ci;
	int slot, error, idt_vec;
	struct intrsource *source;
#ifdef MULTIPROCESSOR
	/* Handlers above IPL_VM are treated as MP-safe. */
	bool mpsafe = (known_mpsafe || level != IPL_VM);
#endif /* MULTIPROCESSOR */
	uint64_t where;

#ifdef DIAGNOSTIC
	if (legacy_irq != -1 && (legacy_irq < 0 || legacy_irq > 15))
		panic("%s: bad legacy IRQ value", __func__);

	if (legacy_irq == -1 && pic == &i8259_pic)
		panic("intr_establish: non-legacy IRQ on i8259");
#endif

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
	if (ih == NULL) {
		printf("%s: can't allocate handler info\n", __func__);
		return NULL;
	}

	mutex_enter(&cpu_lock);
	error = intr_allocate_slot(pic, pin, level, &ci, &slot, &idt_vec);
	if (error != 0) {
		mutex_exit(&cpu_lock);
		kmem_free(ih, sizeof(*ih));
		printf("failed to allocate interrupt slot for PIC %s pin %d\n",
		    pic->pic_name, pin);
		return NULL;
	}

	source = ci->ci_isources[slot];

	/* Refuse to mix PIC types on one shared source. */
	if (source->is_handlers != NULL &&
	    source->is_pic->pic_type != pic->pic_type) {
		mutex_exit(&cpu_lock);
		kmem_free(ih, sizeof(*ih));
		printf("%s: can't share intr source between "
		    "different PIC types (legacy_irq %d pin %d slot %d)\n",
		    __func__, legacy_irq, pin, slot);
		return NULL;
	}

	source->is_pin = pin;
	source->is_pic = pic;

	/* Check that the trigger types of sharers are compatible. */
	switch (source->is_type) {
	case IST_NONE:
		source->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (source->is_type == type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			mutex_exit(&cpu_lock);
			kmem_free(ih, sizeof(*ih));
			intr_source_free(ci, slot, pic, idt_vec);
			printf("%s: pic %s pin %d: can't share "
			    "type %d with %d\n",
			    __func__, pic->pic_name, pin,
			    source->is_type, type);
			return NULL;
		}
		break;
	default:
		panic("%s: bad intr type %d for pic %s pin %d\n",
		    __func__, source->is_type, pic->pic_name, pin);
		/* NOTREACHED */
	}

	/*
	 * We're now committed.  Mask the interrupt in hardware and
	 * count it for load distribution.
	 */
	(*pic->pic_hwmask)(pic, pin);
	(ci->ci_nintrhand)++;

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &ci->ci_isources[slot]->is_handlers;
	     (q = *p) != NULL && q->ih_level > level;
	     p = &q->ih_next) {
		/* nothing */;
	}

	ih->ih_fun = ih->ih_realfun = handler;
	ih->ih_arg = ih->ih_realarg = arg;
	ih->ih_prevp = p;
	ih->ih_next = *p;
	ih->ih_level = level;
	ih->ih_pin = pin;
	ih->ih_cpu = ci;
	ih->ih_slot = slot;
#ifdef MULTIPROCESSOR
	if (!mpsafe) {
		/* Interpose the biglock wrapper for non-MPSAFE handlers. */
		ih->ih_fun = intr_biglock_wrapper;
		ih->ih_arg = ih;
	}
#endif /* MULTIPROCESSOR */

	/*
	 * Call out to the remote CPU to update its interrupt state.
	 * Only make RPCs if the APs are up and running.
	 */
	if (ci == curcpu() || !mp_online) {
		intr_establish_xcall(ih, (void *)(intptr_t)idt_vec);
	} else {
		where = xc_unicast(0, intr_establish_xcall, ih,
		    (void *)(intptr_t)idt_vec, ci);
		xc_wait(where);
	}

	/* All set up, so add a route for the interrupt and unmask it. */
	(*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
	(*pic->pic_hwunmask)(pic, pin);
	mutex_exit(&cpu_lock);

#ifdef INTRDEBUG
	printf("allocated pic %s type %s pin %d level %d to %s slot %d "
	    "idt entry %d\n",
	    pic->pic_name, type == IST_EDGE ? "edge" : "level", pin, level,
	    device_xname(ci->ci_dev), slot, idt_vec);
#endif

	return (ih);
}
819 820
/*
 * Called on bound CPU to handle intr_disestablish().
 *
 * => caller (on initiating CPU) holds cpu_lock on our behalf
 * => arg1: struct intrhand *ih
 * => arg2: unused
 */
static void
intr_disestablish_xcall(void *arg1, void *arg2)
{
	struct intrhand **p, *q;
	struct cpu_info *ci;
	struct pic *pic;
	struct intrsource *source;
	struct intrhand *ih;
	u_long psl;
	int idtvec;

	ih = arg1;
	ci = ih->ih_cpu;

	KASSERT(ci == curcpu() || !mp_online);

	/* Disable interrupts locally. */
	psl = x86_read_psl();
	x86_disable_intr();

	pic = ci->ci_isources[ih->ih_slot]->is_pic;
	source = ci->ci_isources[ih->ih_slot];
	idtvec = source->is_idtvec;

	/* Mask the source and cancel any pending occurrence of it. */
	(*pic->pic_hwmask)(pic, ih->ih_pin);
	atomic_and_32(&ci->ci_ipending, ~(1 << ih->ih_slot));

	/*
	 * Remove the handler from the chain.
	 */
	for (p = &source->is_handlers; (q = *p) != NULL && q != ih;
	     p = &q->ih_next)
		;
	if (q == NULL) {
		x86_write_psl(psl);
		panic("%s: handler not registered", __func__);
		/* NOTREACHED */
	}

	*p = q->ih_next;

	intr_calculatemasks(ci);
	(*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec, source->is_type);
	(*pic->pic_hwunmask)(pic, ih->ih_pin);

	/* Re-enable interrupts. */
	x86_write_psl(psl);

	/* If the source is free we can drop it now. */
	intr_source_free(ci, ih->ih_slot, pic, idtvec);

#ifdef INTRDEBUG
	printf("%s: remove slot %d (pic %s pin %d vec %d)\n",
	    device_xname(ci->ci_dev), ih->ih_slot, pic->pic_name,
	    ih->ih_pin, idtvec);
#endif
}
884 885
885/* 886/*
886 * Deregister an interrupt handler. 887 * Deregister an interrupt handler.
887 */ 888 */
888void 889void
889intr_disestablish(struct intrhand *ih) 890intr_disestablish(struct intrhand *ih)
890{ 891{
891 struct cpu_info *ci; 892 struct cpu_info *ci;
892 uint64_t where; 893 uint64_t where;
893 894
894 /* 895 /*
895 * Count the removal for load balancing. 896 * Count the removal for load balancing.
896 * Call out to the remote CPU to update its interrupt state. 897 * Call out to the remote CPU to update its interrupt state.
897 * Only make RPCs if the APs are up and running. 898 * Only make RPCs if the APs are up and running.
898 */ 899 */
899 mutex_enter(&cpu_lock); 900 mutex_enter(&cpu_lock);
900 ci = ih->ih_cpu; 901 ci = ih->ih_cpu;
901 (ci->ci_nintrhand)--; 902 (ci->ci_nintrhand)--;
902 KASSERT(ci->ci_nintrhand >= 0); 903 KASSERT(ci->ci_nintrhand >= 0);
903 if (ci == curcpu() || !mp_online) { 904 if (ci == curcpu() || !mp_online) {
904 intr_disestablish_xcall(ih, NULL); 905 intr_disestablish_xcall(ih, NULL);
905 } else { 906 } else {
906 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci); 907 where = xc_unicast(0, intr_disestablish_xcall, ih, NULL, ci);
907 xc_wait(where); 908 xc_wait(where);
908 }  909 }
909 mutex_exit(&cpu_lock); 910 mutex_exit(&cpu_lock);
910 kmem_free(ih, sizeof(*ih)); 911 kmem_free(ih, sizeof(*ih));
911} 912}
912 913
/*
 * Format a legacy interrupt handle into a human-readable string.
 * Returns a pointer to a static buffer, so the result must be
 * consumed before the next call (not re-entrant).
 */
const char *
intr_string(int ih)
{
	static char irqstr[64];
#if NIOAPIC > 0
	struct ioapic_softc *sc;
#endif

	if (ih == 0)
		panic("%s: bogus handle 0x%x", __func__, ih);

#if NIOAPIC > 0
	if ((ih & APIC_INT_VIA_APIC) != 0) {
		sc = ioapic_find(APIC_IRQ_APIC(ih));
		if (sc == NULL) {
			/* APIC not attached (yet); show raw numbers. */
			snprintf(irqstr, sizeof(irqstr),
			    "apic %d int %d (irq %d)",
			    APIC_IRQ_APIC(ih), APIC_IRQ_PIN(ih), ih&0xff);
		} else {
			snprintf(irqstr, sizeof(irqstr), "%s pin %d",
			    device_xname(sc->sc_dev), APIC_IRQ_PIN(ih));
		}
		return irqstr;
	}
#endif
	snprintf(irqstr, sizeof(irqstr), "irq %d", ih&0xff);
	return irqstr;
}
947 948
/*
 * Fake interrupt handler structures for the benefit of symmetry with
 * other interrupt sources, and the benefit of intr_calculatemasks()
 */
/* Placeholder handlers for the per-level soft interrupt sources. */
struct intrhand fake_softclock_intrhand;
struct intrhand fake_softnet_intrhand;
struct intrhand fake_softserial_intrhand;
struct intrhand fake_softbio_intrhand;
/* Placeholder handlers for the local APIC timer and IPI sources. */
struct intrhand fake_timer_intrhand;
struct intrhand fake_ipi_intrhand;
/* Placeholder handler for the kernel preemption soft interrupt. */
struct intrhand fake_preempt_intrhand;

#if NLAPIC > 0 && defined(MULTIPROCESSOR)
/* Event-counter names, one per IPI type (indexed by IPI number). */
static const char *x86_ipi_names[X86_NIPI] = X86_IPI_NAMES;
#endif
963 964
/*
 * Evaluate to the argument on DIAGNOSTIC kernels and to constant
 * false otherwise, so the compiler can elide red-zone code entirely
 * in non-diagnostic builds.
 */
static inline bool
redzone_const_or_false(bool x)
{
#ifdef DIAGNOSTIC
	const bool result = x;
#else
	const bool result = false;
#endif /* !DIAGNOSTIC */
	return result;
}
973 974
974static inline int 975static inline int
975redzone_const_or_zero(int x) 976redzone_const_or_zero(int x)
976{ 977{
977 return redzone_const_or_false(true) ? x : 0; 978 return redzone_const_or_false(true) ? x : 0;
978} 979}
979 980
980/* 981/*
981 * Initialize all handlers that aren't dynamically allocated, and exist 982 * Initialize all handlers that aren't dynamically allocated, and exist
982 * for each CPU. 983 * for each CPU.
983 */ 984 */
984void 985void
985cpu_intr_init(struct cpu_info *ci) 986cpu_intr_init(struct cpu_info *ci)
986{ 987{
987 struct intrsource *isp; 988 struct intrsource *isp;
988#if NLAPIC > 0 && defined(MULTIPROCESSOR) 989#if NLAPIC > 0 && defined(MULTIPROCESSOR)
989 int i; 990 int i;
990 static int first = 1; 991 static int first = 1;
991#endif 992#endif
992#ifdef INTRSTACKSIZE 993#ifdef INTRSTACKSIZE
993 vaddr_t istack; 994 vaddr_t istack;
994#endif 995#endif
995 996
996#if NLAPIC > 0 997#if NLAPIC > 0
997 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 998 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
998 KASSERT(isp != NULL); 999 KASSERT(isp != NULL);
999 isp->is_recurse = Xrecurse_lapic_ltimer; 1000 isp->is_recurse = Xrecurse_lapic_ltimer;
1000 isp->is_resume = Xresume_lapic_ltimer; 1001 isp->is_resume = Xresume_lapic_ltimer;
1001 fake_timer_intrhand.ih_level = IPL_CLOCK; 1002 fake_timer_intrhand.ih_level = IPL_CLOCK;
1002 isp->is_handlers = &fake_timer_intrhand; 1003 isp->is_handlers = &fake_timer_intrhand;
1003 isp->is_pic = &local_pic; 1004 isp->is_pic = &local_pic;
1004 ci->ci_isources[LIR_TIMER] = isp; 1005 ci->ci_isources[LIR_TIMER] = isp;
1005 evcnt_attach_dynamic(&isp->is_evcnt, 1006 evcnt_attach_dynamic(&isp->is_evcnt,
1006 first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL, 1007 first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL,
1007 device_xname(ci->ci_dev), "timer"); 1008 device_xname(ci->ci_dev), "timer");
1008 first = 0; 1009 first = 0;
1009 1010
1010#ifdef MULTIPROCESSOR 1011#ifdef MULTIPROCESSOR
1011 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1012 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1012 KASSERT(isp != NULL); 1013 KASSERT(isp != NULL);
1013 isp->is_recurse = Xrecurse_lapic_ipi; 1014 isp->is_recurse = Xrecurse_lapic_ipi;
1014 isp->is_resume = Xresume_lapic_ipi; 1015 isp->is_resume = Xresume_lapic_ipi;
1015 fake_ipi_intrhand.ih_level = IPL_HIGH; 1016 fake_ipi_intrhand.ih_level = IPL_HIGH;
1016 isp->is_handlers = &fake_ipi_intrhand; 1017 isp->is_handlers = &fake_ipi_intrhand;
1017 isp->is_pic = &local_pic; 1018 isp->is_pic = &local_pic;
1018 ci->ci_isources[LIR_IPI] = isp; 1019 ci->ci_isources[LIR_IPI] = isp;
1019 1020
1020 for (i = 0; i < X86_NIPI; i++) 1021 for (i = 0; i < X86_NIPI; i++)
1021 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC, 1022 evcnt_attach_dynamic(&ci->ci_ipi_events[i], EVCNT_TYPE_MISC,
1022 NULL, device_xname(ci->ci_dev), x86_ipi_names[i]); 1023 NULL, device_xname(ci->ci_dev), x86_ipi_names[i]);
1023#endif 1024#endif
1024#endif 1025#endif
1025 1026
1026 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1027 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1027 KASSERT(isp != NULL); 1028 KASSERT(isp != NULL);
1028 isp->is_recurse = Xpreemptrecurse; 1029 isp->is_recurse = Xpreemptrecurse;
1029 isp->is_resume = Xpreemptresume; 1030 isp->is_resume = Xpreemptresume;
1030 fake_preempt_intrhand.ih_level = IPL_PREEMPT; 1031 fake_preempt_intrhand.ih_level = IPL_PREEMPT;
1031 isp->is_handlers = &fake_preempt_intrhand; 1032 isp->is_handlers = &fake_preempt_intrhand;
1032 isp->is_pic = &softintr_pic; 1033 isp->is_pic = &softintr_pic;
1033 ci->ci_isources[SIR_PREEMPT] = isp; 1034 ci->ci_isources[SIR_PREEMPT] = isp;
1034 1035
1035 intr_calculatemasks(ci); 1036 intr_calculatemasks(ci);
1036 1037
1037#if defined(INTRSTACKSIZE) 1038#if defined(INTRSTACKSIZE)
1038 /* 1039 /*
1039 * If the red zone is activated, protect both the top and 1040 * If the red zone is activated, protect both the top and
1040 * the bottom of the stack with an unmapped page. 1041 * the bottom of the stack with an unmapped page.
1041 */ 1042 */
1042 istack = uvm_km_alloc(kernel_map, 1043 istack = uvm_km_alloc(kernel_map,
1043 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0, 1044 INTRSTACKSIZE + redzone_const_or_zero(2 * PAGE_SIZE), 0,
1044 UVM_KMF_WIRED); 1045 UVM_KMF_WIRED);
1045 if (redzone_const_or_false(true)) { 1046 if (redzone_const_or_false(true)) {
1046 pmap_kremove(istack, PAGE_SIZE); 1047 pmap_kremove(istack, PAGE_SIZE);
1047 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE); 1048 pmap_kremove(istack + INTRSTACKSIZE + PAGE_SIZE, PAGE_SIZE);
1048 pmap_update(pmap_kernel()); 1049 pmap_update(pmap_kernel());
1049 } 1050 }
1050 /* 33 used to be 1. Arbitrarily reserve 32 more register_t's 1051 /* 33 used to be 1. Arbitrarily reserve 32 more register_t's
1051 * of space for ddb(4) to examine some subroutine arguments 1052 * of space for ddb(4) to examine some subroutine arguments
1052 * and to hunt for the next stack frame. 1053 * and to hunt for the next stack frame.
1053 */ 1054 */
1054 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) + 1055 ci->ci_intrstack = (char *)istack + redzone_const_or_zero(PAGE_SIZE) +
1055 INTRSTACKSIZE - 33 * sizeof(register_t); 1056 INTRSTACKSIZE - 33 * sizeof(register_t);
1056#if defined(__x86_64__) 1057#if defined(__x86_64__)
1057 ci->ci_tss.tss_ist[0] = (uintptr_t)ci->ci_intrstack & ~0xf; 1058 ci->ci_tss.tss_ist[0] = (uintptr_t)ci->ci_intrstack & ~0xf;
1058#endif /* defined(__x86_64__) */ 1059#endif /* defined(__x86_64__) */
1059#endif /* defined(INTRSTACKSIZE) */ 1060#endif /* defined(INTRSTACKSIZE) */
1060 ci->ci_idepth = -1; 1061 ci->ci_idepth = -1;
1061} 1062}
1062 1063
#if defined(INTRDEBUG) || defined(DDB)

#ifdef DDB
/* Route output through the debugger console when built with DDB. */
#define printf db_printf
#endif

/*
 * Dump the interrupt configuration of every CPU: the per-IPL mask and
 * unmask words, then each established source with its pin, PIC and
 * maximum level, and finally the handler chain of each source.
 */
void
intr_printconfig(void)
{
	int i;
	struct intrhand *ih;
	struct intrsource *isp;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	for (CPU_INFO_FOREACH(cii, ci)) {
		printf("%s: interrupt masks:\n", device_xname(ci->ci_dev));
		for (i = 0; i < NIPL; i++)
			printf("IPL %d mask %lx unmask %lx\n", i,
			    (u_long)ci->ci_imask[i], (u_long)ci->ci_iunmask[i]);
		for (i = 0; i < MAX_INTR_SOURCES; i++) {
			isp = ci->ci_isources[i];
			if (isp == NULL)
				continue;
			printf("%s source %d is pin %d from pic %s maxlevel %d\n",
			    device_xname(ci->ci_dev), i, isp->is_pin,
			    isp->is_pic->pic_name, isp->is_maxlevel);
			for (ih = isp->is_handlers; ih != NULL;
			    ih = ih->ih_next)
				printf("\thandler %p level %d\n",
				    ih->ih_fun, ih->ih_level);

		}
	}
}
#ifdef DDB
#undef printf
#endif
#endif
1102 1103
1103void 1104void
1104softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep) 1105softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
1105{ 1106{
1106 struct intrsource *isp; 1107 struct intrsource *isp;
1107 struct cpu_info *ci; 1108 struct cpu_info *ci;
1108 u_int sir; 1109 u_int sir;
1109 1110
1110 ci = l->l_cpu; 1111 ci = l->l_cpu;
1111 1112
1112 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP); 1113 isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
1113 KASSERT(isp != NULL); 1114 KASSERT(isp != NULL);
1114 isp->is_recurse = Xsoftintr; 1115 isp->is_recurse = Xsoftintr;
1115 isp->is_resume = Xsoftintr; 1116 isp->is_resume = Xsoftintr;
1116 isp->is_pic = &softintr_pic; 1117 isp->is_pic = &softintr_pic;
1117 1118
1118 switch (level) { 1119 switch (level) {
1119 case SOFTINT_BIO: 1120 case SOFTINT_BIO:
1120 sir = SIR_BIO; 1121 sir = SIR_BIO;
1121 fake_softbio_intrhand.ih_level = IPL_SOFTBIO; 1122 fake_softbio_intrhand.ih_level = IPL_SOFTBIO;
1122 isp->is_handlers = &fake_softbio_intrhand; 1123 isp->is_handlers = &fake_softbio_intrhand;
1123 break; 1124 break;
1124 case SOFTINT_NET: 1125 case SOFTINT_NET:
1125 sir = SIR_NET; 1126 sir = SIR_NET;
1126 fake_softnet_intrhand.ih_level = IPL_SOFTNET; 1127 fake_softnet_intrhand.ih_level = IPL_SOFTNET;
1127 isp->is_handlers = &fake_softnet_intrhand; 1128 isp->is_handlers = &fake_softnet_intrhand;
1128 break; 1129 break;
1129 case SOFTINT_SERIAL: 1130 case SOFTINT_SERIAL:
1130 sir = SIR_SERIAL; 1131 sir = SIR_SERIAL;
1131 fake_softserial_intrhand.ih_level = IPL_SOFTSERIAL; 1132 fake_softserial_intrhand.ih_level = IPL_SOFTSERIAL;
1132 isp->is_handlers = &fake_softserial_intrhand; 1133 isp->is_handlers = &fake_softserial_intrhand;
1133 break; 1134 break;
1134 case SOFTINT_CLOCK: 1135 case SOFTINT_CLOCK:
1135 sir = SIR_CLOCK; 1136 sir = SIR_CLOCK;
1136 fake_softclock_intrhand.ih_level = IPL_SOFTCLOCK; 1137 fake_softclock_intrhand.ih_level = IPL_SOFTCLOCK;
1137 isp->is_handlers = &fake_softclock_intrhand; 1138 isp->is_handlers = &fake_softclock_intrhand;
1138 break; 1139 break;
1139 default: 1140 default:
1140 panic("softint_init_md"); 1141 panic("softint_init_md");
1141 } 1142 }
1142 1143
1143 KASSERT(ci->ci_isources[sir] == NULL); 1144 KASSERT(ci->ci_isources[sir] == NULL);
1144 1145
1145 *machdep = (1 << sir); 1146 *machdep = (1 << sir);
1146 ci->ci_isources[sir] = isp; 1147 ci->ci_isources[sir] = isp;
1147 ci->ci_isources[sir]->is_lwp = l; 1148 ci->ci_isources[sir]->is_lwp = l;
1148 1149
1149 intr_calculatemasks(ci); 1150 intr_calculatemasks(ci);
1150} 1151}
1151 1152
1152static void 1153static void
1153intr_redistribute_xc_t(void *arg1, void *arg2) 1154intr_redistribute_xc_t(void *arg1, void *arg2)
1154{ 1155{
1155 struct cpu_info *ci; 1156 struct cpu_info *ci;
1156 struct intrsource *isp; 1157 struct intrsource *isp;
1157 int slot; 1158 int slot;
1158 u_long psl; 1159 u_long psl;
1159 1160
1160 ci = curcpu(); 1161 ci = curcpu();
1161 isp = arg1; 1162 isp = arg1;
1162 slot = (int)(intptr_t)arg2; 1163 slot = (int)(intptr_t)arg2;
1163 1164
1164 /* Disable interrupts locally. */ 1165 /* Disable interrupts locally. */
1165 psl = x86_read_psl(); 1166 psl = x86_read_psl();
1166 x86_disable_intr(); 1167 x86_disable_intr();
1167 1168
1168 /* Hook it in and re-calculate masks. */ 1169 /* Hook it in and re-calculate masks. */
1169 ci->ci_isources[slot] = isp; 1170 ci->ci_isources[slot] = isp;
1170 intr_calculatemasks(curcpu()); 1171 intr_calculatemasks(curcpu());
1171 1172
1172 /* Re-enable interrupts locally. */ 1173 /* Re-enable interrupts locally. */
1173 x86_write_psl(psl); 1174 x86_write_psl(psl);
1174} 1175}
1175 1176
/*
 * Cross-call target, stage 1 of source migration: mask the pin on its
 * PIC, then re-route the interrupt to the new CPU.  Runs on the CPU
 * the source is being moved away from.
 */
static void
intr_redistribute_xc_s1(void *arg1, void *arg2)
{
	struct pic *pic;
	struct intrsource *isp;
	struct cpu_info *nci;
	u_long psl;

	isp = arg1;	/* the source being migrated */
	nci = arg2;	/* the destination CPU */

	/*
	 * Disable interrupts on-chip and mask the pin. Back out
	 * and let the interrupt be processed if one is pending.
	 */
	pic = isp->is_pic;
	for (;;) {
		psl = x86_read_psl();
		x86_disable_intr();
		if ((*pic->pic_trymask)(pic, isp->is_pin)) {
			break;
		}
		/* Masking did not take; re-enable, wait, and retry. */
		x86_write_psl(psl);
		DELAY(1000);
	}

	/* pic_addroute will unmask the interrupt. */
	(*pic->pic_addroute)(pic, nci, isp->is_pin, isp->is_idtvec,
	    isp->is_type);
	x86_write_psl(psl);
}
1207 1208
1208static void 1209static void
1209intr_redistribute_xc_s2(void *arg1, void *arg2) 1210intr_redistribute_xc_s2(void *arg1, void *arg2)
1210{ 1211{
1211 struct cpu_info *ci; 1212 struct cpu_info *ci;
1212 u_long psl; 1213 u_long psl;
1213 int slot; 1214 int slot;
1214 1215
1215 ci = curcpu(); 1216 ci = curcpu();
1216 slot = (int)(uintptr_t)arg1; 1217 slot = (int)(uintptr_t)arg1;
1217 1218
1218 /* Disable interrupts locally. */ 1219 /* Disable interrupts locally. */
1219 psl = x86_read_psl(); 1220 psl = x86_read_psl();
1220 x86_disable_intr(); 1221 x86_disable_intr();
1221 1222
1222 /* Patch out the source and re-calculate masks. */ 1223 /* Patch out the source and re-calculate masks. */
1223 ci->ci_isources[slot] = NULL; 1224 ci->ci_isources[slot] = NULL;
1224 intr_calculatemasks(ci); 1225 intr_calculatemasks(ci);
1225 1226
1226 /* Re-enable interrupts locally. */ 1227 /* Re-enable interrupts locally. */
1227 x86_write_psl(psl); 1228 x86_write_psl(psl);
1228} 1229}
1229 1230
/*
 * Migrate one hardware interrupt source away from the given CPU to
 * the least loaded CPU that still accepts interrupts.  Returns true
 * if a source was moved, false if nothing (more) could be migrated.
 * Must be called with cpu_lock held.
 */
static bool
intr_redistribute(struct cpu_info *oci)
{
	struct intrsource *isp;
	struct intrhand *ih;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *nci, *ici;
	int oslot, nslot;
	uint64_t where;

	KASSERT(mutex_owned(&cpu_lock));

	/* Look for an interrupt source that we can migrate. */
	for (oslot = 0; oslot < MAX_INTR_SOURCES; oslot++) {
		if ((isp = oci->ci_isources[oslot]) == NULL) {
			continue;
		}
		/* Only IO APIC sources are considered movable here. */
		if (isp->is_pic->pic_type == PIC_IOAPIC) {
			break;
		}
	}
	if (oslot == MAX_INTR_SOURCES) {
		return false;
	}

	/* Find least loaded CPU and try to move there. */
	nci = NULL;
	for (CPU_INFO_FOREACH(cii, ici)) {
		if ((ici->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
			continue;
		}
		KASSERT(ici != oci);
		if (nci == NULL || nci->ci_nintrhand > ici->ci_nintrhand) {
			nci = ici;
		}
	}
	if (nci == NULL) {
		return false;
	}
	/* Look for a free slot on the chosen CPU. */
	for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
		if (nci->ci_isources[nslot] == NULL) {
			break;
		}
	}

	/* If that did not work, allocate anywhere. */
	if (nslot == MAX_INTR_SOURCES) {
		for (CPU_INFO_FOREACH(cii, nci)) {
			if ((nci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
				continue;
			}
			KASSERT(nci != oci);
			for (nslot = 0; nslot < MAX_INTR_SOURCES; nslot++) {
				if (nci->ci_isources[nslot] == NULL) {
					break;
				}
			}
			if (nslot != MAX_INTR_SOURCES) {
				break;
			}
		}
	}
	if (nslot == MAX_INTR_SOURCES) {
		return false;
	}

	/*
	 * Now we have new CPU and new slot. Run a cross-call to set up
	 * the new vector on the target CPU.
	 */
	where = xc_unicast(0, intr_redistribute_xc_t, isp,
	    (void *)(intptr_t)nslot, nci);
	xc_wait(where);

	/*
	 * We're ready to go on the target CPU. Run a cross call to
	 * reroute the interrupt away from the source CPU.
	 */
	where = xc_unicast(0, intr_redistribute_xc_s1, isp, nci, oci);
	xc_wait(where);

	/* Sleep for (at least) 10ms to allow the change to take hold. */
	(void)kpause("intrdist", false, mstohz(10), NULL);

	/* Complete removal from the source CPU. */
	where = xc_unicast(0, intr_redistribute_xc_s2,
	    (void *)(uintptr_t)oslot, NULL, oci);
	xc_wait(where);

	/* Finally, take care of book-keeping. */
	for (ih = isp->is_handlers; ih != NULL; ih = ih->ih_next) {
		oci->ci_nintrhand--;
		nci->ci_nintrhand++;
		ih->ih_cpu = nci;
	}

	return true;
}
1328 1329
1329void 1330void
1330cpu_intr_redistribute(void) 1331cpu_intr_redistribute(void)
1331{ 1332{
1332 CPU_INFO_ITERATOR cii; 1333 CPU_INFO_ITERATOR cii;
1333 struct cpu_info *ci; 1334 struct cpu_info *ci;
1334 1335
1335 KASSERT(mutex_owned(&cpu_lock)); 1336 KASSERT(mutex_owned(&cpu_lock));
1336 KASSERT(mp_online); 1337 KASSERT(mp_online);
1337 1338
1338 /* Direct interrupts away from shielded CPUs. */ 1339 /* Direct interrupts away from shielded CPUs. */
1339 for (CPU_INFO_FOREACH(cii, ci)) { 1340 for (CPU_INFO_FOREACH(cii, ci)) {
1340 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) { 1341 if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) == 0) {
1341 continue; 1342 continue;
1342 } 1343 }
1343 while (intr_redistribute(ci)) { 1344 while (intr_redistribute(ci)) {
1344 /* nothing */ 1345 /* nothing */
1345 } 1346 }
1346 } 1347 }
1347 1348
1348 /* XXX should now re-balance */ 1349 /* XXX should now re-balance */
1349} 1350}
1350 1351
1351u_int 1352u_int
1352cpu_intr_count(struct cpu_info *ci) 1353cpu_intr_count(struct cpu_info *ci)
1353{ 1354{
1354 1355
1355 KASSERT(ci->ci_nintrhand >= 0); 1356 KASSERT(ci->ci_nintrhand >= 0);
1356 1357
1357 return ci->ci_nintrhand; 1358 return ci->ci_nintrhand;
1358} 1359}