| @@ -1,646 +1,648 @@ | | | @@ -1,646 +1,648 @@ |
1 | /* $NetBSD: intr.c,v 1.20 2008/01/02 11:48:27 ad Exp $ */ | | 1 | /* $NetBSD: intr.c,v 1.21 2009/01/04 15:34:18 tsutsui Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright 2002 Wasabi Systems, Inc. | | 4 | * Copyright 2002 Wasabi Systems, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. | | 7 | * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc. |
8 | * | | 8 | * |
9 | * Redistribution and use in source and binary forms, with or without | | 9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | | 10 | * modification, are permitted provided that the following conditions |
11 | * are met: | | 11 | * are met: |
12 | * 1. Redistributions of source code must retain the above copyright | | 12 | * 1. Redistributions of source code must retain the above copyright |
13 | * notice, this list of conditions and the following disclaimer. | | 13 | * notice, this list of conditions and the following disclaimer. |
14 | * 2. Redistributions in binary form must reproduce the above copyright | | 14 | * 2. Redistributions in binary form must reproduce the above copyright |
15 | * notice, this list of conditions and the following disclaimer in the | | 15 | * notice, this list of conditions and the following disclaimer in the |
16 | * documentation and/or other materials provided with the distribution. | | 16 | * documentation and/or other materials provided with the distribution. |
17 | * 3. All advertising materials mentioning features or use of this software | | 17 | * 3. All advertising materials mentioning features or use of this software |
18 | * must display the following acknowledgement: | | 18 | * must display the following acknowledgement: |
19 | * This product includes software developed for the NetBSD Project by | | 19 | * This product includes software developed for the NetBSD Project by |
20 | * Wasabi Systems, Inc. | | 20 | * Wasabi Systems, Inc. |
21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse | | 21 | * 4. The name of Wasabi Systems, Inc. may not be used to endorse |
22 | * or promote products derived from this software without specific prior | | 22 | * or promote products derived from this software without specific prior |
23 | * written permission. | | 23 | * written permission. |
24 | * | | 24 | * |
25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND | | 25 | * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND |
26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 26 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 27 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC | | 28 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC |
29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 29 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 32 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 33 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
35 | * POSSIBILITY OF SUCH DAMAGE. | | 35 | * POSSIBILITY OF SUCH DAMAGE. |
36 | */ | | 36 | */ |
37 | | | 37 | |
38 | #include <sys/cdefs.h> | | 38 | #include <sys/cdefs.h> |
39 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.20 2008/01/02 11:48:27 ad Exp $"); | | 39 | __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.21 2009/01/04 15:34:18 tsutsui Exp $"); |
40 | | | 40 | |
41 | #include <sys/param.h> | | 41 | #include <sys/param.h> |
42 | #include <sys/malloc.h> | | 42 | #include <sys/malloc.h> |
43 | #include <sys/kernel.h> | | 43 | #include <sys/kernel.h> |
44 | #include <sys/evcnt.h> | | 44 | #include <sys/evcnt.h> |
45 | | | 45 | |
46 | #include <uvm/uvm_extern.h> | | 46 | #include <uvm/uvm_extern.h> |
47 | | | 47 | |
48 | #include <machine/intr.h> | | 48 | #include <machine/intr.h> |
49 | #include <machine/psl.h> | | 49 | #include <machine/psl.h> |
50 | | | 50 | |
51 | #include <powerpc/cpu.h> | | 51 | #include <powerpc/cpu.h> |
52 | #include <powerpc/spr.h> | | 52 | #include <powerpc/spr.h> |
53 | | | 53 | |
54 | | | 54 | |
55 | /* | | 55 | /* |
56 | * Number of interrupts (hard + soft), irq number legality test, | | 56 | * Number of interrupts (hard + soft), irq number legality test, |
57 | * mapping of irq number to mask and a way to pick irq number | | 57 | * mapping of irq number to mask and a way to pick irq number |
58 | * off a mask of active intrs. | | 58 | * off a mask of active intrs. |
59 | */ | | 59 | */ |
60 | #define ICU_LEN 32 | | 60 | #define ICU_LEN 32 |
61 | #define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN) | | 61 | #define LEGAL_IRQ(x) ((x) >= 0 && (x) < ICU_LEN) |
62 | | | 62 | |
63 | #define IRQ_TO_MASK(irq) (0x80000000UL >> (irq)) | | 63 | #define IRQ_TO_MASK(irq) (0x80000000UL >> (irq)) |
64 | #define IRQ_OF_MASK(mask) cntlzw(mask) | | 64 | #define IRQ_OF_MASK(mask) cntlzw(mask) |
65 | | | 65 | |
66 | /* | | 66 | /* |
67 | * Assign these to unused (reserved) interrupt bits. | | 67 | * Assign these to unused (reserved) interrupt bits. |
68 | * | | 68 | * |
69 | * For 405GP (and 403CGX?) interrupt bits 0-18 and 25-31 are used | | 69 | * For 405GP (and 403CGX?) interrupt bits 0-18 and 25-31 are used |
70 | * by hardware. This leaves us bits 19-24 for software. | | 70 | * by hardware. This leaves us bits 19-24 for software. |
71 | */ | | 71 | */ |
72 | #define IRQ_SOFTNET 19 | | 72 | #define IRQ_SOFTNET 19 |
73 | #define IRQ_SOFTCLOCK 20 | | 73 | #define IRQ_SOFTCLOCK 20 |
74 | #define IRQ_SOFTSERIAL 21 | | 74 | #define IRQ_SOFTSERIAL 21 |
75 | #define IRQ_CLOCK 22 | | 75 | #define IRQ_CLOCK 22 |
76 | #define IRQ_STATCLOCK 23 | | 76 | #define IRQ_STATCLOCK 23 |
77 | | | 77 | |
78 | /* | | 78 | /* |
79 | * Platform specific code may override any of the above. | | 79 | * Platform specific code may override any of the above. |
80 | */ | | 80 | */ |
81 | #ifdef PPC_IBM403 | | 81 | #ifdef PPC_IBM403 |
82 | | | 82 | |
83 | #include <powerpc/ibm4xx/dcr403cgx.h> | | 83 | #include <powerpc/ibm4xx/dcr403cgx.h> |
84 | #define INTR_STATUS DCR_EXISR | | 84 | #define INTR_STATUS DCR_EXISR |
85 | #define INTR_ACK DCR_EXISR | | 85 | #define INTR_ACK DCR_EXISR |
86 | #define INTR_ENABLE DCR_EXIER | | 86 | #define INTR_ENABLE DCR_EXIER |
87 | | | 87 | |
88 | #elif defined(__virtex__) | | 88 | #elif defined(__virtex__) |
89 | | | 89 | |
90 | #include <evbppc/virtex/dev/xintcreg.h> | | 90 | #include <evbppc/virtex/dev/xintcreg.h> |
91 | #define INTR_STATUS XINTC_ISR | | 91 | #define INTR_STATUS XINTC_ISR |
92 | #define INTR_ACK XINTC_IAR | | 92 | #define INTR_ACK XINTC_IAR |
93 | #define INTR_ENABLE XINTC_IER | | 93 | #define INTR_ENABLE XINTC_IER |
94 | #define INTR_MASTER XINTC_MER | | 94 | #define INTR_MASTER XINTC_MER |
95 | #define INTR_MASTER_EN (MER_HIE|MER_ME) | | 95 | #define INTR_MASTER_EN (MER_HIE|MER_ME) |
96 | #undef IRQ_TO_MASK | | 96 | #undef IRQ_TO_MASK |
97 | #undef IRQ_OF_MASK | | 97 | #undef IRQ_OF_MASK |
98 | #undef IRQ_SOFTNET | | 98 | #undef IRQ_SOFTNET |
99 | #undef IRQ_SOFTCLOCK | | 99 | #undef IRQ_SOFTCLOCK |
100 | #undef IRQ_SOFTSERIAL | | 100 | #undef IRQ_SOFTSERIAL |
101 | #undef IRQ_CLOCK | | 101 | #undef IRQ_CLOCK |
102 | #undef IRQ_STATCLOCK | | 102 | #undef IRQ_STATCLOCK |
103 | #define IRQ_TO_MASK(i) (1 << (i)) /* Redefine mappings */ | | 103 | #define IRQ_TO_MASK(i) (1 << (i)) /* Redefine mappings */ |
104 | #define IRQ_OF_MASK(m) (31 - cntlzw(m)) | | 104 | #define IRQ_OF_MASK(m) (31 - cntlzw(m)) |
105 | #define IRQ_SOFTNET 31 /* Redefine "unused" pins */ | | 105 | #define IRQ_SOFTNET 31 /* Redefine "unused" pins */ |
106 | #define IRQ_SOFTCLOCK 30 | | 106 | #define IRQ_SOFTCLOCK 30 |
107 | #define IRQ_SOFTSERIAL 29 | | 107 | #define IRQ_SOFTSERIAL 29 |
108 | #define IRQ_CLOCK 28 | | 108 | #define IRQ_CLOCK 28 |
109 | #define IRQ_STATCLOCK 27 | | 109 | #define IRQ_STATCLOCK 27 |
110 | | | 110 | |
111 | #else /* Generic 405 Universal Interrupt Controller */ | | 111 | #else /* Generic 405 Universal Interrupt Controller */ |
112 | | | 112 | |
113 | #include <powerpc/ibm4xx/dcr405gp.h> | | 113 | #include <powerpc/ibm4xx/dcr405gp.h> |
114 | #define INTR_STATUS DCR_UIC0_MSR | | 114 | #define INTR_STATUS DCR_UIC0_MSR |
115 | #define INTR_ACK DCR_UIC0_SR | | 115 | #define INTR_ACK DCR_UIC0_SR |
116 | #define INTR_ENABLE DCR_UIC0_ER | | 116 | #define INTR_ENABLE DCR_UIC0_ER |
117 | | | 117 | |
118 | #endif | | 118 | #endif |
119 | | | 119 | |
120 | #define MASK_SOFTNET IRQ_TO_MASK(IRQ_SOFTNET) | | 120 | #define MASK_SOFTNET IRQ_TO_MASK(IRQ_SOFTNET) |
121 | #define MASK_SOFTCLOCK IRQ_TO_MASK(IRQ_SOFTCLOCK) | | 121 | #define MASK_SOFTCLOCK IRQ_TO_MASK(IRQ_SOFTCLOCK) |
122 | #define MASK_SOFTSERIAL IRQ_TO_MASK(IRQ_SOFTSERIAL) | | 122 | #define MASK_SOFTSERIAL IRQ_TO_MASK(IRQ_SOFTSERIAL) |
123 | #define MASK_STATCLOCK IRQ_TO_MASK(IRQ_STATCLOCK) | | 123 | #define MASK_STATCLOCK IRQ_TO_MASK(IRQ_STATCLOCK) |
124 | #define MASK_CLOCK (IRQ_TO_MASK(IRQ_CLOCK) | IRQ_TO_MASK(IRQ_STATCLOCK)) | | 124 | #define MASK_CLOCK (IRQ_TO_MASK(IRQ_CLOCK) | IRQ_TO_MASK(IRQ_STATCLOCK)) |
125 | #define MASK_SOFTINTR (MASK_SOFTCLOCK|MASK_SOFTNET|MASK_SOFTSERIAL) | | 125 | #define MASK_SOFTINTR (MASK_SOFTCLOCK|MASK_SOFTNET|MASK_SOFTSERIAL) |
126 | #define MASK_HARDINTR ~(MASK_SOFTINTR|MASK_CLOCK) | | 126 | #define MASK_HARDINTR ~(MASK_SOFTINTR|MASK_CLOCK) |
127 | | | 127 | |
128 | static inline void disable_irq(int); | | 128 | static inline void disable_irq(int); |
129 | static inline void enable_irq(int); | | 129 | static inline void enable_irq(int); |
130 | static void intr_calculatemasks(void); | | 130 | static void intr_calculatemasks(void); |
131 | static void do_pending_int(void); | | 131 | static void do_pending_int(void); |
132 | static const char *intr_typename(int); | | 132 | static const char *intr_typename(int); |
133 | | | 133 | |
/*
 * Interrupt handler chains.  intr_establish() inserts a handler into
 * the list.  The handler is called with its (single) argument.
 */
struct intrhand {
	int		(*ih_fun)(void *);	/* handler function */
	void		*ih_arg;		/* sole argument passed to it */
	struct intrhand	*ih_next;		/* next handler on this irq */
	int		ih_level;		/* IPL the handler was
						 * registered at */
};

/* Per-irq bookkeeping: handler chain, spl mask and trigger type. */
struct intrsrc {
	struct evcnt	is_evcnt;	/* interrupt counter for vmstat -i */
	struct intrhand	*is_head;	/* head of handler chain */
	u_int		is_mask;
	int		is_level;	/* spls bitmask */
	int		is_type;	/* sensitivity (IST_*) */
};


volatile u_int		imask[NIPL];	/* irq mask per spl level */
const int		mask_clock = MASK_CLOCK;
const int		mask_statclock = MASK_STATCLOCK;

static struct intrsrc intrs[ICU_LEN] = {
#define	DEFINTR(name)	\
	{ EVCNT_INITIALIZER(EVCNT_TYPE_INTR, NULL, "uic", name), NULL, 0, 0 }

	DEFINTR("pin0"),	DEFINTR("pin1"),	DEFINTR("pin2"),
	DEFINTR("pin3"),	DEFINTR("pin4"),	DEFINTR("pin5"),
	DEFINTR("pin6"),	DEFINTR("pin7"),	DEFINTR("pin8"),
	DEFINTR("pin9"),	DEFINTR("pin10"),	DEFINTR("pin11"),
	DEFINTR("pin12"),	DEFINTR("pin13"),	DEFINTR("pin14"),
	DEFINTR("pin15"),	DEFINTR("pin16"),	DEFINTR("pin17"),
	DEFINTR("pin18"),

	/* Reserved intrs, accounted in cpu_info */
	DEFINTR(NULL),		/* unused "pin19", softnet */
	DEFINTR(NULL),		/* unused "pin20", softclock */
	DEFINTR(NULL),		/* unused "pin21", softserial */
	DEFINTR(NULL),		/* unused "pin22", PIT hardclock */
	DEFINTR(NULL),		/* unused "pin23", FIT statclock */

	DEFINTR("pin24"),	DEFINTR("pin25"),	DEFINTR("pin26"),
	DEFINTR("pin27"),	DEFINTR("pin28"),	DEFINTR("pin29"),
	DEFINTR("pin30"),	DEFINTR("pin31")

#undef DEFINTR
};


/* Write External Enable Immediate */
#define	wrteei(en) 		__asm volatile ("wrteei %0" : : "K"(en))

/* Enforce In Order Execution Of I/O */
#define	eieio() 		__asm volatile ("eieio")
191 | | | 191 | |
192 | /* | | 192 | /* |
193 | * Set up interrupt mapping array. | | 193 | * Set up interrupt mapping array. |
194 | */ | | 194 | */ |
195 | void | | 195 | void |
196 | intr_init(void) | | 196 | intr_init(void) |
197 | { | | 197 | { |
198 | int i; | | 198 | int i; |
199 | | | 199 | |
200 | for (i = 0; i < ICU_LEN; i++) | | 200 | for (i = 0; i < ICU_LEN; i++) |
201 | switch (i) { | | 201 | switch (i) { |
202 | case IRQ_SOFTNET: | | 202 | case IRQ_SOFTNET: |
203 | case IRQ_SOFTCLOCK: | | 203 | case IRQ_SOFTCLOCK: |
204 | case IRQ_SOFTSERIAL: | | 204 | case IRQ_SOFTSERIAL: |
205 | case IRQ_CLOCK: | | 205 | case IRQ_CLOCK: |
206 | case IRQ_STATCLOCK: | | 206 | case IRQ_STATCLOCK: |
207 | continue; | | 207 | continue; |
208 | default: | | 208 | default: |
209 | evcnt_attach_static(&intrs[i].is_evcnt); | | 209 | evcnt_attach_static(&intrs[i].is_evcnt); |
210 | } | | 210 | } |
211 | | | 211 | |
212 | /* Initialized in powerpc/ibm4xx/cpu.c */ | | 212 | /* Initialized in powerpc/ibm4xx/cpu.c */ |
213 | evcnt_attach_static(&curcpu()->ci_ev_softclock); | | 213 | evcnt_attach_static(&curcpu()->ci_ev_softclock); |
214 | evcnt_attach_static(&curcpu()->ci_ev_softnet); | | 214 | evcnt_attach_static(&curcpu()->ci_ev_softnet); |
215 | evcnt_attach_static(&curcpu()->ci_ev_softserial); | | 215 | evcnt_attach_static(&curcpu()->ci_ev_softserial); |
216 | | | 216 | |
217 | mtdcr(INTR_ENABLE, 0x00000000); /* mask all */ | | 217 | mtdcr(INTR_ENABLE, 0x00000000); /* mask all */ |
218 | mtdcr(INTR_ACK, 0xffffffff); /* acknowledge all */ | | 218 | mtdcr(INTR_ACK, 0xffffffff); /* acknowledge all */ |
219 | #ifdef INTR_MASTER | | 219 | #ifdef INTR_MASTER |
220 | mtdcr(INTR_MASTER, INTR_MASTER_EN); /* enable controller */ | | 220 | mtdcr(INTR_MASTER, INTR_MASTER_EN); /* enable controller */ |
221 | #endif | | 221 | #endif |
222 | } | | 222 | } |
223 | | | 223 | |
224 | /* | | 224 | /* |
225 | * external interrupt handler | | 225 | * external interrupt handler |
226 | */ | | 226 | */ |
227 | void | | 227 | void |
228 | ext_intr(void) | | 228 | ext_intr(void) |
229 | { | | 229 | { |
230 | struct cpu_info *ci = curcpu(); | | 230 | struct cpu_info *ci = curcpu(); |
231 | struct intrhand *ih; | | 231 | struct intrhand *ih; |
232 | int i, bits_to_clear; | | 232 | int i, bits_to_clear; |
233 | int r_imen, msr; | | 233 | int r_imen, msr; |
234 | int pcpl; | | 234 | int pcpl; |
235 | u_long int_state; | | 235 | u_long int_state; |
236 | | | 236 | |
237 | pcpl = ci->ci_cpl; | | 237 | pcpl = ci->ci_cpl; |
238 | msr = mfmsr(); | | 238 | msr = mfmsr(); |
239 | | | 239 | |
240 | int_state = mfdcr(INTR_STATUS); /* Read non-masked interrupt status */ | | 240 | int_state = mfdcr(INTR_STATUS); /* Read non-masked interrupt status */ |
241 | bits_to_clear = int_state; | | 241 | bits_to_clear = int_state; |
242 | | | 242 | |
243 | while (int_state) { | | 243 | while (int_state) { |
244 | i = IRQ_OF_MASK(int_state); | | 244 | i = IRQ_OF_MASK(int_state); |
245 | | | 245 | |
246 | r_imen = IRQ_TO_MASK(i); | | 246 | r_imen = IRQ_TO_MASK(i); |
247 | int_state &= ~r_imen; | | 247 | int_state &= ~r_imen; |
248 | | | 248 | |
249 | if ((pcpl & r_imen) != 0) { | | 249 | if ((pcpl & r_imen) != 0) { |
250 | /* Masked! Mark as pending */ | | 250 | /* Masked! Mark as pending */ |
251 | ci->ci_ipending |= r_imen; | | 251 | ci->ci_ipending |= r_imen; |
252 | disable_irq(i); | | 252 | disable_irq(i); |
253 | } else { | | 253 | } else { |
| | | 254 | ci->ci_idepth++; |
254 | splraise(intrs[i].is_mask); | | 255 | splraise(intrs[i].is_mask); |
255 | if (intrs[i].is_type == IST_LEVEL) | | 256 | if (intrs[i].is_type == IST_LEVEL) |
256 | disable_irq(i); | | 257 | disable_irq(i); |
257 | wrteei(1); | | 258 | wrteei(1); |
258 | | | 259 | |
259 | ih = intrs[i].is_head; | | 260 | ih = intrs[i].is_head; |
260 | while (ih) { | | 261 | while (ih) { |
261 | if (ih->ih_level == IPL_VM) | | 262 | if (ih->ih_level == IPL_VM) |
262 | KERNEL_LOCK(1, NULL); | | 263 | KERNEL_LOCK(1, NULL); |
263 | (*ih->ih_fun)(ih->ih_arg); | | 264 | (*ih->ih_fun)(ih->ih_arg); |
264 | ih = ih->ih_next; | | 265 | ih = ih->ih_next; |
265 | if (ih->ih_level == IPL_VM) | | 266 | if (ih->ih_level == IPL_VM) |
266 | KERNEL_UNLOCK_ONE(NULL); | | 267 | KERNEL_UNLOCK_ONE(NULL); |
267 | } | | 268 | } |
268 | | | 269 | |
269 | mtmsr(msr); | | 270 | mtmsr(msr); |
270 | if (intrs[i].is_type == IST_LEVEL) | | 271 | if (intrs[i].is_type == IST_LEVEL) |
271 | enable_irq(i); | | 272 | enable_irq(i); |
272 | ci->ci_cpl = pcpl; | | 273 | ci->ci_cpl = pcpl; |
273 | uvmexp.intrs++; | | 274 | uvmexp.intrs++; |
274 | intrs[i].is_evcnt.ev_count++; | | 275 | intrs[i].is_evcnt.ev_count++; |
| | | 276 | ci->ci_idepth--; |
275 | } | | 277 | } |
276 | } | | 278 | } |
277 | mtdcr(INTR_ACK, bits_to_clear); /* Acknowledge all pending interrupts */ | | 279 | mtdcr(INTR_ACK, bits_to_clear); /* Acknowledge all pending interrupts */ |
278 | | | 280 | |
279 | wrteei(1); | | 281 | wrteei(1); |
280 | splx(pcpl); | | 282 | splx(pcpl); |
281 | mtmsr(msr); | | 283 | mtmsr(msr); |
282 | } | | 284 | } |
283 | | | 285 | |
284 | static inline void | | 286 | static inline void |
285 | disable_irq(int irq) | | 287 | disable_irq(int irq) |
286 | { | | 288 | { |
287 | int mask, omask; | | 289 | int mask, omask; |
288 | | | 290 | |
289 | mask = omask = mfdcr(INTR_ENABLE); | | 291 | mask = omask = mfdcr(INTR_ENABLE); |
290 | mask &= ~IRQ_TO_MASK(irq); | | 292 | mask &= ~IRQ_TO_MASK(irq); |
291 | if (mask == omask) | | 293 | if (mask == omask) |
292 | return; | | 294 | return; |
293 | mtdcr(INTR_ENABLE, mask); | | 295 | mtdcr(INTR_ENABLE, mask); |
294 | #ifdef IRQ_DEBUG | | 296 | #ifdef IRQ_DEBUG |
295 | printf("irq_disable: irq=%d, mask=%08x\n",irq,mask); | | 297 | printf("irq_disable: irq=%d, mask=%08x\n",irq,mask); |
296 | #endif | | 298 | #endif |
297 | } | | 299 | } |
298 | | | 300 | |
299 | static inline void | | 301 | static inline void |
300 | enable_irq(int irq) | | 302 | enable_irq(int irq) |
301 | { | | 303 | { |
302 | int mask, omask; | | 304 | int mask, omask; |
303 | | | 305 | |
304 | mask = omask = mfdcr(INTR_ENABLE); | | 306 | mask = omask = mfdcr(INTR_ENABLE); |
305 | mask |= IRQ_TO_MASK(irq); | | 307 | mask |= IRQ_TO_MASK(irq); |
306 | if (mask == omask) | | 308 | if (mask == omask) |
307 | return; | | 309 | return; |
308 | mtdcr(INTR_ENABLE, mask); | | 310 | mtdcr(INTR_ENABLE, mask); |
309 | #ifdef IRQ_DEBUG | | 311 | #ifdef IRQ_DEBUG |
310 | printf("enable_irq: irq=%d, mask=%08x\n",irq,mask); | | 312 | printf("enable_irq: irq=%d, mask=%08x\n",irq,mask); |
311 | #endif | | 313 | #endif |
312 | } | | 314 | } |
313 | | | 315 | |
314 | static const char * | | 316 | static const char * |
315 | intr_typename(int type) | | 317 | intr_typename(int type) |
316 | { | | 318 | { |
317 | | | 319 | |
318 | switch (type) { | | 320 | switch (type) { |
319 | case IST_NONE : | | 321 | case IST_NONE : |
320 | return ("none"); | | 322 | return ("none"); |
321 | case IST_PULSE: | | 323 | case IST_PULSE: |
322 | return ("pulsed"); | | 324 | return ("pulsed"); |
323 | case IST_EDGE: | | 325 | case IST_EDGE: |
324 | return ("edge-triggered"); | | 326 | return ("edge-triggered"); |
325 | case IST_LEVEL: | | 327 | case IST_LEVEL: |
326 | return ("level-triggered"); | | 328 | return ("level-triggered"); |
327 | default: | | 329 | default: |
328 | panic("intr_typename: invalid type %d", type); | | 330 | panic("intr_typename: invalid type %d", type); |
329 | } | | 331 | } |
330 | } | | 332 | } |
331 | | | 333 | |
332 | /* | | 334 | /* |
333 | * Register an interrupt handler. | | 335 | * Register an interrupt handler. |
334 | */ | | 336 | */ |
335 | void * | | 337 | void * |
336 | intr_establish(int irq, int type, int level, int (*ih_fun)(void *), | | 338 | intr_establish(int irq, int type, int level, int (*ih_fun)(void *), |
337 | void *ih_arg) | | 339 | void *ih_arg) |
338 | { | | 340 | { |
339 | struct intrhand *ih; | | 341 | struct intrhand *ih; |
340 | int msr; | | 342 | int msr; |
341 | | | 343 | |
342 | if (! LEGAL_IRQ(irq)) | | 344 | if (! LEGAL_IRQ(irq)) |
343 | panic("intr_establish: bogus irq %d", irq); | | 345 | panic("intr_establish: bogus irq %d", irq); |
344 | | | 346 | |
345 | if (type == IST_NONE) | | 347 | if (type == IST_NONE) |
346 | panic("intr_establish: bogus type %d for irq %d", type, irq); | | 348 | panic("intr_establish: bogus type %d for irq %d", type, irq); |
347 | | | 349 | |
348 | /* No point in sleeping unless someone can free memory. */ | | 350 | /* No point in sleeping unless someone can free memory. */ |
349 | ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK); | | 351 | ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK); |
350 | if (ih == NULL) | | 352 | if (ih == NULL) |
351 | panic("intr_establish: can't malloc handler info"); | | 353 | panic("intr_establish: can't malloc handler info"); |
352 | | | 354 | |
353 | switch (intrs[irq].is_type) { | | 355 | switch (intrs[irq].is_type) { |
354 | case IST_NONE: | | 356 | case IST_NONE: |
355 | intrs[irq].is_type = type; | | 357 | intrs[irq].is_type = type; |
356 | break; | | 358 | break; |
357 | | | 359 | |
358 | case IST_EDGE: | | 360 | case IST_EDGE: |
359 | case IST_LEVEL: | | 361 | case IST_LEVEL: |
360 | if (type == intrs[irq].is_type) | | 362 | if (type == intrs[irq].is_type) |
361 | break; | | 363 | break; |
362 | /* FALLTHROUGH */ | | 364 | /* FALLTHROUGH */ |
363 | | | 365 | |
364 | case IST_PULSE: | | 366 | case IST_PULSE: |
365 | if (type != IST_NONE) | | 367 | if (type != IST_NONE) |
366 | panic("intr_establish: can't share %s with %s", | | 368 | panic("intr_establish: can't share %s with %s", |
367 | intr_typename(intrs[irq].is_type), | | 369 | intr_typename(intrs[irq].is_type), |
368 | intr_typename(type)); | | 370 | intr_typename(type)); |
369 | break; | | 371 | break; |
370 | } | | 372 | } |
371 | | | 373 | |
372 | /* | | 374 | /* |
373 | * We're not on critical paths, so just block intrs for a while. | | 375 | * We're not on critical paths, so just block intrs for a while. |
374 | * Note that spl*() at this point would use old (wrong) masks. | | 376 | * Note that spl*() at this point would use old (wrong) masks. |
375 | */ | | 377 | */ |
376 | msr = mfmsr(); | | 378 | msr = mfmsr(); |
377 | wrteei(0); | | 379 | wrteei(0); |
378 | | | 380 | |
379 | /* | | 381 | /* |
380 | * Poke the real handler in now. We deliberately don't preserve order, | | 382 | * Poke the real handler in now. We deliberately don't preserve order, |
381 | * the user is not allowed to make any assumptions about it anyway. | | 383 | * the user is not allowed to make any assumptions about it anyway. |
382 | */ | | 384 | */ |
383 | ih->ih_fun = ih_fun; | | 385 | ih->ih_fun = ih_fun; |
384 | ih->ih_arg = ih_arg; | | 386 | ih->ih_arg = ih_arg; |
385 | ih->ih_level = level; | | 387 | ih->ih_level = level; |
386 | ih->ih_next = intrs[irq].is_head; | | 388 | ih->ih_next = intrs[irq].is_head; |
387 | intrs[irq].is_head = ih; | | 389 | intrs[irq].is_head = ih; |
388 | | | 390 | |
389 | intr_calculatemasks(); | | 391 | intr_calculatemasks(); |
390 | | | 392 | |
391 | eieio(); | | 393 | eieio(); |
392 | mtmsr(msr); | | 394 | mtmsr(msr); |
393 | | | 395 | |
394 | #ifdef IRQ_DEBUG | | 396 | #ifdef IRQ_DEBUG |
395 | printf("***** intr_establish: irq%d h=%p arg=%p\n",irq, ih_fun, ih_arg); | | 397 | printf("***** intr_establish: irq%d h=%p arg=%p\n",irq, ih_fun, ih_arg); |
396 | #endif | | 398 | #endif |
397 | return (ih); | | 399 | return (ih); |
398 | } | | 400 | } |
399 | | | 401 | |
400 | /* | | 402 | /* |
401 | * Deregister an interrupt handler. | | 403 | * Deregister an interrupt handler. |
402 | */ | | 404 | */ |
403 | void | | 405 | void |
404 | intr_disestablish(void *arg) | | 406 | intr_disestablish(void *arg) |
405 | { | | 407 | { |
406 | struct intrhand *ih = arg; | | 408 | struct intrhand *ih = arg; |
407 | struct intrhand **p; | | 409 | struct intrhand **p; |
408 | int i, msr; | | 410 | int i, msr; |
409 | | | 411 | |
410 | /* Lookup the handler. This is expensive, but not run often. */ | | 412 | /* Lookup the handler. This is expensive, but not run often. */ |
411 | for (i = 0; i < ICU_LEN; i++) | | 413 | for (i = 0; i < ICU_LEN; i++) |
412 | for (p = &intrs[i].is_head; *p != NULL; p = &(*p)->ih_next) | | 414 | for (p = &intrs[i].is_head; *p != NULL; p = &(*p)->ih_next) |
413 | if (*p == ih) | | 415 | if (*p == ih) |
414 | goto out; | | 416 | goto out; |
415 | out: | | 417 | out: |
416 | if (i == ICU_LEN) | | 418 | if (i == ICU_LEN) |
417 | panic("intr_disestablish: handler not registered"); | | 419 | panic("intr_disestablish: handler not registered"); |
418 | | | 420 | |
419 | *p = ih->ih_next; | | 421 | *p = ih->ih_next; |
420 | free(ih, M_DEVBUF); | | 422 | free(ih, M_DEVBUF); |
421 | | | 423 | |
422 | msr = mfmsr(); | | 424 | msr = mfmsr(); |
423 | wrteei(0); | | 425 | wrteei(0); |
424 | intr_calculatemasks(); | | 426 | intr_calculatemasks(); |
425 | mtmsr(msr); | | 427 | mtmsr(msr); |
426 | | | 428 | |
427 | if (intrs[i].is_head == NULL) | | 429 | if (intrs[i].is_head == NULL) |
428 | intrs[i].is_type = IST_NONE; | | 430 | intrs[i].is_type = IST_NONE; |
429 | } | | 431 | } |
430 | | | 432 | |
431 | /* | | 433 | /* |
432 | * Recalculate the interrupt masks from scratch. | | 434 | * Recalculate the interrupt masks from scratch. |
433 | * We could code special registry and deregistry versions of this function that | | 435 | * We could code special registry and deregistry versions of this function that |
434 | * would be faster, but the code would be nastier, and we don't expect this to | | 436 | * would be faster, but the code would be nastier, and we don't expect this to |
435 | * happen very much anyway. We assume PSL_EE is clear when we're called. | | 437 | * happen very much anyway. We assume PSL_EE is clear when we're called. |
436 | */ | | 438 | */ |
437 | static void | | 439 | static void |
438 | intr_calculatemasks(void) | | 440 | intr_calculatemasks(void) |
439 | { | | 441 | { |
440 | struct intrhand *q; | | 442 | struct intrhand *q; |
441 | int irq, level; | | 443 | int irq, level; |
442 | | | 444 | |
443 | /* First, figure out which levels each IRQ uses. */ | | 445 | /* First, figure out which levels each IRQ uses. */ |
444 | for (irq = 0; irq < ICU_LEN; irq++) { | | 446 | for (irq = 0; irq < ICU_LEN; irq++) { |
445 | register int levels = 0; | | 447 | register int levels = 0; |
446 | for (q = intrs[irq].is_head; q; q = q->ih_next) | | 448 | for (q = intrs[irq].is_head; q; q = q->ih_next) |
447 | levels |= 1 << q->ih_level; | | 449 | levels |= 1 << q->ih_level; |
448 | intrs[irq].is_level = levels; | | 450 | intrs[irq].is_level = levels; |
449 | } | | 451 | } |
450 | | | 452 | |
451 | /* Then figure out which IRQs use each level. */ | | 453 | /* Then figure out which IRQs use each level. */ |
452 | for (level = 0; level < NIPL; level++) { | | 454 | for (level = 0; level < NIPL; level++) { |
453 | register int irqs = 0; | | 455 | register int irqs = 0; |
454 | for (irq = 0; irq < ICU_LEN; irq++) | | 456 | for (irq = 0; irq < ICU_LEN; irq++) |
455 | if (intrs[irq].is_level & (1 << level)) | | 457 | if (intrs[irq].is_level & (1 << level)) |
456 | irqs |= IRQ_TO_MASK(irq); | | 458 | irqs |= IRQ_TO_MASK(irq); |
457 | imask[level] = irqs | MASK_SOFTINTR; | | 459 | imask[level] = irqs | MASK_SOFTINTR; |
458 | } | | 460 | } |
459 | | | 461 | |
460 | /* | | 462 | /* |
461 | * Enforce a hierarchy that gives slow devices a better chance at not | | 463 | * Enforce a hierarchy that gives slow devices a better chance at not |
462 | * dropping data. | | 464 | * dropping data. |
463 | */ | | 465 | */ |
464 | | | 466 | |
465 | /* | | 467 | /* |
466 | * Initialize the soft interrupt masks to block themselves. | | 468 | * Initialize the soft interrupt masks to block themselves. |
467 | */ | | 469 | */ |
468 | imask[IPL_NONE] = 0; | | 470 | imask[IPL_NONE] = 0; |
469 | imask[IPL_SOFTCLOCK] |= MASK_SOFTCLOCK; | | 471 | imask[IPL_SOFTCLOCK] |= MASK_SOFTCLOCK; |
470 | imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK] | MASK_SOFTNET; | | 472 | imask[IPL_SOFTNET] |= imask[IPL_SOFTCLOCK] | MASK_SOFTNET; |
471 | imask[IPL_SOFTSERIAL] = imask[IPL_SOFTNET] | MASK_SOFTSERIAL; | | 473 | imask[IPL_SOFTSERIAL] = imask[IPL_SOFTNET] | MASK_SOFTSERIAL; |
472 | imask[IPL_VM] |= imask[IPL_SOFTSERIAL]; | | 474 | imask[IPL_VM] |= imask[IPL_SOFTSERIAL]; |
473 | imask[IPL_SCHED] = imask[IPL_VM] | MASK_CLOCK | MASK_STATCLOCK; | | 475 | imask[IPL_SCHED] = imask[IPL_VM] | MASK_CLOCK | MASK_STATCLOCK; |
474 | imask[IPL_HIGH] |= imask[IPL_SCHED]; | | 476 | imask[IPL_HIGH] |= imask[IPL_SCHED]; |
475 | | | 477 | |
476 | /* And eventually calculate the complete masks. */ | | 478 | /* And eventually calculate the complete masks. */ |
477 | for (irq = 0; irq < ICU_LEN; irq++) { | | 479 | for (irq = 0; irq < ICU_LEN; irq++) { |
478 | register int irqs = IRQ_TO_MASK(irq); | | 480 | register int irqs = IRQ_TO_MASK(irq); |
479 | for (q = intrs[irq].is_head; q; q = q->ih_next) | | 481 | for (q = intrs[irq].is_head; q; q = q->ih_next) |
480 | irqs |= imask[q->ih_level]; | | 482 | irqs |= imask[q->ih_level]; |
481 | intrs[irq].is_mask = irqs; | | 483 | intrs[irq].is_mask = irqs; |
482 | } | | 484 | } |
483 | | | 485 | |
484 | for (irq = 0; irq < ICU_LEN; irq++) | | 486 | for (irq = 0; irq < ICU_LEN; irq++) |
485 | if (intrs[irq].is_head != NULL) | | 487 | if (intrs[irq].is_head != NULL) |
486 | enable_irq(irq); | | 488 | enable_irq(irq); |
487 | else | | 489 | else |
488 | disable_irq(irq); | | 490 | disable_irq(irq); |
489 | } | | 491 | } |
490 | | | 492 | |
/*
 * do_pending_int:
 *
 *	Dispatch hardware (and, if configured, software) interrupts that
 *	were marked pending in ci_ipending because they arrived while the
 *	CPU's priority mask (ci_cpl) blocked them.  Called from splx()
 *	and spllower() once the mask has been dropped.
 */
static void
do_pending_int(void)
{
	struct cpu_info *ci = curcpu();
	struct intrhand *ih;
	int irq;
	int pcpl;		/* priority mask on entry, restored on exit */
	int hwpend;		/* pending, unmasked hard-interrupt bits */
	int emsr;		/* MSR value on entry */

	/*
	 * Already in interrupt context: don't recurse.  The outermost
	 * invocation will see any bits still pending when it finishes.
	 */
	if (ci->ci_idepth)
		return;
#ifdef __HAVE_FAST_SOFTINTS
#error don't count soft interrupts
#else
	ci->ci_idepth++;
#endif
	/* Mask external interrupts while inspecting/updating ipending. */
	emsr = mfmsr();
	wrteei(0);

	pcpl = ci->ci_cpl;			/* Turn off all */
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif
	/*
	 * Run every pending hard interrupt that the entry-time priority
	 * mask does not block.
	 */
	while ((hwpend = ci->ci_ipending & ~pcpl & MASK_HARDINTR) != 0) {
		irq = IRQ_OF_MASK(hwpend);
		/*
		 * Non-level (edge-triggered) sources may be unmasked at
		 * the controller immediately; level-triggered ones stay
		 * disabled until the handlers below clear the cause.
		 */
		if (intrs[irq].is_type != IST_LEVEL)
			enable_irq(irq);

		ci->ci_ipending &= ~IRQ_TO_MASK(irq);

		/*
		 * Raise to this IRQ's precomputed mask, then restore the
		 * saved MSR so handlers run with interrupts re-enabled
		 * (assuming EE was set on entry — which it is for the
		 * splx()/spllower() callers).
		 */
		splraise(intrs[irq].is_mask);
		mtmsr(emsr);

		/* Walk the chain of handlers registered on this line. */
		ih = intrs[irq].is_head;
		while(ih) {
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_level == IPL_VM)
				KERNEL_LOCK(1, NULL);
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM)
				KERNEL_UNLOCK_ONE(NULL);
			ih = ih->ih_next;
		}

		wrteei(0);
		/* Cause serviced; now safe to unmask level-triggered. */
		if (intrs[irq].is_type == IST_LEVEL)
			enable_irq(irq);
		ci->ci_cpl = pcpl;
		intrs[irq].is_evcnt.ev_count++;
	}
#ifdef __HAVE_FAST_SOFTINTS
	/*
	 * NOTE(review): unreachable as written — the #error above fires
	 * whenever __HAVE_FAST_SOFTINTS is defined.  Intent: run soft
	 * interrupts highest-priority first, restarting the scan at
	 * "again" since a handler may post new pending bits.
	 */
	if ((ci->ci_ipending & ~pcpl) & MASK_SOFTSERIAL) {
		ci->ci_ipending &= ~MASK_SOFTSERIAL;
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		wrteei(0);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & MASK_SOFTNET) {
		ci->ci_ipending &= ~MASK_SOFTNET;
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		wrteei(0);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & MASK_SOFTCLOCK) {
		ci->ci_ipending &= ~MASK_SOFTCLOCK;
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		wrteei(0);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif
	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	mtmsr(emsr);
	ci->ci_idepth--;
}
577 | | | 579 | |
#ifdef __HAVE_FAST_SOFTINTS
/*
 * softintr:
 *
 *	Mark a software interrupt pending on the current CPU; the bit is
 *	serviced later by do_pending_int().  idx selects soft clock (0),
 *	soft net (1) or soft serial (2).
 */
void
softintr(int idx)
{
	static const int softmap[3] = {
		MASK_SOFTCLOCK, MASK_SOFTNET, MASK_SOFTSERIAL
	};
	int msr;

	KASSERT(idx >= 0 && idx < 3);

	/*
	 * A lwarx/stwcx. sequence could make the read-modify-write of
	 * ci_ipending atomic without touching the MSR; for now simply
	 * mask external interrupts around it.
	 */
	msr = mfmsr();
	wrteei(0);
	curcpu()->ci_ipending |= softmap[idx];
	mtmsr(msr);
}
#endif
602 | | | 604 | |
603 | int | | 605 | int |
604 | splraise(int newcpl) | | 606 | splraise(int newcpl) |
605 | { | | 607 | { |
606 | struct cpu_info *ci = curcpu(); | | 608 | struct cpu_info *ci = curcpu(); |
607 | int oldcpl, oldmsr; | | 609 | int oldcpl, oldmsr; |
608 | | | 610 | |
609 | /* | | 611 | /* |
610 | * We're about to block some intrs, so make sure they don't | | 612 | * We're about to block some intrs, so make sure they don't |
611 | * fire while we're busy. | | 613 | * fire while we're busy. |
612 | */ | | 614 | */ |
613 | | | 615 | |
614 | oldmsr = mfmsr(); | | 616 | oldmsr = mfmsr(); |
615 | wrteei(0); | | 617 | wrteei(0); |
616 | | | 618 | |
617 | oldcpl = ci->ci_cpl; | | 619 | oldcpl = ci->ci_cpl; |
618 | ci->ci_cpl |= newcpl; | | 620 | ci->ci_cpl |= newcpl; |
619 | | | 621 | |
620 | mtmsr(oldmsr); | | 622 | mtmsr(oldmsr); |
621 | return (oldcpl); | | 623 | return (oldcpl); |
622 | } | | 624 | } |
623 | | | 625 | |
624 | void | | 626 | void |
625 | splx(int newcpl) | | 627 | splx(int newcpl) |
626 | { | | 628 | { |
627 | struct cpu_info *ci = curcpu(); | | 629 | struct cpu_info *ci = curcpu(); |
628 | | | 630 | |
629 | ci->ci_cpl = newcpl; | | 631 | ci->ci_cpl = newcpl; |
630 | if (ci->ci_ipending & ~newcpl) | | 632 | if (ci->ci_ipending & ~newcpl) |
631 | do_pending_int(); | | 633 | do_pending_int(); |
632 | } | | 634 | } |
633 | | | 635 | |
634 | int | | 636 | int |
635 | spllower(int newcpl) | | 637 | spllower(int newcpl) |
636 | { | | 638 | { |
637 | struct cpu_info *ci = curcpu(); | | 639 | struct cpu_info *ci = curcpu(); |
638 | int oldcpl; | | 640 | int oldcpl; |
639 | | | 641 | |
640 | oldcpl = ci->ci_cpl; | | 642 | oldcpl = ci->ci_cpl; |
641 | ci->ci_cpl = newcpl; | | 643 | ci->ci_cpl = newcpl; |
642 | if (ci->ci_ipending & ~newcpl) | | 644 | if (ci->ci_ipending & ~newcpl) |
643 | do_pending_int(); | | 645 | do_pending_int(); |
644 | | | 646 | |
645 | return (oldcpl); | | 647 | return (oldcpl); |
646 | } | | 648 | } |