| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: ofw_irq.S,v 1.16 2020/11/21 09:36:27 skrll Exp $ */ | | 1 | /* $NetBSD: ofw_irq.S,v 1.17 2020/11/21 19:57:35 skrll Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1994-1998 Mark Brinicombe. | | 4 | * Copyright (c) 1994-1998 Mark Brinicombe. |
5 | * Copyright (c) 1994 Brini. | | 5 | * Copyright (c) 1994 Brini. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * This code is derived from software written for Brini by Mark Brinicombe | | 8 | * This code is derived from software written for Brini by Mark Brinicombe |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
| @@ -85,32 +85,33 @@ Lirq_entry: | | | @@ -85,32 +85,33 @@ Lirq_entry: |
85 | .word irq_entry | | 85 | .word irq_entry |
86 | | | 86 | |
87 | Lofwirqstk: /* hack */ | | 87 | Lofwirqstk: /* hack */ |
88 | .word ofwirqstk + 4096 | | 88 | .word ofwirqstk + 4096 |
89 | | | 89 | |
90 | LOCK_CAS_CHECK_LOCALS | | 90 | LOCK_CAS_CHECK_LOCALS |
91 | | | 91 | |
92 | AST_ALIGNMENT_FAULT_LOCALS | | 92 | AST_ALIGNMENT_FAULT_LOCALS |
93 | | | 93 | |
94 | /* | | 94 | /* |
 94 |  * Register usage                                                |   |  95 |  * Register usage                                                |
96 | * | | 96 | * |
97 | * r4 - Address of cpu_info (on entry) | | 97 | * r4 - Address of cpu_info (on entry) |
98 | * r5 - Pointer to handler pointer list | | 98 | * r5 - Address of curlwp |
99 | * r6 - Address of current handler | | 99 | * r6 - Address of current handler |
 100 |  * r7  - spsr mode (must be preserved)                          |   | 100 |  * r7  - spsr mode (must be preserved)                          |
101 | * r8 - Current IRQ requests. | | 101 | * r8 - Current IRQ requests. |
102 | * r9 - Used to count through possible IRQ bits. | | 102 | * r9 - Used to count through possible IRQ bits. |
103 | * r10 - Base address of IOMD | | 103 | * r10 - Base address of IOMD |
| | | 104 | * r11 - Pointer to handler pointer list |
104 | */ | | 105 | */ |
105 | | | 106 | |
106 | ASENTRY_NP(irq_entry) | | 107 | ASENTRY_NP(irq_entry) |
107 | /* | | 108 | /* |
108 | * We come here following an OFW-handled timer tick. | | 109 | * We come here following an OFW-handled timer tick. |
109 | * | | 110 | * |
110 | * We are in the SVC frame, and interrupts are disabled. | | 111 | * We are in the SVC frame, and interrupts are disabled. |
111 | * The state of the interrupted context is partially in | | 112 | * The state of the interrupted context is partially in |
112 | * the registers and partially in the global storage area | | 113 | * the registers and partially in the global storage area |
113 | * labeled ofw_ticktmp. ofw_ticktmp is filled-in by the | | 114 | * labeled ofw_ticktmp. ofw_ticktmp is filled-in by the |
114 | * tick callback that is invoked by OFW on the way out of | | 115 | * tick callback that is invoked by OFW on the way out of |
115 | * its interrupt handler. ofw_ticktmp contains the following: | | 116 | * its interrupt handler. ofw_ticktmp contains the following: |
116 | * | | 117 | * |
| @@ -178,27 +179,27 @@ ASENTRY_NP(irq_entry) | | | @@ -178,27 +179,27 @@ ASENTRY_NP(irq_entry) |
178 | movs pc, lr /* Exit */ | | 179 | movs pc, lr /* Exit */ |
179 | | | 180 | |
180 | /* | | 181 | /* |
181 | * Stuff a bit-mask into r8 indicating which interrupts | | 182 | * Stuff a bit-mask into r8 indicating which interrupts |
182 | * are pending. In our case, that is just the timer0 | | 183 | * are pending. In our case, that is just the timer0 |
183 | * interrupt: (1 << TIMER0). The existing code will take | | 184 | * interrupt: (1 << TIMER0). The existing code will take |
184 | * care of invoking that handler and the softint/ast stuff | | 185 | * care of invoking that handler and the softint/ast stuff |
185 | * which follows it. | | 186 | * which follows it. |
186 | */ | | 187 | */ |
187 | ofwtakeint: | | 188 | ofwtakeint: |
188 | #ifdef EXEC_AOUT | | 189 | #ifdef EXEC_AOUT |
189 | ldr r0, [sp] /* Fetch SPSR */ | | 190 | ldr r0, [sp] /* Fetch SPSR */ |
190 | #endif | | 191 | #endif |
191 | ENABLE_ALIGNMENT_FAULTS | | 192 | ENABLE_ALIGNMENT_FAULTS /* puts cur{cpu,lwp} in r4/r5 */ |
192 | | | 193 | |
193 | mov r8, #0x00000001 /* timer interrupt pending! */ | | 194 | mov r8, #0x00000001 /* timer interrupt pending! */ |
194 | mov r8, r8, lsl #IRQ_TIMER0 | | 195 | mov r8, r8, lsl #IRQ_TIMER0 |
195 | | | 196 | |
196 | /* | | 197 | /* |
197 | * Note that we have entered the IRQ handler. | | 198 | * Note that we have entered the IRQ handler. |
198 | * We are in SVC mode so we cannot use the processor mode | | 199 | * We are in SVC mode so we cannot use the processor mode |
 199 |  * to determine if we are in an IRQ. Instead we will count      |   | 200 |  * to determine if we are in an IRQ. Instead we will count      |
200 | * each time the interrupt handler is nested. | | 201 | * each time the interrupt handler is nested. |
201 | */ | | 202 | */ |
202 | | | 203 | |
203 | ldr r1, [r4, #CI_INTR_DEPTH] | | 204 | ldr r1, [r4, #CI_INTR_DEPTH] |
204 | add r1, r1, #1 | | 205 | add r1, r1, #1 |
| @@ -214,63 +215,63 @@ ofwtakeint: | | | @@ -214,63 +215,63 @@ ofwtakeint: |
214 | * Need to block all interrupts at the IPL or lower for | | 215 | * Need to block all interrupts at the IPL or lower for |
215 | * all asserted interrupts. | | 216 | * all asserted interrupts. |
216 | * This basically emulates hardware interrupt priority levels. | | 217 | * This basically emulates hardware interrupt priority levels. |
217 | * Means we need to go through the interrupt mask and for | | 218 | * Means we need to go through the interrupt mask and for |
218 | * every asserted interrupt we need to mask out all other | | 219 | * every asserted interrupt we need to mask out all other |
219 | * interrupts at the same or lower IPL. | | 220 | * interrupts at the same or lower IPL. |
220 | * If only we could wait until the main loop but we need to sort | | 221 | * If only we could wait until the main loop but we need to sort |
221 | * this out first so interrupts can be re-enabled. | | 222 | * this out first so interrupts can be re-enabled. |
222 | * | | 223 | * |
223 | * This would benefit from a special ffs type routine | | 224 | * This would benefit from a special ffs type routine |
224 | */ | | 225 | */ |
225 | | | 226 | |
226 | mov r9, #(NIPL - 1) | | 227 | mov r9, #(NIPL - 1) |
227 | ldr r5, Lspl_masks | | 228 | ldr r11, Lspl_masks |
228 | | | 229 | |
229 | Lfind_highest_ipl: | | 230 | Lfind_highest_ipl: |
230 | ldr r2, [r5, r9, lsl #2] | | 231 | ldr r2, [r11, r9, lsl #2] |
231 | tst r8, r2 | | 232 | tst r8, r2 |
232 | subeq r9, r9, #1 | | 233 | subeq r9, r9, #1 |
233 | beq Lfind_highest_ipl | | 234 | beq Lfind_highest_ipl |
234 | | | 235 | |
235 | /* r9 = SPL level of highest priority interrupt */ | | 236 | /* r9 = SPL level of highest priority interrupt */ |
236 | add r9, r9, #1 | | 237 | add r9, r9, #1 |
237 | ldr r2, [r5, r9, lsl #2] | | 238 | ldr r2, [r11, r9, lsl #2] |
238 | mvn r2, r2 | | 239 | mvn r2, r2 |
239 | orr r0, r0, r2 | | 240 | orr r0, r0, r2 |
240 | | | 241 | |
241 | str r0, [r1] | | 242 | str r0, [r1] |
242 | | | 243 | |
243 | ldr r0, Lcurrent_spl_level | | 244 | ldr r0, Lcurrent_spl_level |
244 | ldr r1, [r4, #CI_CPL] | | 245 | ldr r1, [r4, #CI_CPL] |
245 | str r9, [r4, #CI_CPL] | | 246 | str r9, [r4, #CI_CPL] |
246 | stmfd sp!, {r1} | | 247 | stmfd sp!, {r1} |
247 | | | 248 | |
248 | /* Update the irq masks */ | | 249 | /* Update the irq masks */ |
249 | bl _C_LABEL(irq_setmasks) | | 250 | bl _C_LABEL(irq_setmasks) |
250 | | | 251 | |
251 | mrs r0, cpsr /* Enable IRQ's */ | | 252 | mrs r0, cpsr /* Enable IRQ's */ |
252 | bic r0, r0, #I32_bit | | 253 | bic r0, r0, #I32_bit |
253 | msr cpsr_all, r0 | | 254 | msr cpsr_all, r0 |
254 | | | 255 | |
255 | ldr r5, Lirqhandlers | | 256 | ldr r11, Lirqhandlers |
256 | mov r9, #0x00000001 | | 257 | mov r9, #0x00000001 |
257 | | | 258 | |
258 | irqloop: | | 259 | irqloop: |
259 | /* This would benefit from a special ffs type routine */ | | 260 | /* This would benefit from a special ffs type routine */ |
260 | tst r8, r9 /* Is a bit set ? */ | | 261 | tst r8, r9 /* Is a bit set ? */ |
261 | beq nextirq /* No ? try next bit */ | | 262 | beq nextirq /* No ? try next bit */ |
262 | | | 263 | |
263 | ldr r6, [r5] /* Get address of first handler structure */ | | 264 | ldr r6, [r11] /* Get address of first handler structure */ |
264 | | | 265 | |
265 | teq r6, #0x00000000 /* Do we have a handler */ | | 266 | teq r6, #0x00000000 /* Do we have a handler */ |
266 | moveq r0, r8 /* IRQ requests as arg 0 */ | | 267 | moveq r0, r8 /* IRQ requests as arg 0 */ |
267 | beq _C_LABEL(stray_irqhandler) /* call special handler */ | | 268 | beq _C_LABEL(stray_irqhandler) /* call special handler */ |
268 | | | 269 | |
269 | ldr r1, [r4, #(CI_CC_NINTR)] | | 270 | ldr r1, [r4, #(CI_CC_NINTR)] |
270 | ldr r2, [r4, #(CI_CC_NINTR+4)] | | 271 | ldr r2, [r4, #(CI_CC_NINTR+4)] |
271 | #ifdef _ARMEL | | 272 | #ifdef _ARMEL |
272 | adds r1, r1, #0x00000001 | | 273 | adds r1, r1, #0x00000001 |
273 | adc r2, r2, #0x00000000 | | 274 | adc r2, r2, #0x00000000 |
274 | #else | | 275 | #else |
275 | adds r2, r2, #0x00000001 | | 276 | adds r2, r2, #0x00000001 |
276 | adc r1, r1, #0x00000000 | | 277 | adc r1, r1, #0x00000000 |
| @@ -291,42 +292,40 @@ irqchainloop: | | | @@ -291,42 +292,40 @@ irqchainloop: |
291 | ldr r6, [r6, #(IH_NEXT)] | | 292 | ldr r6, [r6, #(IH_NEXT)] |
292 | teq r6, #0x00000000 | | 293 | teq r6, #0x00000000 |
293 | bne irqchainloop | | 294 | bne irqchainloop |
294 | b nextirq | | 295 | b nextirq |
295 | | | 296 | |
296 | irqdone: | | 297 | irqdone: |
297 | add r3, r6, #IH_EV_COUNT /* get address of ih's ev_count */ | | 298 | add r3, r6, #IH_EV_COUNT /* get address of ih's ev_count */ |
298 | ldmia r3, {r1-r2} /* load ev_count */ | | 299 | ldmia r3, {r1-r2} /* load ev_count */ |
299 | adds r1, r1, #0x00000001 /* 64bit incr (lo) */ | | 300 | adds r1, r1, #0x00000001 /* 64bit incr (lo) */ |
300 | adc r2, r2, #0x00000000 /* 64bit incr (hi) */ | | 301 | adc r2, r2, #0x00000000 /* 64bit incr (hi) */ |
301 | stmia r3, {r1-r2} /* store ev_count */ | | 302 | stmia r3, {r1-r2} /* store ev_count */ |
302 | | | 303 | |
303 | nextirq: | | 304 | nextirq: |
304 | add r5, r5, #0x00000004 /* update pointer to handlers */ | | 305 | add r11, r11, #0x00000004 /* update pointer to handlers */ |
305 | mov r9, r9, lsl #1 /* move on to next bit */ | | 306 | mov r9, r9, lsl #1 /* move on to next bit */ |
306 | teq r9, #(1 << 24) /* done the last bit ? */ | | 307 | teq r9, #(1 << 24) /* done the last bit ? */ |
307 | bne irqloop /* no - loop back. */ | | 308 | bne irqloop /* no - loop back. */ |
308 | | | 309 | |
309 | ldmfd sp!, {r2} | | 310 | ldmfd sp!, {r2} |
310 | str r2, [r4, #CI_CPL] | | 311 | str r2, [r4, #CI_CPL] |
311 | | | 312 | |
312 | /* Restore previous disabled mask */ | | 313 | /* Restore previous disabled mask */ |
313 | ldmfd sp!, {r2} | | 314 | ldmfd sp!, {r2} |
314 | ldr r1, Ldisabled_mask | | 315 | ldr r1, Ldisabled_mask |
315 | str r2, [r1] | | 316 | str r2, [r1] |
316 | bl _C_LABEL(irq_setmasks) | | 317 | bl _C_LABEL(irq_setmasks) |
317 | | | 318 | |
318 | bl _C_LABEL(dosoftints) /* Handle the soft interrupts */ | | | |
319 | | | | |
320 | /* Kill IRQ's in preparation for exit */ | | 319 | /* Kill IRQ's in preparation for exit */ |
321 | mrs r0, cpsr | | 320 | mrs r0, cpsr |
322 | orr r0, r0, #(I32_bit) | | 321 | orr r0, r0, #(I32_bit) |
323 | msr cpsr_all, r0 | | 322 | msr cpsr_all, r0 |
324 | | | 323 | |
325 | /* Decrement the nest count */ | | 324 | /* Decrement the nest count */ |
326 | ldr r1, [r4, #CI_INTR_DEPTH] | | 325 | ldr r1, [r4, #CI_INTR_DEPTH] |
327 | sub r1, r1, #1 | | 326 | sub r1, r1, #1 |
328 | str r1, [r4, #CI_INTR_DEPTH] | | 327 | str r1, [r4, #CI_INTR_DEPTH] |
329 | | | 328 | |
330 | LOCK_CAS_CHECK | | 329 | LOCK_CAS_CHECK |
331 | | | 330 | |
332 | DO_AST_AND_RESTORE_ALIGNMENT_FAULTS | | 331 | DO_AST_AND_RESTORE_ALIGNMENT_FAULTS |