Tue Nov 28 08:43:49 2017 UTC (maxv)

style

cvs diff -r1.30 -r1.31 src/sys/arch/amd64/amd64/spl.S

--- src/sys/arch/amd64/amd64/spl.S 2015/11/22 13:41:24 1.30
+++ src/sys/arch/amd64/amd64/spl.S 2017/11/28 08:43:49 1.31
@@ -1,14 +1,14 @@
-/*	$NetBSD: spl.S,v 1.30 2015/11/22 13:41:24 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.31 2017/11/28 08:43:49 maxv Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -80,161 +80,172 @@
 #ifndef XEN
 /*
  * Xsoftintr()
  *
  * Switch to the LWP assigned to handle interrupts from the given
  * source. We borrow the VM context from the interrupted LWP.
  *
  * On entry:
  *
  *	%rax		intrsource
  *	%r13		address to return to
  */
 IDTVEC(softintr)
-	pushq	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
+	/* set up struct switchframe */
+	pushq	$_C_LABEL(softintr_ret)
 	pushq	%rbx
 	pushq	%r12
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+
 	movl	$IPL_HIGH,CPUVAR(ILEVEL)
 	movq	CPUVAR(CURLWP),%r15
 	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
 	movq	L_PCB(%rdi),%rdx
 	movq	L_PCB(%r15),%rcx
 	movq	%rdi,CPUVAR(CURLWP)
+
+	/* save old context */
 	movq	%rsp,PCB_RSP(%rcx)
 	movq	%rbp,PCB_RBP(%rcx)
-	movq	PCB_RSP0(%rdx),%rsp	/* onto new stack */
+
+	/* switch to the new stack */
+	movq	PCB_RSP0(%rdx),%rsp
+
+	/* dispatch */
 	sti
 	movq	%r15,%rdi		/* interrupted LWP */
 	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
 	call	_C_LABEL(softint_dispatch)/* run handlers */
 	cli
+
+	/* restore old context */
 	movq	L_PCB(%r15),%rcx
 	movq	PCB_RSP(%rcx),%rsp
+
 	xchgq	%r15,CPUVAR(CURLWP)	/* must be globally visible */
 	popq	%r15			/* unwind switchframe */
 	addq	$(5 * 8),%rsp
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 END(Xsoftintr)
 
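For orientation, the path above amounts to the following loose C model. All names are illustrative, and the actual stack switch can only be done in assembly; the handler LWP borrows the interrupted LWP's VM context, so only curlwp, the stack, and the IPL change:

    #include <stdio.h>

    enum { IPL_HIGH = 7 };              /* illustrative value */

    struct pcb_model { unsigned long rsp, rbp, rsp0; };
    struct lwp_model { struct pcb_model pcb; };
    struct cpu_model { int ilevel; struct lwp_model *curlwp; };

    /* stand-in for the MI softint_dispatch(): runs the queued handlers */
    static void softint_dispatch(struct lwp_model *pinned, int s)
    {
            (void)pinned;
            printf("running soft interrupts at ipl %d\n", s);
    }

    static void xsoftintr_model(struct cpu_model *ci,
        struct lwp_model *handler, int maxlevel)
    {
            struct lwp_model *pinned = ci->curlwp;

            ci->ilevel = IPL_HIGH;      /* block everything while switching */
            ci->curlwp = handler;
            /* the asm saves %rsp/%rbp into the pinned LWP's PCB and loads
             * the handler's stack from PCB_RSP0; not expressible in C */
            softint_dispatch(pinned, maxlevel); /* sti; run handlers; cli */
            /* the asm restores the saved stack, then: */
            ci->curlwp = pinned;        /* xchgq: must be globally visible */
    }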
 /*
  * softintr_ret()
  *
  * Trampoline function that gets returned to by cpu_switchto() when
  * an interrupt handler blocks. On entry:
  *
  *	%rax		prevlwp from cpu_switchto()
  */
 NENTRY(softintr_ret)
 	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
-	movl	$0, L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
+	movl	$0,L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
 	cli
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 END(softintr_ret)
 
 /*
  * void softint_trigger(uintptr_t machdep);
  *
  * Software interrupt registration.
  */
 NENTRY(softint_trigger)
 	orl	%edi,CPUVAR(IPENDING)	/* atomic on local cpu */
 	ret
 END(softint_trigger)
 
 
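In C terms the trigger is just an OR of the precomputed one-bit mask into the CPU-local pending bitmap; a plain orl is already atomic with respect to interrupts on the local CPU, so no lock prefix is needed. A sketch with illustrative names, using a C11 atomic to stand in for that property:

    #include <stdatomic.h>

    static _Atomic unsigned int ci_ipending_model;  /* stand-in for IPENDING */

    /* machdep is the precomputed one-bit mask for this soft interrupt */
    void softint_trigger_model(unsigned int machdep)
    {
            atomic_fetch_or(&ci_ipending_model, machdep);
    }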
 /*
  * Xpreemptrecurse()
  *
  * Handles preemption interrupts via Xspllower().
  */
 IDTVEC(preemptrecurse)
-	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
+	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
-	xorq	%rdi, %rdi
+	xorq	%rdi,%rdi
 	call	_C_LABEL(kpreempt)
 	cli
 	jmp	*%r13			/* back to Xspllower */
 END(Xpreemptrecurse)
 
 /*
  * Xpreemptresume()
  *
  * Handles preemption interrupts via Xdoreti().
  */
 IDTVEC(preemptresume)
-	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
+	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
-	testq	$SEL_RPL, TF_CS(%rsp)
+	testq	$SEL_RPL,TF_CS(%rsp)
 	jnz	1f
-	movq	TF_RIP(%rsp), %rdi
-	call	_C_LABEL(kpreempt)	# from kernel
+	movq	TF_RIP(%rsp),%rdi
+	call	_C_LABEL(kpreempt)	/* from kernel */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 1:
-	call	_C_LABEL(preempt)	# from user
+	call	_C_LABEL(preempt)	/* from user */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 END(Xpreemptresume)
 
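The branch in Xpreemptresume reads more easily in C: kernel-mode returns go through kpreempt() with the interrupted %rip, user-mode returns through preempt(). A sketch, with illustrative names and stubbed handlers:

    #include <stdint.h>
    #include <stdio.h>

    #define SEL_RPL 3       /* requestor privilege-level bits of %cs */

    struct trapframe_model { uint64_t tf_cs, tf_rip; };

    static void kpreempt_model(uintptr_t where)
    {
            printf("kernel preemption at %#lx\n", (unsigned long)where);
    }

    static void preempt_model(void)
    {
            printf("user preemption\n");
    }

    static void preemptresume_model(struct trapframe_model *tf)
    {
            if ((tf->tf_cs & SEL_RPL) == 0)
                    kpreempt_model(tf->tf_rip);     /* from kernel */
            else
                    preempt_model();                /* from user */
    }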
 /*
  * int splraise(int s);
  */
 ENTRY(splraise)
 	movl	CPUVAR(ILEVEL),%eax
 	cmpl	%edi,%eax
 	cmoval	%eax,%edi
 	movl	%edi,CPUVAR(ILEVEL)
 	ret
 END(splraise)
 
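splraise() is a branch-free max in the assembly (the cmoval). The C equivalent, with per-CPU state modeled as a plain global:

    static int ci_ilevel_model;         /* stand-in for CPUVAR(ILEVEL) */

    /* raise the priority level to at least s, return the old level */
    int splraise_model(int s)
    {
            int olevel = ci_ilevel_model;

            if (s < olevel)             /* cmoval %eax,%edi */
                    s = olevel;
            ci_ilevel_model = s;
            return olevel;
    }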
 /*
  * void spllower(int s);
  *
  * Must be the same size as cx8_spllower(). This must use
  * pushf/cli/popf as it is used early in boot where interrupts
  * are disabled via eflags/IE.
  */
 ENTRY(spllower)
-	cmpl	CPUVAR(ILEVEL), %edi
+	cmpl	CPUVAR(ILEVEL),%edi
 	jae	1f
-	movl	CPUVAR(IUNMASK)(,%rdi,4), %edx
+	movl	CPUVAR(IUNMASK)(,%rdi,4),%edx
 	pushf
 	cli
-	testl	CPUVAR(IPENDING), %edx
+	testl	CPUVAR(IPENDING),%edx
 	jnz	2f
-	movl	%edi, CPUVAR(ILEVEL)
+	movl	%edi,CPUVAR(ILEVEL)
 	popf
 1:
 	ret
 	ret
 2:
 	popf
 	jmp	_C_LABEL(Xspllower)
 3:
 	.space 16
 	.align 16
 END(spllower)
 LABEL(spllower_end)
 #endif /* !XEN */
 
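And the fast path of spllower(): nothing to do when the target level is not lower; otherwise, with interrupts briefly disabled, either drop the level directly or fall into Xspllower when the new level would unmask a pending interrupt. A sketch, illustrative names again:

    #define NIPL_MODEL 8

    static int ci_ilevel_model;
    static unsigned int ci_ipending_model;
    static unsigned int ci_iunmask_model[NIPL_MODEL];

    /* stub: the real slow path runs pending handlers first (Xspllower) */
    static void Xspllower_model(int s) { ci_ilevel_model = s; }

    void spllower_model(int s)
    {
            if (s >= ci_ilevel_model)
                    return;                         /* jae 1f */
            /* pushf; cli */
            if (ci_ipending_model & ci_iunmask_model[s])
                    Xspllower_model(s);             /* deferred work pending */
            else
                    ci_ilevel_model = s;            /* movl %edi,ILEVEL */
            /* popf */
    }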
 /*
  * void cx8_spllower(int s);
  *
  * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
  *
- * edx : eax = old level / old ipending 
+ * edx : eax = old level / old ipending
  * ecx : ebx = new level / old ipending
  */
 ENTRY(cx8_spllower)
 	movl	CPUVAR(ILEVEL),%edx
 	movq	%rbx,%r8
 	cmpl	%edx,%edi			/* new level is lower? */
 	jae	1f
 0:
 	movl	CPUVAR(IPENDING),%eax
 	movl	%edi,%ecx
 	testl	%eax,CPUVAR(IUNMASK)(,%rcx,4)/* deferred interrupts? */
 	movl	%eax,%ebx
 	/*
@@ -250,89 +261,89 @@ ENTRY(cx8_spllower)
 2:
 	movq	%r8,%rbx
 	.type	_C_LABEL(cx8_spllower_patch), @function
 LABEL(cx8_spllower_patch)
 	jmp	_C_LABEL(Xspllower)
 
 	.align	16
 END(cx8_spllower_patch)
 END(cx8_spllower)
 LABEL(cx8_spllower_end)
 
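The trick in cx8_spllower() is that the pending bitmap and the level sit in one aligned 8-byte area, so cmpxchg8b can lower the level only if no interrupt became pending in the meantime. Roughly, in C11 terms (the packed layout and every name here are assumptions of the sketch):

    #include <stdatomic.h>
    #include <stdint.h>

    /* level in the high 32 bits, ipending in the low 32 bits */
    static _Atomic uint64_t ci_level_pending_model;

    /* returns 1 when the level was lowered, 0 when the slow path
     * (Xspllower) must run because unmasked interrupts are pending */
    int cx8_spllower_model(uint32_t nlevel, uint32_t unmask)
    {
            uint64_t old = atomic_load(&ci_level_pending_model);

            for (;;) {
                    uint32_t pending = (uint32_t)old;       /* %eax */
                    if (pending & unmask)
                            return 0;   /* deferred: jmp Xspllower */
                    uint64_t nval = ((uint64_t)nlevel << 32) | pending;
                    if (atomic_compare_exchange_weak(&ci_level_pending_model,
                        &old, nval))    /* cmpxchg8b */
                            return 1;
            }
    }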
 /*
  * void Xspllower(int s);
  *
  * Process pending interrupts.
  *
  * Important registers:
  *   ebx - cpl
  *   r13 - address to resume loop at
  *
  * It is important that the bit scan instruction is bsr, it will get
  * the highest 2 bits (currently the IPI and clock handlers) first,
  * to avoid deadlocks where one CPU sends an IPI, another one is at
  * splhigh() and defers it, lands in here via splx(), and handles
  * a lower-prio one first, which needs to take the kernel lock -->
  * the sending CPU will never see the that CPU accept the IPI
  * (see pmap_tlb_shootnow).
  */
 	nop
 	.align	4	/* Avoid confusion with cx8_spllower_end */
 
 IDTVEC(spllower)
 	pushq	%rbx
 	pushq	%r13
 	pushq	%r12
 	movl	%edi,%ebx
-	leaq	1f(%rip),%r13		# address to resume loop at
-1:	movl	%ebx,%eax		# get cpl
+	leaq	1f(%rip),%r13		/* address to resume loop at */
+1:	movl	%ebx,%eax		/* get cpl */
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
-	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left?
+	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
 	jz	2f
 	bsrl	%eax,%eax
 	btrl	%eax,CPUVAR(IPENDING)
 	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RECURSE(%rax)
 2:
 	movl	%ebx,CPUVAR(ILEVEL)
 	STI(si)
 	popq	%r12
 	popq	%r13
 	popq	%rbx
 	ret
 END(Xspllower)
 
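The dispatch loop above reads naturally as C: with interrupts off, bsr picks the highest-priority pending unmasked source (which is why IPIs and the clock, in the top bits, always win), its bit is cleared, and control recurses into the handler, which returns via %r13 to the top of the loop. A model with illustrative names:

    struct intrsource_model {
            void (*is_recurse)(void);   /* per-source recurse entry */
    };

    #define NIPL_MODEL 8

    static int ci_ilevel_model;
    static unsigned int ci_ipending_model;
    static unsigned int ci_iunmask_model[NIPL_MODEL];
    static struct intrsource_model *ci_isources_model[32];

    void Xspllower_model(int cpl)
    {
            unsigned int mask;

            /* CLI(si) on each pass; handlers run with interrupts on */
            while ((mask = ci_ipending_model & ci_iunmask_model[cpl]) != 0) {
                    int slot = 31 - __builtin_clz(mask);    /* bsrl */
                    ci_ipending_model &= ~(1U << slot);     /* btrl */
                    ci_isources_model[slot]->is_recurse();  /* IS_RECURSE */
            }
            ci_ilevel_model = cpl;      /* movl %ebx,CPUVAR(ILEVEL) */
            /* STI(si) */
    }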
 /*
  * void Xdoreti(void);
  *
  * Handle return from interrupt after device handler finishes.
  *
  * Important registers:
  *   ebx - cpl to restore
  *   r13 - address to resume loop at
  */
 IDTVEC(doreti)
-	popq	%rbx			# get previous priority
+	popq	%rbx			/* get previous priority */
 	decl	CPUVAR(IDEPTH)
 	leaq	1f(%rip),%r13
 1:	movl	%ebx,%eax
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax
 	jz	2f
-	bsrl	%eax,%eax		# slow, but not worth optimizing
+	bsrl	%eax,%eax		/* slow, but not worth optimizing */
 	btrl	%eax,CPUVAR(IPENDING)
-	movq	CPUVAR(ISOURCES)(,%rax, 8),%rax
+	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RESUME(%rax)
 2:	/* Check for ASTs on exit to user mode. */
 	movl	%ebx,CPUVAR(ILEVEL)
 5:
 	testb	$SEL_RPL,TF_CS(%rsp)
 	jz	6f
 
 	.type	_C_LABEL(doreti_checkast), @function
 LABEL(doreti_checkast)
 	movq	CPUVAR(CURLWP),%r14
 	CHECK_ASTPENDING(%r14)
 	je	3f
 	CLEAR_ASTPENDING(%r14)
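Only returns to user mode (SEL_RPL bits set in the saved %cs) take the doreti_checkast path, which loops until no AST is pending. A C-shaped summary, with the actual AST handling stubbed out since it lies beyond the lines quoted in this hunk:

    struct lwp_ast_model { int astpending; };

    /* stub for the AST handling that follows CLEAR_ASTPENDING */
    static void handle_ast_model(struct lwp_ast_model *l)
    {
            (void)l;
    }

    void doreti_checkast_model(struct lwp_ast_model *curlwp, int to_user)
    {
            if (!to_user)
                    return;                     /* jz 6f */
            while (curlwp->astpending) {        /* CHECK_ASTPENDING / je 3f */
                    curlwp->astpending = 0;     /* CLEAR_ASTPENDING */
                    handle_ast_model(curlwp);   /* then re-check */
            }
    }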