Sat Dec 7 10:19:35 2019 UTC
Panic instead of printf, same as syscall.


(maxv)
cvs diff -r1.50 -r1.51 src/sys/arch/amd64/amd64/amd64_trap.S

--- src/sys/arch/amd64/amd64/amd64_trap.S 2019/11/14 16:23:52 1.50
+++ src/sys/arch/amd64/amd64/amd64_trap.S 2019/12/07 10:19:35 1.51
@@ -1,743 +1,738 @@
-/*	$NetBSD: amd64_trap.S,v 1.50 2019/11/14 16:23:52 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.51 2019/12/07 10:19:35 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum, by Andrew Doran and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  * 3. All advertising materials mentioning features or use of this software
  *    must display the following acknowledgement:
  *      This product includes software developed for the NetBSD Project by
  *      Wasabi Systems, Inc.
  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
  *    or promote products derived from this software without specific prior
  *    written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <machine/asm.h>
 
 #include "opt_xen.h"
 #include "opt_dtrace.h"
 
 #define ALIGN_TEXT	.align 16,0x90
 
 #include <machine/frameasm.h>
 #include <machine/segments.h>
 #include <machine/trap.h>
 #include <machine/specialreg.h>
 
 #include "assym.h"
 
 /*
  * Trap and fault vector routines
  *
  * On exit from the kernel to user mode, we always need to check for ASTs. In
  * addition, we need to do this atomically; otherwise an interrupt may occur
  * which causes an AST, but it won't get processed until the next kernel entry
  * (possibly the next clock tick). Thus, we disable interrupt before checking,
  * and only enable them again on the final `iret' or before calling the AST
  * handler.
  */
 
 #ifdef XENPV
 #define PRE_TRAP	movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp
 #else
 #define PRE_TRAP
 #endif
 
 #define TRAPENTRY	\
 	INTRENTRY	; \
 	jmp	.Lalltraps_noentry
 
 #define TRAP_NJ(a)	PRE_TRAP ; pushq $(a)
 #define ZTRAP_NJ(a)	PRE_TRAP ; pushq $0 ; pushq $(a)
 #define TRAP(a)		TRAP_NJ(a) ; TRAPENTRY
 #define ZTRAP(a)	ZTRAP_NJ(a) ; TRAPENTRY
 
 	.text
 
 /*
  * ASM macro, used to leave the IST3 stack and to put ourselves on a non-IST
  * stack. Only RDX, RCX and RAX are allowed to be used.
  *
  *                              +------------------------------+
  * The iret frame we copy is:   | rip | cs | rflags | rsp | ss |
  *                              +------------------------------+
  */
 .macro IST3_LEAVE is_user
 	.if	\is_user
 	movq	CPUVAR(CURLWP),%rax
 	movq	L_PCB(%rax),%rax
 	movq	PCB_RSP0(%rax),%rax
 	.else
 	movq	TF_RSP(%rsp),%rax
 	andq	$(~0xF),%rax
 	.endif
 
 	subq	$(5*8),%rax
 	movq	%rax,CPUVAR(SCRATCH)
 
 	/* Copy the iret frame. */
 	movq	TF_SS(%rsp),%rcx
 	movq	%rcx,(4*8)(%rax)
 	movq	TF_RSP(%rsp),%rcx
 	movq	%rcx,(3*8)(%rax)
 	movq	TF_RFLAGS(%rsp),%rcx
 	movq	%rcx,(2*8)(%rax)
 	movq	TF_CS(%rsp),%rcx
 	movq	%rcx,(1*8)(%rax)
 	movq	TF_RIP(%rsp),%rcx
 	movq	%rcx,(0*8)(%rax)
 
 	/* Restore. */
 	movq	TF_RDX(%rsp),%rdx
 	movq	TF_RCX(%rsp),%rcx
 	movq	TF_RAX(%rsp),%rax
 
 	/* Zero out the stack we used, RDX+RCX+RAX+IRET. */
 	movq	$0,TF_RDX(%rsp)
 	movq	$0,TF_RCX(%rsp)
 	movq	$0,TF_RAX(%rsp)
 	movq	$0,TF_RIP(%rsp)
 	movq	$0,TF_CS(%rsp)
 	movq	$0,TF_RFLAGS(%rsp)
 	movq	$0,TF_RSP(%rsp)
 	movq	$0,TF_SS(%rsp)
 
 	movq	CPUVAR(SCRATCH),%rsp
 .endm
 
 	TEXT_USER_BEGIN
 
 IDTVEC(trap00)
 	ZTRAP(T_DIVIDE)
 IDTVEC_END(trap00)
 
 /*
  * Handle the SS shadow, CVE-2018-8897.
  *
  * We are running on the IST3 stack. If we are under an SS shadow, ignore
  * the exception and return immediately. Otherwise, copy the iret frame
  * onto the non-IST stack, and ZTRAP on it as usual.
  *
  * IST3 is used temporarily, and is mapped in userland by SVS. It contains
  * a few secrets, the values of the CPU context. These secrets are zeroed
  * out when we leave.
  *
  * When we ignore an SS shadow, we can't zero out the iret frame. It is
  * not a problem, because in this particular case, the frame is known not
  * to contain secrets.
  */
 IDTVEC(trap01)
 #ifndef XENPV
 	subq	$(TF_REGSIZE+16),%rsp
 
 	/* We clobber only RDX, RCX and RAX. */
 	movq	%rdx,TF_RDX(%rsp)
 	movq	%rcx,TF_RCX(%rsp)
 	movq	%rax,TF_RAX(%rsp)
 
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jnz	.Luser_dbentry
 
 	movl	$MSR_GSBASE,%ecx
 	rdmsr
 	cmpl	$VM_SPACE_SEP_HIGH32,%edx
 	jae	.Lkern_dbentry
 
 	/* SS shadow, ignore the exception. */
 	xorq	%rax,%rax
 	movq	%rax,%dr6
 
 	/* Restore and zero out. */
 	movq	TF_RDX(%rsp),%rdx
 	movq	TF_RCX(%rsp),%rcx
 	movq	TF_RAX(%rsp),%rax
 	movq	$0,TF_RDX(%rsp)
 	movq	$0,TF_RCX(%rsp)
 	movq	$0,TF_RAX(%rsp)
 
 	addq	$(TF_REGSIZE+16),%rsp
 	iretq
 
 .Lkern_dbentry:
 	IST3_LEAVE	0
 	ZTRAP(T_TRCTRAP)
 
 .Luser_dbentry:
 	swapgs
 	SVS_ENTER_ALTSTACK
 	IST3_LEAVE	1
 	ZTRAP_NJ(T_TRCTRAP)
 	subq	$TF_REGSIZE,%rsp
 	INTR_SAVE_GPRS
 	cld
 	SMAP_ENABLE
 	IBRS_ENTER
 	KMSAN_ENTER
 	movw	%gs,TF_GS(%rsp)
 	movw	%fs,TF_FS(%rsp)
 	movw	%es,TF_ES(%rsp)
 	movw	%ds,TF_DS(%rsp)
 
 	jmp	.Lalltraps_noentry
 #else
 	ZTRAP(T_TRCTRAP)
 #endif
 IDTVEC_END(trap01)
 
 /*
  * Non Maskable Interrupts are a special case: they can be triggered even
  * with interrupts disabled, and once triggered they block further NMIs
  * until an 'iret' instruction is executed.
  *
  * Therefore we don't enable interrupts, because the CPU could switch to
  * another LWP, call 'iret' and unintentionally leave the NMI mode.
  *
  * We need to be careful about %gs too, because it is possible that we were
  * running in kernel mode with a userland %gs.
  */
 IDTVEC(trap02)
 #if defined(XENPV)
 	ZTRAP(T_NMI)
 #else
 	ZTRAP_NJ(T_NMI)
 	subq	$TF_REGSIZE,%rsp
 	INTR_SAVE_GPRS
 
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	1f
 	IBRS_ENTER
 1:
 
 	cld
 	SMAP_ENABLE
 	movw	%gs,TF_GS(%rsp)
 	movw	%fs,TF_FS(%rsp)
 	movw	%es,TF_ES(%rsp)
 	movw	%ds,TF_DS(%rsp)
 
 	SVS_ENTER_NMI
 	KMSAN_ENTER
 
 	movl	$MSR_GSBASE,%ecx
 	rdmsr
 	cmpl	$VM_SPACE_SEP_HIGH32,%edx
 	jae	.Lnoswapgs
 
 	swapgs
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
 	swapgs
 	jmp	.Lnmileave
 
 .Lnoswapgs:
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
 
 .Lnmileave:
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	1f
 	MDS_LEAVE
 	IBRS_LEAVE
 1:
 
 	KMSAN_LEAVE
 	SVS_LEAVE_NMI
 	INTR_RESTORE_GPRS
 	addq	$TF_REGSIZE+16,%rsp
 	iretq
 #endif
 IDTVEC_END(trap02)
 
 IDTVEC(trap03)
 #ifndef KDTRACE_HOOKS
 	ZTRAP(T_BPTFLT)
 #else
 	ZTRAP_NJ(T_BPTFLT)
 	INTRENTRY
 	STI(si)
 	/*
 	 * DTrace Function Boundary Trace (fbt) probes are triggered
 	 * by int3 (0xcc).
 	 */
 	/* Check if there is no DTrace hook registered. */
 	cmpq	$0,dtrace_invop_jump_addr
 	je	calltrap
 
 	/*
 	 * Set our jump address for the jump back in the event that
 	 * the exception wasn't caused by DTrace at all.
 	 */
 	/* XXX: This doesn't look right for SMP - unless it is a
 	 * constant - so why set it everytime. (dsl) */
 	movq	$calltrap, dtrace_invop_calltrap_addr(%rip)
 
 	/* Jump to the code hooked in by DTrace. */
 	movq	dtrace_invop_jump_addr, %rax
 	jmpq	*dtrace_invop_jump_addr
 #endif
 IDTVEC_END(trap03)
 
 IDTVEC(trap04)
 	ZTRAP(T_OFLOW)
 IDTVEC_END(trap04)
 
 IDTVEC(trap05)
 	ZTRAP(T_BOUND)
 IDTVEC_END(trap05)
 
 IDTVEC(trap06)
 	ZTRAP(T_PRIVINFLT)
 IDTVEC_END(trap06)
 
 IDTVEC(trap07)
 	ZTRAP_NJ(T_DNA)
 	INTRENTRY
 #ifdef DIAGNOSTIC
 	movl	CPUVAR(ILEVEL),%ebx
 #endif
 	movq	%rsp,%rdi
 	call	_C_LABEL(fpudna)
 	jmp	.Lalltraps_checkusr
 IDTVEC_END(trap07)
 
 /*
  * Double faults execute on a particular stack, and we must not jump out
  * of it. So don't enable interrupts.
  */
 IDTVEC(trap08)
 #if defined(XENPV)
 	TRAP(T_DOUBLEFLT)
 #else
 	TRAP_NJ(T_DOUBLEFLT)
 	subq	$TF_REGSIZE,%rsp
 	INTR_SAVE_GPRS
 
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	1f
 	IBRS_ENTER
 	swapgs
 1:
 
 	SVS_ENTER_ALTSTACK
 
 	cld
 	SMAP_ENABLE
 	movw	%gs,TF_GS(%rsp)
 	movw	%fs,TF_FS(%rsp)
 	movw	%es,TF_ES(%rsp)
 	movw	%ds,TF_DS(%rsp)
 
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(doubletrap)
 
 	testb	$SEL_UPL,TF_CS(%rsp)
 	jz	1f
 	MDS_LEAVE
 	SVS_LEAVE_ALTSTACK
 	IBRS_LEAVE
 	swapgs
 1:
 
 	INTR_RESTORE_GPRS
 	addq	$TF_REGSIZE+16,%rsp
 	iretq
 #endif
 IDTVEC_END(trap08)
 
 IDTVEC(trap09)
 	ZTRAP(T_FPOPFLT)
 IDTVEC_END(trap09)
 
 IDTVEC(trap10)
 	TRAP(T_TSSFLT)
 IDTVEC_END(trap10)
 
 #ifdef XENPV
 /*
  * I don't believe XEN generates in-kernel traps for the
  * equivalent of iret, if it does this code would be needed
  * in order to copy the user segment registers into the fault frame.
  */
 #define kernuser_reenter	alltraps
 #endif
 
 IDTVEC(trap11)		/* #NP() Segment not present */
 	TRAP_NJ(T_SEGNPFLT)
 	jmp	kernuser_reenter
 IDTVEC_END(trap11)
 
 IDTVEC(trap12)		/* #SS() Stack exception */
 	TRAP_NJ(T_STKFLT)
 	jmp	kernuser_reenter
 IDTVEC_END(trap12)
 
 IDTVEC(trap13)		/* #GP() General protection */
 	TRAP_NJ(T_PROTFLT)
 	jmp	kernuser_reenter
 IDTVEC_END(trap13)
 
 IDTVEC(trap14)
 	TRAP(T_PAGEFLT)
 IDTVEC_END(trap14)
 
 IDTVEC(trap15)
 	ZTRAP_NJ(T_ASTFLT)
 	INTRENTRY
 #ifdef DIAGNOSTIC
 	movl	CPUVAR(ILEVEL),%ebx
 #endif
 	jmp	.Lalltraps_checkusr
 IDTVEC_END(trap15)
 
 IDTVEC(trap16)
 	ZTRAP_NJ(T_ARITHTRAP)
 .Ldo_fputrap:
 	INTRENTRY
 #ifdef DIAGNOSTIC
 	movl	CPUVAR(ILEVEL),%ebx
 #endif
 	movq	%rsp,%rdi
 	call	_C_LABEL(fputrap)
 	jmp	.Lalltraps_checkusr
 IDTVEC_END(trap16)
 
 IDTVEC(trap17)
 	TRAP(T_ALIGNFLT)
 IDTVEC_END(trap17)
 
 IDTVEC(trap18)
 	ZTRAP(T_MCA)
 IDTVEC_END(trap18)
 
 IDTVEC(trap19)
 	ZTRAP_NJ(T_XMM)
 	jmp	.Ldo_fputrap
 IDTVEC_END(trap19)
 
 IDTVEC(trap20)
 IDTVEC(trap21)
 IDTVEC(trap22)
 IDTVEC(trap23)
 IDTVEC(trap24)
 IDTVEC(trap25)
 IDTVEC(trap26)
 IDTVEC(trap27)
 IDTVEC(trap28)
 IDTVEC(trap29)
 IDTVEC(trap30)
 IDTVEC(trap31)
 	/* 20 - 31 reserved for future exp */
 	ZTRAP(T_RESERVED)
 IDTVEC_END(trap20)
 IDTVEC_END(trap21)
 IDTVEC_END(trap22)
 IDTVEC_END(trap23)
 IDTVEC_END(trap24)
 IDTVEC_END(trap25)
 IDTVEC_END(trap26)
 IDTVEC_END(trap27)
 IDTVEC_END(trap28)
 IDTVEC_END(trap29)
 IDTVEC_END(trap30)
 IDTVEC_END(trap31)
 
 IDTVEC(intrspurious)
 	ZTRAP_NJ(T_ASTFLT)
 	INTRENTRY
 #ifdef DIAGNOSTIC
 	movl	CPUVAR(ILEVEL),%ebx
 #endif
 	jmp	.Lalltraps_checkusr
 IDTVEC_END(intrspurious)
 
 #ifndef kernuser_reenter
 /*
  * We need to worry about traps in kernel mode while the kernel %gs isn't
  * loaded. When such traps happen, we have CPL=0 and %gs=userland, and we
  * must perform an additional swapgs to get %gs=kernel.
  */
 
 #define TF_SMALL(val, reg)		(val - TF_REGSIZE)(reg)
 #define TF_SMALL_REGPUSHED(val, reg)	(val - (TF_REGSIZE - 8))(reg)
 
 /*
  * It is possible that we received a trap in kernel mode, but with the user
  * context loaded. There are five cases where this can happen:
  *
  *  o Execution of IRETQ.
  *  o Reload of ES.
  *  o Reload of DS.
  *  o Reload of FS.
  *  o Reload of GS.
  *
  * When this happens, the kernel is re-entered in kernel mode, but the
  * previous context is in kernel mode too.
  *
  * We have two iret frames in the stack. In the first one, we also pushed
  * 'trapno' and 'err'. The 'rsp' field points to the outer iret frame:
  *
  * +---------------------------------------------------+
  * | trapno | err | rip | cs=ring0 | rflags | rsp | ss |
  * +-------------------------------------------|-------+
  *                                             |
  *           +---------------------------------+
  *           |
  *           |    +------------------------------------+
  *           +--> | rip | cs=ring3 | rflags | rsp | ss |
  *                +------------------------------------+
  *
  * We perform a three-step procedure:
  *
  *  o We update RSP to point to the outer frame. This outer frame is in the
  *    same stack as the current frame, and likely just after the current
  *    frame.
  *
  *  o We push, in this outer frame, the 'err' and 'trapno' fields of the
  *    CURRENT frame.
  *
  *  o We do a normal INTRENTRY. Now that RSP points to the outer frame,
  *    everything behaves as if we had received a trap from the outer frame,
  *    that is to say, from userland directly.
  *
  * Finally, we jump to 'calltrap' and handle the trap smoothly.
  *
  * Two notes regarding SVS:
  *
  *  o With SVS, we will receive the trap while the user page tables are
  *    loaded. That's not a problem, we don't touch anything unmapped here.
  *
  *  o With SVS, when the user page tables are loaded, the stack is really
  *    small, and can contain only one trapframe structure. Therefore, in
  *    intrfastexit, we must save the GPRs and pop their part of the stack
  *    right away. If we weren't doing that, and the reload of ES faulted for
  *    example, then the CPU would try to push an iret frame on the current
  *    stack (nested), and would double-fault because it touches the redzone
  *    below the stack (see the documentation in x86/x86/svs.c). By popping
  *    the GPR part of the stack, we leave enough stack for the CPU to push
  *    an iret frame, and for us to push one 8-byte register (%rdi) too.
  */
 	_ALIGN_TEXT
 LABEL(kernuser_reenter)
 	testb	$SEL_UPL,TF_SMALL(TF_CS, %rsp)
 	jz	.Lkernelmode
 
 .Lnormal_entry:
 	INTRENTRY
 	sti
 	jmp	calltrap
 
 .Lkernelmode:
 	/* We will clobber %rdi */
 	pushq	%rdi
 
 	/* Case 1: fault on iretq? */
 	leaq	do_iret(%rip),%rdi
 	cmpq	%rdi,TF_SMALL_REGPUSHED(TF_RIP, %rsp)
 	jne	5f
 	movq	TF_SMALL_REGPUSHED(TF_RSP, %rsp),%rdi	/* get %rsp */
 	testb	$SEL_UPL,8(%rdi)	/* check %cs of outer iret frame */
 	je	.Lnormal_entry		/* jump if iret was to kernel */
 	jmp	.Lkernelmode_but_user	/* to user - must restore %gs */
 5:
 
 	/* Case 2: move to %es? */
 	leaq	do_mov_es(%rip),%rdi
 	cmpq	%rdi,TF_SMALL_REGPUSHED(TF_RIP, %rsp)
 	je	.Lkernelmode_but_user
 
 	/* Case 3: move to %ds? */
 	leaq	do_mov_ds(%rip),%rdi
 	cmpq	%rdi,TF_SMALL_REGPUSHED(TF_RIP, %rsp)
 	je	.Lkernelmode_but_user
 
 	/* Case 4: move to %fs? */
 	leaq	do_mov_fs(%rip),%rdi
 	cmpq	%rdi,TF_SMALL_REGPUSHED(TF_RIP, %rsp)
 	je	.Lkernelmode_but_user
 
 	/* Case 5: move to %gs? */
 	leaq	do_mov_gs(%rip),%rdi
 	cmpq	%rdi,TF_SMALL_REGPUSHED(TF_RIP, %rsp)
 	je	.Lkernelmode_but_user
 
 	/* None of the above cases: normal kernel fault */
 	popq	%rdi
 	jmp	.Lnormal_entry
 
 .Lkernelmode_but_user:
 	/*
 	 * Here we have %rdi pushed on the stack, hence 8+.
 	 */
 	movq	%rsp,%rdi
 	movq	TF_SMALL_REGPUSHED(TF_RSP, %rsp),%rsp
 
 	/* Push tf_err and tf_trapno */
 	pushq	8+8(%rdi)	/* 8+8(%rdi) = current TF_ERR */
 	pushq	8+0(%rdi)	/* 8+0(%rdi) = current TF_TRAPNO */
 
 	/* Restore %rdi */
 	movq	(%rdi),%rdi
 
 	jmp	.Lnormal_entry
 END(kernuser_reenter)
 #endif
 
 	TEXT_USER_END
 
 /*
  * All traps go through here. Call the generic trap handler, and
  * check for ASTs afterwards.
  */
 ENTRY(alltraps)
 	INTRENTRY
 .Lalltraps_noentry:
 	STI(si)
 
 calltrap:
 #ifdef DIAGNOSTIC
 	movl	CPUVAR(ILEVEL),%ebx
 #endif
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(trap)
 
 .Lalltraps_checkusr:
 	testb	$SEL_RPL,TF_CS(%rsp)
 	jz	6f
 
 .Lalltraps_checkast:
 	movq	CPUVAR(CURLWP),%r14
 	/* Check for ASTs on exit to user mode. */
 	CLI(si)
 	CHECK_ASTPENDING(%r14)
 	je	3f
 	CLEAR_ASTPENDING(%r14)
 	STI(si)
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	KMSAN_INIT_ARG(8)
 	call	_C_LABEL(trap)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 3:	CHECK_DEFERRED_SWITCH
 	jnz	9f
 	HANDLE_DEFERRED_FPU
 
 6:
 #ifdef DIAGNOSTIC
 	cmpl	CPUVAR(ILEVEL),%ebx
 	jne	.Lspl_error
 #endif
 	INTRFASTEXIT
 
 9:	STI(si)
 	call	_C_LABEL(do_pmap_load)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 
 #ifdef DIAGNOSTIC
 .Lspl_error:
 	STI(si)
 	movabsq	$4f,%rdi
 	movl	CPUVAR(ILEVEL),%esi
-	movl	%ebx,%edx
-	xorq	%rax,%rax
-	call	_C_LABEL(printf)
-	movl	%ebx,%edi
-	call	_C_LABEL(spllower)
-	jmp	.Lalltraps_checkast
-4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n"
+	call	_C_LABEL(panic)
+4:	.asciz	"spl not lowered on trap exit, ilevel=%x"
 #endif
 END(alltraps)
 
 #ifdef KDTRACE_HOOKS
 	.bss
 	.globl	dtrace_invop_jump_addr
 	.align	8
 	.type	dtrace_invop_jump_addr, @object
 	.size	dtrace_invop_jump_addr, 8
 dtrace_invop_jump_addr:
 	.zero	8
 	.globl	dtrace_invop_calltrap_addr
 	.align	8
 	.type	dtrace_invop_calltrap_addr, @object
 	.size	dtrace_invop_calltrap_addr, 8
 dtrace_invop_calltrap_addr:
 	.zero	8
 #endif
 
 	.section .rodata
 
 LABEL(x86_exceptions)
 	.quad	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
 	.quad	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
 	.quad	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
 	.quad	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
 	.quad	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
 	.quad	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
 	.quad	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
 	.quad	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
 	.quad	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
 	.quad	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
 	.quad	_C_LABEL(Xtrap20), _C_LABEL(Xtrap21)
 	.quad	_C_LABEL(Xtrap22), _C_LABEL(Xtrap23)
 	.quad	_C_LABEL(Xtrap24), _C_LABEL(Xtrap25)
 	.quad	_C_LABEL(Xtrap26), _C_LABEL(Xtrap27)
 	.quad	_C_LABEL(Xtrap28), _C_LABEL(Xtrap29)
 	.quad	_C_LABEL(Xtrap30), _C_LABEL(Xtrap31)
 END(x86_exceptions)
 
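
For readers skimming the diff: the only functional change is in the DIAGNOSTIC block at the end of alltraps, where a mismatch between the SPL recorded at trap entry (saved in %ebx) and the SPL at trap exit used to be reported with printf and patched up with spllower, and now panics, as the commit message says, matching the syscall path. Below is a minimal C sketch of the before/after behavior, for illustration only; the function name check_spl_on_trap_exit and its arguments are hypothetical, the real code is the assembly above, and only printf(9), spllower(9) and panic(9) are actual kernel interfaces.

	/*
	 * Illustration only: a C sketch of the DIAGNOSTIC SPL check at
	 * the end of alltraps. The authoritative code is the assembly
	 * in the diff above; this function is hypothetical.
	 */
	#include <sys/systm.h>		/* printf(9), panic(9) */
	#include <machine/intr.h>	/* spllower(9) */

	static void
	check_spl_on_trap_exit(int cur_ilevel, int entry_ilevel)
	{
		if (cur_ilevel == entry_ilevel)
			return;		/* SPL correctly restored */
	#if 0	/* rev 1.50: warn, repair the SPL, and keep running */
		printf("WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n",
		    cur_ilevel, entry_ilevel);
		spllower(entry_ilevel);
	#else	/* rev 1.51: treat the mismatch as a fatal kernel bug */
		panic("spl not lowered on trap exit, ilevel=%x", cur_ilevel);
	#endif
	}

Note that the new panic message reports only the current IPL, which is what the assembly loads into %esi; the entry level in %ebx is no longer printed, since the system halts anyway rather than attempting to limp on with a silently repaired SPL.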