Sat Oct 12 06:31:04 2019 UTC
Rewrite the FPU code on x86. This greatly simplifies the logic and removes
the dependency on IPL_HIGH. NVMM is updated accordingly. Posted on
port-amd64 a week ago.

Bump the kernel version to 9.99.16.


(maxv)
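
A note on the shape of the change, for readers following the diffs below:
the old code tracked FPU ownership per CPU (ci_fpcurlwp / pcb_fpcpu) and
relied on CR0_TS traps and FPU IPIs (hence the IPL_HIGH dependency) to
lazily migrate state between CPUs. The new code keeps each LWP's state in
its own pcb_savefpu, and a per-LWP flag, MDL_FPU_IN_CPU, records whether
the live FPU registers currently hold that state. On each return to user
mode, with interrupts disabled, the new HANDLE_DEFERRED_FPU macro reloads
the state if the flag is clear. A C rendering of that macro, as a sketch
only (fpu_handle_deferred() is real, but its body is in the fpu.c diff
listed below and not quoted in this mail):

	static inline void
	handle_deferred_fpu(struct lwp *l)
	{
		/* Interrupts are disabled here; l is curlwp. */
		if ((l->l_md.md_flags & MDL_FPU_IN_CPU) == 0) {
			/* Assumed: loads l's pcb_savefpu into the FPU. */
			fpu_handle_deferred();
			l->l_md.md_flags |= MDL_FPU_IN_CPU;
		}
	}
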
diff -r1.48 -r1.49 src/sys/arch/amd64/amd64/amd64_trap.S
diff -r1.76 -r1.77 src/sys/arch/amd64/amd64/genassym.cf
diff -r1.188 -r1.189 src/sys/arch/amd64/amd64/locore.S
diff -r1.336 -r1.337 src/sys/arch/amd64/amd64/machdep.c
diff -r1.40 -r1.41 src/sys/arch/amd64/amd64/spl.S
diff -r1.44 -r1.45 src/sys/arch/amd64/include/frameasm.h
diff -r1.29 -r1.30 src/sys/arch/amd64/include/pcb.h
diff -r1.22 -r1.23 src/sys/arch/amd64/include/proc.h
diff -r1.113 -r1.114 src/sys/arch/i386/i386/genassym.cf
diff -r1.19 -r1.20 src/sys/arch/i386/i386/i386_trap.S
diff -r1.171 -r1.172 src/sys/arch/i386/i386/locore.S
diff -r1.820 -r1.821 src/sys/arch/i386/i386/machdep.c
diff -r1.48 -r1.49 src/sys/arch/i386/i386/spl.S
diff -r1.28 -r1.29 src/sys/arch/i386/include/frameasm.h
diff -r1.58 -r1.59 src/sys/arch/i386/include/pcb.h
diff -r1.45 -r1.46 src/sys/arch/i386/include/proc.h
diff -r1.50 -r1.51 src/sys/arch/x86/acpi/acpi_wakeup.c
diff -r1.109 -r1.110 src/sys/arch/x86/include/cpu.h
diff -r1.18 -r1.19 src/sys/arch/x86/include/fpu.h
diff -r1.172 -r1.173 src/sys/arch/x86/x86/cpu.c
diff -r1.57 -r1.58 src/sys/arch/x86/x86/fpu.c
diff -r1.27 -r1.28 src/sys/arch/x86/x86/ipi.c
diff -r1.37 -r1.38 src/sys/arch/x86/x86/vm_machdep.c
diff -r1.129 -r1.130 src/sys/arch/xen/x86/cpu.c
diff -r1.32 -r1.33 src/sys/arch/xen/x86/xen_ipi.c
diff -r1.49 -r1.50 src/sys/dev/nvmm/x86/nvmm_x86_svm.c
diff -r1.38 -r1.39 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c
diff -r1.616 -r1.617 src/sys/sys/param.h

cvs diff -r1.48 -r1.49 src/sys/arch/amd64/amd64/amd64_trap.S

--- src/sys/arch/amd64/amd64/amd64_trap.S 2019/05/18 13:32:12 1.48
+++ src/sys/arch/amd64/amd64/amd64_trap.S 2019/10/12 06:31:03 1.49
@@ -1,14 +1,14 @@
-/*	$NetBSD: amd64_trap.S,v 1.48 2019/05/18 13:32:12 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.49 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum, by Andrew Doran and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -662,26 +662,27 @@ calltrap:
 	/* Check for ASTs on exit to user mode. */
 	CLI(si)
 	CHECK_ASTPENDING(%r14)
 	je	3f
 	CLEAR_ASTPENDING(%r14)
 	STI(si)
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(trap)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 3:	CHECK_DEFERRED_SWITCH
 	jnz	9f
+	HANDLE_DEFERRED_FPU
 
 6:
 #ifdef DIAGNOSTIC
 	cmpl	CPUVAR(ILEVEL),%ebx
 	jne	.Lspl_error
 #endif
 	INTRFASTEXIT
 
 9:	STI(si)
 	call	_C_LABEL(do_pmap_load)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 
 #ifdef DIAGNOSTIC

cvs diff -r1.76 -r1.77 src/sys/arch/amd64/amd64/genassym.cf

--- src/sys/arch/amd64/amd64/genassym.cf 2019/05/29 16:54:41 1.76
+++ src/sys/arch/amd64/amd64/genassym.cf 2019/10/12 06:31:03 1.77
@@ -1,14 +1,14 @@
-#	$NetBSD: genassym.cf,v 1.76 2019/05/29 16:54:41 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.77 2019/10/12 06:31:03 maxv Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 # All rights reserved.
 #
 # This code is derived from software contributed to The NetBSD Foundation
 # by Charles M. Hannum, and by Andrew Doran.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 #    notice, this list of conditions and the following disclaimer.
@@ -154,50 +154,52 @@ define L_CTXSWTCH offsetof(struct lwp,
 define	L_NCSW			offsetof(struct lwp, l_ncsw)
 define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
 define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)
 define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_KPRIORITY		offsetof(struct lwp, l_kpriority)
 define	L_MD_REGS		offsetof(struct lwp, l_md.md_regs)
 define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 
 define	PAGE_SIZE		PAGE_SIZE
 
 define	MDL_IRET		MDL_IRET
 define	MDL_COMPAT32		MDL_COMPAT32
+define	MDL_FPU_IN_CPU		MDL_FPU_IN_CPU
 
 define	P_FLAG			offsetof(struct proc, p_flag)
 define	P_RASLIST		offsetof(struct proc, p_raslist)
 define	P_MD_SYSCALL		offsetof(struct proc, p_md.md_syscall)
 
 define	LW_SYSTEM		LW_SYSTEM
 
 define	M_DATA			offsetof(struct mbuf, m_data)
 define	M_LEN			offsetof(struct mbuf, m_len)
 define	M_NEXT			offsetof(struct mbuf, m_next)
 
 define	IP_SRC			offsetof(struct ip, ip_src)
 define	IP_DST			offsetof(struct ip, ip_dst)
 
 define	PCB_CR3			offsetof(struct pcb, pcb_cr3)
 define	PCB_RBP			offsetof(struct pcb, pcb_rbp)
 define	PCB_RSP			offsetof(struct pcb, pcb_rsp)
 define	PCB_RSP0		offsetof(struct pcb, pcb_rsp0)
 define	PCB_CR0			offsetof(struct pcb, pcb_cr0)
 define	PCB_ONFAULT		offsetof(struct pcb, pcb_onfault)
 define	PCB_FLAGS		offsetof(struct pcb, pcb_flags)
 define	PCB_COMPAT32		PCB_COMPAT32
 define	PCB_FS			offsetof(struct pcb, pcb_fs)
 define	PCB_GS			offsetof(struct pcb, pcb_gs)
+define	PCB_SAVEFPU		offsetof(struct pcb, pcb_savefpu)
 
 define	TF_RDI			offsetof(struct trapframe, tf_rdi)
 define	TF_RSI			offsetof(struct trapframe, tf_rsi)
 define	TF_RDX			offsetof(struct trapframe, tf_rdx)
 define	TF_RCX			offsetof(struct trapframe, tf_rcx)
 define	TF_R8			offsetof(struct trapframe, tf_r8)
 define	TF_R9			offsetof(struct trapframe, tf_r9)
 define	TF_R10			offsetof(struct trapframe, tf_r10)
 define	TF_R11			offsetof(struct trapframe, tf_r11)
 define	TF_R12			offsetof(struct trapframe, tf_r12)
 define	TF_R13			offsetof(struct trapframe, tf_r13)
 define	TF_R14			offsetof(struct trapframe, tf_r14)
 define	TF_R15			offsetof(struct trapframe, tf_r15)
@@ -234,27 +236,26 @@ define CPU_INFO_CURLDT offsetof(struct
 define	CPU_INFO_IDLELWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_PMAP		offsetof(struct cpu_info, ci_pmap)
 define	CPU_INFO_TSS		offsetof(struct cpu_info, ci_tss)
 ifdef SVS
 define	CPU_INFO_UPDIRPA	offsetof(struct cpu_info, ci_svs_updirpa)
 define	CPU_INFO_RSP0		offsetof(struct cpu_info, ci_svs_rsp0)
 define	CPU_INFO_URSP0		offsetof(struct cpu_info, ci_svs_ursp0)
 define	CPU_INFO_KRSP0		offsetof(struct cpu_info, ci_svs_krsp0)
 endif
 define	CPU_INFO_NSYSCALL	offsetof(struct cpu_info, ci_data.cpu_nsyscall)
 define	CPU_INFO_NTRAP		offsetof(struct cpu_info, ci_data.cpu_ntrap)
 define	CPU_INFO_NINTR		offsetof(struct cpu_info, ci_data.cpu_nintr)
 define	CPU_INFO_CURPRIORITY	offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
-define	CPU_INFO_FPCURLWP	offsetof(struct cpu_info, ci_fpcurlwp)
 
 define	CPU_INFO_GDT		offsetof(struct cpu_info, ci_gdt)
 define	CPU_INFO_ILEVEL		offsetof(struct cpu_info, ci_ilevel)
 define	CPU_INFO_IDEPTH		offsetof(struct cpu_info, ci_idepth)
 if !defined(XENPV)
 define	CPU_INFO_IPENDING	offsetof(struct cpu_info, ci_ipending)
 define	CPU_INFO_IMASK		offsetof(struct cpu_info, ci_imask)
 define	CPU_INFO_IUNMASK	offsetof(struct cpu_info, ci_iunmask)
 define	CPU_INFO_ISOURCES	offsetof(struct cpu_info, ci_isources)
 endif
 define	CPU_INFO_MTX_COUNT	offsetof(struct cpu_info, ci_mtx_count)
 define	CPU_INFO_MTX_OLDSPL	offsetof(struct cpu_info, ci_mtx_oldspl)
 define	CPU_INFO_CPUID		offsetof(struct cpu_info, ci_cpuid)

cvs diff -r1.188 -r1.189 src/sys/arch/amd64/amd64/locore.S

--- src/sys/arch/amd64/amd64/locore.S 2019/10/04 11:47:07 1.188
+++ src/sys/arch/amd64/amd64/locore.S 2019/10/12 06:31:03 1.189
@@ -1,14 +1,14 @@
-/*	$NetBSD: locore.S,v 1.188 2019/10/04 11:47:07 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.189 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
  */
 
 /*
  * Copyright (c) 1998, 2000, 2007, 2008, 2016 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1160,49 +1160,30 @@ ENTRY(cpu_switchto)
 	movq	L_PROC(%r12),%rdi
 	cmpq	$0,P_RASLIST(%rdi)
 	je	.Lno_RAS
 
 	/* Handle restartable atomic sequences (RAS). */
 	movq	L_MD_REGS(%r12),%rbx
 	movq	TF_RIP(%rbx),%rsi
 	call	_C_LABEL(ras_lookup)
 	cmpq	$-1,%rax
 	je	.Lno_RAS
 	movq	%rax,TF_RIP(%rbx)
 .Lno_RAS:
 
-	/*
-	 * Restore cr0 including FPU state (may have CR0_TS set). Note that
-	 * IPL_SCHED prevents from FPU interrupt altering the LWP's saved cr0.
-	 */
 #ifndef XENPV
+	/* Raise the IPL to IPL_HIGH. Dropping the priority is deferred until
+	 * mi_switch(), when cpu_switchto() returns. XXX Still needed? */
 	movl	$IPL_HIGH,CPUVAR(ILEVEL)
-	movl	PCB_CR0(%r14),%ecx	/* has CR0_TS clear */
-	movq	%cr0,%rdx
-
-	/*
-	 * If our floating point registers are on a different CPU,
-	 * set CR0_TS so we'll trap rather than reuse bogus state.
-	 */
-	cmpq	CPUVAR(FPCURLWP),%r12
-	je	.Lskip_TS
-	orq	$CR0_TS,%rcx
-.Lskip_TS:
-
-	/* Reloading CR0 is very expensive - avoid if possible. */
-	cmpq	%rdx,%rcx
-	je	.Lskip_CR0
-	movq	%rcx,%cr0
-.Lskip_CR0:
 
 	/* The 32bit LWPs are handled differently. */
 	testl	$PCB_COMPAT32,PCB_FLAGS(%r14)
 	jnz	.Llwp_32bit
 
 .Llwp_64bit:
 	/* Set default 64bit values in %ds, %es, %fs and %gs. */
 	movq	$GSEL(GUDATA_SEL, SEL_UPL),%rax
 	movw	%ax,%ds
 	movw	%ax,%es
 	xorq	%rax,%rax
 	movw	%ax,%fs
 	CLI(cx)
@@ -1295,26 +1276,28 @@ ENTRY(handle_syscall)
 	 * registers loaded.
 	 */
 	CLI(si)
 	/* Check for ASTs on exit to user mode. */
 	movl	L_MD_ASTPENDING(%r14),%eax
 	orl	CPUVAR(WANT_PMAPLOAD),%eax
 	jnz	9f
 
 #ifdef DIAGNOSTIC
 	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
 	jne	.Lspl_error
 #endif
 
+	HANDLE_DEFERRED_FPU
+
 	/*
 	 * Decide if we need to take a slow path. That's the case when we
 	 * want to reload %cs and %ss on a 64bit LWP (MDL_IRET set), or when
 	 * we're returning to a 32bit LWP (MDL_COMPAT32 set).
 	 *
 	 * In either case, we jump into intrfastexit and return to userland
 	 * with the iret instruction.
 	 */
 	testl	$(MDL_IRET|MDL_COMPAT32),L_MD_FLAGS(%r14)
 	jnz	intrfastexit
 
 	jmp	syscall_sysret
 

cvs diff -r1.336 -r1.337 src/sys/arch/amd64/amd64/machdep.c

--- src/sys/arch/amd64/amd64/machdep.c 2019/08/21 20:30:36 1.336
+++ src/sys/arch/amd64/amd64/machdep.c 2019/10/12 06:31:03 1.337
@@ -1,14 +1,14 @@
-/*	$NetBSD: machdep.c,v 1.336 2019/08/21 20:30:36 skrll Exp $	*/
+/*	$NetBSD: machdep.c,v 1.337 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
  * Simulation Facility, NASA Ames Research Center.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Coyote Point Systems, Inc. which was written under contract to Coyote
  * Point by Jed Davis and Devon O'Dell.
@@ -100,27 +100,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.336 2019/08/21 20:30:36 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.337 2019/10/12 06:31:03 maxv Exp $");
 
 #include "opt_modular.h"
 #include "opt_user_ldt.h"
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
 #include "opt_cpureset_delay.h"
 #include "opt_mtrr.h"
 #include "opt_realmem.h"
 #include "opt_xen.h"
 #include "opt_svs.h"
 #include "opt_kaslr.h"
 #include "opt_kasan.h"
 #ifndef XENPV
@@ -432,38 +432,29 @@ x86_64_switch_context(struct pcb *new)
 	physop.u.set_iopl.iopl = new->pcb_iopl;
 	HYPERVISOR_physdev_op(&physop);
 }
 
 void
 x86_64_tls_switch(struct lwp *l)
 {
 	struct cpu_info *ci = curcpu();
 	struct pcb *pcb = lwp_getpcb(l);
 	struct trapframe *tf = l->l_md.md_regs;
 	uint64_t zero = 0;
 
 	/*
-	 * Raise the IPL to IPL_HIGH.
-	 * FPU IPIs can alter the LWP's saved cr0. Dropping the priority
-	 * is deferred until mi_switch(), when cpu_switchto() returns.
+	 * Raise the IPL to IPL_HIGH. XXX Still needed?
 	 */
 	(void)splhigh();
-	/*
-	 * If our floating point registers are on a different CPU,
-	 * set CR0_TS so we'll trap rather than reuse bogus state.
-	 */
-	if (l != ci->ci_fpcurlwp) {
-		HYPERVISOR_fpu_taskswitch(1);
-	}
 
 	/* Update segment registers */
 	if (pcb->pcb_flags & PCB_COMPAT32) {
 		update_descriptor(&ci->ci_gdt[GUFS_SEL], &pcb->pcb_fs);
 		update_descriptor(&ci->ci_gdt[GUGS_SEL], &pcb->pcb_gs);
 		setds(GSEL(GUDATA32_SEL, SEL_UPL));
 		setes(GSEL(GUDATA32_SEL, SEL_UPL));
 		setfs(GSEL(GUDATA32_SEL, SEL_UPL));
 		HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, tf->tf_gs);
 	} else {
 		update_descriptor(&ci->ci_gdt[GUFS_SEL], &zero);
 		update_descriptor(&ci->ci_gdt[GUGS_SEL], &zero);
 		setds(GSEL(GUDATA_SEL, SEL_UPL));

cvs diff -r1.40 -r1.41 src/sys/arch/amd64/amd64/spl.S

--- src/sys/arch/amd64/amd64/spl.S 2019/02/14 08:18:25 1.40
+++ src/sys/arch/amd64/amd64/spl.S 2019/10/12 06:31:03 1.41
@@ -1,14 +1,14 @@
-/*	$NetBSD: spl.S,v 1.40 2019/02/14 08:18:25 cherry Exp $	*/
+/*	$NetBSD: spl.S,v 1.41 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -387,22 +387,23 @@ LABEL(doreti_checkast)
 	CHECK_ASTPENDING(%r14)
 	je	3f
 	CLEAR_ASTPENDING(%r14)
 	STI(si)
 	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
 	/* Pushed T_ASTFLT into tf_trapno on entry. */
 	movq	%rsp,%rdi
 	call	_C_LABEL(trap)
 	CLI(si)
 	jmp	doreti_checkast
 3:
 	CHECK_DEFERRED_SWITCH
 	jnz	9f
+	HANDLE_DEFERRED_FPU
 6:
 	INTRFASTEXIT
 9:
 	STI(si)
 	call	_C_LABEL(do_pmap_load)
 	CLI(si)
 	jmp	doreti_checkast	/* recheck ASTs */
 END(doreti_checkast)
 IDTVEC_END(doreti)

cvs diff -r1.44 -r1.45 src/sys/arch/amd64/include/frameasm.h

--- src/sys/arch/amd64/include/frameasm.h 2019/05/18 13:32:12 1.44
+++ src/sys/arch/amd64/include/frameasm.h 2019/10/12 06:31:03 1.45
@@ -1,14 +1,14 @@
-/*	$NetBSD: frameasm.h,v 1.44 2019/05/18 13:32:12 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.45 2019/10/12 06:31:03 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
 
 #ifdef _KERNEL_OPT
 #include "opt_xen.h"
 #include "opt_svs.h"
 #endif
 
 /*
  * Macros to define pushing/popping frames for interrupts, traps
  * and system calls. Currently all the same; will diverge later.
  */
@@ -236,14 +236,28 @@
 	pushq	%r13		;
 
 #define INTR_RECURSE_ENTRY \
 	subq	$TF_REGSIZE,%rsp	; \
 	INTR_SAVE_GPRS			; \
 	cld
 
 #define CHECK_DEFERRED_SWITCH \
 	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
 
 #define CHECK_ASTPENDING(reg)	cmpl	$0, L_MD_ASTPENDING(reg)
 #define CLEAR_ASTPENDING(reg)	movl	$0, L_MD_ASTPENDING(reg)
 
+/*
+ * If the FPU state is not in the CPU, restore it. Executed with interrupts
+ * disabled.
+ *
+ *     %r14 is curlwp, must not be modified
+ *     %rbx must not be modified
+ */
+#define HANDLE_DEFERRED_FPU \
+	testl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
+	jnz	1f					; \
+	call	_C_LABEL(fpu_handle_deferred)		; \
+	orl	$MDL_FPU_IN_CPU,L_MD_FLAGS(%r14)	; \
+1:
+
 #endif /* _AMD64_MACHINE_FRAMEASM_H */
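
The macro above is the restore half of the invariant; the save half lives
in fpu.c (r1.57 -> r1.58, listed above but not quoted in this mail):
whenever the kernel switches away from an LWP or claims the FPU for its
own use, it saves the live registers into pcb_savefpu and clears
MDL_FPU_IN_CPU. A hedged sketch of that side, with fpu_save_lwp() and
fpu_area_save() as assumed names:

	void
	fpu_save_lwp(struct lwp *l)
	{
		struct pcb *pcb = lwp_getpcb(l);

		kpreempt_disable();
		if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
			/* xsave/fxsave the live registers into the PCB */
			fpu_area_save(&pcb->pcb_savefpu);
			l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
		}
		kpreempt_enable();
	}
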

cvs diff -r1.29 -r1.30 src/sys/arch/amd64/include/pcb.h

--- src/sys/arch/amd64/include/pcb.h 2018/07/26 09:29:08 1.29
+++ src/sys/arch/amd64/include/pcb.h 2019/10/12 06:31:03 1.30
@@ -1,14 +1,14 @@
-/*	$NetBSD: pcb.h,v 1.29 2018/07/26 09:29:08 maxv Exp $	*/
+/*	$NetBSD: pcb.h,v 1.30 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -86,26 +86,25 @@ struct pcb {
 	uint64_t pcb_cr2;	/* page fault address (CR2) */
 	uint64_t pcb_cr3;
 	uint64_t pcb_rsp;
 	uint64_t pcb_rbp;
 	void *pcb_onfault;	/* copyin/out fault recovery */
 	uint64_t pcb_fs;
 	uint64_t pcb_gs;
 	struct dbreg *pcb_dbregs;
 	uint16_t pcb_fpu_dflt_cw;
 	int pcb_iopl;
 
 	uint32_t pcb_unused[8];	/* unused */
 
-	struct cpu_info *pcb_fpcpu;	/* cpu holding our fp state. */
 	union savefpu pcb_savefpu __aligned(64);	/* floating point state */
 	/* **** DO NOT ADD ANYTHING HERE **** */
 };
 __CTASSERT(sizeof(struct pcb) - sizeof (union savefpu) == 128);
 
 #else /* __x86_64__ */
 
 #include <i386/pcb.h>
 
 #endif /* __x86_64__ */
 
 #endif /* _AMD64_PCB_H_ */

cvs diff -r1.22 -r1.23 src/sys/arch/amd64/include/proc.h

--- src/sys/arch/amd64/include/proc.h 2017/02/25 13:34:21 1.22
+++ src/sys/arch/amd64/include/proc.h 2019/10/12 06:31:03 1.23
@@ -1,14 +1,14 @@
-/*	$NetBSD: proc.h,v 1.22 2017/02/25 13:34:21 kamil Exp $	*/
+/*	$NetBSD: proc.h,v 1.23 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright (c) 1991 Regents of the University of California.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -45,26 +45,27 @@
 struct pmap;
 struct vm_page;
 
 struct mdlwp {
 	struct trapframe *md_regs;	/* registers on current frame */
 	struct pmap *md_gc_pmap;	/* pmap being garbage collected */
 	struct vm_page *md_gc_ptp;	/* pages from pmap g/c */
 	int md_flags;			/* machine-dependent flags */
 	volatile int md_astpending;
 };
 
 #define	MDL_COMPAT32	0x0008	/* i386, always return via iret */
 #define	MDL_IRET	0x0010	/* force return via iret, not sysret */
+#define	MDL_FPU_IN_CPU	0x0020	/* the FPU state is in the CPU */
 
 struct mdproc {
 	int md_flags;
 	/* Syscall handling function */
 	void (*md_syscall)(struct trapframe *);
 };
 
 /* md_flags */
 #define MDP_USEDMTRR	0x0008	/* has set volatile MTRRs */
 
 #else /* __x86_64__ */
 
 #include <i386/proc.h>

cvs diff -r1.113 -r1.114 src/sys/arch/i386/i386/genassym.cf

--- src/sys/arch/i386/i386/genassym.cf 2019/03/09 08:42:25 1.113
+++ src/sys/arch/i386/i386/genassym.cf 2019/10/12 06:31:03 1.114
@@ -1,14 +1,14 @@
-#	$NetBSD: genassym.cf,v 1.113 2019/03/09 08:42:25 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.114 2019/10/12 06:31:03 maxv Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 # All rights reserved.
 #
 # This code is derived from software contributed to The NetBSD Foundation
 # by Charles M. Hannum, and by Andrew Doran.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 #    notice, this list of conditions and the following disclaimer.
@@ -155,61 +155,65 @@ define PDIR_SLOT_PTE PDIR_SLOT_PTE
 define	PTE_BASE		PTE_BASE
 
 define	PDP_SIZE		PDP_SIZE
 define	PDE_SIZE		sizeof(pd_entry_t)
 
 define	IOMAPSIZE		IOMAPSIZE
 
 define	VM_MAXUSER_ADDRESS	(int)VM_MAXUSER_ADDRESS
 
 define	L_PCB			offsetof(struct lwp, l_addr)
 define	L_FLAG			offsetof(struct lwp, l_flag)
 define	L_PROC			offsetof(struct lwp, l_proc)
 define	L_MD_REGS		offsetof(struct lwp, l_md.md_regs)
+define	L_MD_FLAGS		offsetof(struct lwp, l_md.md_flags)
 define	L_CTXSWTCH		offsetof(struct lwp, l_ctxswtch)
 define	L_MD_ASTPENDING		offsetof(struct lwp, l_md.md_astpending)
 define	L_CPU			offsetof(struct lwp, l_cpu)
 define	L_NCSW			offsetof(struct lwp, l_ncsw)
 define	L_NOPREEMPT		offsetof(struct lwp, l_nopreempt)
 define	L_DOPREEMPT		offsetof(struct lwp, l_dopreempt)
 define	L_KPRIORITY		offsetof(struct lwp, l_kpriority)
 define	P_FLAG			offsetof(struct proc, p_flag)
 define	P_RASLIST		offsetof(struct proc, p_raslist)
 define	P_MD_SYSCALL		offsetof(struct proc, p_md.md_syscall)
 
+define	MDL_FPU_IN_CPU		MDL_FPU_IN_CPU
+
 define	LW_SYSTEM		LW_SYSTEM
 
 define	GUFS_SEL		GUFS_SEL
 define	GUGS_SEL		GUGS_SEL
 
 define	M_DATA			offsetof(struct mbuf, m_data)
 define	M_LEN			offsetof(struct mbuf, m_len)
 define	M_NEXT			offsetof(struct mbuf, m_next)
 
 define	IP_SRC			offsetof(struct ip, ip_src)
 define	IP_DST			offsetof(struct ip, ip_dst)
 
 define	IP6_SRC			offsetof(struct ip6_hdr, ip6_src)
 define	IP6_DST			offsetof(struct ip6_hdr, ip6_dst)
 
 define	PCB_CR3			offsetof(struct pcb, pcb_cr3)
 define	PCB_EBP			offsetof(struct pcb, pcb_ebp)
 define	PCB_ESP			offsetof(struct pcb, pcb_esp)
 define	PCB_ESP0		offsetof(struct pcb, pcb_esp0)
 define	PCB_CR0			offsetof(struct pcb, pcb_cr0)
 define	PCB_ONFAULT		offsetof(struct pcb, pcb_onfault)
 define	PCB_FSD			offsetof(struct pcb, pcb_fsd)
 define	PCB_GSD			offsetof(struct pcb, pcb_gsd)
 define	PCB_IOMAP		offsetof(struct pcb, pcb_iomap)
+define	PCB_SAVEFPU		offsetof(struct pcb, pcb_savefpu)
 
 define	TF_CS			offsetof(struct trapframe, tf_cs)
 define	TF_EIP			offsetof(struct trapframe, tf_eip)
 define	TF_ERR			offsetof(struct trapframe, tf_err)
 define	TF_TRAPNO		offsetof(struct trapframe, tf_trapno)
 define	TF_EFLAGS		offsetof(struct trapframe, tf_eflags)
 
 define	TF_GS			offsetof(struct trapframe, tf_gs)
 define	TF_FS			offsetof(struct trapframe, tf_fs)
 define	TF_ES			offsetof(struct trapframe, tf_es)
 define	TF_DS			offsetof(struct trapframe, tf_ds)
 define	TF_EDI			offsetof(struct trapframe, tf_edi)
 define	TF_ESI			offsetof(struct trapframe, tf_esi)
@@ -241,27 +245,26 @@ define IH_LEVEL offsetof(struct intrhan
 define	IH_NEXT			offsetof(struct intrhand, ih_next)
 
 define	TSS_TSS			offsetof(struct cpu_tss, tss)
 define	TSS_ESP0		offsetof(struct cpu_tss, tss.tss_esp0)
 define	TSS_IOBASE		offsetof(struct cpu_tss, tss.tss_iobase)
 define	TSS_IOMAP		offsetof(struct cpu_tss, iomap)
 
 define	CPU_INFO_SELF		offsetof(struct cpu_info, ci_self)
 define	CPU_INFO_RESCHED	offsetof(struct cpu_info, ci_want_resched)
 define	CPU_INFO_WANT_PMAPLOAD	offsetof(struct cpu_info, ci_want_pmapload)
 define	CPU_INFO_TLBSTATE	offsetof(struct cpu_info, ci_tlbstate)
 define	TLBSTATE_VALID		TLBSTATE_VALID
 define	CPU_INFO_CURLWP		offsetof(struct cpu_info, ci_curlwp)
-define	CPU_INFO_FPCURLWP	offsetof(struct cpu_info, ci_fpcurlwp)
 define	CPU_INFO_CURLDT		offsetof(struct cpu_info, ci_curldt)
 define	CPU_INFO_IDLELWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_PMAP		offsetof(struct cpu_info, ci_pmap)
 define	CPU_INFO_TSS		offsetof(struct cpu_info, ci_tss)
 define	IOMAP_INVALOFF		IOMAP_INVALOFF
 define	IOMAP_VALIDOFF		IOMAP_VALIDOFF
 define	CPU_INFO_NSYSCALL	offsetof(struct cpu_info, ci_data.cpu_nsyscall)
 define	CPU_INFO_NTRAP		offsetof(struct cpu_info, ci_data.cpu_ntrap)
 define	CPU_INFO_NINTR		offsetof(struct cpu_info, ci_data.cpu_nintr)
 define	CPU_INFO_CURPRIORITY	offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
 define	CPU_INFO_CC_SKEW	offsetof(struct cpu_info, ci_data.cpu_cc_skew)
 
 

cvs diff -r1.19 -r1.20 src/sys/arch/i386/i386/i386_trap.S

--- src/sys/arch/i386/i386/i386_trap.S 2019/10/04 15:28:00 1.19
+++ src/sys/arch/i386/i386/i386_trap.S 2019/10/12 06:31:03 1.20
@@ -1,14 +1,14 @@
-/*	$NetBSD: i386_trap.S,v 1.19 2019/10/04 15:28:00 maxv Exp $	*/
+/*	$NetBSD: i386_trap.S,v 1.20 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright 2002 (c) Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -56,27 +56,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #if 0
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.19 2019/10/04 15:28:00 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.20 2019/10/12 06:31:03 maxv Exp $");
 #endif
 
 /*
  * Trap and fault vector routines
  *
  * On exit from the kernel to user mode, we always need to check for ASTs. In
  * addition, we need to do this atomically; otherwise an interrupt may occur
  * which causes an AST, but it won't get processed until the next kernel entry
  * (possibly the next clock tick). Thus, we disable interrupt before checking,
  * and only enable them again on the final `iret' or before calling the AST
  * handler.
  */
 
@@ -426,47 +426,50 @@ calltrap:
 	STI(%eax)
 	movl	$T_ASTFLT,TF_TRAPNO(%esp)
 	addl	$1,CPUVAR(NTRAP)	/* statistical info */
 	adcl	$0,CPUVAR(NTRAP)+4
 	pushl	%esp
 	call	_C_LABEL(trap)
 	addl	$4,%esp
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 3:	CHECK_DEFERRED_SWITCH
 	jnz	9f
 
 #ifdef XEN
 	STIC(%eax)
-	jz	6f
+	jz	22f
 	call	_C_LABEL(stipending)
 	testl	%eax,%eax
-	jz	6f
+	jz	22f
 	/* process pending interrupts */
 	CLI(%eax)
 	movl	CPUVAR(ILEVEL),%ebx
 	movl	$.Lalltraps_resume,%esi	/* address to resume loop at */
 .Lalltraps_resume:
 	movl	%ebx,%eax		/* get cpl */
 	movl	CPUVAR(XUNMASK)(,%eax,4),%eax
 	andl	CPUVAR(XPENDING),%eax	/* any non-masked bits left? */
-	jz	7f
+	jz	11f
 	bsrl	%eax,%eax
 	btrl	%eax,CPUVAR(XPENDING)
 	movl	CPUVAR(XSOURCES)(,%eax,4),%eax
 	jmp	*IS_RESUME(%eax)
-7:	movl	%ebx,CPUVAR(ILEVEL)	/* restore cpl */
+11:	movl	%ebx,CPUVAR(ILEVEL)	/* restore cpl */
 	jmp	.Lalltraps_checkusr
+22:
 #endif
 
+	HANDLE_DEFERRED_FPU
+
 6:
 #ifdef DIAGNOSTIC
 	cmpl	CPUVAR(ILEVEL),%ebx
 	jne	.Lspl_error
 #endif
 	INTRFASTEXIT
 
 9:	STI(%eax)
 	call	_C_LABEL(pmap_load)
 	jmp	.Lalltraps_checkast	/* re-check ASTs */
 
 #ifdef DIAGNOSTIC
 .Lspl_error:
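
The comment at the top of i386_trap.S (first hunk above) describes the
exit protocol this change extends: ASTs and deferred pmap loads are
re-checked with interrupts disabled, and HANDLE_DEFERRED_FPU now runs
last, still with interrupts off. In C-like form (illustrative only, Xen
branch omitted):

	/* Exit-to-user path of alltraps, as a sketch. */
	for (;;) {
		disable_intr();
		if (curlwp->l_md.md_astpending) {
			curlwp->l_md.md_astpending = 0;
			enable_intr();
			trap(frame);		/* T_ASTFLT */
			continue;		/* re-check ASTs */
		}
		if (curcpu()->ci_want_pmapload) {
			enable_intr();
			pmap_load();
			continue;		/* re-check ASTs */
		}
		handle_deferred_fpu(curlwp);	/* interrupts still off */
		break;				/* INTRFASTEXIT */
	}
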

cvs diff -r1.171 -r1.172 src/sys/arch/i386/i386/locore.S

--- src/sys/arch/i386/i386/locore.S 2019/10/04 15:28:00 1.171
+++ src/sys/arch/i386/i386/locore.S 2019/10/12 06:31:03 1.172
@@ -1,14 +1,14 @@
-/*	$NetBSD: locore.S,v 1.171 2019/10/04 15:28:00 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.172 2019/10/12 06:31:03 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
  */
 
 /*
  * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009, 2016
  *	The NetBSD Foundation, Inc., All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum, by Andrew Doran and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -118,27 +118,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)locore.s	7.3 (Berkeley) 5/13/91
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.171 2019/10/04 15:28:00 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.172 2019/10/12 06:31:03 maxv Exp $");
 
 #include "opt_copy_symtab.h"
 #include "opt_ddb.h"
 #include "opt_modular.h"
 #include "opt_multiboot.h"
 #include "opt_realmem.h"
 #include "opt_xen.h"
 
 #include "assym.h"
 #include "lapic.h"
 #include "ioapic.h"
 #include "ksyms.h"
 
@@ -1177,53 +1177,34 @@ skip_save:
 
 	/* Handle restartable atomic sequences (RAS). */
 	movl	L_MD_REGS(%edi),%ecx
 	pushl	TF_EIP(%ecx)
 	pushl	%eax
 	call	_C_LABEL(ras_lookup)
 	addl	$8,%esp
 	cmpl	$-1,%eax
 	je	no_RAS
 	movl	L_MD_REGS(%edi),%ecx
 	movl	%eax,TF_EIP(%ecx)
 no_RAS:
 
-	/*
-	 * Restore cr0 (including FPU state). Raise the IPL to IPL_HIGH.
-	 * FPU IPIs can alter the LWP's saved cr0. Dropping the priority
-	 * is deferred until mi_switch(), when cpu_switchto() returns.
-	 */
 #ifdef XENPV
 	pushl	%edi
 	call	_C_LABEL(i386_tls_switch)
 	addl	$4,%esp
 #else
+	/* Raise the IPL to IPL_HIGH. Dropping the priority is deferred until
+	 * mi_switch(), when cpu_switchto() returns. XXX Still needed? */
 	movl	$IPL_HIGH,CPUVAR(ILEVEL)
-	movl	PCB_CR0(%ebx),%ecx	/* has CR0_TS clear */
-	movl	%cr0,%edx
-
-	/*
-	 * If our floating point registers are on a different CPU,
-	 * set CR0_TS so we'll trap rather than reuse bogus state.
-	 */
-	cmpl	CPUVAR(FPCURLWP),%edi
-	je	skip_TS
-	orl	$CR0_TS,%ecx
-skip_TS:
-
-	/* Reloading CR0 is very expensive - avoid if possible. */
-	cmpl	%edx,%ecx
-	je	switch_return
-	movl	%ecx,%cr0
 #endif
 
 switch_return:
 	/* Return to the new LWP, returning 'oldlwp' in %eax. */
 	movl	%esi,%eax
 	popl	%edi
 	popl	%esi
 	popl	%ebx
 	ret
 
 .Lcopy_iobitmap:
 	/* Copy I/O bitmap. */
 	incl	_C_LABEL(pmap_iobmp_evcnt)+EV_COUNT
@@ -1312,26 +1293,28 @@ IDTVEC(syscall)
 	btrl	%eax,CPUVAR(XPENDING)
 	movl	CPUVAR(XSOURCES)(,%eax,4),%eax
 	jmp	*IS_RESUME(%eax)
 17:	movl	%ebx, CPUVAR(ILEVEL)	/* restore cpl */
 	jmp	.Lsyscall_checkast
 14:
 #endif /* XEN */
 
 #ifdef DIAGNOSTIC
 	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
 	jne	3f
 #endif
 
+	HANDLE_DEFERRED_FPU
+
 	INTRFASTEXIT
 
 #ifdef DIAGNOSTIC
 3:	STI(%eax)
 	pushl	$4f
 	call	_C_LABEL(panic)
 	addl	$4,%esp
 	pushl	$IPL_NONE
 	call	_C_LABEL(spllower)
 	addl	$4,%esp
 	jmp	.Lsyscall_checkast
 4:	.asciz	"SPL NOT LOWERED ON SYSCALL EXIT\n"
 5:	.asciz	"SPL NOT ZERO ON SYSCALL ENTRY\n"

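The locore.S hunks above are the i386 half of the model change: cpu_switchto() no longer juggles CR0_TS to arm a lazy, trap-driven reload, and the syscall-return path instead runs HANDLE_DEFERRED_FPU right before INTRFASTEXIT. The removed logic amounted to roughly the following C (a sketch of the deleted assembly; the variable names are illustrative):

	/* Old lazy model, now removed: make the first FPU instruction of
	 * an lwp whose state lives elsewhere fault (#NM -> fpudna())
	 * rather than let it see another lwp's registers. */
	uint32_t cr0 = pcb->pcb_cr0;		/* has CR0_TS clear */
	if (newlwp != curcpu()->ci_fpcurlwp)
		cr0 |= CR0_TS;
	if (cr0 != rcr0())			/* %cr0 loads are slow */
		lcr0(cr0);

In the new model the only switch-time FPU work is the eager save in fpu_switch() (see the fpu.c diff below); the restore is deferred to the next return to userland.
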
cvs diff -r1.820 -r1.821 src/sys/arch/i386/i386/machdep.c

--- src/sys/arch/i386/i386/machdep.c 2019/05/19 08:46:15 1.820
+++ src/sys/arch/i386/i386/machdep.c 2019/10/12 06:31:03 1.821
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: machdep.c,v 1.820 2019/05/19 08:46:15 maxv Exp $ */ 1/* $NetBSD: machdep.c,v 1.821 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009, 2017 4 * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009, 2017
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace 9 * by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility NASA Ames Research Center, by Julio M. Merino Vidal, 10 * Simulation Facility NASA Ames Research Center, by Julio M. Merino Vidal,
11 * by Andrew Doran, and by Maxime Villard. 11 * by Andrew Doran, and by Maxime Villard.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -57,27 +57,27 @@ @@ -57,27 +57,27 @@
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE. 64 * SUCH DAMAGE.
65 * 65 *
66 * @(#)machdep.c 7.4 (Berkeley) 6/3/91 66 * @(#)machdep.c 7.4 (Berkeley) 6/3/91
67 */ 67 */
68 68
69#include <sys/cdefs.h> 69#include <sys/cdefs.h>
70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.820 2019/05/19 08:46:15 maxv Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.821 2019/10/12 06:31:03 maxv Exp $");
71 71
72#include "opt_beep.h" 72#include "opt_beep.h"
73#include "opt_compat_freebsd.h" 73#include "opt_compat_freebsd.h"
74#include "opt_compat_netbsd.h" 74#include "opt_compat_netbsd.h"
75#include "opt_cpureset_delay.h" 75#include "opt_cpureset_delay.h"
76#include "opt_ddb.h" 76#include "opt_ddb.h"
77#include "opt_kgdb.h" 77#include "opt_kgdb.h"
78#include "opt_mtrr.h" 78#include "opt_mtrr.h"
79#include "opt_modular.h" 79#include "opt_modular.h"
80#include "opt_multiboot.h" 80#include "opt_multiboot.h"
81#include "opt_multiprocessor.h" 81#include "opt_multiprocessor.h"
82#include "opt_physmem.h" 82#include "opt_physmem.h"
83#include "opt_realmem.h" 83#include "opt_realmem.h"
@@ -499,40 +499,30 @@ i386_switch_context(lwp_t *l) @@ -499,40 +499,30 @@ i386_switch_context(lwp_t *l)
499 499
500 physop.cmd = PHYSDEVOP_SET_IOPL; 500 physop.cmd = PHYSDEVOP_SET_IOPL;
501 physop.u.set_iopl.iopl = pcb->pcb_iopl; 501 physop.u.set_iopl.iopl = pcb->pcb_iopl;
502 HYPERVISOR_physdev_op(&physop); 502 HYPERVISOR_physdev_op(&physop);
503} 503}
504 504
505void 505void
506i386_tls_switch(lwp_t *l) 506i386_tls_switch(lwp_t *l)
507{ 507{
508 struct cpu_info *ci = curcpu(); 508 struct cpu_info *ci = curcpu();
509 struct pcb *pcb = lwp_getpcb(l); 509 struct pcb *pcb = lwp_getpcb(l);
510 510
511 /* 511 /*
512 * Raise the IPL to IPL_HIGH. 512 * Raise the IPL to IPL_HIGH. XXX Still needed?
513 * FPU IPIs can alter the LWP's saved cr0. Dropping the priority 
514 * is deferred until mi_switch(), when cpu_switchto() returns. 
515 */ 513 */
516 (void)splhigh(); 514 (void)splhigh();
517 515
518 /* 
519 * If our floating point registers are on a different CPU, 
520 * set CR0_TS so we'll trap rather than reuse bogus state. 
521 */ 
522 if (l != ci->ci_fpcurlwp) { 
523 HYPERVISOR_fpu_taskswitch(1); 
524 } 
525 
526 /* Update TLS segment pointers */ 516 /* Update TLS segment pointers */
527 update_descriptor(&ci->ci_gdt[GUFS_SEL], 517 update_descriptor(&ci->ci_gdt[GUFS_SEL],
528 (union descriptor *)&pcb->pcb_fsd); 518 (union descriptor *)&pcb->pcb_fsd);
529 update_descriptor(&ci->ci_gdt[GUGS_SEL], 519 update_descriptor(&ci->ci_gdt[GUGS_SEL],
530 (union descriptor *)&pcb->pcb_gsd); 520 (union descriptor *)&pcb->pcb_gsd);
531} 521}
532#endif /* XENPV */ 522#endif /* XENPV */
533 523
534/* XXX */ 524/* XXX */
535#define IDTVEC(name) __CONCAT(X, name) 525#define IDTVEC(name) __CONCAT(X, name)
536typedef void (vector)(void); 526typedef void (vector)(void);
537 527
538#ifndef XENPV 528#ifndef XENPV

cvs diff -r1.48 -r1.49 src/sys/arch/i386/i386/spl.S

--- src/sys/arch/i386/i386/spl.S 2019/10/04 15:28:00 1.48
+++ src/sys/arch/i386/i386/spl.S 2019/10/12 06:31:03 1.49
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: spl.S,v 1.48 2019/10/04 15:28:00 maxv Exp $ */ 1/* $NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and Andrew Doran. 8 * by Charles M. Hannum and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <machine/asm.h> 32#include <machine/asm.h>
33__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.48 2019/10/04 15:28:00 maxv Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: spl.S,v 1.49 2019/10/12 06:31:03 maxv Exp $");
34 34
35#include "opt_ddb.h" 35#include "opt_ddb.h"
36#include "opt_spldebug.h" 36#include "opt_spldebug.h"
37#include "opt_xen.h" 37#include "opt_xen.h"
38 38
39#include <machine/trap.h> 39#include <machine/trap.h>
40#include <machine/segments.h> 40#include <machine/segments.h>
41#include <machine/frameasm.h> 41#include <machine/frameasm.h>
42 42
43#include "assym.h" 43#include "assym.h"
44 44
45 .text 45 .text
46 46
@@ -316,26 +316,27 @@ LABEL(doreti_checkast) @@ -316,26 +316,27 @@ LABEL(doreti_checkast)
316 STI(%eax) 316 STI(%eax)
317 movl $T_ASTFLT,TF_TRAPNO(%esp) /* XXX undo later.. */ 317 movl $T_ASTFLT,TF_TRAPNO(%esp) /* XXX undo later.. */
318 /* Pushed T_ASTFLT into tf_trapno on entry. */ 318 /* Pushed T_ASTFLT into tf_trapno on entry. */
319 pushl %esp 319 pushl %esp
320 call _C_LABEL(trap) 320 call _C_LABEL(trap)
321 addl $4,%esp 321 addl $4,%esp
322 CLI(%eax) 322 CLI(%eax)
323 jmp 5b 323 jmp 5b
324END(doreti_checkast) 324END(doreti_checkast)
325 325
3263: 3263:
327 CHECK_DEFERRED_SWITCH 327 CHECK_DEFERRED_SWITCH
328 jnz 9f 328 jnz 9f
 329 HANDLE_DEFERRED_FPU
3296: 3306:
330#ifdef XEN 331#ifdef XEN
331 STIC(%eax) 332 STIC(%eax)
332 jz 4f 333 jz 4f
333 call _C_LABEL(stipending) 334 call _C_LABEL(stipending)
334 testl %eax,%eax 335 testl %eax,%eax
335 jz 4f 336 jz 4f
336 CLI(%eax) 337 CLI(%eax)
337 jmp .Ldoreti_resume_stic 338 jmp .Ldoreti_resume_stic
3384: 3394:
339#endif 340#endif
340 INTRFASTEXIT 341 INTRFASTEXIT
3419: 3429:

cvs diff -r1.28 -r1.29 src/sys/arch/i386/include/frameasm.h

--- src/sys/arch/i386/include/frameasm.h 2019/02/14 08:18:25 1.28
+++ src/sys/arch/i386/include/frameasm.h 2019/10/12 06:31:03 1.29
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: frameasm.h,v 1.28 2019/02/14 08:18:25 cherry Exp $ */ 1/* $NetBSD: frameasm.h,v 1.29 2019/10/12 06:31:03 maxv Exp $ */
2 2
3#ifndef _I386_FRAMEASM_H_ 3#ifndef _I386_FRAMEASM_H_
4#define _I386_FRAMEASM_H_ 4#define _I386_FRAMEASM_H_
5 5
6#ifdef _KERNEL_OPT 6#ifdef _KERNEL_OPT
7#include "opt_multiprocessor.h" 7#include "opt_multiprocessor.h"
8#include "opt_xen.h" 8#include "opt_xen.h"
9#endif 9#endif
10 10
11 11
12#ifdef XEN 12#ifdef XEN
13/* XXX assym.h */ 13/* XXX assym.h */
14#define TRAP_INSTR int $0x82 14#define TRAP_INSTR int $0x82
@@ -89,26 +89,42 @@ @@ -89,26 +89,42 @@
89#define INTR_RECURSE_HWFRAME \ 89#define INTR_RECURSE_HWFRAME \
90 pushfl ; \ 90 pushfl ; \
91 pushl %cs ; \ 91 pushl %cs ; \
92 pushl %esi ; 92 pushl %esi ;
93 93
94#define CHECK_DEFERRED_SWITCH \ 94#define CHECK_DEFERRED_SWITCH \
95 cmpl $0, CPUVAR(WANT_PMAPLOAD) 95 cmpl $0, CPUVAR(WANT_PMAPLOAD)
96 96
97#define CHECK_ASTPENDING(reg) movl CPUVAR(CURLWP),reg ; \ 97#define CHECK_ASTPENDING(reg) movl CPUVAR(CURLWP),reg ; \
98 cmpl $0, L_MD_ASTPENDING(reg) 98 cmpl $0, L_MD_ASTPENDING(reg)
99#define CLEAR_ASTPENDING(reg) movl $0, L_MD_ASTPENDING(reg) 99#define CLEAR_ASTPENDING(reg) movl $0, L_MD_ASTPENDING(reg)
100 100
101/* 101/*
 102 * If the FPU state is not in the CPU, restore it. Executed with interrupts
 103 * disabled.
 104 *
 105 * %ebx must not be modified.
 106 */
 107#define HANDLE_DEFERRED_FPU \
 108 movl CPUVAR(CURLWP),%eax ; \
 109 testl $MDL_FPU_IN_CPU,L_MD_FLAGS(%eax) ; \
 110 jnz 1f ; \
 111 pushl %eax ; \
 112 call _C_LABEL(fpu_handle_deferred) ; \
 113 popl %eax ; \
 114 orl $MDL_FPU_IN_CPU,L_MD_FLAGS(%eax) ; \
 1151:
 116
 117/*
102 * IDEPTH_INCR: 118 * IDEPTH_INCR:
103 * increase ci_idepth and switch to the interrupt stack if necessary. 119 * increase ci_idepth and switch to the interrupt stack if necessary.
104 * note that the initial value of ci_idepth is -1. 120 * note that the initial value of ci_idepth is -1.
105 * 121 *
106 * => should be called with interrupt disabled. 122 * => should be called with interrupt disabled.
107 * => save the old value of %esp in %eax. 123 * => save the old value of %esp in %eax.
108 */ 124 */
109 125
110#define IDEPTH_INCR \ 126#define IDEPTH_INCR \
111 incl CPUVAR(IDEPTH); \ 127 incl CPUVAR(IDEPTH); \
112 movl %esp, %eax; \ 128 movl %esp, %eax; \
113 jne 999f; \ 129 jne 999f; \
114 movl CPUVAR(INTRSTACK), %esp; \ 130 movl CPUVAR(INTRSTACK), %esp; \

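HANDLE_DEFERRED_FPU above is the restore half of the new scheme; it runs with interrupts disabled on every return to userland (the doreti path in spl.S and the syscall exit in locore.S, both patched above). In C the macro would read roughly as below; the flag update is kept in the assembly around the call so that fpu_handle_deferred() itself stays trivial:

	/* Sketch of HANDLE_DEFERRED_FPU in C. Interrupts are off, so
	 * curlwp cannot migrate between the test and the restore. */
	struct lwp *l = curlwp;

	if ((l->l_md.md_flags & MDL_FPU_IN_CPU) == 0) {
		fpu_handle_deferred();	/* clts + frstor/fxrstor/xrstor */
		l->l_md.md_flags |= MDL_FPU_IN_CPU;
	}

The "%ebx must not be modified" constraint comes from the callers: the exit paths keep the cpl to restore in %ebx (cf. the movl %ebx, CPUVAR(ILEVEL) in the syscall hunk above).
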
cvs diff -r1.58 -r1.59 src/sys/arch/i386/include/pcb.h

--- src/sys/arch/i386/include/pcb.h 2018/07/26 09:29:08 1.58
+++ src/sys/arch/i386/include/pcb.h 2019/10/12 06:31:03 1.59
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pcb.h,v 1.58 2018/07/26 09:29:08 maxv Exp $ */ 1/* $NetBSD: pcb.h,v 1.59 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -89,24 +89,23 @@ struct pcb { @@ -89,24 +89,23 @@ struct pcb {
89 struct segment_descriptor pcb_fsd; /* %fs descriptor */ 89 struct segment_descriptor pcb_fsd; /* %fs descriptor */
90 struct segment_descriptor pcb_gsd; /* %gs descriptor */ 90 struct segment_descriptor pcb_gsd; /* %gs descriptor */
91 void *pcb_onfault; /* copyin/out fault recovery */ 91 void *pcb_onfault; /* copyin/out fault recovery */
92 char *pcb_iomap; /* I/O permission bitmap */ 92 char *pcb_iomap; /* I/O permission bitmap */
93 struct dbreg *pcb_dbregs; /* CPU Debug Registers */ 93 struct dbreg *pcb_dbregs; /* CPU Debug Registers */
94 uint16_t pcb_fpu_dflt_cw; 94 uint16_t pcb_fpu_dflt_cw;
95 95
96#define PCB_DBREGS 0x01 96#define PCB_DBREGS 0x01
97 int pcb_flags; 97 int pcb_flags;
98 98
99 int not_used[15]; 99 int not_used[15];
100 100
101 /* floating point state */ 101 /* floating point state */
102 struct cpu_info *pcb_fpcpu; /* cpu holding our fp state. */ 
103 union savefpu pcb_savefpu __aligned(64); 102 union savefpu pcb_savefpu __aligned(64);
104 /* **** DO NOT ADD ANYTHING HERE **** */ 103 /* **** DO NOT ADD ANYTHING HERE **** */
105 104
106}; 105};
107#ifndef __lint__ 106#ifndef __lint__
108/* This doesn't really matter, but there is a lot of implied padding */ 107/* This doesn't really matter, but there is a lot of implied padding */
109__CTASSERT(sizeof(struct pcb) - sizeof (union savefpu) == 128); 108__CTASSERT(sizeof(struct pcb) - sizeof (union savefpu) == 128);
110#endif 109#endif
111 110
112#endif /* _I386_PCB_H_ */ 111#endif /* _I386_PCB_H_ */

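Note that the __CTASSERT above survives the removal of a pointer from struct pcb: pcb_savefpu is __aligned(64), so the 4 bytes the pointer occupied are re-absorbed as alignment padding, which is what the "implied padding" comment alludes to. The effect can be checked in isolation (stand-alone C for illustration only; the 116-byte header is an arbitrary stand-in, not the real pcb layout):

	#include <assert.h>
	#include <stddef.h>

	struct before { char hdr[116]; void *fpcpu; _Alignas(64) char fpu[64]; };
	struct after  { char hdr[116];              _Alignas(64) char fpu[64]; };

	int
	main(void)
	{
		/* the 64-byte alignment swallows the removed pointer */
		assert(offsetof(struct before, fpu) ==
		    offsetof(struct after, fpu));
		return 0;
	}
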
cvs diff -r1.45 -r1.46 src/sys/arch/i386/include/proc.h

--- src/sys/arch/i386/include/proc.h 2017/02/25 13:34:21 1.45
+++ src/sys/arch/i386/include/proc.h 2019/10/12 06:31:03 1.46
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: proc.h,v 1.45 2017/02/25 13:34:21 kamil Exp $ */ 1/* $NetBSD: proc.h,v 1.46 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1991 Regents of the University of California. 4 * Copyright (c) 1991 Regents of the University of California.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -33,26 +33,28 @@ @@ -33,26 +33,28 @@
33 33
34#ifndef _I386_PROC_H_ 34#ifndef _I386_PROC_H_
35#define _I386_PROC_H_ 35#define _I386_PROC_H_
36 36
37#include <machine/frame.h> 37#include <machine/frame.h>
38#include <machine/pcb.h> 38#include <machine/pcb.h>
39 39
40/* 40/*
41 * Machine-dependent part of the lwp structure for i386. 41 * Machine-dependent part of the lwp structure for i386.
42 */ 42 */
43struct pmap; 43struct pmap;
44struct vm_page; 44struct vm_page;
45 45
 46#define MDL_FPU_IN_CPU 0x0020 /* the FPU state is in the CPU */
 47
46struct mdlwp { 48struct mdlwp {
47 struct trapframe *md_regs; /* registers on current frame */ 49 struct trapframe *md_regs; /* registers on current frame */
48 int md_flags; /* machine-dependent flags */ 50 int md_flags; /* machine-dependent flags */
49 volatile int md_astpending; /* AST pending for this process */ 51 volatile int md_astpending; /* AST pending for this process */
50 struct pmap *md_gc_pmap; /* pmap being garbage collected */ 52 struct pmap *md_gc_pmap; /* pmap being garbage collected */
51 struct vm_page *md_gc_ptp; /* pages from pmap g/c */ 53 struct vm_page *md_gc_ptp; /* pages from pmap g/c */
52}; 54};
53 55
54/* md_flags */ 56/* md_flags */
55#define MDL_IOPL 0x0002 /* XEN: i/o privilege */ 57#define MDL_IOPL 0x0002 /* XEN: i/o privilege */
56 58
57struct mdproc { 59struct mdproc {
58 int md_flags; 60 int md_flags;

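MDL_FPU_IN_CPU is the single piece of bookkeeping that replaces both ci_fpcurlwp and pcb_fpcpu: set while the lwp's FPU state is live in the CPU registers, clear while the authoritative copy sits in pcb_savefpu. Since the flag is only ever set on curlwp with preemption disabled, cross-CPU ownership can no longer arise. The canonical guarded access is the new fpu_save() (fpu.c diff below), whose core is:

	struct lwp *l = curlwp;
	struct pcb *pcb = lwp_getpcb(l);

	kpreempt_disable();			/* pin to this CPU */
	if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
		fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features);
		l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
	}
	kpreempt_enable();
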
cvs diff -r1.50 -r1.51 src/sys/arch/x86/acpi/acpi_wakeup.c

--- src/sys/arch/x86/acpi/acpi_wakeup.c 2019/06/17 16:34:02 1.50
+++ src/sys/arch/x86/acpi/acpi_wakeup.c 2019/10/12 06:31:03 1.51
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: acpi_wakeup.c,v 1.50 2019/06/17 16:34:02 jmcneill Exp $ */ 1/* $NetBSD: acpi_wakeup.c,v 1.51 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Takuya SHIOZAKI. 8 * by Takuya SHIOZAKI.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -49,27 +49,27 @@ @@ -49,27 +49,27 @@
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 50 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 51 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 52 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE. 56 * SUCH DAMAGE.
57 * 57 *
58 * FreeBSD: src/sys/i386/acpica/acpi_wakeup.c,v 1.9 2002/01/10 03:26:46 wes Exp 58 * FreeBSD: src/sys/i386/acpica/acpi_wakeup.c,v 1.9 2002/01/10 03:26:46 wes Exp
59 */ 59 */
60 60
61#include <sys/cdefs.h> 61#include <sys/cdefs.h>
62__KERNEL_RCSID(0, "$NetBSD: acpi_wakeup.c,v 1.50 2019/06/17 16:34:02 jmcneill Exp $"); 62__KERNEL_RCSID(0, "$NetBSD: acpi_wakeup.c,v 1.51 2019/10/12 06:31:03 maxv Exp $");
63 63
64#include <sys/param.h> 64#include <sys/param.h>
65#include <sys/systm.h> 65#include <sys/systm.h>
66#include <sys/kernel.h> 66#include <sys/kernel.h>
67#include <sys/bus.h> 67#include <sys/bus.h>
68#include <sys/cpu.h> 68#include <sys/cpu.h>
69#include <sys/kcpuset.h> 69#include <sys/kcpuset.h>
70#include <sys/sysctl.h> 70#include <sys/sysctl.h>
71 71
72#include <uvm/uvm_extern.h> 72#include <uvm/uvm_extern.h>
73#include <uvm/uvm_page.h> 73#include <uvm/uvm_page.h>
74 74
75#ifdef __i386__ 75#ifdef __i386__
@@ -239,27 +239,27 @@ acpi_md_sleep_enter(int state) @@ -239,27 +239,27 @@ acpi_md_sleep_enter(int state)
239} 239}
240 240
241#ifdef MULTIPROCESSOR 241#ifdef MULTIPROCESSOR
242void 242void
243acpi_cpu_sleep(struct cpu_info *ci) 243acpi_cpu_sleep(struct cpu_info *ci)
244{ 244{
245 uint64_t xcr0 = 0; 245 uint64_t xcr0 = 0;
246 int s; 246 int s;
247 247
248 KASSERT(!CPU_IS_PRIMARY(ci)); 248 KASSERT(!CPU_IS_PRIMARY(ci));
249 KASSERT(ci == curcpu()); 249 KASSERT(ci == curcpu());
250 250
251 s = splhigh(); 251 s = splhigh();
252 fpusave_cpu(true); 252 fpu_save();
253 x86_disable_intr(); 253 x86_disable_intr();
254 254
255 /* 255 /*
256 * XXX also need to save the PMCs, the dbregs, and probably a few 256 * XXX also need to save the PMCs, the dbregs, and probably a few
257 * MSRs too. 257 * MSRs too.
258 */ 258 */
259 if (rcr4() & CR4_OSXSAVE) 259 if (rcr4() & CR4_OSXSAVE)
260 xcr0 = rdxcr(0); 260 xcr0 = rdxcr(0);
261 261
262 /* Go get some sleep */ 262 /* Go get some sleep */
263 if (acpi_md_sleep_prepare(-1)) 263 if (acpi_md_sleep_prepare(-1))
264 goto out; 264 goto out;
265 265
@@ -303,27 +303,27 @@ acpi_md_sleep(int state) @@ -303,27 +303,27 @@ acpi_md_sleep(int state)
303#endif 303#endif
304 304
305 KASSERT(acpi_wakeup_paddr != 0); 305 KASSERT(acpi_wakeup_paddr != 0);
306 KASSERT(sizeof(wakecode) <= PAGE_SIZE); 306 KASSERT(sizeof(wakecode) <= PAGE_SIZE);
307 307
308 if (!CPU_IS_PRIMARY(curcpu())) { 308 if (!CPU_IS_PRIMARY(curcpu())) {
309 printf("acpi0: WARNING: ignoring sleep from secondary CPU\n"); 309 printf("acpi0: WARNING: ignoring sleep from secondary CPU\n");
310 return -1; 310 return -1;
311 } 311 }
312 312
313 AcpiSetFirmwareWakingVector(acpi_wakeup_paddr, 0); 313 AcpiSetFirmwareWakingVector(acpi_wakeup_paddr, 0);
314 314
315 s = splhigh(); 315 s = splhigh();
316 fpusave_cpu(true); 316 fpu_save();
317 x86_disable_intr(); 317 x86_disable_intr();
318 318
319#ifdef MULTIPROCESSOR 319#ifdef MULTIPROCESSOR
320 /* Save and suspend Application Processors. */ 320 /* Save and suspend Application Processors. */
321 x86_broadcast_ipi(X86_IPI_ACPI_CPU_SLEEP); 321 x86_broadcast_ipi(X86_IPI_ACPI_CPU_SLEEP);
322 cid = cpu_index(curcpu()); 322 cid = cpu_index(curcpu());
323 while (kcpuset_isotherset(kcpuset_running, cid)) { 323 while (kcpuset_isotherset(kcpuset_running, cid)) {
324 delay(1); 324 delay(1);
325 } 325 }
326#endif 326#endif
327 327
328 /* 328 /*
329 * XXX also need to save the PMCs, the dbregs, and probably a few 329 * XXX also need to save the PMCs, the dbregs, and probably a few

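In the suspend paths the shape of the sequence is unchanged: raise the SPL, flush the FPU, disable interrupts. Only the primitive differs, fpusave_cpu(true) ("save whichever lwp owns this CPU's FPU") becoming fpu_save() ("save curlwp's state if it is live"). Condensed, both acpi_cpu_sleep() and acpi_md_sleep() now follow this pattern:

	s = splhigh();
	fpu_save();		/* curlwp's live state back to its PCB */
	x86_disable_intr();
	/* ... save xcr0 and MSRs, sleep, wake, restore ... */
	splx(s);		/* the FPU state comes back lazily, via
				 * HANDLE_DEFERRED_FPU on kernel exit */
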
cvs diff -r1.109 -r1.110 src/sys/arch/x86/include/cpu.h

--- src/sys/arch/x86/include/cpu.h 2019/10/03 05:06:29 1.109
+++ src/sys/arch/x86/include/cpu.h 2019/10/12 06:31:03 1.110
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.109 2019/10/03 05:06:29 maxv Exp $ */ 1/* $NetBSD: cpu.h,v 1.110 2019/10/12 06:31:03 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1990 The Regents of the University of California. 4 * Copyright (c) 1990 The Regents of the University of California.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz. 8 * William Jolitz.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -109,27 +109,26 @@ struct clockframe { @@ -109,27 +109,26 @@ struct clockframe {
109struct cpu_info { 109struct cpu_info {
110 struct cpu_data ci_data; /* MI per-cpu data */ 110 struct cpu_data ci_data; /* MI per-cpu data */
111 device_t ci_dev; /* pointer to our device */ 111 device_t ci_dev; /* pointer to our device */
112 struct cpu_info *ci_self; /* self-pointer */ 112 struct cpu_info *ci_self; /* self-pointer */
113#ifdef XEN 113#ifdef XEN
114 volatile struct vcpu_info *ci_vcpu; /* for XEN */ 114 volatile struct vcpu_info *ci_vcpu; /* for XEN */
115#endif 115#endif
116 116
117 /* 117 /*
118 * Will be accessed by other CPUs. 118 * Will be accessed by other CPUs.
119 */ 119 */
120 struct cpu_info *ci_next; /* next cpu */ 120 struct cpu_info *ci_next; /* next cpu */
121 struct lwp *ci_curlwp; /* current owner of the processor */ 121 struct lwp *ci_curlwp; /* current owner of the processor */
122 struct lwp *ci_fpcurlwp; /* current owner of the FPU */ 
123 cpuid_t ci_cpuid; /* our CPU ID */ 122 cpuid_t ci_cpuid; /* our CPU ID */
124 uint32_t ci_acpiid; /* our ACPI/MADT ID */ 123 uint32_t ci_acpiid; /* our ACPI/MADT ID */
125 uint32_t ci_initapicid; /* our initial APIC ID */ 124 uint32_t ci_initapicid; /* our initial APIC ID */
126 125
127 /* 126 /*
128 * Private members. 127 * Private members.
129 */ 128 */
130 struct pmap *ci_pmap; /* current pmap */ 129 struct pmap *ci_pmap; /* current pmap */
131 int ci_want_pmapload; /* pmap_load() is needed */ 130 int ci_want_pmapload; /* pmap_load() is needed */
132 volatile int ci_tlbstate; /* one of TLBSTATE_ states. see below */ 131 volatile int ci_tlbstate; /* one of TLBSTATE_ states. see below */
133#define TLBSTATE_VALID 0 /* all user tlbs are valid */ 132#define TLBSTATE_VALID 0 /* all user tlbs are valid */
134#define TLBSTATE_LAZY 1 /* tlbs are valid but won't be kept uptodate */ 133#define TLBSTATE_LAZY 1 /* tlbs are valid but won't be kept uptodate */
135#define TLBSTATE_STALE 2 /* we might have stale user tlbs */ 134#define TLBSTATE_STALE 2 /* we might have stale user tlbs */

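With ci_fpcurlwp gone, a CPU never holds FPU state for anything but its current lwp, so the "fetch my registers back from another CPU" dance disappears, and with it the XEN_IPI_SYNCH_FPU / X86_IPI_SYNCH_FPU round-trips visible in the removed fpusave_lwp() further down. The replacement rule is the new fpu_lwp_area() in the fpu.c diff below; in essence:

	/* After this, l's FPU state is in memory, whoever l is. */
	struct pcb *pcb = lwp_getpcb(l);

	if (l == curlwp)
		fpu_save();	/* flush our own live registers */
	/* any other l must be stopped by the caller; fpu_switch()
	 * already saved its state to the PCB when it last ran */
	return &pcb->pcb_savefpu;
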
cvs diff -r1.18 -r1.19 src/sys/arch/x86/include/fpu.h

--- src/sys/arch/x86/include/fpu.h 2019/10/04 11:47:08 1.18
+++ src/sys/arch/x86/include/fpu.h 2019/10/12 06:31:03 1.19
@@ -1,44 +1,44 @@ @@ -1,44 +1,44 @@
1/* $NetBSD: fpu.h,v 1.18 2019/10/04 11:47:08 maxv Exp $ */ 1/* $NetBSD: fpu.h,v 1.19 2019/10/12 06:31:03 maxv Exp $ */
2 2
3#ifndef _X86_FPU_H_ 3#ifndef _X86_FPU_H_
4#define _X86_FPU_H_ 4#define _X86_FPU_H_
5 5
6#include <x86/cpu_extended_state.h> 6#include <x86/cpu_extended_state.h>
7 7
8#ifdef _KERNEL 8#ifdef _KERNEL
9 9
10struct cpu_info; 10struct cpu_info;
11struct lwp; 11struct lwp;
12struct trapframe; 12struct trapframe;
13 13
14void fpuinit(struct cpu_info *); 14void fpuinit(struct cpu_info *);
15void fpuinit_mxcsr_mask(void); 15void fpuinit_mxcsr_mask(void);
16 16
17void fpu_area_save(void *, uint64_t); 17void fpu_area_save(void *, uint64_t);
18void fpu_area_restore(void *, uint64_t); 18void fpu_area_restore(void *, uint64_t);
19 19
20void fpusave_lwp(struct lwp *, bool); 20void fpu_save(void);
21void fpusave_cpu(bool); 
22 21
23void fpu_set_default_cw(struct lwp *, unsigned int); 22void fpu_set_default_cw(struct lwp *, unsigned int);
24 23
25void fputrap(struct trapframe *); 24void fputrap(struct trapframe *);
26void fpudna(struct trapframe *); 25void fpudna(struct trapframe *);
27 26
28void fpu_clear(struct lwp *, unsigned int); 27void fpu_clear(struct lwp *, unsigned int);
29void fpu_sigreset(struct lwp *); 28void fpu_sigreset(struct lwp *);
30 29
31void fpu_save_area_fork(struct pcb *, const struct pcb *); 30void fpu_lwp_fork(struct lwp *, struct lwp *);
 31void fpu_lwp_abandon(struct lwp *);
32 32
33void process_write_fpregs_xmm(struct lwp *, const struct fxsave *); 33void process_write_fpregs_xmm(struct lwp *, const struct fxsave *);
34void process_write_fpregs_s87(struct lwp *, const struct save87 *); 34void process_write_fpregs_s87(struct lwp *, const struct save87 *);
35 35
36void process_read_fpregs_xmm(struct lwp *, struct fxsave *); 36void process_read_fpregs_xmm(struct lwp *, struct fxsave *);
37void process_read_fpregs_s87(struct lwp *, struct save87 *); 37void process_read_fpregs_s87(struct lwp *, struct save87 *);
38 38
39int process_read_xstate(struct lwp *, struct xstate *); 39int process_read_xstate(struct lwp *, struct xstate *);
40int process_verify_xstate(const struct xstate *); 40int process_verify_xstate(const struct xstate *);
41int process_write_xstate(struct lwp *, const struct xstate *); 41int process_write_xstate(struct lwp *, const struct xstate *);
42 42
43#endif 43#endif
44 44

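The header diff is the API summary: fpusave_lwp() and fpusave_cpu() are replaced by fpu_save(), which only ever operates on curlwp, while fork gets the dedicated fpu_lwp_fork() entry point and fpu_lwp_abandon() discards state outright. For callers the conversion is mechanical; where fork code previously forced the parent's state to memory and copied the raw save area, a single call now suffices (a sketch; the vm_machdep.c hunk itself is not part of this excerpt):

	/* before */
	fpusave_lwp(l1, true);			/* force state to memory */
	fpu_save_area_fork(pcb2, pcb1);

	/* after: fpu_lwp_fork() flushes l1 via fpu_lwp_area() and
	 * memcpy()s the save area into l2's PCB (fpu.c diff below) */
	fpu_lwp_fork(l1, l2);
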
cvs diff -r1.172 -r1.173 src/sys/arch/x86/x86/cpu.c

--- src/sys/arch/x86/x86/cpu.c 2019/08/30 07:53:47 1.172
+++ src/sys/arch/x86/x86/cpu.c 2019/10/12 06:31:04 1.173
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.172 2019/08/30 07:53:47 mrg Exp $ */ 1/* $NetBSD: cpu.c,v 1.173 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc. 4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran. 8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -52,27 +52,27 @@ @@ -52,27 +52,27 @@
52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE. 61 * SUCH DAMAGE.
62 */ 62 */
63 63
64#include <sys/cdefs.h> 64#include <sys/cdefs.h>
65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.172 2019/08/30 07:53:47 mrg Exp $"); 65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.173 2019/10/12 06:31:04 maxv Exp $");
66 66
67#include "opt_ddb.h" 67#include "opt_ddb.h"
68#include "opt_mpbios.h" /* for MPDEBUG */ 68#include "opt_mpbios.h" /* for MPDEBUG */
69#include "opt_mtrr.h" 69#include "opt_mtrr.h"
70#include "opt_multiprocessor.h" 70#include "opt_multiprocessor.h"
71#include "opt_svs.h" 71#include "opt_svs.h"
72 72
73#include "lapic.h" 73#include "lapic.h"
74#include "ioapic.h" 74#include "ioapic.h"
75 75
76#include <sys/param.h> 76#include <sys/param.h>
77#include <sys/proc.h> 77#include <sys/proc.h>
78#include <sys/systm.h> 78#include <sys/systm.h>
@@ -976,35 +976,34 @@ cpu_hatch(void *v) @@ -976,35 +976,34 @@ cpu_hatch(void *v)
976 */ 976 */
977void 977void
978cpu_debug_dump(void) 978cpu_debug_dump(void)
979{ 979{
980 struct cpu_info *ci; 980 struct cpu_info *ci;
981 CPU_INFO_ITERATOR cii; 981 CPU_INFO_ITERATOR cii;
982 const char sixtyfour64space[] =  982 const char sixtyfour64space[] =
983#ifdef _LP64 983#ifdef _LP64
984 " " 984 " "
985#endif 985#endif
986 ""; 986 "";
987 987
988 db_printf("addr %sdev id flags ipis curlwp " 988 db_printf("addr %sdev id flags ipis curlwp "
989 "fpcurlwp\n", sixtyfour64space); 989 "\n", sixtyfour64space);
990 for (CPU_INFO_FOREACH(cii, ci)) { 990 for (CPU_INFO_FOREACH(cii, ci)) {
991 db_printf("%p %s %ld %x %x %10p %10p\n", 991 db_printf("%p %s %ld %x %x %10p\n",
992 ci, 992 ci,
993 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev), 993 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
994 (long)ci->ci_cpuid, 994 (long)ci->ci_cpuid,
995 ci->ci_flags, ci->ci_ipis, 995 ci->ci_flags, ci->ci_ipis,
996 ci->ci_curlwp, 996 ci->ci_curlwp);
997 ci->ci_fpcurlwp); 
998 } 997 }
999} 998}
1000#endif 999#endif
1001 1000
1002#ifdef MULTIPROCESSOR 1001#ifdef MULTIPROCESSOR
1003#if NLAPIC > 0 1002#if NLAPIC > 0
1004static void 1003static void
1005cpu_copy_trampoline(paddr_t pdir_pa) 1004cpu_copy_trampoline(paddr_t pdir_pa)
1006{ 1005{
1007 extern uint32_t nox_flag; 1006 extern uint32_t nox_flag;
1008 extern u_char cpu_spinup_trampoline[]; 1007 extern u_char cpu_spinup_trampoline[];
1009 extern u_char cpu_spinup_trampoline_end[]; 1008 extern u_char cpu_spinup_trampoline_end[];
1010 vaddr_t mp_trampoline_vaddr; 1009 vaddr_t mp_trampoline_vaddr;
@@ -1149,31 +1148,27 @@ cpu_init_msrs(struct cpu_info *ci, bool  @@ -1149,31 +1148,27 @@ cpu_init_msrs(struct cpu_info *ci, bool
1149 wrmsr(MSR_FSBASE, 0); 1148 wrmsr(MSR_FSBASE, 0);
1150 wrmsr(MSR_GSBASE, (uint64_t)ci); 1149 wrmsr(MSR_GSBASE, (uint64_t)ci);
1151 wrmsr(MSR_KERNELGSBASE, 0); 1150 wrmsr(MSR_KERNELGSBASE, 0);
1152 } 1151 }
1153#endif /* __x86_64__ */ 1152#endif /* __x86_64__ */
1154 1153
1155 if (cpu_feature[2] & CPUID_NOX) 1154 if (cpu_feature[2] & CPUID_NOX)
1156 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE); 1155 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
1157} 1156}
1158 1157
1159void 1158void
1160cpu_offline_md(void) 1159cpu_offline_md(void)
1161{ 1160{
1162 int s; 1161 return;
1163 
1164 s = splhigh(); 
1165 fpusave_cpu(true); 
1166 splx(s); 
1167} 1162}
1168 1163
1169/* XXX joerg restructure and restart CPUs individually */ 1164/* XXX joerg restructure and restart CPUs individually */
1170static bool 1165static bool
1171cpu_stop(device_t dv) 1166cpu_stop(device_t dv)
1172{ 1167{
1173 struct cpu_softc *sc = device_private(dv); 1168 struct cpu_softc *sc = device_private(dv);
1174 struct cpu_info *ci = sc->sc_info; 1169 struct cpu_info *ci = sc->sc_info;
1175 int err; 1170 int err;
1176 1171
1177 KASSERT((ci->ci_flags & CPUF_PRESENT) != 0); 1172 KASSERT((ci->ci_flags & CPUF_PRESENT) != 0);
1178 1173
1179 if ((ci->ci_flags & CPUF_PRIMARY) != 0) 1174 if ((ci->ci_flags & CPUF_PRIMARY) != 0)

cvs diff -r1.57 -r1.58 src/sys/arch/x86/x86/fpu.c

--- src/sys/arch/x86/x86/fpu.c 2019/10/04 11:47:08 1.57
+++ src/sys/arch/x86/x86/fpu.c 2019/10/12 06:31:04 1.58
@@ -1,21 +1,21 @@ @@ -1,21 +1,21 @@
1/* $NetBSD: fpu.c,v 1.57 2019/10/04 11:47:08 maxv Exp $ */ 1/* $NetBSD: fpu.c,v 1.58 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2008 The NetBSD Foundation, Inc. All 4 * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc. All
5 * rights reserved. 5 * rights reserved.
6 * 6 *
7 * This code is derived from software developed for The NetBSD Foundation 7 * This code is derived from software developed for The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran and Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
@@ -86,27 +86,27 @@ @@ -86,27 +86,27 @@
86 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 86 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
87 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 87 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
88 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 88 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
89 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 89 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
90 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 90 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
91 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 91 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
92 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 92 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
93 * SUCH DAMAGE. 93 * SUCH DAMAGE.
94 * 94 *
95 * @(#)npx.c 7.2 (Berkeley) 5/12/91 95 * @(#)npx.c 7.2 (Berkeley) 5/12/91
96 */ 96 */
97 97
98#include <sys/cdefs.h> 98#include <sys/cdefs.h>
99__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.57 2019/10/04 11:47:08 maxv Exp $"); 99__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.58 2019/10/12 06:31:04 maxv Exp $");
100 100
101#include "opt_multiprocessor.h" 101#include "opt_multiprocessor.h"
102 102
103#include <sys/param.h> 103#include <sys/param.h>
104#include <sys/systm.h> 104#include <sys/systm.h>
105#include <sys/conf.h> 105#include <sys/conf.h>
106#include <sys/cpu.h> 106#include <sys/cpu.h>
107#include <sys/file.h> 107#include <sys/file.h>
108#include <sys/proc.h> 108#include <sys/proc.h>
109#include <sys/kernel.h> 109#include <sys/kernel.h>
110#include <sys/sysctl.h> 110#include <sys/sysctl.h>
111#include <sys/xcall.h> 111#include <sys/xcall.h>
112 112
@@ -116,34 +116,64 @@ __KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.57 @@ -116,34 +116,64 @@ __KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.57
116#include <machine/intr.h> 116#include <machine/intr.h>
117#include <machine/cpufunc.h> 117#include <machine/cpufunc.h>
118#include <machine/pcb.h> 118#include <machine/pcb.h>
119#include <machine/trap.h> 119#include <machine/trap.h>
120#include <machine/specialreg.h> 120#include <machine/specialreg.h>
121#include <x86/cpu.h> 121#include <x86/cpu.h>
122#include <x86/fpu.h> 122#include <x86/fpu.h>
123 123
124#ifdef XENPV 124#ifdef XENPV
125#define clts() HYPERVISOR_fpu_taskswitch(0) 125#define clts() HYPERVISOR_fpu_taskswitch(0)
126#define stts() HYPERVISOR_fpu_taskswitch(1) 126#define stts() HYPERVISOR_fpu_taskswitch(1)
127#endif 127#endif
128 128
 129void fpu_handle_deferred(void);
 130void fpu_switch(struct lwp *, struct lwp *);
 131
129uint32_t x86_fpu_mxcsr_mask __read_mostly = 0; 132uint32_t x86_fpu_mxcsr_mask __read_mostly = 0;
130 133
131static inline union savefpu * 134static inline union savefpu *
132lwp_fpuarea(struct lwp *l) 135fpu_lwp_area(struct lwp *l)
 136{
 137 struct pcb *pcb = lwp_getpcb(l);
 138 union savefpu *area = &pcb->pcb_savefpu;
 139
 140 KASSERT((l->l_flag & LW_SYSTEM) == 0);
 141 if (l == curlwp) {
 142 fpu_save();
 143 }
 144 KASSERT(!(l->l_md.md_flags & MDL_FPU_IN_CPU));
 145
 146 return area;
 147}
 148
 149/*
 150 * Bring curlwp's FPU state into memory. It will get installed back in the CPU
 151 * when returning to userland.
 152 */
 153void
 154fpu_save(void)
133{ 155{
 156 struct lwp *l = curlwp;
134 struct pcb *pcb = lwp_getpcb(l); 157 struct pcb *pcb = lwp_getpcb(l);
 158 union savefpu *area = &pcb->pcb_savefpu;
135 159
136 return &pcb->pcb_savefpu; 160 kpreempt_disable();
 161 if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
 162 KASSERT((l->l_flag & LW_SYSTEM) == 0);
 163 fpu_area_save(area, x86_xsave_features);
 164 l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
 165 }
 166 kpreempt_enable();
137} 167}
138 168
139void 169void
140fpuinit(struct cpu_info *ci) 170fpuinit(struct cpu_info *ci)
141{ 171{
142 /* 172 /*
143 * This might not be strictly necessary since it will be initialized 173 * This might not be strictly necessary since it will be initialized
144 * for each process. However it does no harm. 174 * for each process. However it does no harm.
145 */ 175 */
146 clts(); 176 clts();
147 fninit(); 177 fninit();
148 stts(); 178 stts();
149} 179}
@@ -203,106 +233,117 @@ fpu_errata_amd(void) @@ -203,106 +233,117 @@ fpu_errata_amd(void)
203 * Newer generations of AMD CPUs have CPUID_Fn80000008_EBX[2], 233 * Newer generations of AMD CPUs have CPUID_Fn80000008_EBX[2],
204 * which indicates that FIP/FDP/FOP are restored (same behavior 234 * which indicates that FIP/FDP/FOP are restored (same behavior
205 * as Intel). We're not using it though. 235 * as Intel). We're not using it though.
206 */ 236 */
207 fnstsw(&sw); 237 fnstsw(&sw);
208 if (sw & 0x80) 238 if (sw & 0x80)
209 fnclex(); 239 fnclex();
210 fldummy(); 240 fldummy();
211} 241}
212 242
213void 243void
214fpu_area_save(void *area, uint64_t xsave_features) 244fpu_area_save(void *area, uint64_t xsave_features)
215{ 245{
216 clts(); 
217 
218 switch (x86_fpu_save) { 246 switch (x86_fpu_save) {
219 case FPU_SAVE_FSAVE: 247 case FPU_SAVE_FSAVE:
220 fnsave(area); 248 fnsave(area);
221 break; 249 break;
222 case FPU_SAVE_FXSAVE: 250 case FPU_SAVE_FXSAVE:
223 fxsave(area); 251 fxsave(area);
224 break; 252 break;
225 case FPU_SAVE_XSAVE: 253 case FPU_SAVE_XSAVE:
226 xsave(area, xsave_features); 254 xsave(area, xsave_features);
227 break; 255 break;
228 case FPU_SAVE_XSAVEOPT: 256 case FPU_SAVE_XSAVEOPT:
229 xsaveopt(area, xsave_features); 257 xsaveopt(area, xsave_features);
230 break; 258 break;
231 } 259 }
 260
 261 stts();
232} 262}
233 263
234void 264void
235fpu_area_restore(void *area, uint64_t xsave_features) 265fpu_area_restore(void *area, uint64_t xsave_features)
236{ 266{
237 clts(); 267 clts();
238 268
239 switch (x86_fpu_save) { 269 switch (x86_fpu_save) {
240 case FPU_SAVE_FSAVE: 270 case FPU_SAVE_FSAVE:
241 frstor(area); 271 frstor(area);
242 break; 272 break;
243 case FPU_SAVE_FXSAVE: 273 case FPU_SAVE_FXSAVE:
244 if (cpu_vendor == CPUVENDOR_AMD) 274 if (cpu_vendor == CPUVENDOR_AMD)
245 fpu_errata_amd(); 275 fpu_errata_amd();
246 fxrstor(area); 276 fxrstor(area);
247 break; 277 break;
248 case FPU_SAVE_XSAVE: 278 case FPU_SAVE_XSAVE:
249 case FPU_SAVE_XSAVEOPT: 279 case FPU_SAVE_XSAVEOPT:
250 if (cpu_vendor == CPUVENDOR_AMD) 280 if (cpu_vendor == CPUVENDOR_AMD)
251 fpu_errata_amd(); 281 fpu_errata_amd();
252 xrstor(area, xsave_features); 282 xrstor(area, xsave_features);
253 break; 283 break;
254 } 284 }
255} 285}
256 286
257static void 287void
258fpu_lwp_install(struct lwp *l) 288fpu_handle_deferred(void)
259{ 289{
260 struct pcb *pcb = lwp_getpcb(l); 290 struct pcb *pcb = lwp_getpcb(curlwp);
261 struct cpu_info *ci = curcpu(); 
262 
263 KASSERT(ci->ci_fpcurlwp == NULL); 
264 KASSERT(pcb->pcb_fpcpu == NULL); 
265 ci->ci_fpcurlwp = l; 
266 pcb->pcb_fpcpu = ci; 
267 fpu_area_restore(&pcb->pcb_savefpu, x86_xsave_features); 291 fpu_area_restore(&pcb->pcb_savefpu, x86_xsave_features);
268} 292}
269 293
270void fpu_switch(struct lwp *, struct lwp *); 
271 
272void 294void
273fpu_switch(struct lwp *oldlwp, struct lwp *newlwp) 295fpu_switch(struct lwp *oldlwp, struct lwp *newlwp)
274{ 296{
275 int s; 297 struct pcb *pcb;
276 298
277 s = splhigh(); 299 if ((oldlwp != NULL) && (oldlwp->l_md.md_flags & MDL_FPU_IN_CPU)) {
278#ifdef DIAGNOSTIC 300 KASSERT(!(oldlwp->l_flag & LW_SYSTEM));
279 if (oldlwp != NULL) { 301 pcb = lwp_getpcb(oldlwp);
280 struct pcb *pcb = lwp_getpcb(oldlwp); 302 fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features);
281 struct cpu_info *ci = curcpu(); 303 oldlwp->l_md.md_flags &= ~MDL_FPU_IN_CPU;
282 if (pcb->pcb_fpcpu == NULL) { 
283 KASSERT(ci->ci_fpcurlwp != oldlwp); 
284 } else if (pcb->pcb_fpcpu == ci) { 
285 KASSERT(ci->ci_fpcurlwp == oldlwp); 
286 } else { 
287 panic("%s: oldlwp's state installed elsewhere", 
288 __func__); 
289 } 
290 } 304 }
291#endif 305 KASSERT(!(newlwp->l_md.md_flags & MDL_FPU_IN_CPU));
292 fpusave_cpu(true); 306}
293 if (!(newlwp->l_flag & LW_SYSTEM)) 307
294 fpu_lwp_install(newlwp); 308void
295 splx(s); 309fpu_lwp_fork(struct lwp *l1, struct lwp *l2)
 310{
 311 struct pcb *pcb2 = lwp_getpcb(l2);
 312 union savefpu *fpu_save;
 313
 314 /* Kernel threads have no FPU. */
 315 if (__predict_false(l2->l_flag & LW_SYSTEM)) {
 316 return;
 317 }
 318 /* For init(8). */
 319 if (__predict_false(l1->l_flag & LW_SYSTEM)) {
 320 memset(&pcb2->pcb_savefpu, 0, x86_fpu_save_size);
 321 return;
 322 }
 323
 324 fpu_save = fpu_lwp_area(l1);
 325 memcpy(&pcb2->pcb_savefpu, fpu_save, x86_fpu_save_size);
 326 l2->l_md.md_flags &= ~MDL_FPU_IN_CPU;
 327}
 328
 329void
 330fpu_lwp_abandon(struct lwp *l)
 331{
 332 KASSERT(l == curlwp);
 333 kpreempt_disable();
 334 l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
 335 stts();
 336 kpreempt_enable();
296} 337}
297 338
298/* -------------------------------------------------------------------------- */ 339/* -------------------------------------------------------------------------- */
299 340
300/* 341/*
301 * The following table is used to ensure that the FPE_... value 342 * The following table is used to ensure that the FPE_... value
302 * that is passed as a trapcode to the signal handler of the user 343 * that is passed as a trapcode to the signal handler of the user
303 * process does not have more than one bit set. 344 * process does not have more than one bit set.
304 * 345 *
305 * Multiple bits may be set if SSE simd instructions generate errors 346 * Multiple bits may be set if SSE simd instructions generate errors
306 * on more than one value or if the user process modifies the control 347 * on more than one value or if the user process modifies the control
307 * word while a status word bit is already set (which is a sign 348 * word while a status word bit is already set (which is a sign
308 * of bad coding). 349 * of bad coding).
@@ -389,31 +430,27 @@ static const uint8_t fpetable[128] = { @@ -389,31 +430,27 @@ static const uint8_t fpetable[128] = {
389 * 430 *
390 * We come here with interrupts disabled. 431 * We come here with interrupts disabled.
391 */ 432 */
392void 433void
393fputrap(struct trapframe *frame) 434fputrap(struct trapframe *frame)
394{ 435{
395 uint32_t statbits; 436 uint32_t statbits;
396 ksiginfo_t ksi; 437 ksiginfo_t ksi;
397 438
398 if (__predict_false(!USERMODE(frame->tf_cs))) { 439 if (__predict_false(!USERMODE(frame->tf_cs))) {
399 panic("fpu trap from kernel, trapframe %p\n", frame); 440 panic("fpu trap from kernel, trapframe %p\n", frame);
400 } 441 }
401 442
402 /* 443 KASSERT(curlwp->l_md.md_flags & MDL_FPU_IN_CPU);
403 * At this point, fpcurlwp should be curlwp. If it wasn't, the TS bit 
404 * should be set, and we should have gotten a DNA exception. 
405 */ 
406 KASSERT(curcpu()->ci_fpcurlwp == curlwp); 
407 444
408 if (frame->tf_trapno == T_XMM) { 445 if (frame->tf_trapno == T_XMM) {
409 uint32_t mxcsr; 446 uint32_t mxcsr;
410 x86_stmxcsr(&mxcsr); 447 x86_stmxcsr(&mxcsr);
411 statbits = mxcsr; 448 statbits = mxcsr;
412 /* Clear the sticky status bits */ 449 /* Clear the sticky status bits */
413 mxcsr &= ~0x3f; 450 mxcsr &= ~0x3f;
414 x86_ldmxcsr(&mxcsr); 451 x86_ldmxcsr(&mxcsr);
415 452
416 /* Remove masked interrupts and non-status bits */ 453 /* Remove masked interrupts and non-status bits */
417 statbits &= ~(statbits >> 7) & 0x3f; 454 statbits &= ~(statbits >> 7) & 0x3f;
418 /* Mark this is an XMM status */ 455 /* Mark this is an XMM status */
419 statbits |= 0x10000; 456 statbits |= 0x10000;
@@ -430,232 +467,122 @@ fputrap(struct trapframe *frame) @@ -430,232 +467,122 @@ fputrap(struct trapframe *frame)
430 } 467 }
431 468
432 /* Doesn't matter now if we get pre-empted */ 469 /* Doesn't matter now if we get pre-empted */
433 x86_enable_intr(); 470 x86_enable_intr();
434 471
435 KSI_INIT_TRAP(&ksi); 472 KSI_INIT_TRAP(&ksi);
436 ksi.ksi_signo = SIGFPE; 473 ksi.ksi_signo = SIGFPE;
437 ksi.ksi_addr = (void *)X86_TF_RIP(frame); 474 ksi.ksi_addr = (void *)X86_TF_RIP(frame);
438 ksi.ksi_code = fpetable[statbits & 0x7f]; 475 ksi.ksi_code = fpetable[statbits & 0x7f];
439 ksi.ksi_trap = statbits; 476 ksi.ksi_trap = statbits;
440 (*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, &ksi); 477 (*curlwp->l_proc->p_emul->e_trapsignal)(curlwp, &ksi);
441} 478}
442 479
443/* 
444 * Implement device not available (DNA) exception. Called with interrupts still 
445 * disabled. 
446 */ 
447void 480void
448fpudna(struct trapframe *frame) 481fpudna(struct trapframe *frame)
449{ 482{
450 struct cpu_info *ci = curcpu(); 483 panic("fpudna from %s, ip %p, trapframe %p",
451 int s; 484 USERMODE(frame->tf_cs) ? "userland" : "kernel",
452 485 (void *)X86_TF_RIP(frame), frame);
453 if (!USERMODE(frame->tf_cs)) { 
454 panic("fpudna from kernel, ip %p, trapframe %p\n", 
455 (void *)X86_TF_RIP(frame), frame); 
456 } 
457 
458 /* Install the LWP's FPU state. */ 
459 s = splhigh(); 
460 fpu_lwp_install(ci->ci_curlwp); 
461 splx(s); 
462} 486}
463 487
464/* -------------------------------------------------------------------------- */ 488/* -------------------------------------------------------------------------- */
465 489
466/* 
467 * Save current CPU's FPU state. Must be called at IPL_HIGH. 
468 */ 
469void 
470fpusave_cpu(bool save) 
471{ 
472 struct cpu_info *ci; 
473 struct pcb *pcb; 
474 struct lwp *l; 
475 
476 KASSERT(curcpu()->ci_ilevel == IPL_HIGH); 
477 
478 ci = curcpu(); 
479 l = ci->ci_fpcurlwp; 
480 if (l == NULL) { 
481 return; 
482 } 
483 pcb = lwp_getpcb(l); 
484 
485 if (save) { 
486 fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features); 
487 } 
488 
489 stts(); 
490 pcb->pcb_fpcpu = NULL; 
491 ci->ci_fpcurlwp = NULL; 
492} 
493 
494/* 
495 * Save l's FPU state, which may be on this processor or another processor. 
496 * It may take some time, so we avoid disabling preemption where possible. 
497 * Caller must know that the target LWP is stopped, otherwise this routine 
498 * may race against it. 
499 */ 
500void 
501fpusave_lwp(struct lwp *l, bool save) 
502{ 
503 struct pcb *pcb = lwp_getpcb(l); 
504 struct cpu_info *oci; 
505 int s, spins, ticks; 
506 
507 spins = 0; 
508 ticks = hardclock_ticks; 
509 for (;;) { 
510 s = splhigh(); 
511 oci = pcb->pcb_fpcpu; 
512 if (oci == NULL) { 
513 splx(s); 
514 break; 
515 } 
516 if (oci == curcpu()) { 
517 KASSERT(oci->ci_fpcurlwp == l); 
518 fpusave_cpu(save); 
519 splx(s); 
520 break; 
521 } 
522 splx(s); 
523#ifdef XENPV 
524 if (xen_send_ipi(oci, XEN_IPI_SYNCH_FPU) != 0) { 
525 panic("xen_send_ipi(%s, XEN_IPI_SYNCH_FPU) failed.", 
526 cpu_name(oci)); 
527 } 
528#else 
529 x86_send_ipi(oci, X86_IPI_SYNCH_FPU); 
530#endif 
531 while (pcb->pcb_fpcpu == oci && ticks == hardclock_ticks) { 
532 x86_pause(); 
533 spins++; 
534 } 
535 if (spins > 100000000) { 
536 panic("fpusave_lwp: did not"); 
537 } 
538 } 
539} 
540 
541static inline void 490static inline void
542fpu_xstate_reload(union savefpu *fpu_save, uint64_t xstate) 491fpu_xstate_reload(union savefpu *fpu_save, uint64_t xstate)
543{ 492{
544 /* 493 /*
545 * Force a reload of the given xstate during the next XRSTOR. 494 * Force a reload of the given xstate during the next XRSTOR.
546 */ 495 */
547 if (x86_fpu_save >= FPU_SAVE_XSAVE) { 496 if (x86_fpu_save >= FPU_SAVE_XSAVE) {
548 fpu_save->sv_xsave_hdr.xsh_xstate_bv |= xstate; 497 fpu_save->sv_xsave_hdr.xsh_xstate_bv |= xstate;
549 } 498 }
550} 499}
551 500
552void 501void
553fpu_set_default_cw(struct lwp *l, unsigned int x87_cw) 502fpu_set_default_cw(struct lwp *l, unsigned int x87_cw)
554{ 503{
555 union savefpu *fpu_save = lwp_fpuarea(l); 504 union savefpu *fpu_save = fpu_lwp_area(l);
556 struct pcb *pcb = lwp_getpcb(l); 505 struct pcb *pcb = lwp_getpcb(l);
557 506
558 if (i386_use_fxsave) { 507 if (i386_use_fxsave) {
559 fpu_save->sv_xmm.fx_cw = x87_cw; 508 fpu_save->sv_xmm.fx_cw = x87_cw;
560 if (x87_cw != __INITIAL_NPXCW__) { 509 if (x87_cw != __INITIAL_NPXCW__) {
561 fpu_xstate_reload(fpu_save, XCR0_X87); 510 fpu_xstate_reload(fpu_save, XCR0_X87);
562 } 511 }
563 } else { 512 } else {
564 fpu_save->sv_87.s87_cw = x87_cw; 513 fpu_save->sv_87.s87_cw = x87_cw;
565 } 514 }
566 pcb->pcb_fpu_dflt_cw = x87_cw; 515 pcb->pcb_fpu_dflt_cw = x87_cw;
567} 516}
568 517
569void 518void
570fpu_clear(struct lwp *l, unsigned int x87_cw) 519fpu_clear(struct lwp *l, unsigned int x87_cw)
571{ 520{
572 union savefpu *fpu_save; 521 union savefpu *fpu_save;
573 struct pcb *pcb; 522 struct pcb *pcb;
574 int s; 
575 523
576 KASSERT(l == curlwp); 524 KASSERT(l == curlwp);
577 KASSERT((l->l_flag & LW_SYSTEM) == 0); 525 fpu_save = fpu_lwp_area(l);
578 fpu_save = lwp_fpuarea(l); 
579 pcb = lwp_getpcb(l); 
580 
581 s = splhigh(); 
582 
583 KASSERT(pcb->pcb_fpcpu == NULL || pcb->pcb_fpcpu == curcpu()); 
584 fpusave_cpu(false); 
585 KASSERT(pcb->pcb_fpcpu == NULL); 
586 526
587 switch (x86_fpu_save) { 527 switch (x86_fpu_save) {
588 case FPU_SAVE_FSAVE: 528 case FPU_SAVE_FSAVE:
589 memset(&fpu_save->sv_87, 0, x86_fpu_save_size); 529 memset(&fpu_save->sv_87, 0, x86_fpu_save_size);
590 fpu_save->sv_87.s87_tw = 0xffff; 530 fpu_save->sv_87.s87_tw = 0xffff;
591 fpu_save->sv_87.s87_cw = x87_cw; 531 fpu_save->sv_87.s87_cw = x87_cw;
592 break; 532 break;
593 case FPU_SAVE_FXSAVE: 533 case FPU_SAVE_FXSAVE:
594 memset(&fpu_save->sv_xmm, 0, x86_fpu_save_size); 534 memset(&fpu_save->sv_xmm, 0, x86_fpu_save_size);
595 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__; 535 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__;
596 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask; 536 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask;
597 fpu_save->sv_xmm.fx_cw = x87_cw; 537 fpu_save->sv_xmm.fx_cw = x87_cw;
598 break; 538 break;
599 case FPU_SAVE_XSAVE: 539 case FPU_SAVE_XSAVE:
600 case FPU_SAVE_XSAVEOPT: 540 case FPU_SAVE_XSAVEOPT:
601 memset(&fpu_save->sv_xmm, 0, x86_fpu_save_size); 541 memset(&fpu_save->sv_xmm, 0, x86_fpu_save_size);
602 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__; 542 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__;
603 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask; 543 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask;
604 fpu_save->sv_xmm.fx_cw = x87_cw; 544 fpu_save->sv_xmm.fx_cw = x87_cw;
605 if (__predict_false(x87_cw != __INITIAL_NPXCW__)) { 545 if (__predict_false(x87_cw != __INITIAL_NPXCW__)) {
606 fpu_xstate_reload(fpu_save, XCR0_X87); 546 fpu_xstate_reload(fpu_save, XCR0_X87);
607 } 547 }
608 break; 548 break;
609 } 549 }
610 550
 551 pcb = lwp_getpcb(l);
611 pcb->pcb_fpu_dflt_cw = x87_cw; 552 pcb->pcb_fpu_dflt_cw = x87_cw;
612 
613 fpu_lwp_install(l); 
614 splx(s); 
615} 553}
616 554
617void 555void
618fpu_sigreset(struct lwp *l) 556fpu_sigreset(struct lwp *l)
619{ 557{
620 union savefpu *fpu_save = lwp_fpuarea(l); 558 union savefpu *fpu_save = fpu_lwp_area(l);
621 struct pcb *pcb = lwp_getpcb(l); 559 struct pcb *pcb = lwp_getpcb(l);
622 560
623 /* 561 /*
624 * For signal handlers the register values don't matter. Just reset 562 * For signal handlers the register values don't matter. Just reset
625 * a few fields. 563 * a few fields.
626 */ 564 */
627 if (i386_use_fxsave) { 565 if (i386_use_fxsave) {
628 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__; 566 fpu_save->sv_xmm.fx_mxcsr = __INITIAL_MXCSR__;
629 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask; 567 fpu_save->sv_xmm.fx_mxcsr_mask = x86_fpu_mxcsr_mask;
630 fpu_save->sv_xmm.fx_tw = 0; 568 fpu_save->sv_xmm.fx_tw = 0;
631 fpu_save->sv_xmm.fx_cw = pcb->pcb_fpu_dflt_cw; 569 fpu_save->sv_xmm.fx_cw = pcb->pcb_fpu_dflt_cw;
632 } else { 570 } else {
633 fpu_save->sv_87.s87_tw = 0xffff; 571 fpu_save->sv_87.s87_tw = 0xffff;
634 fpu_save->sv_87.s87_cw = pcb->pcb_fpu_dflt_cw; 572 fpu_save->sv_87.s87_cw = pcb->pcb_fpu_dflt_cw;
635 } 573 }
636} 574}
637 575
638void 
639fpu_save_area_fork(struct pcb *pcb2, const struct pcb *pcb1) 
640{ 
641 const uint8_t *src = (const uint8_t *)&pcb1->pcb_savefpu; 
642 uint8_t *dst = (uint8_t *)&pcb2->pcb_savefpu; 
643 
644 memcpy(dst, src, x86_fpu_save_size); 
645 
646 KASSERT(pcb2->pcb_fpcpu == NULL); 
647} 
648 
649/* -------------------------------------------------------------------------- */ 576/* -------------------------------------------------------------------------- */
650 577
651static void 578static void
652process_xmm_to_s87(const struct fxsave *sxmm, struct save87 *s87) 579process_xmm_to_s87(const struct fxsave *sxmm, struct save87 *s87)
653{ 580{
654 unsigned int tag, ab_tag; 581 unsigned int tag, ab_tag;
655 const struct fpaccfx *fx_reg; 582 const struct fpaccfx *fx_reg;
656 struct fpacc87 *s87_reg; 583 struct fpacc87 *s87_reg;
657 int i; 584 int i;
658 585
659 /* 586 /*
660 * For historic reasons core dumps and ptrace all use the old save87 587 * For historic reasons core dumps and ptrace all use the old save87
661 * layout. Convert the important parts. 588 * layout. Convert the important parts.
@@ -759,103 +686,86 @@ process_s87_to_xmm(const struct save87 * @@ -759,103 +686,86 @@ process_s87_to_xmm(const struct save87 *
759 while ((tag >>= 1) >= (i <<= 1)); 686 while ((tag >>= 1) >= (i <<= 1));
760 sxmm->fx_tw = ab_tag; 687 sxmm->fx_tw = ab_tag;
761 688
762 /* FP registers (in stack order) */ 689 /* FP registers (in stack order) */
763 fx_reg = sxmm->fx_87_ac; 690 fx_reg = sxmm->fx_87_ac;
764 s87_reg = s87->s87_ac; 691 s87_reg = s87->s87_ac;
765 for (i = 0; i < 8; fx_reg++, s87_reg++, i++) 692 for (i = 0; i < 8; fx_reg++, s87_reg++, i++)
766 fx_reg->r = *s87_reg; 693 fx_reg->r = *s87_reg;
767} 694}
768 695
769void 696void
770process_write_fpregs_xmm(struct lwp *l, const struct fxsave *fpregs) 697process_write_fpregs_xmm(struct lwp *l, const struct fxsave *fpregs)
771{ 698{
772 union savefpu *fpu_save; 699 union savefpu *fpu_save = fpu_lwp_area(l);
773 
774 fpusave_lwp(l, true); 
775 fpu_save = lwp_fpuarea(l); 
776 700
777 if (i386_use_fxsave) { 701 if (i386_use_fxsave) {
778 memcpy(&fpu_save->sv_xmm, fpregs, sizeof(fpu_save->sv_xmm)); 702 memcpy(&fpu_save->sv_xmm, fpregs, sizeof(fpu_save->sv_xmm));
779 703
780 /* 704 /*
781 * Invalid bits in mxcsr or mxcsr_mask will cause faults. 705 * Invalid bits in mxcsr or mxcsr_mask will cause faults.
782 */ 706 */
783 fpu_save->sv_xmm.fx_mxcsr_mask &= x86_fpu_mxcsr_mask; 707 fpu_save->sv_xmm.fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
784 fpu_save->sv_xmm.fx_mxcsr &= fpu_save->sv_xmm.fx_mxcsr_mask; 708 fpu_save->sv_xmm.fx_mxcsr &= fpu_save->sv_xmm.fx_mxcsr_mask;
785 709
786 fpu_xstate_reload(fpu_save, XCR0_X87 | XCR0_SSE); 710 fpu_xstate_reload(fpu_save, XCR0_X87 | XCR0_SSE);
787 } else { 711 } else {
788 process_xmm_to_s87(fpregs, &fpu_save->sv_87); 712 process_xmm_to_s87(fpregs, &fpu_save->sv_87);
789 } 713 }
790} 714}
791 715
792void 716void
793process_write_fpregs_s87(struct lwp *l, const struct save87 *fpregs) 717process_write_fpregs_s87(struct lwp *l, const struct save87 *fpregs)
794{ 718{
795 union savefpu *fpu_save; 719 union savefpu *fpu_save = fpu_lwp_area(l);
796 720
797 if (i386_use_fxsave) { 721 if (i386_use_fxsave) {
798 /* Save so we don't lose the xmm registers */ 
799 fpusave_lwp(l, true); 
800 fpu_save = lwp_fpuarea(l); 
801 process_s87_to_xmm(fpregs, &fpu_save->sv_xmm); 722 process_s87_to_xmm(fpregs, &fpu_save->sv_xmm);
802 fpu_xstate_reload(fpu_save, XCR0_X87 | XCR0_SSE); 723 fpu_xstate_reload(fpu_save, XCR0_X87 | XCR0_SSE);
803 } else { 724 } else {
804 fpusave_lwp(l, false); 
805 fpu_save = lwp_fpuarea(l); 
806 memcpy(&fpu_save->sv_87, fpregs, sizeof(fpu_save->sv_87)); 725 memcpy(&fpu_save->sv_87, fpregs, sizeof(fpu_save->sv_87));
807 } 726 }
808} 727}
809 728
810void 729void
811process_read_fpregs_xmm(struct lwp *l, struct fxsave *fpregs) 730process_read_fpregs_xmm(struct lwp *l, struct fxsave *fpregs)
812{ 731{
813 union savefpu *fpu_save; 732 union savefpu *fpu_save = fpu_lwp_area(l);
814 
815 fpusave_lwp(l, true); 
816 fpu_save = lwp_fpuarea(l); 
817 733
818 if (i386_use_fxsave) { 734 if (i386_use_fxsave) {
819 memcpy(fpregs, &fpu_save->sv_xmm, sizeof(fpu_save->sv_xmm)); 735 memcpy(fpregs, &fpu_save->sv_xmm, sizeof(fpu_save->sv_xmm));
820 } else { 736 } else {
821 memset(fpregs, 0, sizeof(*fpregs)); 737 memset(fpregs, 0, sizeof(*fpregs));
822 process_s87_to_xmm(&fpu_save->sv_87, fpregs); 738 process_s87_to_xmm(&fpu_save->sv_87, fpregs);
823 } 739 }
824} 740}
825 741
826void 742void
827process_read_fpregs_s87(struct lwp *l, struct save87 *fpregs) 743process_read_fpregs_s87(struct lwp *l, struct save87 *fpregs)
828{ 744{
829 union savefpu *fpu_save; 745 union savefpu *fpu_save = fpu_lwp_area(l);
830 
831 fpusave_lwp(l, true); 
832 fpu_save = lwp_fpuarea(l); 
833 746
834 if (i386_use_fxsave) { 747 if (i386_use_fxsave) {
835 memset(fpregs, 0, sizeof(*fpregs)); 748 memset(fpregs, 0, sizeof(*fpregs));
836 process_xmm_to_s87(&fpu_save->sv_xmm, fpregs); 749 process_xmm_to_s87(&fpu_save->sv_xmm, fpregs);
837 } else { 750 } else {
838 memcpy(fpregs, &fpu_save->sv_87, sizeof(fpu_save->sv_87)); 751 memcpy(fpregs, &fpu_save->sv_87, sizeof(fpu_save->sv_87));
839 } 752 }
840} 753}
841 754
842int 755int
843process_read_xstate(struct lwp *l, struct xstate *xstate) 756process_read_xstate(struct lwp *l, struct xstate *xstate)
844{ 757{
845 union savefpu *fpu_save; 758 union savefpu *fpu_save = fpu_lwp_area(l);
846 
847 fpusave_lwp(l, true); 
848 fpu_save = lwp_fpuarea(l); 
849 759
850 if (x86_fpu_save == FPU_SAVE_FSAVE) { 760 if (x86_fpu_save == FPU_SAVE_FSAVE) {
851 /* Convert from legacy FSAVE format. */ 761 /* Convert from legacy FSAVE format. */
852 memset(&xstate->xs_fxsave, 0, sizeof(xstate->xs_fxsave)); 762 memset(&xstate->xs_fxsave, 0, sizeof(xstate->xs_fxsave));
853 process_s87_to_xmm(&fpu_save->sv_87, &xstate->xs_fxsave); 763 process_s87_to_xmm(&fpu_save->sv_87, &xstate->xs_fxsave);
854 764
855 /* We only got x87 data. */ 765 /* We only got x87 data. */
856 xstate->xs_rfbm = XCR0_X87; 766 xstate->xs_rfbm = XCR0_X87;
857 xstate->xs_xstate_bv = XCR0_X87; 767 xstate->xs_xstate_bv = XCR0_X87;
858 return 0; 768 return 0;
859 } 769 }
860 770
861 /* Copy the legacy area. */ 771 /* Copy the legacy area. */
@@ -914,30 +824,27 @@ process_verify_xstate(const struct xstat @@ -914,30 +824,27 @@ process_verify_xstate(const struct xstat
914 break; 824 break;
915 default: 825 default:
916 /* Verify whether no unsupported features are enabled */ 826 /* Verify whether no unsupported features are enabled */
917 if ((xstate->xs_rfbm & ~(x86_xsave_features & XCR0_FPU)) != 0) 827 if ((xstate->xs_rfbm & ~(x86_xsave_features & XCR0_FPU)) != 0)
918 return EINVAL; 828 return EINVAL;
919 } 829 }
920 830
921 return 0; 831 return 0;
922} 832}
923 833
924int 834int
925process_write_xstate(struct lwp *l, const struct xstate *xstate) 835process_write_xstate(struct lwp *l, const struct xstate *xstate)
926{ 836{
927 union savefpu *fpu_save; 837 union savefpu *fpu_save = fpu_lwp_area(l);
928 
929 fpusave_lwp(l, true); 
930 fpu_save = lwp_fpuarea(l); 
931 838
932 /* Convert data into legacy FSAVE format. */ 839 /* Convert data into legacy FSAVE format. */
933 if (x86_fpu_save == FPU_SAVE_FSAVE) { 840 if (x86_fpu_save == FPU_SAVE_FSAVE) {
934 if (xstate->xs_xstate_bv & XCR0_X87) 841 if (xstate->xs_xstate_bv & XCR0_X87)
935 process_xmm_to_s87(&xstate->xs_fxsave, &fpu_save->sv_87); 842 process_xmm_to_s87(&xstate->xs_fxsave, &fpu_save->sv_87);
936 return 0; 843 return 0;
937 } 844 }
938 845
939 /* If XSAVE is supported, make sure that xstate_bv is set correctly. */ 846 /* If XSAVE is supported, make sure that xstate_bv is set correctly. */
940 if (x86_fpu_save >= FPU_SAVE_XSAVE) { 847 if (x86_fpu_save >= FPU_SAVE_XSAVE) {
941 /* 848 /*
942 * Bit-wise xstate->xs_rfbm ? xstate->xs_xstate_bv 849 * Bit-wise xstate->xs_rfbm ? xstate->xs_xstate_bv
943 * : fpu_save->sv_xsave_hdr.xsh_xstate_bv 850 * : fpu_save->sv_xsave_hdr.xsh_xstate_bv
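
The fpu.c hunks above are the heart of the rewrite: FPU state now moves between the CPU and the PCB eagerly at context-switch time, so whenever an LWP is stopped, the save area in its PCB is authoritative. The ptrace and coredump accessors therefore no longer call fpusave_lwp() to chase the state across CPUs; they simply take fpu_lwp_area(). A minimal sketch of the new accessors, assuming the MDL_FPU_IN_CPU flag this commit adds to l_md.md_flags, and assuming kpreempt_disable() as the guard now that IPL_HIGH is no longer required (shape only, not the verbatim r1.58 code):

	/*
	 * Sketch: flush the current LWP's FPU state into its PCB, if
	 * the hardware registers still hold it. Preemption is held
	 * off so the save cannot be torn by a context switch.
	 */
	void
	fpu_save(void)
	{
		struct lwp *l = curlwp;
		struct pcb *pcb = lwp_getpcb(l);

		kpreempt_disable();
		if (l->l_md.md_flags & MDL_FPU_IN_CPU) {
			fpu_area_save(&pcb->pcb_savefpu, x86_xsave_features);
			l->l_md.md_flags &= ~MDL_FPU_IN_CPU;
		}
		kpreempt_enable();
	}

	/*
	 * Sketch: return l's in-PCB save area, flushing the live
	 * registers first when l is running here. For any other LWP
	 * the caller must know it is stopped, so the PCB copy is
	 * already current and no cross-CPU IPI is needed.
	 */
	union savefpu *
	fpu_lwp_area(struct lwp *l)
	{
		struct pcb *pcb = lwp_getpcb(l);

		KASSERT((l->l_flag & LW_SYSTEM) == 0);
		if (l == curlwp) {
			fpu_save();
		}
		KASSERT((l->l_md.md_flags & MDL_FPU_IN_CPU) == 0);
		return &pcb->pcb_savefpu;
	}

This is also why fpu_clear() above loses its splhigh()/fpu_lwp_install() dance: with l == curlwp and the live registers flushed by fpu_lwp_area(), rewriting the PCB copy suffices, and the eager switch path reloads it before the LWP next runs user code.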

cvs diff -r1.27 -r1.28 src/sys/arch/x86/x86/ipi.c


--- src/sys/arch/x86/x86/ipi.c 2017/02/08 10:08:26 1.27
+++ src/sys/arch/x86/x86/ipi.c 2019/10/12 06:31:04 1.28
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: ipi.c,v 1.27 2017/02/08 10:08:26 maxv Exp $ */ 1/* $NetBSD: ipi.c,v 1.28 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2000, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by RedBack Networks Inc. 8 * by RedBack Networks Inc.
9 * 9 *
10 * Author: Bill Sommerfeld 10 * Author: Bill Sommerfeld
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -22,27 +22,27 @@ @@ -22,27 +22,27 @@
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE. 31 * POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35__KERNEL_RCSID(0, "$NetBSD: ipi.c,v 1.27 2017/02/08 10:08:26 maxv Exp $"); 35__KERNEL_RCSID(0, "$NetBSD: ipi.c,v 1.28 2019/10/12 06:31:04 maxv Exp $");
36 36
37#include "opt_mtrr.h" 37#include "opt_mtrr.h"
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40#include <sys/device.h> 40#include <sys/device.h>
41#include <sys/systm.h> 41#include <sys/systm.h>
42#include <sys/atomic.h> 42#include <sys/atomic.h>
43#include <sys/intr.h> 43#include <sys/intr.h>
44#include <sys/ipi.h> 44#include <sys/ipi.h>
45#include <sys/cpu.h> 45#include <sys/cpu.h>
46#include <sys/xcall.h> 46#include <sys/xcall.h>
47 47
48#ifdef MULTIPROCESSOR 48#ifdef MULTIPROCESSOR
@@ -166,27 +166,27 @@ x86_ipi_halt(struct cpu_info *ci) @@ -166,27 +166,27 @@ x86_ipi_halt(struct cpu_info *ci)
166 166
167 x86_disable_intr(); 167 x86_disable_intr();
168 atomic_and_32(&ci->ci_flags, ~CPUF_RUNNING); 168 atomic_and_32(&ci->ci_flags, ~CPUF_RUNNING);
169 169
170 for (;;) { 170 for (;;) {
171 x86_hlt(); 171 x86_hlt();
172 } 172 }
173} 173}
174 174
175static void 175static void
176x86_ipi_synch_fpu(struct cpu_info *ci) 176x86_ipi_synch_fpu(struct cpu_info *ci)
177{ 177{
178 178
179 fpusave_cpu(true); 179 panic("%s: impossible", __func__);
180} 180}
181 181
182#ifdef MTRR 182#ifdef MTRR
183static void 183static void
184x86_ipi_reload_mtrr(struct cpu_info *ci) 184x86_ipi_reload_mtrr(struct cpu_info *ci)
185{ 185{
186 186
187 if (mtrr_funcs != NULL) { 187 if (mtrr_funcs != NULL) {
188 /* 188 /*
189 * mtrr_reload_cpu() is a macro in mtrr.h which picks 189 * mtrr_reload_cpu() is a macro in mtrr.h which picks
190 * the appropriate function to use. 190 * the appropriate function to use.
191 */ 191 */
192 mtrr_reload_cpu(ci); 192 mtrr_reload_cpu(ci);

cvs diff -r1.37 -r1.38 src/sys/arch/x86/x86/vm_machdep.c (expand / switch to unified diff)

--- src/sys/arch/x86/x86/vm_machdep.c 2019/02/11 14:59:33 1.37
+++ src/sys/arch/x86/x86/vm_machdep.c 2019/10/12 06:31:04 1.38
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vm_machdep.c,v 1.37 2019/02/11 14:59:33 cherry Exp $ */ 1/* $NetBSD: vm_machdep.c,v 1.38 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1982, 1986 The Regents of the University of California. 4 * Copyright (c) 1982, 1986 The Regents of the University of California.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * the Systems Programming Group of the University of Utah Computer 8 * the Systems Programming Group of the University of Utah Computer
9 * Science Department, and William Jolitz. 9 * Science Department, and William Jolitz.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -70,27 +70,27 @@ @@ -70,27 +70,27 @@
70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 70 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 71 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 72 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73 * SUCH DAMAGE. 73 * SUCH DAMAGE.
74 * 74 *
75 * @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 75 * @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
76 */ 76 */
77 77
78/* 78/*
79 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ 79 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
80 */ 80 */
81 81
82#include <sys/cdefs.h> 82#include <sys/cdefs.h>
83__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.37 2019/02/11 14:59:33 cherry Exp $"); 83__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.38 2019/10/12 06:31:04 maxv Exp $");
84 84
85#include "opt_mtrr.h" 85#include "opt_mtrr.h"
86 86
87#include <sys/param.h> 87#include <sys/param.h>
88#include <sys/systm.h> 88#include <sys/systm.h>
89#include <sys/proc.h> 89#include <sys/proc.h>
90#include <sys/vnode.h> 90#include <sys/vnode.h>
91#include <sys/buf.h> 91#include <sys/buf.h>
92#include <sys/core.h> 92#include <sys/core.h>
93#include <sys/exec.h> 93#include <sys/exec.h>
94#include <sys/ptrace.h> 94#include <sys/ptrace.h>
95 95
96#include <uvm/uvm.h> 96#include <uvm/uvm.h>
@@ -130,49 +130,40 @@ cpu_proc_fork(struct proc *p1, struct pr @@ -130,49 +130,40 @@ cpu_proc_fork(struct proc *p1, struct pr
130void 130void
131cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize, 131cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
132 void (*func)(void *), void *arg) 132 void (*func)(void *), void *arg)
133{ 133{
134 struct pcb *pcb1, *pcb2; 134 struct pcb *pcb1, *pcb2;
135 struct trapframe *tf; 135 struct trapframe *tf;
136 struct switchframe *sf; 136 struct switchframe *sf;
137 vaddr_t uv; 137 vaddr_t uv;
138 138
139 pcb1 = lwp_getpcb(l1); 139 pcb1 = lwp_getpcb(l1);
140 pcb2 = lwp_getpcb(l2); 140 pcb2 = lwp_getpcb(l2);
141 141
142 /* 142 /*
143 * If parent LWP was using FPU, then we have to save the FPU h/w 
144 * state to PCB so that we can copy it. 
145 */ 
146 fpusave_lwp(l1, true); 
147 
148 /* 
149 * Sync the PCB before we copy it. 143 * Sync the PCB before we copy it.
150 */ 144 */
151 if (l1 == curlwp) { 145 if (l1 == curlwp) {
152 KASSERT(pcb1 == curpcb); 146 KASSERT(pcb1 == curpcb);
153 savectx(pcb1); 147 savectx(pcb1);
154 } else { 148 } else {
155 KASSERT(l1 == &lwp0); 149 KASSERT(l1 == &lwp0);
156 } 150 }
157 151
158 /* Copy the PCB from parent, except the FPU state. */ 152 /* Copy the PCB from parent, except the FPU state. */
159 memcpy(pcb2, pcb1, offsetof(struct pcb, pcb_savefpu)); 153 memcpy(pcb2, pcb1, offsetof(struct pcb, pcb_savefpu));
160 154
161 /* FPU state not installed. */ 155 /* Fork the FPU state. */
162 pcb2->pcb_fpcpu = NULL; 156 fpu_lwp_fork(l1, l2);
163 
164 /* Copy FPU state. */ 
165 fpu_save_area_fork(pcb2, pcb1); 
166 157
167 /* Never inherit CPU Debug Registers */ 158 /* Never inherit CPU Debug Registers */
168 pcb2->pcb_dbregs = NULL; 159 pcb2->pcb_dbregs = NULL;
169 pcb2->pcb_flags &= ~PCB_DBREGS; 160 pcb2->pcb_flags &= ~PCB_DBREGS;
170 161
171#if defined(XENPV) 162#if defined(XENPV)
172 pcb2->pcb_iopl = IOPL_KPL; 163 pcb2->pcb_iopl = IOPL_KPL;
173#endif 164#endif
174 165
175 /* 166 /*
176 * Set the kernel stack address (from the address to uarea) and 167 * Set the kernel stack address (from the address to uarea) and
177 * trapframe address for child. 168 * trapframe address for child.
178 * 169 *
@@ -250,28 +241,28 @@ cpu_lwp_fork(struct lwp *l1, struct lwp  @@ -250,28 +241,28 @@ cpu_lwp_fork(struct lwp *l1, struct lwp
250 pcb2->pcb_ebp = (int)l2; 241 pcb2->pcb_ebp = (int)l2;
251#endif 242#endif
252} 243}
253 244
254/* 245/*
255 * cpu_lwp_free is called from exit() to let machine-dependent 246 * cpu_lwp_free is called from exit() to let machine-dependent
256 * code free machine-dependent resources. Note that this routine 247 * code free machine-dependent resources. Note that this routine
257 * must not block. 248 * must not block.
258 */ 249 */
259void 250void
260cpu_lwp_free(struct lwp *l, int proc) 251cpu_lwp_free(struct lwp *l, int proc)
261{ 252{
262 253
263 /* If we were using the FPU, forget about it. */ 254 /* Abandon the FPU state. */
264 fpusave_lwp(l, false); 255 fpu_lwp_abandon(l);
265 256
266 /* Abandon the dbregs state. */ 257 /* Abandon the dbregs state. */
267 x86_dbregs_abandon(l); 258 x86_dbregs_abandon(l);
268 259
269#ifdef MTRR 260#ifdef MTRR
270 if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR) 261 if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR)
271 mtrr_clean(l->l_proc); 262 mtrr_clean(l->l_proc);
272#endif 263#endif
273 /* 264 /*
274 * Free deferred mappings if any. 265 * Free deferred mappings if any.
275 */ 266 */
276 struct vm_page *empty_ptps = l->l_md.md_gc_ptp; 267 struct vm_page *empty_ptps = l->l_md.md_gc_ptp;
277 l->l_md.md_gc_ptp = NULL; 268 l->l_md.md_gc_ptp = NULL;
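
cpu_lwp_fork() used to force the parent's live FPU state into the PCB with fpusave_lwp() before copying it; now a single fpu_lwp_fork() call both flushes and copies. A sketch of what such a helper has to do, again assuming MDL_FPU_IN_CPU (the real routine is in fpu.c r1.58, outside the hunks shown above):

	/*
	 * Sketch: fork l1's FPU state into l2. Going through
	 * fpu_lwp_area() guarantees the PCB copy is current before
	 * the memcpy; the child starts with its state not loaded in
	 * any CPU. The real routine also handles a system parent
	 * (lwp0) by zeroing the child's area instead of copying.
	 */
	void
	fpu_lwp_fork(struct lwp *l1, struct lwp *l2)
	{
		struct pcb *pcb2 = lwp_getpcb(l2);
		union savefpu *fpu_save;

		if ((l2->l_flag & LW_SYSTEM) == 0) {
			fpu_save = fpu_lwp_area(l1);
			memcpy(&pcb2->pcb_savefpu, fpu_save,
			    x86_fpu_save_size);
			l2->l_md.md_flags &= ~MDL_FPU_IN_CPU;
		}
	}

fpu_lwp_abandon() in cpu_lwp_free() is the discard-side twin: the dying LWP's registers are simply forgotten rather than written back.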

cvs diff -r1.129 -r1.130 src/sys/arch/xen/x86/cpu.c

--- src/sys/arch/xen/x86/cpu.c 2019/03/09 08:42:25 1.129
+++ src/sys/arch/xen/x86/cpu.c 2019/10/12 06:31:04 1.130
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.129 2019/03/09 08:42:25 maxv Exp $ */ 1/* $NetBSD: cpu.c,v 1.130 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi, 5 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by RedBack Networks Inc. 9 * by RedBack Networks Inc.
10 * 10 *
11 * Author: Bill Sommerfeld 11 * Author: Bill Sommerfeld
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE. 64 * SUCH DAMAGE.
65 */ 65 */
66 66
67#include <sys/cdefs.h> 67#include <sys/cdefs.h>
68__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.129 2019/03/09 08:42:25 maxv Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.130 2019/10/12 06:31:04 maxv Exp $");
69 69
70#include "opt_ddb.h" 70#include "opt_ddb.h"
71#include "opt_multiprocessor.h" 71#include "opt_multiprocessor.h"
72#include "opt_mpbios.h" /* for MPDEBUG */ 72#include "opt_mpbios.h" /* for MPDEBUG */
73#include "opt_mtrr.h" 73#include "opt_mtrr.h"
74#include "opt_xen.h" 74#include "opt_xen.h"
75 75
76#include "lapic.h" 76#include "lapic.h"
77#include "ioapic.h" 77#include "ioapic.h"
78 78
79#include <sys/param.h> 79#include <sys/param.h>
80#include <sys/proc.h> 80#include <sys/proc.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
@@ -749,35 +749,34 @@ cpu_hatch(void *v) @@ -749,35 +749,34 @@ cpu_hatch(void *v)
749 749
750#include <ddb/db_output.h> 750#include <ddb/db_output.h>
751#include <machine/db_machdep.h> 751#include <machine/db_machdep.h>
752 752
753/* 753/*
754 * Dump CPU information from ddb. 754 * Dump CPU information from ddb.
755 */ 755 */
756void 756void
757cpu_debug_dump(void) 757cpu_debug_dump(void)
758{ 758{
759 struct cpu_info *ci; 759 struct cpu_info *ci;
760 CPU_INFO_ITERATOR cii; 760 CPU_INFO_ITERATOR cii;
761 761
762 db_printf("addr dev id flags ipis curlwp fpcurlwp\n"); 762 db_printf("addr dev id flags ipis curlwp\n");
763 for (CPU_INFO_FOREACH(cii, ci)) { 763 for (CPU_INFO_FOREACH(cii, ci)) {
764 db_printf("%p %s %ld %x %x %10p %10p\n", 764 db_printf("%p %s %ld %x %x %10p\n",
765 ci, 765 ci,
766 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev), 766 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
767 (long)ci->ci_cpuid, 767 (long)ci->ci_cpuid,
768 ci->ci_flags, ci->ci_ipis, 768 ci->ci_flags, ci->ci_ipis,
769 ci->ci_curlwp, 769 ci->ci_curlwp);
770 ci->ci_fpcurlwp); 
771 } 770 }
772} 771}
773#endif /* DDB */ 772#endif /* DDB */
774 773
775#endif /* MULTIPROCESSOR */ 774#endif /* MULTIPROCESSOR */
776 775
777extern void hypervisor_callback(void); 776extern void hypervisor_callback(void);
778extern void failsafe_callback(void); 777extern void failsafe_callback(void);
779#ifdef __x86_64__ 778#ifdef __x86_64__
780typedef void (vector)(void); 779typedef void (vector)(void);
781extern vector Xsyscall, Xsyscall32; 780extern vector Xsyscall, Xsyscall32;
782#endif 781#endif
783 782
@@ -1056,31 +1055,27 @@ cpu_init_msrs(struct cpu_info *ci, bool  @@ -1056,31 +1055,27 @@ cpu_init_msrs(struct cpu_info *ci, bool
1056 HYPERVISOR_set_segment_base(SEGBASE_FS, 0); 1055 HYPERVISOR_set_segment_base(SEGBASE_FS, 0);
1057 HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t)ci); 1056 HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t)ci);
1058 HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0); 1057 HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0);
1059 } 1058 }
1060#endif 1059#endif
1061 1060
1062 if (cpu_feature[2] & CPUID_NOX) 1061 if (cpu_feature[2] & CPUID_NOX)
1063 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE); 1062 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
1064} 1063}
1065 1064
1066void 1065void
1067cpu_offline_md(void) 1066cpu_offline_md(void)
1068{ 1067{
1069 int s; 1068 return;
1070 
1071 s = splhigh(); 
1072 fpusave_cpu(true); 
1073 splx(s); 
1074} 1069}
1075 1070
1076void 1071void
1077cpu_get_tsc_freq(struct cpu_info *ci) 1072cpu_get_tsc_freq(struct cpu_info *ci)
1078{ 1073{
1079 uint32_t vcpu_tversion; 1074 uint32_t vcpu_tversion;
1080 const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time; 1075 const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time;
1081 1076
1082 vcpu_tversion = tinfo->version; 1077 vcpu_tversion = tinfo->version;
1083 while (tinfo->version == vcpu_tversion); /* Wait for a time update. XXX: timeout ? */ 1078 while (tinfo->version == vcpu_tversion); /* Wait for a time update. XXX: timeout ? */
1084 1079
1085 uint64_t freq = 1000000000ULL << 32; 1080 uint64_t freq = 1000000000ULL << 32;
1086 freq = freq / (uint64_t)tinfo->tsc_to_system_mul; 1081 freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
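
The Xen cpu.c changes are fallout rather than substance: ci_fpcurlwp no longer exists, so the ddb CPU dump loses its column, and cpu_offline_md() has nothing left to flush, because any LWP that ran on the outgoing CPU already had its state saved at context switch. Were one to make that invariant explicit, a hypothetical assertion (not in the commit) would look like:

	/*
	 * Hypothetical: the LWP taking this CPU offline is a kernel
	 * thread, and kernel threads never own live FPU state under
	 * the new model, so there is nothing to save before going
	 * down.
	 */
	void
	cpu_offline_md(void)
	{
		KASSERT((curlwp->l_md.md_flags & MDL_FPU_IN_CPU) == 0);
	}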

cvs diff -r1.32 -r1.33 src/sys/arch/xen/x86/xen_ipi.c

--- src/sys/arch/xen/x86/xen_ipi.c 2019/02/02 12:32:55 1.32
+++ src/sys/arch/xen/x86/xen_ipi.c 2019/10/12 06:31:04 1.33
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_ipi.c,v 1.32 2019/02/02 12:32:55 cherry Exp $ */ 1/* $NetBSD: xen_ipi.c,v 1.33 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Cherry G. Mathew <cherry@zyx.in> 8 * by Cherry G. Mathew <cherry@zyx.in>
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -23,30 +23,30 @@ @@ -23,30 +23,30 @@
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> /* RCS ID macro */ 32#include <sys/cdefs.h> /* RCS ID macro */
33 33
34/*  34/*
35 * Based on: x86/ipi.c 35 * Based on: x86/ipi.c
36 * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.32 2019/02/02 12:32:55 cherry Exp $"); 36 * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.33 2019/10/12 06:31:04 maxv Exp $");
37 */ 37 */
38 38
39__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.32 2019/02/02 12:32:55 cherry Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.33 2019/10/12 06:31:04 maxv Exp $");
40 40
41#include "opt_ddb.h" 41#include "opt_ddb.h"
42 42
43#include <sys/types.h> 43#include <sys/types.h>
44 44
45#include <sys/atomic.h> 45#include <sys/atomic.h>
46#include <sys/cpu.h> 46#include <sys/cpu.h>
47#include <sys/mutex.h> 47#include <sys/mutex.h>
48#include <sys/device.h> 48#include <sys/device.h>
49#include <sys/xcall.h> 49#include <sys/xcall.h>
50#include <sys/ipi.h> 50#include <sys/ipi.h>
51#include <sys/errno.h> 51#include <sys/errno.h>
52#include <sys/systm.h> 52#include <sys/systm.h>
@@ -227,27 +227,27 @@ xen_ipi_halt(struct cpu_info *ci, struct @@ -227,27 +227,27 @@ xen_ipi_halt(struct cpu_info *ci, struct
227 KASSERT(ci != NULL); 227 KASSERT(ci != NULL);
228 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL)) { 228 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL)) {
229 panic("%s shutdown failed.\n", device_xname(ci->ci_dev)); 229 panic("%s shutdown failed.\n", device_xname(ci->ci_dev));
230 } 230 }
231 231
232} 232}
233 233
234static void 234static void
235xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf) 235xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
236{ 236{
237 KASSERT(ci != NULL); 237 KASSERT(ci != NULL);
238 KASSERT(intrf != NULL); 238 KASSERT(intrf != NULL);
239 239
240 fpusave_cpu(true); 240 panic("%s: impossible", __func__);
241} 241}
242 242
243#ifdef DDB 243#ifdef DDB
244static void 244static void
245xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf) 245xen_ipi_ddb(struct cpu_info *ci, struct intrframe *intrf)
246{ 246{
247 KASSERT(ci != NULL); 247 KASSERT(ci != NULL);
248 KASSERT(intrf != NULL); 248 KASSERT(intrf != NULL);
249 249
250#ifdef __x86_64__ 250#ifdef __x86_64__
251 ddb_ipi(intrf->if_tf); 251 ddb_ipi(intrf->if_tf);
252#else 252#else
253 struct trapframe tf; 253 struct trapframe tf;

cvs diff -r1.49 -r1.50 src/sys/dev/nvmm/x86/nvmm_x86_svm.c

--- src/sys/dev/nvmm/x86/nvmm_x86_svm.c 2019/10/04 12:17:05 1.49
+++ src/sys/dev/nvmm/x86/nvmm_x86_svm.c 2019/10/12 06:31:04 1.50
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: nvmm_x86_svm.c,v 1.49 2019/10/04 12:17:05 maxv Exp $ */ 1/* $NetBSD: nvmm_x86_svm.c,v 1.50 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.49 2019/10/04 12:17:05 maxv Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_svm.c,v 1.50 2019/10/12 06:31:04 maxv Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/cpu.h> 39#include <sys/cpu.h>
40#include <sys/xcall.h> 40#include <sys/xcall.h>
41#include <sys/mman.h> 41#include <sys/mman.h>
42 42
43#include <uvm/uvm.h> 43#include <uvm/uvm.h>
44#include <uvm/uvm_page.h> 44#include <uvm/uvm_page.h>
45 45
46#include <x86/cputypes.h> 46#include <x86/cputypes.h>
@@ -531,28 +531,26 @@ struct svm_cpudata { @@ -531,28 +531,26 @@ struct svm_cpudata {
531 531
532 /* MSR bitmap */ 532 /* MSR bitmap */
533 uint8_t *msrbm; 533 uint8_t *msrbm;
534 paddr_t msrbm_pa; 534 paddr_t msrbm_pa;
535 535
536 /* Host state */ 536 /* Host state */
537 uint64_t hxcr0; 537 uint64_t hxcr0;
538 uint64_t star; 538 uint64_t star;
539 uint64_t lstar; 539 uint64_t lstar;
540 uint64_t cstar; 540 uint64_t cstar;
541 uint64_t sfmask; 541 uint64_t sfmask;
542 uint64_t fsbase; 542 uint64_t fsbase;
543 uint64_t kernelgsbase; 543 uint64_t kernelgsbase;
544 bool ts_set; 
545 struct xsave_header hfpu __aligned(64); 
546 544
547 /* Intr state */ 545 /* Intr state */
548 bool int_window_exit; 546 bool int_window_exit;
549 bool nmi_window_exit; 547 bool nmi_window_exit;
550 bool evt_pending; 548 bool evt_pending;
551 549
552 /* Guest state */ 550 /* Guest state */
553 uint64_t gxcr0; 551 uint64_t gxcr0;
554 uint64_t gprs[NVMM_X64_NGPR]; 552 uint64_t gprs[NVMM_X64_NGPR];
555 uint64_t drs[NVMM_X64_NDR]; 553 uint64_t drs[NVMM_X64_NDR];
556 uint64_t gtsc; 554 uint64_t gtsc;
557 struct xsave_header gfpu __aligned(64); 555 struct xsave_header gfpu __aligned(64);
558}; 556};
@@ -1127,75 +1125,71 @@ svm_exit_xsetbv(struct nvmm_machine *mac @@ -1127,75 +1125,71 @@ svm_exit_xsetbv(struct nvmm_machine *mac
1127 (vmcb->state.rax & 0xFFFFFFFF); 1125 (vmcb->state.rax & 0xFFFFFFFF);
1128 1126
1129 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) { 1127 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1130 goto error; 1128 goto error;
1131 } else if (__predict_false(vmcb->state.cpl != 0)) { 1129 } else if (__predict_false(vmcb->state.cpl != 0)) {
1132 goto error; 1130 goto error;
1133 } else if (__predict_false((val & ~svm_xcr0_mask) != 0)) { 1131 } else if (__predict_false((val & ~svm_xcr0_mask) != 0)) {
1134 goto error; 1132 goto error;
1135 } else if (__predict_false((val & XCR0_X87) == 0)) { 1133 } else if (__predict_false((val & XCR0_X87) == 0)) {
1136 goto error; 1134 goto error;
1137 } 1135 }
1138 1136
1139 cpudata->gxcr0 = val; 1137 cpudata->gxcr0 = val;
 1138 if (svm_xcr0_mask != 0) {
 1139 wrxcr(0, cpudata->gxcr0);
 1140 }
1140 1141
1141 svm_inkernel_advance(cpudata->vmcb); 1142 svm_inkernel_advance(cpudata->vmcb);
1142 return; 1143 return;
1143 1144
1144error: 1145error:
1145 svm_inject_gp(vcpu); 1146 svm_inject_gp(vcpu);
1146} 1147}
1147 1148
1148static void 1149static void
1149svm_exit_invalid(struct nvmm_exit *exit, uint64_t code) 1150svm_exit_invalid(struct nvmm_exit *exit, uint64_t code)
1150{ 1151{
1151 exit->u.inv.hwcode = code; 1152 exit->u.inv.hwcode = code;
1152 exit->reason = NVMM_EXIT_INVALID; 1153 exit->reason = NVMM_EXIT_INVALID;
1153} 1154}
1154 1155
1155/* -------------------------------------------------------------------------- */ 1156/* -------------------------------------------------------------------------- */
1156 1157
1157static void 1158static void
1158svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu) 1159svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
1159{ 1160{
1160 struct svm_cpudata *cpudata = vcpu->cpudata; 1161 struct svm_cpudata *cpudata = vcpu->cpudata;
1161 1162
1162 cpudata->ts_set = (rcr0() & CR0_TS) != 0; 1163 fpu_save();
1163 
1164 fpu_area_save(&cpudata->hfpu, svm_xcr0_mask); 
1165 fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask); 1164 fpu_area_restore(&cpudata->gfpu, svm_xcr0_mask);
1166 1165
1167 if (svm_xcr0_mask != 0) { 1166 if (svm_xcr0_mask != 0) {
1168 cpudata->hxcr0 = rdxcr(0); 1167 cpudata->hxcr0 = rdxcr(0);
1169 wrxcr(0, cpudata->gxcr0); 1168 wrxcr(0, cpudata->gxcr0);
1170 } 1169 }
1171} 1170}
1172 1171
1173static void 1172static void
1174svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu) 1173svm_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
1175{ 1174{
1176 struct svm_cpudata *cpudata = vcpu->cpudata; 1175 struct svm_cpudata *cpudata = vcpu->cpudata;
1177 1176
1178 if (svm_xcr0_mask != 0) { 1177 if (svm_xcr0_mask != 0) {
1179 cpudata->gxcr0 = rdxcr(0); 1178 cpudata->gxcr0 = rdxcr(0);
1180 wrxcr(0, cpudata->hxcr0); 1179 wrxcr(0, cpudata->hxcr0);
1181 } 1180 }
1182 1181
1183 fpu_area_save(&cpudata->gfpu, svm_xcr0_mask); 1182 fpu_area_save(&cpudata->gfpu, svm_xcr0_mask);
1184 fpu_area_restore(&cpudata->hfpu, svm_xcr0_mask); 
1185 
1186 if (cpudata->ts_set) { 
1187 stts(); 
1188 } 
1189} 1183}
1190 1184
1191static void 1185static void
1192svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu) 1186svm_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
1193{ 1187{
1194 struct svm_cpudata *cpudata = vcpu->cpudata; 1188 struct svm_cpudata *cpudata = vcpu->cpudata;
1195 1189
1196 x86_dbregs_save(curlwp); 1190 x86_dbregs_save(curlwp);
1197 1191
1198 ldr7(0); 1192 ldr7(0);
1199 1193
1200 ldr0(cpudata->drs[NVMM_X64_DR_DR0]); 1194 ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
1201 ldr1(cpudata->drs[NVMM_X64_DR_DR1]); 1195 ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
@@ -1317,44 +1311,43 @@ svm_vcpu_run(struct nvmm_machine *mach,  @@ -1317,44 +1311,43 @@ svm_vcpu_run(struct nvmm_machine *mach,
1317 kpreempt_disable(); 1311 kpreempt_disable();
1318 hcpu = cpu_number(); 1312 hcpu = cpu_number();
1319 1313
1320 svm_gtlb_catchup(vcpu, hcpu); 1314 svm_gtlb_catchup(vcpu, hcpu);
1321 svm_htlb_catchup(vcpu, hcpu); 1315 svm_htlb_catchup(vcpu, hcpu);
1322 1316
1323 if (vcpu->hcpu_last != hcpu) { 1317 if (vcpu->hcpu_last != hcpu) {
1324 svm_vmcb_cache_flush_all(vmcb); 1318 svm_vmcb_cache_flush_all(vmcb);
1325 cpudata->gtsc_want_update = true; 1319 cpudata->gtsc_want_update = true;
1326 } 1320 }
1327 1321
1328 svm_vcpu_guest_dbregs_enter(vcpu); 1322 svm_vcpu_guest_dbregs_enter(vcpu);
1329 svm_vcpu_guest_misc_enter(vcpu); 1323 svm_vcpu_guest_misc_enter(vcpu);
 1324 svm_vcpu_guest_fpu_enter(vcpu);
1330 1325
1331 while (1) { 1326 while (1) {
1332 if (cpudata->gtlb_want_flush) { 1327 if (cpudata->gtlb_want_flush) {
1333 vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush; 1328 vmcb->ctrl.tlb_ctrl = svm_ctrl_tlb_flush;
1334 } else { 1329 } else {
1335 vmcb->ctrl.tlb_ctrl = 0; 1330 vmcb->ctrl.tlb_ctrl = 0;
1336 } 1331 }
1337 1332
1338 if (__predict_false(cpudata->gtsc_want_update)) { 1333 if (__predict_false(cpudata->gtsc_want_update)) {
1339 vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc(); 1334 vmcb->ctrl.tsc_offset = cpudata->gtsc - rdtsc();
1340 svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I); 1335 svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
1341 } 1336 }
1342 1337
1343 s = splhigh(); 1338 s = splhigh();
1344 machgen = svm_htlb_flush(machdata, cpudata); 1339 machgen = svm_htlb_flush(machdata, cpudata);
1345 svm_vcpu_guest_fpu_enter(vcpu); 
1346 svm_vmrun(cpudata->vmcb_pa, cpudata->gprs); 1340 svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
1347 svm_vcpu_guest_fpu_leave(vcpu); 
1348 svm_htlb_flush_ack(cpudata, machgen); 1341 svm_htlb_flush_ack(cpudata, machgen);
1349 splx(s); 1342 splx(s);
1350 1343
1351 svm_vmcb_cache_default(vmcb); 1344 svm_vmcb_cache_default(vmcb);
1352 1345
1353 if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) { 1346 if (vmcb->ctrl.exitcode != VMCB_EXITCODE_INVALID) {
1354 cpudata->gtlb_want_flush = false; 1347 cpudata->gtlb_want_flush = false;
1355 cpudata->gtsc_want_update = false; 1348 cpudata->gtsc_want_update = false;
1356 vcpu->hcpu_last = hcpu; 1349 vcpu->hcpu_last = hcpu;
1357 } 1350 }
1358 svm_exit_evt(cpudata, vmcb); 1351 svm_exit_evt(cpudata, vmcb);
1359 1352
1360 switch (vmcb->ctrl.exitcode) { 1353 switch (vmcb->ctrl.exitcode) {
@@ -1427,26 +1420,27 @@ svm_vcpu_run(struct nvmm_machine *mach,  @@ -1427,26 +1420,27 @@ svm_vcpu_run(struct nvmm_machine *mach,
1427 if (curcpu()->ci_data.cpu_softints != 0) { 1420 if (curcpu()->ci_data.cpu_softints != 0) {
1428 break; 1421 break;
1429 } 1422 }
1430 if (curlwp->l_flag & LW_USERRET) { 1423 if (curlwp->l_flag & LW_USERRET) {
1431 break; 1424 break;
1432 } 1425 }
1433 if (exit->reason != NVMM_EXIT_NONE) { 1426 if (exit->reason != NVMM_EXIT_NONE) {
1434 break; 1427 break;
1435 } 1428 }
1436 } 1429 }
1437 1430
1438 cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset; 1431 cpudata->gtsc = rdtsc() + vmcb->ctrl.tsc_offset;
1439 1432
 1433 svm_vcpu_guest_fpu_leave(vcpu);
1440 svm_vcpu_guest_misc_leave(vcpu); 1434 svm_vcpu_guest_misc_leave(vcpu);
1441 svm_vcpu_guest_dbregs_leave(vcpu); 1435 svm_vcpu_guest_dbregs_leave(vcpu);
1442 1436
1443 kpreempt_enable(); 1437 kpreempt_enable();
1444 1438
1445 exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v, 1439 exit->exitstate[NVMM_X64_EXITSTATE_CR8] = __SHIFTOUT(vmcb->ctrl.v,
1446 VMCB_CTRL_V_TPR); 1440 VMCB_CTRL_V_TPR);
1447 exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags; 1441 exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] = vmcb->state.rflags;
1448 1442
1449 exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] = 1443 exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
1450 ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0); 1444 ((vmcb->ctrl.intr & VMCB_CTRL_INTR_SHADOW) != 0);
1451 exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] = 1445 exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
1452 cpudata->int_window_exit; 1446 cpudata->int_window_exit;
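
The NVMM/SVM side shows what the rewrite buys. Previously the guest FPU context had to be entered and left around every single VMRUN inside the splhigh() window, with a private hfpu buffer and CR0.TS bookkeeping to restore the host afterwards. Now fpu_save() parks the host LWP's state in its own PCB, so the guest state stays loaded across the whole run loop and the hfpu/ts_set fields disappear. The shape of the new bracketing, condensed from the hunks above (fragment, not compilable on its own):

	svm_vcpu_guest_fpu_enter(vcpu);	/* fpu_save() + restore gfpu */
	while (1) {
		s = splhigh();
		machgen = svm_htlb_flush(machdata, cpudata);
		svm_vmrun(cpudata->vmcb_pa, cpudata->gprs);
		svm_htlb_flush_ack(cpudata, machgen);
		splx(s);
		/* ... exit handling, break conditions ... */
	}
	svm_vcpu_guest_fpu_leave(vcpu);	/* save gfpu back */

Each iteration no longer pays two full save/restore pairs, and since kpreempt_disable() is held across the loop, nothing can switch in and clobber the live guest registers between iterations.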

cvs diff -r1.38 -r1.39 src/sys/dev/nvmm/x86/nvmm_x86_vmx.c

--- src/sys/dev/nvmm/x86/nvmm_x86_vmx.c 2019/10/04 12:17:05 1.38
+++ src/sys/dev/nvmm/x86/nvmm_x86_vmx.c 2019/10/12 06:31:04 1.39
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: nvmm_x86_vmx.c,v 1.38 2019/10/04 12:17:05 maxv Exp $ */ 1/* $NetBSD: nvmm_x86_vmx.c,v 1.39 2019/10/12 06:31:04 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Maxime Villard. 8 * by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.38 2019/10/04 12:17:05 maxv Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: nvmm_x86_vmx.c,v 1.39 2019/10/12 06:31:04 maxv Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/cpu.h> 39#include <sys/cpu.h>
40#include <sys/xcall.h> 40#include <sys/xcall.h>
41#include <sys/mman.h> 41#include <sys/mman.h>
42 42
43#include <uvm/uvm.h> 43#include <uvm/uvm.h>
44#include <uvm/uvm_page.h> 44#include <uvm/uvm_page.h>
45 45
46#include <x86/cputypes.h> 46#include <x86/cputypes.h>
@@ -723,28 +723,26 @@ struct vmx_cpudata { @@ -723,28 +723,26 @@ struct vmx_cpudata {
723 bool vmcs_launched; 723 bool vmcs_launched;
724 724
725 /* MSR bitmap */ 725 /* MSR bitmap */
726 uint8_t *msrbm; 726 uint8_t *msrbm;
727 paddr_t msrbm_pa; 727 paddr_t msrbm_pa;
728 728
729 /* Host state */ 729 /* Host state */
730 uint64_t hxcr0; 730 uint64_t hxcr0;
731 uint64_t star; 731 uint64_t star;
732 uint64_t lstar; 732 uint64_t lstar;
733 uint64_t cstar; 733 uint64_t cstar;
734 uint64_t sfmask; 734 uint64_t sfmask;
735 uint64_t kernelgsbase; 735 uint64_t kernelgsbase;
736 bool ts_set; 
737 struct xsave_header hfpu __aligned(64); 
738 736
739 /* Intr state */ 737 /* Intr state */
740 bool int_window_exit; 738 bool int_window_exit;
741 bool nmi_window_exit; 739 bool nmi_window_exit;
742 bool evt_pending; 740 bool evt_pending;
743 741
744 /* Guest state */ 742 /* Guest state */
745 struct msr_entry *gmsr; 743 struct msr_entry *gmsr;
746 paddr_t gmsr_pa; 744 paddr_t gmsr_pa;
747 uint64_t gmsr_misc_enable; 745 uint64_t gmsr_misc_enable;
748 uint64_t gcr2; 746 uint64_t gcr2;
749 uint64_t gcr8; 747 uint64_t gcr8;
750 uint64_t gxcr0; 748 uint64_t gxcr0;
@@ -1649,26 +1647,29 @@ vmx_exit_xsetbv(struct nvmm_machine *mac @@ -1649,26 +1647,29 @@ vmx_exit_xsetbv(struct nvmm_machine *mac
1649 1647
1650 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) | 1648 val = (cpudata->gprs[NVMM_X64_GPR_RDX] << 32) |
1651 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF); 1649 (cpudata->gprs[NVMM_X64_GPR_RAX] & 0xFFFFFFFF);
1652 1650
1653 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) { 1651 if (__predict_false(cpudata->gprs[NVMM_X64_GPR_RCX] != 0)) {
1654 goto error; 1652 goto error;
1655 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) { 1653 } else if (__predict_false((val & ~vmx_xcr0_mask) != 0)) {
1656 goto error; 1654 goto error;
1657 } else if (__predict_false((val & XCR0_X87) == 0)) { 1655 } else if (__predict_false((val & XCR0_X87) == 0)) {
1658 goto error; 1656 goto error;
1659 } 1657 }
1660 1658
1661 cpudata->gxcr0 = val; 1659 cpudata->gxcr0 = val;
 1660 if (vmx_xcr0_mask != 0) {
 1661 wrxcr(0, cpudata->gxcr0);
 1662 }
1662 1663
1663 vmx_inkernel_advance(); 1664 vmx_inkernel_advance();
1664 return; 1665 return;
1665 1666
1666error: 1667error:
1667 vmx_inject_gp(vcpu); 1668 vmx_inject_gp(vcpu);
1668} 1669}
1669 1670
1670#define VMX_EPT_VIOLATION_READ __BIT(0) 1671#define VMX_EPT_VIOLATION_READ __BIT(0)
1671#define VMX_EPT_VIOLATION_WRITE __BIT(1) 1672#define VMX_EPT_VIOLATION_WRITE __BIT(1)
1672#define VMX_EPT_VIOLATION_EXECUTE __BIT(2) 1673#define VMX_EPT_VIOLATION_EXECUTE __BIT(2)
1673 1674
1674static void 1675static void
@@ -1693,53 +1694,46 @@ vmx_exit_epf(struct nvmm_machine *mach,  @@ -1693,53 +1694,46 @@ vmx_exit_epf(struct nvmm_machine *mach,
1693 1694
1694 vmx_vcpu_state_provide(vcpu, 1695 vmx_vcpu_state_provide(vcpu,
1695 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS | 1696 NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
1696 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS); 1697 NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
1697} 1698}
1698 1699
1699/* -------------------------------------------------------------------------- */ 1700/* -------------------------------------------------------------------------- */
1700 1701
1701static void 1702static void
1702vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu) 1703vmx_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
1703{ 1704{
1704 struct vmx_cpudata *cpudata = vcpu->cpudata; 1705 struct vmx_cpudata *cpudata = vcpu->cpudata;
1705 1706
1706 cpudata->ts_set = (rcr0() & CR0_TS) != 0; 1707 fpu_save();
1707 
1708 fpu_area_save(&cpudata->hfpu, vmx_xcr0_mask); 
1709 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask); 1708 fpu_area_restore(&cpudata->gfpu, vmx_xcr0_mask);
1710 1709
1711 if (vmx_xcr0_mask != 0) { 1710 if (vmx_xcr0_mask != 0) {
1712 cpudata->hxcr0 = rdxcr(0); 1711 cpudata->hxcr0 = rdxcr(0);
1713 wrxcr(0, cpudata->gxcr0); 1712 wrxcr(0, cpudata->gxcr0);
1714 } 1713 }
1715} 1714}
1716 1715
1717static void 1716static void
1718vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu) 1717vmx_vcpu_guest_fpu_leave(struct nvmm_cpu *vcpu)
1719{ 1718{
1720 struct vmx_cpudata *cpudata = vcpu->cpudata; 1719 struct vmx_cpudata *cpudata = vcpu->cpudata;
1721 1720
1722 if (vmx_xcr0_mask != 0) { 1721 if (vmx_xcr0_mask != 0) {
1723 cpudata->gxcr0 = rdxcr(0); 1722 cpudata->gxcr0 = rdxcr(0);
1724 wrxcr(0, cpudata->hxcr0); 1723 wrxcr(0, cpudata->hxcr0);
1725 } 1724 }
1726 1725
1727 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask); 1726 fpu_area_save(&cpudata->gfpu, vmx_xcr0_mask);
1728 fpu_area_restore(&cpudata->hfpu, vmx_xcr0_mask); 
1729 
1730 if (cpudata->ts_set) { 
1731 stts(); 
1732 } 
1733} 1727}
1734 1728
1735static void 1729static void
1736vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu) 1730vmx_vcpu_guest_dbregs_enter(struct nvmm_cpu *vcpu)
1737{ 1731{
1738 struct vmx_cpudata *cpudata = vcpu->cpudata; 1732 struct vmx_cpudata *cpudata = vcpu->cpudata;
1739 1733
1740 x86_dbregs_save(curlwp); 1734 x86_dbregs_save(curlwp);
1741 1735
1742 ldr7(0); 1736 ldr7(0);
1743 1737
1744 ldr0(cpudata->drs[NVMM_X64_DR_DR0]); 1738 ldr0(cpudata->drs[NVMM_X64_DR_DR0]);
1745 ldr1(cpudata->drs[NVMM_X64_DR_DR1]); 1739 ldr1(cpudata->drs[NVMM_X64_DR_DR1]);
@@ -1901,51 +1895,50 @@ vmx_vcpu_run(struct nvmm_machine *mach,
 	vmx_htlb_catchup(vcpu, hcpu);
 
 	if (vcpu->hcpu_last != hcpu) {
 		vmx_vmwrite(VMCS_HOST_TR_SELECTOR, ci->ci_tss_sel);
 		vmx_vmwrite(VMCS_HOST_TR_BASE, (uint64_t)ci->ci_tss);
 		vmx_vmwrite(VMCS_HOST_GDTR_BASE, (uint64_t)ci->ci_gdt);
 		vmx_vmwrite(VMCS_HOST_GS_BASE, rdmsr(MSR_GSBASE));
 		cpudata->gtsc_want_update = true;
 		vcpu->hcpu_last = hcpu;
 	}
 
 	vmx_vcpu_guest_dbregs_enter(vcpu);
 	vmx_vcpu_guest_misc_enter(vcpu);
+	vmx_vcpu_guest_fpu_enter(vcpu);
 
 	while (1) {
 		if (cpudata->gtlb_want_flush) {
 			vpid_desc.vpid = cpudata->asid;
 			vpid_desc.addr = 0;
 			vmx_invvpid(vmx_tlb_flush_op, &vpid_desc);
 			cpudata->gtlb_want_flush = false;
 		}
 
 		if (__predict_false(cpudata->gtsc_want_update)) {
 			vmx_vmwrite(VMCS_TSC_OFFSET, cpudata->gtsc - rdtsc());
 			cpudata->gtsc_want_update = false;
 		}
 
 		s = splhigh();
 		machgen = vmx_htlb_flush(machdata, cpudata);
-		vmx_vcpu_guest_fpu_enter(vcpu);
 		lcr2(cpudata->gcr2);
 		if (launched) {
 			ret = vmx_vmresume(cpudata->gprs);
 		} else {
 			ret = vmx_vmlaunch(cpudata->gprs);
 		}
 		cpudata->gcr2 = rcr2();
-		vmx_vcpu_guest_fpu_leave(vcpu);
 		vmx_htlb_flush_ack(cpudata, machgen);
 		splx(s);
 
 		if (__predict_false(ret != 0)) {
 			vmx_exit_invalid(exit, -1);
 			break;
 		}
 		vmx_exit_evt(cpudata);
 
 		launched = true;
 
 		exitcode = vmx_vmread(VMCS_EXIT_REASON);
 		exitcode &= __BITS(15,0);
@@ -2029,26 +2022,27 @@ vmx_vcpu_run(struct nvmm_machine *mach,
 		}
 		if (curlwp->l_flag & LW_USERRET) {
 			break;
 		}
 		if (exit->reason != NVMM_EXIT_NONE) {
 			break;
 		}
 	}
 
 	cpudata->vmcs_launched = launched;
 
 	cpudata->gtsc = vmx_vmread(VMCS_TSC_OFFSET) + rdtsc();
 
+	vmx_vcpu_guest_fpu_leave(vcpu);
 	vmx_vcpu_guest_misc_leave(vcpu);
 	vmx_vcpu_guest_dbregs_leave(vcpu);
 
 	exit->exitstate[NVMM_X64_EXITSTATE_CR8] = cpudata->gcr8;
 	exit->exitstate[NVMM_X64_EXITSTATE_RFLAGS] =
 	    vmx_vmread(VMCS_GUEST_RFLAGS);
 	intstate = vmx_vmread(VMCS_GUEST_INTERRUPTIBILITY);
 	exit->exitstate[NVMM_X64_EXITSTATE_INT_SHADOW] =
 	    (intstate & (INT_STATE_STI|INT_STATE_MOVSS)) != 0;
 	exit->exitstate[NVMM_X64_EXITSTATE_INT_WINDOW_EXIT] =
 	    cpudata->int_window_exit;
 	exit->exitstate[NVMM_X64_EXITSTATE_NMI_WINDOW_EXIT] =
 	    cpudata->nmi_window_exit;
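
Note the structural change in vmx_vcpu_run(): with the IPL_HIGH dependency gone from the FPU code, the guest FPU context is now entered once before the run loop and left once after it, instead of around every single VMLAUNCH/VMRESUME under splhigh(). Schematically (condensed from the two hunks above, not additional code):

	vmx_vcpu_guest_dbregs_enter(vcpu);
	vmx_vcpu_guest_misc_enter(vcpu);
	vmx_vcpu_guest_fpu_enter(vcpu);		/* once, before the loop */

	while (1) {
		s = splhigh();
		/* ... vmx_vmlaunch()/vmx_vmresume(), TLB handling ... */
		splx(s);
		/* ... decode and handle the exit ... */
	}

	vmx_vcpu_guest_fpu_leave(vcpu);		/* once, after the loop */
	vmx_vcpu_guest_misc_leave(vcpu);
	vmx_vcpu_guest_dbregs_leave(vcpu);
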

cvs diff -r1.616 -r1.617 src/sys/sys/param.h

--- src/sys/sys/param.h 2019/09/30 21:18:00 1.616
+++ src/sys/sys/param.h 2019/10/12 06:31:04 1.617
@@ -1,14 +1,14 @@
-/*	$NetBSD: param.h,v 1.616 2019/09/30 21:18:00 kamil Exp $	*/
+/*	$NetBSD: param.h,v 1.617 2019/10/12 06:31:04 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1982, 1986, 1989, 1993
  *	The Regents of the University of California.  All rights reserved.
  * (c) UNIX System Laboratories, Inc.
  * All or some portions of this file are derived from material licensed
  * to the University of California by American Telephone and Telegraph
  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
  * the permission of UNIX System Laboratories, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -57,27 +57,27 @@
  *	m = minor version; a minor number of 99 indicates current.
  *	r = 0 (*)
  *	p = patchlevel
  *
  * When new releases are made, src/gnu/usr.bin/groff/tmac/mdoc.local
  * needs to be updated and the changes sent back to the groff maintainers.
  *
  * (*)	Up to 2.0I "release" used to be "",A-Z,Z[A-Z] but numeric
  *	e.g. NetBSD-1.2D = 102040000 ('D' == 4)
  *	NetBSD-2.0H (200080000) was changed on 20041001 to:
  *	2.99.9 (299000900)
  */
 
-#define	__NetBSD_Version__	999001500	/* NetBSD 9.99.15 */
+#define	__NetBSD_Version__	999001600	/* NetBSD 9.99.16 */
 
 #define __NetBSD_Prereq__(M,m,p) (((((M) * 100000000) + \
     (m) * 1000000) + (p) * 100) <= __NetBSD_Version__)
 
 /*
  * Historical NetBSD #define
  *
  * NetBSD 1.4 was the last release for which this value was incremented.
  * The value is now permanently fixed at 199905. It will never be
  * changed again.
  *
  * New code must use __NetBSD_Version__ instead, and should not even
  * count on NetBSD being defined.
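
Code that must cope with both FPU APIs can key off this version bump through __NetBSD_Prereq__; per the macro above, __NetBSD_Prereq__(9,99,16) evaluates to true exactly when __NetBSD_Version__ >= 999001600. A hypothetical consumer (not part of this commit):

	#include <sys/param.h>

	#if __NetBSD_Prereq__(9,99,16)
	/* kernel provides the rewritten x86 FPU interfaces (fpu_save() etc.) */
	#else
	/* older FPU code, e.g. the NVMM hfpu/ts_set scheme shown above */
	#endif
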