Sun Jan 7 16:10:16 2018 UTC
Add a new option, SVS (for Separate Virtual Space), that unmaps kernel
pages when running in userland. For now, only the PTE area is unmapped.

Sent on tech-kern@.


(maxv)
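
In outline: each CPU keeps two L4 page-directory physical addresses, a full
kernel one (ci_svs_kpdirpa) and a reduced per-CPU user one (ci_svs_updirpa),
and %cr3 is reloaded at each kernel entry and exit. A rough C model of the
idea, with names mirroring the patch below; lcr3() stands in for the
privileged "movq %rax,%cr3" done by the new SVS_ENTER/SVS_LEAVE assembly
macros, so this is a sketch, not kernel code:

	#include <stdint.h>

	struct svs_cpu {
		uint64_t updirpa;	/* PA of the per-CPU user L4 page */
		uint64_t kpdirpa;	/* PA of the full kernel L4 page */
	};

	/* Privileged %cr3 load; a no-op stub in this model. */
	static void lcr3(uint64_t pa) { (void)pa; }

	/* Kernel entry (trap, interrupt, syscall from userland). */
	static void svs_enter(struct svs_cpu *ci) { lcr3(ci->kpdirpa); }

	/* Return to userland: back to the reduced virtual space. */
	static void svs_leave(struct svs_cpu *ci) { lcr3(ci->updirpa); }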
diff -r1.16 -r1.17 src/sys/arch/amd64/amd64/amd64_trap.S
diff -r1.64 -r1.65 src/sys/arch/amd64/amd64/genassym.cf
diff -r1.144 -r1.145 src/sys/arch/amd64/amd64/locore.S
diff -r1.284 -r1.285 src/sys/arch/amd64/amd64/machdep.c
diff -r1.476 -r1.477 src/sys/arch/amd64/conf/GENERIC
diff -r1.97 -r1.98 src/sys/arch/amd64/conf/files.amd64
diff -r1.26 -r1.27 src/sys/arch/amd64/include/frameasm.h
diff -r1.40 -r1.41 src/sys/arch/amd64/include/pmap.h
diff -r1.388 -r1.389 src/sys/arch/i386/conf/files.i386
diff -r1.87 -r1.88 src/sys/arch/x86/include/cpu.h
diff -r1.143 -r1.144 src/sys/arch/x86/x86/cpu.c
diff -r1.277 -r1.278 src/sys/arch/x86/x86/pmap.c

cvs diff -r1.16 -r1.17 src/sys/arch/amd64/amd64/amd64_trap.S

--- src/sys/arch/amd64/amd64/amd64_trap.S 2018/01/07 12:42:46 1.16
+++ src/sys/arch/amd64/amd64/amd64_trap.S 2018/01/07 16:10:16 1.17
@@ -1,14 +1,14 @@
-/*	$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $	*/
+/*	$NetBSD: amd64_trap.S,v 1.17 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1998, 2007, 2008, 2017 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum, by Andrew Doran and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -56,27 +56,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #if 0
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.16 2018/01/07 12:42:46 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.17 2018/01/07 16:10:16 maxv Exp $");
 #endif
 
 /*
  * Trap and fault vector routines
  *
  * On exit from the kernel to user mode, we always need to check for ASTs. In
  * addition, we need to do this atomically; otherwise an interrupt may occur
  * which causes an AST, but it won't get processed until the next kernel entry
  * (possibly the next clock tick). Thus, we disable interrupt before checking,
  * and only enable them again on the final `iret' or before calling the AST
  * handler.
  */
 
@@ -110,50 +110,53 @@ IDTVEC_END(trap01)
  *
  * Therefore we don't enable interrupts, because the CPU could switch to
  * another LWP, call 'iret' and unintentionally leave the NMI mode.
  *
  * We need to be careful about %gs too, because it is possible that we were
  * running in kernel mode with a userland %gs.
  */
 IDTVEC(trap02)
 #if defined(XEN)
 	ZTRAP(T_NMI)
 #else
 	ZTRAP_NJ(T_NMI)
 	subq	$TF_REGSIZE,%rsp
+	SVS_ENTER
 	INTR_SAVE_GPRS
 	cld
 	SMAP_ENABLE
 	movw	%gs,TF_GS(%rsp)
 	movw	%fs,TF_FS(%rsp)
 	movw	%es,TF_ES(%rsp)
 	movw	%ds,TF_DS(%rsp)
 
 	movl	$MSR_GSBASE,%ecx
 	rdmsr
 	cmpl	$VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
 	jae	.Lnoswapgs
 
 	swapgs
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
+	SVS_LEAVE
 	swapgs
 	jmp	.Lnmileave
 
 .Lnoswapgs:
 	movq	%rsp,%rdi
 	incq	CPUVAR(NTRAP)
 	call	_C_LABEL(nmitrap)
+	SVS_LEAVE
 
 .Lnmileave:
 	movw	TF_ES(%rsp),%es
 	movw	TF_DS(%rsp),%ds
 	INTR_RESTORE_GPRS
 	addq	$TF_REGSIZE+16,%rsp
 	iretq
 #endif
 IDTVEC_END(trap02)
 
 IDTVEC(trap03)
 #ifndef KDTRACE_HOOKS
 	ZTRAP(T_BPTFLT)

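Note the bracketing in the NMI path above: the NMI may arrive while the
reduced user page tables are active, so SVS_ENTER runs as soon as the trap
frame is allocated, and SVS_LEAVE runs after nmitrap() returns, before the
final iretq. In terms of the model sketched at the top of this mail:

	/* NMI path ordering (model; nmitrap() is the C handler). */
	static void nmi_model(struct svs_cpu *ci)
	{
		svs_enter(ci);	/* kernel L4 first */
		/* save GPRs, fix %gs if needed, call nmitrap() */
		svs_leave(ci);	/* user L4 back before iretq */
	}
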
cvs diff -r1.64 -r1.65 src/sys/arch/amd64/amd64/genassym.cf

--- src/sys/arch/amd64/amd64/genassym.cf 2018/01/04 13:36:30 1.64
+++ src/sys/arch/amd64/amd64/genassym.cf 2018/01/07 16:10:16 1.65
@@ -1,14 +1,14 @@
-#	$NetBSD: genassym.cf,v 1.64 2018/01/04 13:36:30 maxv Exp $
+#	$NetBSD: genassym.cf,v 1.65 2018/01/07 16:10:16 maxv Exp $
 
 #
 # Copyright (c) 1998, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 # All rights reserved.
 #
 # This code is derived from software contributed to The NetBSD Foundation
 # by Charles M. Hannum, and by Andrew Doran.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 #    notice, this list of conditions and the following disclaimer.
@@ -226,26 +226,28 @@ define FRAMESIZE sizeof(struct trapfram
 define	TSS_RSP0		offsetof(struct cpu_tss, tss.tss_rsp0)
 
 define	CPU_INFO_SCRATCH	offsetof(struct cpu_info, ci_scratch)
 define	CPU_INFO_SELF		offsetof(struct cpu_info, ci_self)
 define	CPU_INFO_RESCHED	offsetof(struct cpu_info, ci_want_resched)
 define	CPU_INFO_WANT_PMAPLOAD	offsetof(struct cpu_info, ci_want_pmapload)
 define	CPU_INFO_TLBSTATE	offsetof(struct cpu_info, ci_tlbstate)
 define	TLBSTATE_VALID		TLBSTATE_VALID
 define	CPU_INFO_CURLWP		offsetof(struct cpu_info, ci_curlwp)
 define	CPU_INFO_CURLDT		offsetof(struct cpu_info, ci_curldt)
 define	CPU_INFO_IDLELWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_PMAP		offsetof(struct cpu_info, ci_pmap)
 define	CPU_INFO_TSS		offsetof(struct cpu_info, ci_tss)
+define	CPU_INFO_UPDIRPA	offsetof(struct cpu_info, ci_svs_updirpa)
+define	CPU_INFO_KPDIRPA	offsetof(struct cpu_info, ci_svs_kpdirpa)
 define	CPU_INFO_NSYSCALL	offsetof(struct cpu_info, ci_data.cpu_nsyscall)
 define	CPU_INFO_NTRAP		offsetof(struct cpu_info, ci_data.cpu_ntrap)
 define	CPU_INFO_NINTR		offsetof(struct cpu_info, ci_data.cpu_nintr)
 define	CPU_INFO_CURPRIORITY	offsetof(struct cpu_info, ci_schedstate.spc_curpriority)
 define	CPU_INFO_FPCURLWP	offsetof(struct cpu_info, ci_fpcurlwp)
 
 define	CPU_INFO_GDT		offsetof(struct cpu_info, ci_gdt)
 define	CPU_INFO_IPENDING	offsetof(struct cpu_info, ci_ipending)
 define	CPU_INFO_IMASK		offsetof(struct cpu_info, ci_imask)
 define	CPU_INFO_IUNMASK	offsetof(struct cpu_info, ci_iunmask)
 define	CPU_INFO_ILEVEL		offsetof(struct cpu_info, ci_ilevel)
 define	CPU_INFO_IDEPTH		offsetof(struct cpu_info, ci_idepth)
 define	CPU_INFO_ISOURCES	offsetof(struct cpu_info, ci_isources)

cvs diff -r1.144 -r1.145 src/sys/arch/amd64/amd64/locore.S

--- src/sys/arch/amd64/amd64/locore.S 2018/01/04 13:36:30 1.144
+++ src/sys/arch/amd64/amd64/locore.S 2018/01/07 16:10:16 1.145
@@ -1,14 +1,14 @@
-/*	$NetBSD: locore.S,v 1.144 2018/01/04 13:36:30 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.145 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
  */
 
 /*
  * Copyright (c) 1998, 2000, 2007, 2008, 2016 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -149,26 +149,27 @@
 #define _ALIGN_TEXT	ALIGN_TEXT
 
 #include <machine/asm.h>
 
 #include "opt_copy_symtab.h"
 #include "opt_ddb.h"
 #include "opt_ddbparam.h"
 #include "opt_modular.h"
 #include "opt_realmem.h"
 
 #include "opt_compat_netbsd.h"
 #include "opt_compat_netbsd32.h"
 #include "opt_xen.h"
+#include "opt_svs.h"
 
 #include "assym.h"
 #include "lapic.h"
 #include "ioapic.h"
 #include "ksyms.h"
 
 #include <sys/errno.h>
 #include <sys/syscall.h>
 
 #include <machine/pte.h>
 #include <machine/segments.h>
 #include <machine/specialreg.h>
 #include <machine/trap.h>
@@ -1078,26 +1079,32 @@ ENTRY(cpu_switchto)
 
 	movq	%rdi,%r13	/* oldlwp */
 	movq	%rsi,%r12	/* newlwp */
 
 	testq	%r13,%r13	/* oldlwp = NULL ? */
 	jz	.Lskip_save
 
 	/* Save old context. */
 	movq	L_PCB(%r13),%rax
 	movq	%rsp,PCB_RSP(%rax)
 	movq	%rbp,PCB_RBP(%rax)
 .Lskip_save:
 
+#ifdef SVS
+	pushq	%rdx
+	callq	_C_LABEL(svs_lwp_switch)
+	popq	%rdx
+#endif
+
 	/* Switch to newlwp's stack. */
 	movq	L_PCB(%r12),%r14
 	movq	PCB_RSP(%r14),%rsp
 	movq	PCB_RBP(%r14),%rbp
 
 	/*
 	 * Set curlwp. This must be globally visible in order to permit
 	 * non-interlocked mutex release.
 	 */
 	movq	%r12,%rcx
 	xchgq	%rcx,CPUVAR(CURLWP)
 
 	/* Skip the rest if returning to a pinned LWP. */
@@ -1278,26 +1285,27 @@ IDTVEC(syscall)
 	movq	CPUVAR(SCRATCH),%r15
 #undef SP
 
 	movq	$2,TF_ERR(%rsp)		/* syscall instruction size */
 	movq	$T_ASTFLT,TF_TRAPNO(%rsp)
 #else
 	/* Xen already switched to kernel stack */
 	addq	$0x10,%rsp	/* gap to match cs:rip */
 	pushq	$2		/* error code */
 	pushq	$T_ASTFLT
 	subq	$TF_REGSIZE,%rsp
 	cld
 #endif
+	SVS_ENTER
 	INTR_SAVE_GPRS
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_DS(%rsp)
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_ES(%rsp)
 	movw	$0,TF_FS(%rsp)
 	movw	$0,TF_GS(%rsp)
 	STI(si)
 
 .Ldo_syscall:
 	movq	CPUVAR(CURLWP),%r14
 	incq	CPUVAR(NSYSCALL)	/* count it atomically */
 	movq	%rsp,L_MD_REGS(%r14)	/* save pointer to frame */
 	movq	L_PROC(%r14),%r15
 	andl	$~MDL_IRET,L_MD_FLAGS(%r14)	/* Allow sysret return */
@@ -1322,26 +1330,27 @@ IDTVEC(syscall)
 
 	/*
 	 * Decide if we need to take a slow path. That's the case when we
 	 * want to reload %cs and %ss on a 64bit LWP (MDL_IRET set), or when
 	 * we're returning to a 32bit LWP (MDL_COMPAT32 set).
 	 *
 	 * In either case, we jump into intrfastexit and return to userland
 	 * with the iret instruction.
 	 */
 	testl	$(MDL_IRET|MDL_COMPAT32),L_MD_FLAGS(%r14)
 	jnz	intrfastexit
 
 	INTR_RESTORE_GPRS
+	SVS_LEAVE
 	SWAPGS
 #ifndef XEN
 	movq	TF_RIP(%rsp),%rcx	/* %rip for sysret */
 	movq	TF_RFLAGS(%rsp),%r11	/* %flags for sysret */
 	movq	TF_RSP(%rsp),%rsp
do_sysret:
 	sysretq
 #else
 	addq	$TF_RIP,%rsp
 	pushq	$256	/* VGCF_IN_SYSCALL */
 	jmp	HYPERVISOR_iret
 #endif
 
@@ -1484,29 +1493,31 @@ ENTRY(intrfastexit)
 #ifdef XEN
 	cmpw	$FLAT_RING3_CS64,TF_CS(%rsp)
 	je	.Luexit64
 #endif
 
 .Luexit32:
 	NOT_XEN(cli;)
 do_mov_es:
 	movw	TF_ES(%rsp),%es
 do_mov_ds:
 	movw	TF_DS(%rsp),%ds
 do_mov_fs:
 	movw	TF_FS(%rsp),%fs
+	SVS_LEAVE
 	SWAPGS
 #ifndef XEN
 do_mov_gs:
 	movw	TF_GS(%rsp),%gs
 #endif
 	jmp	.Lkexit
 
 .Luexit64:
 	NOT_XEN(cli;)
+	SVS_LEAVE
 	SWAPGS
 
 .Lkexit:
 	addq	$TF_REGSIZE+16,%rsp	/* + T_xxx and error code */
 do_iret:
 	iretq
 END(intrfastexit)

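Two details in locore.S are easy to miss. In cpu_switchto(), %rdx still
holds the third argument (the 'returning' flag) and is caller-saved, hence
the pushq/popq pair around the call to svs_lwp_switch(). And on the sysret
fast path, SVS_LEAVE runs before TF_RIP/TF_RFLAGS/TF_RSP are read back from
the kernel stack; that only works because, as the log message says, only the
PTE area is unmapped for now, so the kernel stack is still visible from the
user space. In the model from above:

	/* Syscall fast-path return (model). */
	static void syscall_fast_return(struct svs_cpu *ci)
	{
		/* INTR_RESTORE_GPRS: reload user registers from the frame */
		svs_leave(ci);	/* user L4; the frame stays readable since
				 * only the PTE slot is unmapped for now */
		/* SWAPGS, load %rcx/%r11/%rsp from the frame, sysretq */
	}
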
cvs diff -r1.284 -r1.285 src/sys/arch/amd64/amd64/machdep.c

--- src/sys/arch/amd64/amd64/machdep.c 2018/01/05 08:04:20 1.284
+++ src/sys/arch/amd64/amd64/machdep.c 2018/01/07 16:10:16 1.285
@@ -1,14 +1,14 @@
-/*	$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
  *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
  * Simulation Facility, NASA Ames Research Center.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Coyote Point Systems, Inc. which was written under contract to Coyote
  * Point by Jed Davis and Devon O'Dell.
@@ -100,39 +100,40 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)machdep.c	7.4 (Berkeley) 6/3/91
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $");
 
 /* #define XENDEBUG_LOW */
 
 #include "opt_modular.h"
 #include "opt_user_ldt.h"
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
 #include "opt_cpureset_delay.h"
 #include "opt_mtrr.h"
 #include "opt_realmem.h"
 #include "opt_xen.h"
 #include "opt_kaslr.h"
+#include "opt_svs.h"
 #ifndef XEN
 #include "opt_physmem.h"
 #endif
 #include "isa.h"
 #include "pci.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/signal.h>
 #include <sys/signalvar.h>
 #include <sys/kernel.h>
 #include <sys/cpu.h>
 #include <sys/exec.h>
@@ -2218,13 +2219,148 @@ mm_md_direct_mapped_io(void *addr, paddr
 		*paddr = PMAP_DIRECT_UNMAP(va);
 		return true;
 	}
 	return false;
 }
 
 bool
 mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
 {
 	*vaddr = PMAP_DIRECT_MAP(paddr);
 	return true;
 }
 #endif
+
+/* -------------------------------------------------------------------------- */
+
+#ifdef SVS
+/*
+ * Separate Virtual Space
+ *
+ * A per-cpu L4 page is maintained in ci_svs_updirpa. During each context
+ * switch to a user pmap, updirpa is populated with the entries of the new
+ * pmap, minus what we don't want to have mapped in userland.
+ *
+ * Note on locking/synchronization here:
+ *
+ * (a) Touching ci_svs_updir without holding ci_svs_mtx first is *not*
+ *     allowed.
+ *
+ * (b) pm_kernel_cpus contains the set of CPUs that have the pmap loaded
+ *     in their CR3 register. It must *not* be replaced by pm_cpus.
+ *
+ * (c) When a context switch on the current CPU is made from a user LWP
+ *     towards a kernel LWP, CR3 is not updated. Therefore, the pmap's
+ *     pm_kernel_cpus still contains the current CPU. It implies that the
+ *     remote CPUs that execute other threads of the user process we just
+ *     left will keep synchronizing us against their changes.
+ *
+ * TODO: for now, only PMAP_SLOT_PTE is unmapped.
+ */
+
+void
+cpu_svs_init(struct cpu_info *ci)
+{
+	struct vm_page *pg;
+
+	KASSERT(ci != NULL);
+
+	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+	if (pg == 0)
+		panic("%s: failed to allocate L4 PA for CPU %d\n",
+		    __func__, cpu_index(ci));
+	ci->ci_svs_updirpa = VM_PAGE_TO_PHYS(pg);
+
+	ci->ci_svs_updir = (pt_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
+	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
+	if (ci->ci_svs_updir == NULL)
+		panic("%s: failed to allocate L4 VA for CPU %d\n",
+		    __func__, cpu_index(ci));
+
+	pmap_kenter_pa((vaddr_t)ci->ci_svs_updir, ci->ci_svs_updirpa,
+	    VM_PROT_READ | VM_PROT_WRITE, 0);
+
+	pmap_update(pmap_kernel());
+
+	ci->ci_svs_kpdirpa = pmap_pdirpa(pmap_kernel(), 0);
+
+	mutex_init(&ci->ci_svs_mtx, MUTEX_DEFAULT, IPL_VM);
+}
+
+void
+svs_pmap_sync(struct pmap *pmap, int index)
+{
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+	cpuid_t cid;
+
+	KASSERT(pmap != NULL);
+	KASSERT(pmap != pmap_kernel());
+	KASSERT(mutex_owned(pmap->pm_lock));
+	KASSERT(kpreempt_disabled());
+	KASSERT(index <= 255);
+
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		cid = cpu_index(ci);
+
+		if (!kcpuset_isset(pmap->pm_kernel_cpus, cid)) {
+			continue;
+		}
+
+		/* take the lock and check again */
+		mutex_enter(&ci->ci_svs_mtx);
+		if (kcpuset_isset(pmap->pm_kernel_cpus, cid)) {
+			ci->ci_svs_updir[index] = pmap->pm_pdir[index];
+		}
+		mutex_exit(&ci->ci_svs_mtx);
+	}
+}
+
+void
+svs_lwp_switch(struct lwp *oldlwp, struct lwp *newlwp)
+{
+	/* Switch rsp0 */
+}
+
+static inline pt_entry_t
+svs_pte_atomic_read(struct pmap *pmap, size_t idx)
+{
+	/*
+	 * XXX: We don't have a basic atomic_fetch_64 function?
+	 */
+	return atomic_cas_64(&pmap->pm_pdir[idx], 666, 666);
+}
+
+/*
+ * We may come here with the pmap unlocked. So read its PTEs atomically. If
+ * a remote CPU is updating them at the same time, it's not that bad: the
+ * remote CPU will call svs_pmap_sync afterwards, and our updirpa will be
+ * synchronized properly.
+ */
+void
+svs_pdir_switch(struct pmap *pmap)
+{
+	struct cpu_info *ci = curcpu();
+	pt_entry_t pte;
+	size_t i;
+
+	KASSERT(kpreempt_disabled());
+	KASSERT(pmap != pmap_kernel());
+
+	ci->ci_svs_kpdirpa = pmap_pdirpa(pmap, 0);
+
+	mutex_enter(&ci->ci_svs_mtx);
+
+	for (i = 0; i < 512; i++) {
+		if (i == PDIR_SLOT_PTE) {
+			/* We don't want to have this mapped. */
+			ci->ci_svs_updir[i] = 0;
+		} else {
+			pte = svs_pte_atomic_read(pmap, i);
+			ci->ci_svs_updir[i] = pte;
+		}
+	}
+
+	mutex_exit(&ci->ci_svs_mtx);
+}
+#endif
+
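
Two notes on the code above. svs_pte_atomic_read() uses atomic_cas_64() as
an atomic 64-bit load: a compare-and-swap whose old and new values are equal
(666 is an arbitrary constant) never modifies memory but returns the current
contents atomically. And the core of svs_pdir_switch() is a guarded copy of
the 512 L4 slots; here is a standalone model of that copy, where
PDIR_SLOT_PTE's numeric value is only illustrative and the locking and
atomic reads of the real code are omitted:

	#include <stddef.h>
	#include <stdint.h>

	#define NL4ENTRIES	512	/* entries in an L4 page */
	#define PDIR_SLOT_PTE	255	/* recursive slot; illustrative value */

	typedef uint64_t pt_entry_t;

	/* Populate the per-CPU user L4 page from the pmap's L4. */
	static void
	svs_pdir_switch_model(pt_entry_t *updir, const pt_entry_t *pm_pdir)
	{
		size_t i;

		for (i = 0; i < NL4ENTRIES; i++) {
			if (i == PDIR_SLOT_PTE)
				updir[i] = 0;	/* keep unmapped in userland */
			else
				updir[i] = pm_pdir[i];
		}
	}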

cvs diff -r1.476 -r1.477 src/sys/arch/amd64/conf/GENERIC

--- src/sys/arch/amd64/conf/GENERIC 2017/12/31 03:38:06 1.476
+++ src/sys/arch/amd64/conf/GENERIC 2018/01/07 16:10:16 1.477
@@ -1,38 +1,38 @@
-# $NetBSD: GENERIC,v 1.476 2017/12/31 03:38:06 christos Exp $
+# $NetBSD: GENERIC,v 1.477 2018/01/07 16:10:16 maxv Exp $
 #
 # GENERIC machine description file
 #
 # This machine description file is used to generate the default NetBSD
 # kernel. The generic kernel does not include all options, subsystems
 # and device drivers, but should be useful for most applications.
 #
 # The machine description file can be customised for your specific
 # machine to reduce the kernel size and improve its performance.
 #
 # For further information on compiling NetBSD kernels, see the config(8)
 # man page.
 #
 # For further information on hardware support for this architecture, see
 # the intro(4) man page. For further information about kernel options
 # for this architecture, see the options(4) man page. For an explanation
 # of each device driver in this file see the section 4 man page for the
 # device.
 
 include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident		"GENERIC-$Revision: 1.476 $"
+#ident		"GENERIC-$Revision: 1.477 $"
 
 maxusers	64		# estimated number of users
 
 # delay between "rebooting ..." message and hardware reset, in milliseconds
 #options 	CPURESET_DELAY=2000
 
 # This option allows you to force a serial console at the specified
 # I/O address. see console(4) for details.
 #options 	CONSDEVNAME="\"com\"",CONADDR=0x2f8,CONSPEED=57600
 #	you don't want the option below ON iff you are using the
 #	serial console option of the new boot strap code.
 #options 	CONS_OVERRIDE	# Always use above! independent of boot info
 
@@ -65,26 +65,27 @@ options 	CPU_UCODE	# cpu ucode loading s
 # Note: SysV IPC parameters could be changed dynamically, see sysctl(8).
 options 	SYSVMSG		# System V-like message queues
 options 	SYSVSEM		# System V-like semaphores
 options 	SYSVSHM		# System V-like memory sharing
 
 options 	MODULAR		# new style module(7) framework
 options 	MODULAR_DEFAULT_AUTOLOAD
 options 	USERCONF	# userconf(4) support
 #options 	PIPE_SOCKETPAIR	# smaller, but slower pipe(2)
 options 	SYSCTL_INCLUDE_DESCR	# Include sysctl descriptions in kernel
 
 # CPU-related options
 #options 	USER_LDT	# user-settable LDT; used by WINE
+options 	SVS		# Separate Virtual Space
 
 # CPU features
 acpicpu*	at cpu?		# ACPI CPU (including frequency scaling)
 coretemp*	at cpu?		# Intel on-die thermal sensor
 est0		at cpu0		# Intel Enhanced SpeedStep (non-ACPI)
 #odcm0		at cpu0		# On-demand clock modulation
 powernow0	at cpu0		# AMD PowerNow! and Cool'n'Quiet (non-ACPI)
 vmt0		at cpu0		# VMware Tools
 
 options 	PMC		# performance-monitoring counters support
 
 # Alternate buffer queue strategies for better responsiveness under high
 # disk I/O load.

cvs diff -r1.97 -r1.98 src/sys/arch/amd64/conf/files.amd64

--- src/sys/arch/amd64/conf/files.amd64 2018/01/01 08:14:13 1.97
+++ src/sys/arch/amd64/conf/files.amd64 2018/01/07 16:10:16 1.98
@@ -1,14 +1,14 @@
-#	$NetBSD: files.amd64,v 1.97 2018/01/01 08:14:13 maxv Exp $
+#	$NetBSD: files.amd64,v 1.98 2018/01/07 16:10:16 maxv Exp $
 #
 # new style config file for amd64 architecture
 #
 
 ifndef xen
 
 # maxpartitions must be first item in files.${ARCH}.newconf
 maxpartitions 16
 
 maxusers 2 16 128
 
 # delay before cpu_reset() for reboot.
 defparam		CPURESET_DELAY
@@ -17,26 +17,27 @@ defparam		CPURESET_DELAY
 defparam opt_realmem.h	REALBASEMEM	REALEXTMEM
 
 # The PHYSMEM_MAX_{SIZE,ADDR} optionms
 defparam opt_physmem.h	PHYSMEM_MAX_ADDR PHYSMEM_MAX_SIZE
 
 #
 # XXX these are just here at the moment so that we can share files
 # with the i386 (they include the opt_*.h for these)
 #
 
 defflag			PMC
 defflag			USER_LDT
 defflag			KASLR
+defflag			SVS
 defflag eisa.h		EISA
 
 # Start code
 file	arch/amd64/amd64/locore.S		machdep
 file	arch/amd64/amd64/vector.S		machdep
 file	arch/amd64/amd64/copy.S			machdep
 file	arch/amd64/amd64/spl.S			machdep
 
 file	arch/amd64/amd64/amd64func.S		machdep
 file	arch/amd64/amd64/autoconf.c		machdep
 file	arch/amd64/amd64/busfunc.S		machdep
 file	arch/amd64/amd64/cpu_in_cksum.S		(inet | inet6) & cpu_in_cksum
 file	arch/amd64/amd64/cpufunc.S		machdep

cvs diff -r1.26 -r1.27 src/sys/arch/amd64/include/frameasm.h

--- src/sys/arch/amd64/include/frameasm.h 2018/01/07 13:43:23 1.26
+++ src/sys/arch/amd64/include/frameasm.h 2018/01/07 16:10:16 1.27
@@ -1,20 +1,21 @@
-/*	$NetBSD: frameasm.h,v 1.26 2018/01/07 13:43:23 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.27 2018/01/07 16:10:16 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
 
 #ifdef _KERNEL_OPT
 #include "opt_xen.h"
+#include "opt_svs.h"
 #endif
 
 /*
  * Macros to define pushing/popping frames for interrupts, traps
  * and system calls. Currently all the same; will diverge later.
  */
 
 #ifdef XEN
 #define HYPERVISOR_iret hypercall_page + (__HYPERVISOR_iret * 32)
 /* Xen do not need swapgs, done by hypervisor */
 #define swapgs
 #define iretq	pushq $0 ; jmp HYPERVISOR_iret
 #define	XEN_ONLY2(x,y)	x,y
@@ -85,35 +86,52 @@
 	movq	TF_RCX(%rsp),%rcx	; \
 	movq	TF_R8(%rsp),%r8		; \
 	movq	TF_R9(%rsp),%r9		; \
 	movq	TF_R10(%rsp),%r10	; \
 	movq	TF_R11(%rsp),%r11	; \
 	movq	TF_R12(%rsp),%r12	; \
 	movq	TF_R13(%rsp),%r13	; \
 	movq	TF_R14(%rsp),%r14	; \
 	movq	TF_R15(%rsp),%r15	; \
 	movq	TF_RBP(%rsp),%rbp	; \
 	movq	TF_RBX(%rsp),%rbx	; \
 	movq	TF_RAX(%rsp),%rax
 
+#ifdef SVS
+#define SVS_ENTER \
+	pushq	%rax			; \
+	movq	CPUVAR(KPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	popq	%rax
+#define SVS_LEAVE \
+	pushq	%rax			; \
+	movq	CPUVAR(UPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	popq	%rax
+#else
+#define SVS_ENTER	/* nothing */
+#define SVS_LEAVE	/* nothing */
+#endif
+
 #define	INTRENTRY_L(kernel_trap, usertrap) \
 	subq	$TF_REGSIZE,%rsp	; \
 	INTR_SAVE_GPRS			; \
 	cld				; \
 	SMAP_ENABLE			; \
 	testb	$SEL_UPL,TF_CS(%rsp)	; \
 	je	kernel_trap		; \
 usertrap ; \
 	SWAPGS				; \
+	SVS_ENTER			; \
 	movw	%gs,TF_GS(%rsp)		; \
 	movw	%fs,TF_FS(%rsp)		; \
 	movw	%es,TF_ES(%rsp)		; \
 	movw	%ds,TF_DS(%rsp)
 
 #define	INTRENTRY \
 	INTRENTRY_L(98f,)		; \
 98:
 
 #define INTRFASTEXIT \
 	jmp intrfastexit
 
 #define INTR_RECURSE_HWFRAME \

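Both macros scratch %rax, hence the push/pop pair, and both read a CPUVAR()
field, which is %gs-relative: that is why SVS_ENTER sits after SWAPGS on the
user-trap entry path, while SVS_LEAVE sits before SWAPGS on the exit paths.
Each macro also writes %cr3, which flushes all non-global TLB entries on
every kernel entry and exit; that is the main cost of the scheme. The
ordering, in the C model from the top of this mail:

	/* Entry/exit ordering around the GS base switch (model). */
	static void user_trap_entry(struct svs_cpu *ci)
	{
		/* swapgs: kernel GS base active, CPUVAR() now usable */
		svs_enter(ci);	/* %cr3 <- ci_svs_kpdirpa */
		/* save the segment registers into the trap frame */
	}

	static void user_trap_exit(struct svs_cpu *ci)
	{
		svs_leave(ci);	/* %cr3 <- ci_svs_updirpa, still kernel %gs */
		/* swapgs, then iretq back to userland */
	}
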
cvs diff -r1.40 -r1.41 src/sys/arch/amd64/include/pmap.h

--- src/sys/arch/amd64/include/pmap.h 2017/06/17 08:40:46 1.40
+++ src/sys/arch/amd64/include/pmap.h 2018/01/07 16:10:16 1.41
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.h,v 1.40 2017/06/17 08:40:46 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.41 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -208,26 +208,30 @@
 
 #define PG_W		PG_AVAIL1	/* "wired" mapping */
 #define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
 /* PG_AVAIL3 not used */
 
 #define	PG_X		0		/* XXX dummy */
 
 /*
  * Number of PTE's per cache line. 8 byte pte, 64-byte cache line
  * Used to avoid false sharing of cache lines.
  */
 #define NPTECL		8
 
+void svs_pmap_sync(struct pmap *, int);
+void svs_lwp_switch(struct lwp *, struct lwp *);
+void svs_pdir_switch(struct pmap *);
+
 #include <x86/pmap.h>
 
 #ifndef XEN
 #define pmap_pa2pte(a)		(a)
 #define pmap_pte2pa(a)		((a) & PG_FRAME)
 #define pmap_pte_set(p, n)	do { *(p) = (n); } while (0)
 #define pmap_pte_cas(p, o, n)	atomic_cas_64((p), (o), (n))
 #define pmap_pte_testset(p, n)	\
     atomic_swap_ulong((volatile unsigned long *)p, n)
 #define pmap_pte_setbits(p, b)	\
     atomic_or_ulong((volatile unsigned long *)p, b)
 #define pmap_pte_clearbits(p, b)	\
     atomic_and_ulong((volatile unsigned long *)p, ~(b))

cvs diff -r1.388 -r1.389 src/sys/arch/i386/conf/files.i386

--- src/sys/arch/i386/conf/files.i386 2017/10/08 09:06:50 1.388
+++ src/sys/arch/i386/conf/files.i386 2018/01/07 16:10:16 1.389
@@ -1,35 +1,36 @@
-#	$NetBSD: files.i386,v 1.388 2017/10/08 09:06:50 maxv Exp $
+#	$NetBSD: files.i386,v 1.389 2018/01/07 16:10:16 maxv Exp $
 #
 # new style config file for i386 architecture
 #
 
 ifndef xen
 
 # maxpartitions must be first item in files.${ARCH}.newconf
 maxpartitions 8
 
 maxusers 2 16 128
 
 defparam opt_kernbase.h	KERNBASE
 
 # delay before cpu_reset() for reboot.
 defparam		CPURESET_DELAY
 
 # Obsolete Xbox support
 obsolete defflag	XBOX
 
 defflag			PMC
 defflag			KASLR
+defflag			SVS
 
 # User-settable LDT (used by WINE)
 defflag			USER_LDT
 
 # X server support in console drivers
 defflag	opt_xserver.h	XSERVER XSERVER_DDB
 
 # The REAL{BASE,EXT}MEM options
 defparam opt_realmem.h	REALBASEMEM	REALEXTMEM
 
 # The PHYSMEM_MAX_{SIZE,ADDR} optionms
 defparam opt_physmem.h	PHYSMEM_MAX_ADDR PHYSMEM_MAX_SIZE
 

cvs diff -r1.87 -r1.88 src/sys/arch/x86/include/cpu.h

--- src/sys/arch/x86/include/cpu.h 2018/01/05 08:04:21 1.87
+++ src/sys/arch/x86/include/cpu.h 2018/01/07 16:10:16 1.88
@@ -1,14 +1,14 @@
-/*	$NetBSD: cpu.h,v 1.87 2018/01/05 08:04:21 maxv Exp $	*/
+/*	$NetBSD: cpu.h,v 1.88 2018/01/07 16:10:16 maxv Exp $	*/
 
 /*
  * Copyright (c) 1990 The Regents of the University of California.
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
  * William Jolitz.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -37,26 +37,27 @@
 #ifndef _X86_CPU_H_
 #define _X86_CPU_H_
 
 #if defined(_KERNEL) || defined(_STANDALONE)
 #include <sys/types.h>
 #else
 #include <stdint.h>
 #include <stdbool.h>
 #endif /* _KERNEL || _STANDALONE */
 
 #if defined(_KERNEL) || defined(_KMEMUSER)
 #if defined(_KERNEL_OPT)
 #include "opt_xen.h"
+#include "opt_svs.h"
 #ifdef i386
 #include "opt_user_ldt.h"
 #endif
 #endif
 
 /*
  * Definitions unique to x86 cpu support.
  */
 #include <machine/frame.h>
 #include <machine/pte.h>
 #include <machine/segments.h>
 #include <machine/tss.h>
 #include <machine/intrdefs.h>
@@ -177,26 +178,33 @@ struct cpu_info {
 	 */
 
 	const struct cpu_functions *ci_func;  /* start/stop functions */
 	struct trapframe *ci_ddb_regs;
 
 	u_int ci_cflush_lsize;	/* CLFLUSH insn line size */
 	struct x86_cache_info ci_cinfo[CAI_COUNT];
 
 #ifdef PAE
 	uint32_t	ci_pae_l3_pdirpa; /* PA of L3 PD */
 	pd_entry_t *	ci_pae_l3_pdir; /* VA pointer to L3 PD */
 #endif
 
+#ifdef SVS
+	pd_entry_t *	ci_svs_updir;
+	paddr_t		ci_svs_updirpa;
+	paddr_t		ci_svs_kpdirpa;
+	kmutex_t	ci_svs_mtx;
+#endif
+
 #if defined(XEN) && (defined(PAE) || defined(__x86_64__))
 	/* Currently active user PGD (can't use rcr3() with Xen) */
 	pd_entry_t *	ci_kpm_pdir;	/* per-cpu PMD (va) */
 	paddr_t		ci_kpm_pdirpa;	/* per-cpu PMD (pa) */
 	kmutex_t	ci_kpm_mtx;
 #if defined(__x86_64__)
 	/* per-cpu version of normal_pdes */
 	pd_entry_t *	ci_normal_pdes[3]; /* Ok to hardcode. only for x86_64 && XEN */
 	paddr_t		ci_xen_current_user_pgd;
 #endif	/* __x86_64__ */
 #endif	/* XEN et.al */
 
 #ifdef XEN
@@ -323,26 +331,27 @@ lwp_t *x86_curlwp(void);
 #define	X86_AST_PREEMPT		0x02
 
 #define aston(l, why)		((l)->l_md.md_astpending |= (why))
 #define	cpu_did_resched(l)	((l)->l_md.md_astpending &= ~X86_AST_PREEMPT)
 
 void cpu_boot_secondary_processors(void);
 void cpu_init_idle_lwps(void);
 void cpu_init_msrs(struct cpu_info *, bool);
 void cpu_load_pmap(struct pmap *, struct pmap *);
 void cpu_broadcast_halt(void);
 void cpu_kick(struct cpu_info *);
 
 void cpu_pcpuarea_init(struct cpu_info *);
+void cpu_svs_init(struct cpu_info *);
 
 #define	curcpu()		x86_curcpu()
 #define	curlwp			x86_curlwp()
 #define	curpcb			((struct pcb *)lwp_getpcb(curlwp))
 
 /*
  * Arguments to hardclock, softclock and statclock
 * encapsulate the previous machine state in an opaque
  * clockframe; for now, use generic intrframe.
  */
 struct clockframe {
 	struct intrframe cf_if;
 };

cvs diff -r1.143 -r1.144 src/sys/arch/x86/x86/cpu.c

--- src/sys/arch/x86/x86/cpu.c 2018/01/07 10:16:13 1.143
+++ src/sys/arch/x86/x86/cpu.c 2018/01/07 16:10:16 1.144
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.143 2018/01/07 10:16:13 maxv Exp $ */ 1/* $NetBSD: cpu.c,v 1.144 2018/01/07 16:10:16 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc. 4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran. 8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -52,32 +52,33 @@ @@ -52,32 +52,33 @@
52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE. 61 * SUCH DAMAGE.
62 */ 62 */
63 63
64#include <sys/cdefs.h> 64#include <sys/cdefs.h>
65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.143 2018/01/07 10:16:13 maxv Exp $"); 65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.144 2018/01/07 16:10:16 maxv Exp $");
66 66
67#include "opt_ddb.h" 67#include "opt_ddb.h"
68#include "opt_mpbios.h" /* for MPDEBUG */ 68#include "opt_mpbios.h" /* for MPDEBUG */
69#include "opt_mtrr.h" 69#include "opt_mtrr.h"
70#include "opt_multiprocessor.h" 70#include "opt_multiprocessor.h"
 71#include "opt_svs.h"
71 72
72#include "lapic.h" 73#include "lapic.h"
73#include "ioapic.h" 74#include "ioapic.h"
74 75
75#include <sys/param.h> 76#include <sys/param.h>
76#include <sys/proc.h> 77#include <sys/proc.h>
77#include <sys/systm.h> 78#include <sys/systm.h>
78#include <sys/device.h> 79#include <sys/device.h>
79#include <sys/cpu.h> 80#include <sys/cpu.h>
80#include <sys/cpufreq.h> 81#include <sys/cpufreq.h>
81#include <sys/idle.h> 82#include <sys/idle.h>
82#include <sys/atomic.h> 83#include <sys/atomic.h>
83#include <sys/reboot.h> 84#include <sys/reboot.h>
@@ -369,26 +370,30 @@ cpu_attach(device_t parent, device_t sel @@ -369,26 +370,30 @@ cpu_attach(device_t parent, device_t sel
369 } 370 }
370 371
371 ci->ci_self = ci; 372 ci->ci_self = ci;
372 sc->sc_info = ci; 373 sc->sc_info = ci;
373 ci->ci_dev = self; 374 ci->ci_dev = self;
374 ci->ci_acpiid = caa->cpu_id; 375 ci->ci_acpiid = caa->cpu_id;
375 ci->ci_cpuid = caa->cpu_number; 376 ci->ci_cpuid = caa->cpu_number;
376 ci->ci_func = caa->cpu_func; 377 ci->ci_func = caa->cpu_func;
377 aprint_normal("\n"); 378 aprint_normal("\n");
378 379
379 /* Must be before mi_cpu_attach(). */ 380 /* Must be before mi_cpu_attach(). */
380 cpu_vm_init(ci); 381 cpu_vm_init(ci);
381 382
 383#ifdef SVS
 384 cpu_svs_init(ci);
 385#endif
 386
382 if (caa->cpu_role == CPU_ROLE_AP) { 387 if (caa->cpu_role == CPU_ROLE_AP) {
383 int error; 388 int error;
384 389
385 error = mi_cpu_attach(ci); 390 error = mi_cpu_attach(ci);
386 if (error != 0) { 391 if (error != 0) {
387 aprint_error_dev(self, 392 aprint_error_dev(self,
388 "mi_cpu_attach failed with %d\n", error); 393 "mi_cpu_attach failed with %d\n", error);
389 return; 394 return;
390 } 395 }
391#ifdef __HAVE_PCPU_AREA 396#ifdef __HAVE_PCPU_AREA
392 cpu_pcpuarea_init(ci); 397 cpu_pcpuarea_init(ci);
393#endif 398#endif
394 cpu_init_tss(ci); 399 cpu_init_tss(ci);
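
Note: the new cpu_svs_init() call runs once per CPU during attach, before the
AP is brought up, so each CPU can receive its own private copy of the top-level
page directory. The function body is not part of this diff; below is only a
minimal sketch of the per-CPU state such an init would presumably set up, with
the struct name and layout assumed purely for illustration.

	#include <stdint.h>

	typedef uint64_t pd_entry_t;	/* an x86-64 page-directory entry */
	typedef uint64_t paddr_t;	/* a physical address */

	/*
	 * Assumed per-CPU SVS state (sketch only -- the real fields are
	 * added elsewhere in this commit): a user-visible L4 page
	 * directory, plus its physical address so the return-to-user
	 * path can load it into %cr3 cheaply.
	 */
	struct svs_percpu {
		pd_entry_t	*updir;		/* VA of per-CPU user L4 */
		paddr_t		 updirpa;	/* PA of same, for %cr3 */
	};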
@@ -1238,26 +1243,30 @@ x86_cpu_idle_halt(void) @@ -1238,26 +1243,30 @@ x86_cpu_idle_halt(void)
1238 if (!__predict_false(ci->ci_want_resched)) { 1243 if (!__predict_false(ci->ci_want_resched)) {
1239 x86_stihlt(); 1244 x86_stihlt();
1240 } else { 1245 } else {
1241 x86_enable_intr(); 1246 x86_enable_intr();
1242 } 1247 }
1243} 1248}
1244 1249
1245/* 1250/*
1246 * Loads pmap for the current CPU. 1251 * Loads pmap for the current CPU.
1247 */ 1252 */
1248void 1253void
1249cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap) 1254cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
1250{ 1255{
 1256#ifdef SVS
 1257 svs_pdir_switch(pmap);
 1258#endif
 1259
1251#ifdef PAE 1260#ifdef PAE
1252 struct cpu_info *ci = curcpu(); 1261 struct cpu_info *ci = curcpu();
1253 bool interrupts_enabled; 1262 bool interrupts_enabled;
1254 pd_entry_t *l3_pd = ci->ci_pae_l3_pdir; 1263 pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
1255 int i; 1264 int i;
1256 1265
1257 /* 1266 /*
1258 * disable interrupts to block TLB shootdowns, which can reload cr3. 1267 * disable interrupts to block TLB shootdowns, which can reload cr3.
1259 * while this doesn't block NMIs, it's probably ok as NMIs are 1268 * while this doesn't block NMIs, it's probably ok as NMIs are
1260 * unlikely to reload cr3. 1269 * unlikely to reload cr3.
1261 */ 1270 */
1262 interrupts_enabled = (x86_read_flags() & PSL_I) != 0; 1271 interrupts_enabled = (x86_read_flags() & PSL_I) != 0;
1263 if (interrupts_enabled) 1272 if (interrupts_enabled)

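Note: svs_pdir_switch() runs on every pmap activation, before the existing
PAE/%cr3 handling. Its body is not shown in this diff; conceptually it must
refresh this CPU's private user page directory from the incoming pmap. A
standalone sketch of that refresh, with all names assumed -- per the commit
message, only the recursive PTE area is left unmapped for now:

	#include <stdint.h>
	#include <string.h>

	#define NL4ENTRIES	512	/* entries in an x86-64 L4 table */

	typedef uint64_t pd_entry_t;

	/*
	 * Sketch (not the committed code): copy the incoming pmap's L4
	 * into this CPU's private user directory, but leave the
	 * recursive PTE-space slot empty so the page-table pages are
	 * not mapped while userland runs.
	 */
	static void
	svs_pdir_switch_sketch(pd_entry_t *cpu_updir,
	    const pd_entry_t *pm_pdir, int pte_slot)
	{
		memcpy(cpu_updir, pm_pdir, NL4ENTRIES * sizeof(pd_entry_t));
		cpu_updir[pte_slot] = 0;	/* PTE area stays unmapped */
	}

The existing cr3 load that follows would then use the per-CPU directory's
physical address rather than the pmap's own.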
cvs diff -r1.277 -r1.278 src/sys/arch/x86/x86/pmap.c (expand / switch to unified diff)

--- src/sys/arch/x86/x86/pmap.c 2018/01/05 09:13:48 1.277
+++ src/sys/arch/x86/x86/pmap.c 2018/01/07 16:10:16 1.278
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.277 2018/01/05 09:13:48 martin Exp $ */ 1/* $NetBSD: pmap.c,v 1.278 2018/01/07 16:10:16 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran, and by Maxime Villard. 8 * by Andrew Doran, and by Maxime Villard.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -160,32 +160,33 @@ @@ -160,32 +160,33 @@
160 * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson 160 * Hibler/Jolitz pmap, as modified for FreeBSD by John S. Dyson
161 * and David Greenman. 161 * and David Greenman.
162 * 162 *
163 * [3] the Mach pmap. this pmap, from CMU, seems to have migrated 163 * [3] the Mach pmap. this pmap, from CMU, seems to have migrated
164 * between several processors. the VAX version was done by 164 * between several processors. the VAX version was done by
165 * Avadis Tevanian, Jr., and Michael Wayne Young. the i386 165 * Avadis Tevanian, Jr., and Michael Wayne Young. the i386
166 * version was done by Lance Berc, Mike Kupfer, Bob Baron, 166 * version was done by Lance Berc, Mike Kupfer, Bob Baron,
167 * David Golub, and Richard Draves. the alpha version was 167 * David Golub, and Richard Draves. the alpha version was
168 * done by Alessandro Forin (CMU/Mach) and Chris Demetriou 168 * done by Alessandro Forin (CMU/Mach) and Chris Demetriou
169 * (NetBSD/alpha). 169 * (NetBSD/alpha).
170 */ 170 */
171 171
172#include <sys/cdefs.h> 172#include <sys/cdefs.h>
173__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.277 2018/01/05 09:13:48 martin Exp $"); 173__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2018/01/07 16:10:16 maxv Exp $");
174 174
175#include "opt_user_ldt.h" 175#include "opt_user_ldt.h"
176#include "opt_lockdebug.h" 176#include "opt_lockdebug.h"
177#include "opt_multiprocessor.h" 177#include "opt_multiprocessor.h"
178#include "opt_xen.h" 178#include "opt_xen.h"
 179#include "opt_svs.h"
179 180
180#include <sys/param.h> 181#include <sys/param.h>
181#include <sys/systm.h> 182#include <sys/systm.h>
182#include <sys/proc.h> 183#include <sys/proc.h>
183#include <sys/pool.h> 184#include <sys/pool.h>
184#include <sys/kernel.h> 185#include <sys/kernel.h>
185#include <sys/atomic.h> 186#include <sys/atomic.h>
186#include <sys/cpu.h> 187#include <sys/cpu.h>
187#include <sys/intr.h> 188#include <sys/intr.h>
188#include <sys/xcall.h> 189#include <sys/xcall.h>
189#include <sys/kcore.h> 190#include <sys/kcore.h>
190 191
191#include <uvm/uvm.h> 192#include <uvm/uvm.h>
@@ -548,29 +549,31 @@ extern int end; @@ -548,29 +549,31 @@ extern int end;
548extern vaddr_t pentium_idt_vaddr; 549extern vaddr_t pentium_idt_vaddr;
549#endif 550#endif
550 551
551/* 552/*
552 * Local prototypes 553 * Local prototypes
553 */ 554 */
554 555
555#ifdef __HAVE_PCPU_AREA 556#ifdef __HAVE_PCPU_AREA
556static void pmap_init_pcpu(void); 557static void pmap_init_pcpu(void);
557#endif 558#endif
558#ifdef __HAVE_DIRECT_MAP 559#ifdef __HAVE_DIRECT_MAP
559static void pmap_init_directmap(struct pmap *); 560static void pmap_init_directmap(struct pmap *);
560#endif 561#endif
 562#if !defined(XEN) && !defined(SVS)
 563static void pmap_remap_global(void);
 564#endif
561#ifndef XEN 565#ifndef XEN
562static void pmap_init_lapic(void); 566static void pmap_init_lapic(void);
563static void pmap_remap_global(void); 
564static void pmap_remap_largepages(void); 567static void pmap_remap_largepages(void);
565#endif 568#endif
566 569
567static struct vm_page *pmap_get_ptp(struct pmap *, vaddr_t, 570static struct vm_page *pmap_get_ptp(struct pmap *, vaddr_t,
568 pd_entry_t * const *, int); 571 pd_entry_t * const *, int);
569static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int); 572static struct vm_page *pmap_find_ptp(struct pmap *, vaddr_t, paddr_t, int);
570static void pmap_freepage(struct pmap *, struct vm_page *, int); 573static void pmap_freepage(struct pmap *, struct vm_page *, int);
571static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t, 574static void pmap_free_ptp(struct pmap *, struct vm_page *, vaddr_t,
572 pt_entry_t *, pd_entry_t * const *); 575 pt_entry_t *, pd_entry_t * const *);
573static bool pmap_remove_pte(struct pmap *, struct vm_page *, pt_entry_t *, 576static bool pmap_remove_pte(struct pmap *, struct vm_page *, pt_entry_t *,
574 vaddr_t, struct pv_entry **); 577 vaddr_t, struct pv_entry **);
575static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t, 578static void pmap_remove_ptes(struct pmap *, struct vm_page *, vaddr_t, vaddr_t,
576 vaddr_t, struct pv_entry **); 579 vaddr_t, struct pv_entry **);
@@ -1276,40 +1279,42 @@ pmap_bootstrap(vaddr_t kva_start) @@ -1276,40 +1279,42 @@ pmap_bootstrap(vaddr_t kva_start)
1276 1279
1277 kcpuset_create(&kpm->pm_cpus, true); 1280 kcpuset_create(&kpm->pm_cpus, true);
1278 kcpuset_create(&kpm->pm_kernel_cpus, true); 1281 kcpuset_create(&kpm->pm_kernel_cpus, true);
1279 1282
1280 kpm->pm_ldt = NULL; 1283 kpm->pm_ldt = NULL;
1281 kpm->pm_ldt_len = 0; 1284 kpm->pm_ldt_len = 0;
1282 kpm->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL); 1285 kpm->pm_ldt_sel = GSYSSEL(GLDT_SEL, SEL_KPL);
1283 1286
1284 /* 1287 /*
1285 * the above is just a rough estimate and not critical to the proper 1288 * the above is just a rough estimate and not critical to the proper
1286 * operation of the system. 1289 * operation of the system.
1287 */ 1290 */
1288 1291
1289#ifndef XEN 1292#if !defined(XEN) && !defined(SVS)
1290 /* 1293 /*
1291 * Begin to enable global TLB entries if they are supported. 1294 * Begin to enable global TLB entries if they are supported.
1292 * The G bit has no effect until the CR4_PGE bit is set in CR4, 1295 * The G bit has no effect until the CR4_PGE bit is set in CR4,
1293 * which happens in cpu_init(), which is run on each cpu 1296 * which happens in cpu_init(), which is run on each cpu
1294 * (and happens later) 1297 * (and happens later)
1295 */ 1298 */
1296 if (cpu_feature[0] & CPUID_PGE) { 1299 if (cpu_feature[0] & CPUID_PGE) {
1297 pmap_pg_g = PG_G; /* enable software */ 1300 pmap_pg_g = PG_G; /* enable software */
1298 1301
1299 /* add PG_G attribute to already mapped kernel pages */ 1302 /* add PG_G attribute to already mapped kernel pages */
1300 pmap_remap_global(); 1303 pmap_remap_global();
1301 } 1304 }
 1305#endif
1302 1306
 1307#ifndef XEN
1303 /* 1308 /*
1304 * Enable large pages if they are supported. 1309 * Enable large pages if they are supported.
1305 */ 1310 */
1306 if (cpu_feature[0] & CPUID_PSE) { 1311 if (cpu_feature[0] & CPUID_PSE) {
1307 lcr4(rcr4() | CR4_PSE); /* enable hardware (via %cr4) */ 1312 lcr4(rcr4() | CR4_PSE); /* enable hardware (via %cr4) */
1308 pmap_largepages = 1; /* enable software */ 1313 pmap_largepages = 1; /* enable software */
1309 1314
1310 /* 1315 /*
1311 * The TLB must be flushed after enabling large pages on Pentium 1316 * The TLB must be flushed after enabling large pages on Pentium
1312 * CPUs, according to section 3.6.2.2 of "Intel Architecture 1317 * CPUs, according to section 3.6.2.2 of "Intel Architecture
1313 * Software Developer's Manual, Volume 3: System Programming". 1318 * Software Developer's Manual, Volume 3: System Programming".
1314 */ 1319 */
1315 tlbflushg(); 1320 tlbflushg();
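
Note: the PG_G exclusion is the key functional change in this hunk. Global TLB
entries are not flushed when %cr3 is reloaded, so if kernel pages kept PG_G
their translations would survive the SVS page-directory switch and stay
reachable while userland runs, defeating the unmapping. A compilable
illustration of the resulting boot-time decision (the constants match the
architectural bit positions; svs_enabled stands in for the SVS -- or, in the
real guard, Xen -- kernel option):

	#include <stdint.h>

	#define CPUID_PGE	0x00002000	/* CPUID 1:%edx bit 13 */
	#define PG_G		0x00000100	/* PTE bit 8, global */

	/*
	 * Sketch of the decision above: PG_G is used only when the CPU
	 * advertises it and SVS is not compiled in, because a global
	 * translation survives %cr3 reloads.
	 */
	static uint64_t
	choose_pg_g(uint32_t feat_edx, int svs_enabled)
	{
		if (svs_enabled)
			return 0;
		return (feat_edx & CPUID_PGE) ? PG_G : 0;
	}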
@@ -1638,27 +1643,27 @@ pmap_init_directmap(struct pmap *kpm) @@ -1638,27 +1643,27 @@ pmap_init_directmap(struct pmap *kpm)
1638 1643
1639 *pte = 0; 1644 *pte = 0;
1640 pmap_update_pg(tmpva); 1645 pmap_update_pg(tmpva);
1641 1646
1642 pmap_direct_base = startva; 1647 pmap_direct_base = startva;
1643 pmap_direct_end = endva; 1648 pmap_direct_end = endva;
1644 pmap_direct_pdpe = L4e_idx; 1649 pmap_direct_pdpe = L4e_idx;
1645 pmap_direct_npdp = nL4e; 1650 pmap_direct_npdp = nL4e;
1646 1651
1647 tlbflush(); 1652 tlbflush();
1648} 1653}
1649#endif /* __HAVE_DIRECT_MAP */ 1654#endif /* __HAVE_DIRECT_MAP */
1650 1655
1651#ifndef XEN 1656#if !defined(XEN) && !defined(SVS)
1652/* 1657/*
1653 * Remap all of the virtual pages created so far with the PG_G bit. 1658 * Remap all of the virtual pages created so far with the PG_G bit.
1654 */ 1659 */
1655static void 1660static void
1656pmap_remap_global(void) 1661pmap_remap_global(void)
1657{ 1662{
1658 vaddr_t kva, kva_end; 1663 vaddr_t kva, kva_end;
1659 unsigned long p1i; 1664 unsigned long p1i;
1660 size_t i; 1665 size_t i;
1661 1666
1662 /* head */ 1667 /* head */
1663 kva = bootspace.head.va; 1668 kva = bootspace.head.va;
1664 kva_end = kva + bootspace.head.sz; 1669 kva_end = kva + bootspace.head.sz;
@@ -1681,27 +1686,29 @@ pmap_remap_global(void) @@ -1681,27 +1686,29 @@ pmap_remap_global(void)
1681 PTE_BASE[p1i] |= PG_G; 1686 PTE_BASE[p1i] |= PG_G;
1682 } 1687 }
1683 } 1688 }
1684 1689
1685 /* boot space */ 1690 /* boot space */
1686 kva = bootspace.boot.va; 1691 kva = bootspace.boot.va;
1687 kva_end = kva + bootspace.boot.sz; 1692 kva_end = kva + bootspace.boot.sz;
1688 for ( ; kva < kva_end; kva += PAGE_SIZE) { 1693 for ( ; kva < kva_end; kva += PAGE_SIZE) {
1689 p1i = pl1_i(kva); 1694 p1i = pl1_i(kva);
1690 if (pmap_valid_entry(PTE_BASE[p1i])) 1695 if (pmap_valid_entry(PTE_BASE[p1i]))
1691 PTE_BASE[p1i] |= PG_G; 1696 PTE_BASE[p1i] |= PG_G;
1692 } 1697 }
1693} 1698}
 1699#endif
1694 1700
 1701#ifndef XEN
1695/* 1702/*
1696 * Remap several kernel segments with large pages. We cover as many pages as we 1703 * Remap several kernel segments with large pages. We cover as many pages as we
1697 * can. Called only once at boot time, if the CPU supports large pages. 1704 * can. Called only once at boot time, if the CPU supports large pages.
1698 */ 1705 */
1699static void 1706static void
1700pmap_remap_largepages(void) 1707pmap_remap_largepages(void)
1701{ 1708{
1702 pd_entry_t *pde; 1709 pd_entry_t *pde;
1703 vaddr_t kva, kva_end; 1710 vaddr_t kva, kva_end;
1704 paddr_t pa; 1711 paddr_t pa;
1705 size_t i; 1712 size_t i;
1706 1713
1707 /* Remap the kernel text using large pages. */ 1714 /* Remap the kernel text using large pages. */
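
Note: pmap_remap_global() itself is unchanged here; it merely moves under the
new !XEN && !SVS guard. Each bootspace segment is walked page by page and PG_G
is OR-ed into every valid PTE. A self-contained model of that per-segment loop
(indexing a flat array stands in for PTE_BASE[pl1_i(kva)] and is a
simplification, not the kernel API):

	#include <stdint.h>
	#include <stddef.h>

	#define PAGE_SIZE	4096
	#define PG_V		0x001	/* entry is valid */
	#define PG_G		0x100	/* global translation */

	typedef uint64_t pt_entry_t;

	/* Mark every valid page in [va, va + sz) global. */
	static void
	remap_global_range(pt_entry_t *ptes, uintptr_t va, size_t sz)
	{
		uintptr_t kva;

		for (kva = va; kva < va + sz; kva += PAGE_SIZE) {
			pt_entry_t *pte = &ptes[kva / PAGE_SIZE];
			if (*pte & PG_V)
				*pte |= PG_G;
		}
	}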
@@ -2103,33 +2110,37 @@ pmap_free_ptp(struct pmap *pmap, struct  @@ -2103,33 +2110,37 @@ pmap_free_ptp(struct pmap *pmap, struct
2103 vaddr_t invaladdr; 2110 vaddr_t invaladdr;
2104 pd_entry_t opde; 2111 pd_entry_t opde;
2105 2112
2106 KASSERT(pmap != pmap_kernel()); 2113 KASSERT(pmap != pmap_kernel());
2107 KASSERT(mutex_owned(pmap->pm_lock)); 2114 KASSERT(mutex_owned(pmap->pm_lock));
2108 KASSERT(kpreempt_disabled()); 2115 KASSERT(kpreempt_disabled());
2109 2116
2110 level = 1; 2117 level = 1;
2111 do { 2118 do {
2112 index = pl_i(va, level + 1); 2119 index = pl_i(va, level + 1);
2113 opde = pmap_pte_testset(&pdes[level - 1][index], 0); 2120 opde = pmap_pte_testset(&pdes[level - 1][index], 0);
2114 2121
2115 /* 2122 /*
2116 * On Xen-amd64, we need to sync the top level page 2123 * On Xen-amd64 or SVS, we need to sync the top level page
2117 * directory on each CPU. 2124 * directory on each CPU.
2118 */ 2125 */
2119#if defined(XEN) && defined(__x86_64__) 2126#if defined(XEN) && defined(__x86_64__)
2120 if (level == PTP_LEVELS - 1) { 2127 if (level == PTP_LEVELS - 1) {
2121 xen_kpm_sync(pmap, index); 2128 xen_kpm_sync(pmap, index);
2122 } 2129 }
 2130#elif defined(SVS)
 2131 if (level == PTP_LEVELS - 1) {
 2132 svs_pmap_sync(pmap, index);
 2133 }
2123#endif 2134#endif
2124 2135
2125 invaladdr = level == 1 ? (vaddr_t)ptes : 2136 invaladdr = level == 1 ? (vaddr_t)ptes :
2126 (vaddr_t)pdes[level - 2]; 2137 (vaddr_t)pdes[level - 2];
2127 pmap_tlb_shootdown(pmap, invaladdr + index * PAGE_SIZE, 2138 pmap_tlb_shootdown(pmap, invaladdr + index * PAGE_SIZE,
2128 opde, TLBSHOOT_FREE_PTP1); 2139 opde, TLBSHOOT_FREE_PTP1);
2129 2140
2130#if defined(XEN) 2141#if defined(XEN)
2131 pmap_tlb_shootnow(); 2142 pmap_tlb_shootnow();
2132#endif 2143#endif
2133 2144
2134 pmap_freepage(pmap, ptp, level); 2145 pmap_freepage(pmap, ptp, level);
2135 if (level < PTP_LEVELS - 1) { 2146 if (level < PTP_LEVELS - 1) {
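
Note: as with xen_kpm_sync(), the new svs_pmap_sync() call covers the case
where a top-level (L4) entry of a user pmap is torn down: every CPU holding a
private copy of that pmap's L4 must drop the stale slot, or its user directory
would keep pointing at a freed page-table page. svs_pmap_sync() is defined
elsewhere in this commit; what follows is only a standalone sketch of the
invariant it maintains, with all structure names assumed:

	#include <stdint.h>
	#include <stddef.h>

	typedef uint64_t pd_entry_t;

	struct cpu_sketch {			/* stand-in for struct cpu_info */
		pd_entry_t	 updir[512];	/* private user L4 (assumed) */
		const void	*curpmap;	/* pmap currently loaded here */
	};

	/*
	 * When pm_pdir[index] changes, refresh that slot in the user
	 * directory of every CPU running 'pmap', so the per-CPU copies
	 * never diverge from the pmap's own top-level table.
	 */
	static void
	sync_l4_slot(struct cpu_sketch *cpus, size_t ncpu, const void *pmap,
	    const pd_entry_t *pm_pdir, int index)
	{
		size_t i;

		for (i = 0; i < ncpu; i++) {
			if (cpus[i].curpmap == pmap)
				cpus[i].updir[index] = pm_pdir[index];
		}
	}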
@@ -2204,33 +2215,37 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t  @@ -2204,33 +2215,37 @@ pmap_get_ptp(struct pmap *pmap, vaddr_t
2204 KASSERT(!pt[i].new); 2215 KASSERT(!pt[i].new);
2205 continue; 2216 continue;
2206 } 2217 }
2207 2218
2208 ptp = pt[i].pg; 2219 ptp = pt[i].pg;
2209 ptp->flags &= ~PG_BUSY; /* never busy */ 2220 ptp->flags &= ~PG_BUSY; /* never busy */
2210 ptp->wire_count = 1; 2221 ptp->wire_count = 1;
2211 pmap->pm_ptphint[i - 2] = ptp; 2222 pmap->pm_ptphint[i - 2] = ptp;
2212 pa = VM_PAGE_TO_PHYS(ptp); 2223 pa = VM_PAGE_TO_PHYS(ptp);
2213 pmap_pte_set(&pva[index], (pd_entry_t) 2224 pmap_pte_set(&pva[index], (pd_entry_t)
2214 (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V)); 2225 (pmap_pa2pte(pa) | PG_u | PG_RW | PG_V));
2215 2226
2216 /* 2227 /*
2217 * On Xen-amd64, we need to sync the top level page 2228 * On Xen-amd64 or SVS, we need to sync the top level page
2218 * directory on each CPU. 2229 * directory on each CPU.
2219 */ 2230 */
2220#if defined(XEN) && defined(__x86_64__) 2231#if defined(XEN) && defined(__x86_64__)
2221 if (i == PTP_LEVELS) { 2232 if (i == PTP_LEVELS) {
2222 xen_kpm_sync(pmap, index); 2233 xen_kpm_sync(pmap, index);
2223 } 2234 }
 2235#elif defined(SVS)
 2236 if (i == PTP_LEVELS) {
 2237 svs_pmap_sync(pmap, index);
 2238 }
2224#endif 2239#endif
2225 2240
2226 pmap_pte_flush(); 2241 pmap_pte_flush();
2227 pmap_stats_update(pmap, 1, 0); 2242 pmap_stats_update(pmap, 1, 0);
2228 2243
2229 /* 2244 /*
2230 * If we're not in the top level, increase the 2245 * If we're not in the top level, increase the
2231 * wire count of the parent page. 2246 * wire count of the parent page.
2232 */ 2247 */
2233 if (i < PTP_LEVELS) { 2248 if (i < PTP_LEVELS) {
2234 pt[i + 1].pg->wire_count++; 2249 pt[i + 1].pg->wire_count++;
2235 } 2250 }
2236 } 2251 }
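
Note: the pmap_get_ptp() hunk is symmetric -- a freshly installed top-level
entry must be propagated to the per-CPU user directories just as a removed one
must be. Only the L4 level needs this treatment: lower-level PTPs are reached
through the shared top-level entry. The entry being installed is an ordinary
page-directory entry, composed from the PTP's physical address plus
PG_u | PG_RW | PG_V (PG_u is required because the hardware walk checks the
user bit at every level). For reference, a compilable demonstration of that
composition, using an arbitrary sample address:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PG_V		0x001ULL	/* valid */
	#define PG_RW		0x002ULL	/* writable */
	#define PG_u		0x004ULL	/* user-accessible */
	#define PG_FRAME	0x000ffffffffff000ULL	/* PTE address bits */

	int
	main(void)
	{
		uint64_t pa = 0x12345000ULL;	/* arbitrary PTP address */
		uint64_t pde = (pa & PG_FRAME) | PG_u | PG_RW | PG_V;

		/* prints: pde = 0x0000000012345007 */
		printf("pde = 0x%016" PRIx64 "\n", pde);
		return 0;
	}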