Wed Jun 24 18:09:37 2020 UTC
remove unused x86_stos


(maxv)
diff -r1.62 -r1.63 src/sys/arch/amd64/amd64/cpufunc.S
diff -r1.47 -r1.48 src/sys/arch/i386/i386/cpufunc.S
diff -r1.122 -r1.123 src/sys/arch/x86/include/pmap.h
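
x86_stos was a thin wrapper around "rep stos": it filled a buffer with a
repeated machine word (stosq on amd64, stosl on i386). Nothing in the tree
calls it anymore, so both assembly implementations are dropped below; the
companion x86_movs, the "rep movs" copy helper, stays. As a rough guide to
the removed amd64 semantics, here is a hypothetical C equivalent (the name,
prototype and header choices are illustrative, not taken from the tree):

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Fill "cnt" 64-bit words at "dst" with "val". Mirrors the old
	 * rep stosq body: %rdi = dst, %rsi = val, %rdx = count.
	 */
	static inline void
	x86_stos_equiv(uint64_t *dst, uint64_t val, size_t cnt)
	{
		size_t i;

		for (i = 0; i < cnt; i++)
			dst[i] = val;
	}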

cvs diff -r1.62 -r1.63 src/sys/arch/amd64/amd64/cpufunc.S

--- src/sys/arch/amd64/amd64/cpufunc.S 2020/06/15 20:27:30 1.62
+++ src/sys/arch/amd64/amd64/cpufunc.S 2020/06/24 18:09:37 1.63
@@ -1,464 +1,455 @@
1/* $NetBSD: cpufunc.S,v 1.62 2020/06/15 20:27:30 riastradh Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.63 2020/06/24 18:09:37 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/errno.h> 32#include <sys/errno.h>
33 33
34#include <machine/asm.h> 34#include <machine/asm.h>
35#include <machine/frameasm.h> 35#include <machine/frameasm.h>
36#include <machine/specialreg.h> 36#include <machine/specialreg.h>
37#include <machine/segments.h> 37#include <machine/segments.h>
38 38
39#include "opt_xen.h" 39#include "opt_xen.h"
40#include "opt_svs.h" 40#include "opt_svs.h"
41 41
42#include "assym.h" 42#include "assym.h"
43 43
44/* Small and slow, so align less. */ 44/* Small and slow, so align less. */
45#undef _ALIGN_TEXT 45#undef _ALIGN_TEXT
46#define _ALIGN_TEXT .align 8 46#define _ALIGN_TEXT .align 8
47 47
48ENTRY(x86_lfence) 48ENTRY(x86_lfence)
49 lfence 49 lfence
50 ret 50 ret
51END(x86_lfence) 51END(x86_lfence)
52 52
53ENTRY(x86_sfence) 53ENTRY(x86_sfence)
54 sfence 54 sfence
55 ret 55 ret
56END(x86_sfence) 56END(x86_sfence)
57 57
58ENTRY(x86_mfence) 58ENTRY(x86_mfence)
59 mfence 59 mfence
60 ret 60 ret
61END(x86_mfence) 61END(x86_mfence)
62 62
63#ifndef XENPV 63#ifndef XENPV
64ENTRY(invlpg) 64ENTRY(invlpg)
65#ifdef SVS 65#ifdef SVS
66 movb _C_LABEL(svs_pcid),%al 66 movb _C_LABEL(svs_pcid),%al
67 testb %al,%al 67 testb %al,%al
68 jz 1f 68 jz 1f
69 pushq %rdi 69 pushq %rdi
70 pushq $PMAP_PCID_USER 70 pushq $PMAP_PCID_USER
71 movq $INVPCID_ADDRESS,%rax 71 movq $INVPCID_ADDRESS,%rax
72 invpcid (%rsp),%rax 72 invpcid (%rsp),%rax
73 addq $16,%rsp 73 addq $16,%rsp
741: /* FALLTHROUGH */ 741: /* FALLTHROUGH */
75#endif 75#endif
76 invlpg (%rdi) 76 invlpg (%rdi)
77 ret 77 ret
78END(invlpg) 78END(invlpg)
79 79
80ENTRY(lgdt) 80ENTRY(lgdt)
81 /* Reload the descriptor table. */ 81 /* Reload the descriptor table. */
82 movq %rdi,%rax 82 movq %rdi,%rax
83 lgdt (%rax) 83 lgdt (%rax)
84 /* Flush the prefetch queue. */ 84 /* Flush the prefetch queue. */
85 jmp 1f 85 jmp 1f
86 nop 86 nop
871: jmp _C_LABEL(lgdt_finish) 871: jmp _C_LABEL(lgdt_finish)
88END(lgdt) 88END(lgdt)
89 89
90ENTRY(lidt) 90ENTRY(lidt)
91 lidt (%rdi) 91 lidt (%rdi)
92 ret 92 ret
93END(lidt) 93END(lidt)
94 94
95ENTRY(lldt) 95ENTRY(lldt)
96 cmpl %edi, CPUVAR(CURLDT) 96 cmpl %edi, CPUVAR(CURLDT)
97 jne 1f 97 jne 1f
98 ret 98 ret
991: 991:
100 movl %edi, CPUVAR(CURLDT) 100 movl %edi, CPUVAR(CURLDT)
101 lldt %di 101 lldt %di
102 ret 102 ret
103END(lldt) 103END(lldt)
104 104
105ENTRY(ltr) 105ENTRY(ltr)
106 ltr %di 106 ltr %di
107 ret 107 ret
108END(ltr) 108END(ltr)
109 109
110ENTRY(tlbflushg) 110ENTRY(tlbflushg)
111 movq %cr4, %rax 111 movq %cr4, %rax
112 testq $CR4_PGE, %rax 112 testq $CR4_PGE, %rax
113 jz tlbflush 113 jz tlbflush
114 movq %rax, %rdx 114 movq %rax, %rdx
115 andq $~CR4_PGE, %rdx 115 andq $~CR4_PGE, %rdx
116 movq %rdx, %cr4 116 movq %rdx, %cr4
117 movq %rax, %cr4 117 movq %rax, %cr4
118 ret 118 ret
119END(tlbflushg) 119END(tlbflushg)
120 120
121ENTRY(tlbflush) 121ENTRY(tlbflush)
122#ifdef SVS 122#ifdef SVS
123 movb _C_LABEL(svs_pcid),%al 123 movb _C_LABEL(svs_pcid),%al
124 testb %al,%al 124 testb %al,%al
125 jz 1f 125 jz 1f
126 xorq %rax,%rax 126 xorq %rax,%rax
127 pushq %rax 127 pushq %rax
128 pushq %rax 128 pushq %rax
129 movq $INVPCID_ALL_NONGLOBAL,%rax 129 movq $INVPCID_ALL_NONGLOBAL,%rax
130 invpcid (%rsp),%rax 130 invpcid (%rsp),%rax
131 addq $16,%rsp 131 addq $16,%rsp
132 ret 132 ret
133#endif 133#endif
1341: movq %cr3, %rax 1341: movq %cr3, %rax
135 movq %rax, %cr3 135 movq %rax, %cr3
136 ret 136 ret
137END(tlbflush) 137END(tlbflush)
138 138
139ENTRY(wbinvd) 139ENTRY(wbinvd)
140 wbinvd 140 wbinvd
141 ret 141 ret
142END(wbinvd) 142END(wbinvd)
143 143
144ENTRY(setusergs) 144ENTRY(setusergs)
145 CLI(ax) 145 CLI(ax)
146 swapgs 146 swapgs
147 movw %di, %gs 147 movw %di, %gs
148 swapgs 148 swapgs
149 STI(ax) 149 STI(ax)
150 ret 150 ret
151END(setusergs) 151END(setusergs)
152 152
153ENTRY(x86_read_flags) 153ENTRY(x86_read_flags)
154 pushfq 154 pushfq
155 popq %rax 155 popq %rax
156 KMSAN_INIT_RET(8) 156 KMSAN_INIT_RET(8)
157 ret 157 ret
158END(x86_read_flags) 158END(x86_read_flags)
159 159
160STRONG_ALIAS(x86_read_psl,x86_read_flags) 160STRONG_ALIAS(x86_read_psl,x86_read_flags)
161 161
162ENTRY(x86_write_flags) 162ENTRY(x86_write_flags)
163 pushq %rdi 163 pushq %rdi
164 popfq 164 popfq
165 ret 165 ret
166END(x86_write_flags) 166END(x86_write_flags)
167 167
168STRONG_ALIAS(x86_write_psl,x86_write_flags) 168STRONG_ALIAS(x86_write_psl,x86_write_flags)
169 169
170/* 170/*
171 * %rdi = name 171 * %rdi = name
172 * %rsi = sel 172 * %rsi = sel
173 */ 173 */
174ENTRY(x86_hotpatch) 174ENTRY(x86_hotpatch)
175 /* save RFLAGS, and disable intrs */ 175 /* save RFLAGS, and disable intrs */
176 pushfq 176 pushfq
177 cli 177 cli
178 178
179 /* save CR0, and disable WP */ 179 /* save CR0, and disable WP */
180 movq %cr0,%rcx 180 movq %cr0,%rcx
181 pushq %rcx 181 pushq %rcx
182 andq $~CR0_WP,%rcx 182 andq $~CR0_WP,%rcx
183 movq %rcx,%cr0 183 movq %rcx,%cr0
184 184
185 callq _C_LABEL(x86_hotpatch_apply) 185 callq _C_LABEL(x86_hotpatch_apply)
186 186
187 /* write back and invalidate cache */ 187 /* write back and invalidate cache */
188 wbinvd 188 wbinvd
189 189
190 /* restore CR0 */ 190 /* restore CR0 */
191 popq %rcx 191 popq %rcx
192 movq %rcx,%cr0 192 movq %rcx,%cr0
193 193
194 /* flush instruction pipeline */ 194 /* flush instruction pipeline */
195 pushq %rax 195 pushq %rax
196 callq x86_flush 196 callq x86_flush
197 popq %rax 197 popq %rax
198 198
199 /* clean up */ 199 /* clean up */
200 movq %rax,%rdi 200 movq %rax,%rdi
201 callq _C_LABEL(x86_hotpatch_cleanup) 201 callq _C_LABEL(x86_hotpatch_cleanup)
202 202
203 /* restore RFLAGS */ 203 /* restore RFLAGS */
204 popfq 204 popfq
205 ret 205 ret
206END(x86_hotpatch) 206END(x86_hotpatch)
207#endif /* !XENPV */ 207#endif /* !XENPV */
208 208
209/* 209/*
210 * cpu_counter and cpu_counter32 could be exact same, but KMSAN needs to have 210 * cpu_counter and cpu_counter32 could be exact same, but KMSAN needs to have
211 * the correct size of the return value. 211 * the correct size of the return value.
212 */ 212 */
213#define SERIALIZE_lfence lfence 213#define SERIALIZE_lfence lfence
214#define SERIALIZE_mfence mfence 214#define SERIALIZE_mfence mfence
215 215
216#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax 216#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax
217#define ADD_counter shlq $32, %rdx ;\ 217#define ADD_counter shlq $32, %rdx ;\
218 orq %rdx, %rax ;\ 218 orq %rdx, %rax ;\
219 addq CPUVAR(CC_SKEW), %rax 219 addq CPUVAR(CC_SKEW), %rax
220 220
221#define RSIZE_counter32 4 221#define RSIZE_counter32 4
222#define RSIZE_counter 8 222#define RSIZE_counter 8
223 223
224#define CPU_COUNTER_FENCE(counter, fence) \ 224#define CPU_COUNTER_FENCE(counter, fence) \
225ENTRY(cpu_ ## counter ## _ ## fence) ;\ 225ENTRY(cpu_ ## counter ## _ ## fence) ;\
226 movq CPUVAR(CURLWP), %rcx ;\ 226 movq CPUVAR(CURLWP), %rcx ;\
2271: ;\ 2271: ;\
228 movq L_NCSW(%rcx), %rdi ;\ 228 movq L_NCSW(%rcx), %rdi ;\
229 SERIALIZE_ ## fence ;\ 229 SERIALIZE_ ## fence ;\
230 rdtsc ;\ 230 rdtsc ;\
231 ADD_ ## counter ;\ 231 ADD_ ## counter ;\
232 cmpq %rdi, L_NCSW(%rcx) ;\ 232 cmpq %rdi, L_NCSW(%rcx) ;\
233 jne 2f ;\ 233 jne 2f ;\
234 KMSAN_INIT_RET(RSIZE_ ## counter) ;\ 234 KMSAN_INIT_RET(RSIZE_ ## counter) ;\
235 ret ;\ 235 ret ;\
2362: ;\ 2362: ;\
237 jmp 1b ;\ 237 jmp 1b ;\
238END(cpu_ ## counter ## _ ## fence) 238END(cpu_ ## counter ## _ ## fence)
239 239
240CPU_COUNTER_FENCE(counter, lfence) 240CPU_COUNTER_FENCE(counter, lfence)
241CPU_COUNTER_FENCE(counter, mfence) 241CPU_COUNTER_FENCE(counter, mfence)
242CPU_COUNTER_FENCE(counter32, lfence) 242CPU_COUNTER_FENCE(counter32, lfence)
243CPU_COUNTER_FENCE(counter32, mfence) 243CPU_COUNTER_FENCE(counter32, mfence)
244 244
245#define CPU_COUNTER_CPUID(counter) \ 245#define CPU_COUNTER_CPUID(counter) \
246ENTRY(cpu_ ## counter ## _cpuid) ;\ 246ENTRY(cpu_ ## counter ## _cpuid) ;\
247 movq %rbx, %r9 ;\ 247 movq %rbx, %r9 ;\
248 movq CPUVAR(CURLWP), %r8 ;\ 248 movq CPUVAR(CURLWP), %r8 ;\
2491: ;\ 2491: ;\
250 movq L_NCSW(%r8), %rdi ;\ 250 movq L_NCSW(%r8), %rdi ;\
251 xor %eax, %eax ;\ 251 xor %eax, %eax ;\
252 cpuid ;\ 252 cpuid ;\
253 rdtsc ;\ 253 rdtsc ;\
254 ADD_ ## counter ;\ 254 ADD_ ## counter ;\
255 cmpq %rdi, L_NCSW(%r8) ;\ 255 cmpq %rdi, L_NCSW(%r8) ;\
256 jne 2f ;\ 256 jne 2f ;\
257 movq %r9, %rbx ;\ 257 movq %r9, %rbx ;\
258 KMSAN_INIT_RET(RSIZE_ ## counter) ;\ 258 KMSAN_INIT_RET(RSIZE_ ## counter) ;\
259 ret ;\ 259 ret ;\
2602: ;\ 2602: ;\
261 jmp 1b ;\ 261 jmp 1b ;\
262END(cpu_ ## counter ## _cpuid) 262END(cpu_ ## counter ## _cpuid)
263 263
264CPU_COUNTER_CPUID(counter) 264CPU_COUNTER_CPUID(counter)
265CPU_COUNTER_CPUID(counter32) 265CPU_COUNTER_CPUID(counter32)
266 266
267ENTRY(rdmsr_safe) 267ENTRY(rdmsr_safe)
268 movq CPUVAR(CURLWP), %r8 268 movq CPUVAR(CURLWP), %r8
269 movq L_PCB(%r8), %r8 269 movq L_PCB(%r8), %r8
270 movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8) 270 movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8)
271 271
272 movl %edi, %ecx 272 movl %edi, %ecx
273 rdmsr 273 rdmsr
274 salq $32, %rdx 274 salq $32, %rdx
275 movl %eax, %eax /* zero-extend %eax -> %rax */ 275 movl %eax, %eax /* zero-extend %eax -> %rax */
276 orq %rdx, %rax 276 orq %rdx, %rax
277 movq %rax, (%rsi) 277 movq %rax, (%rsi)
278 278
279 xorq %rax, %rax 279 xorq %rax, %rax
280 movq %rax, PCB_ONFAULT(%r8) 280 movq %rax, PCB_ONFAULT(%r8)
281#ifdef KMSAN 281#ifdef KMSAN
282 movq %rsi,%rdi 282 movq %rsi,%rdi
283 movq $8,%rsi 283 movq $8,%rsi
284 xorq %rdx,%rdx 284 xorq %rdx,%rdx
285 callq _C_LABEL(kmsan_mark) 285 callq _C_LABEL(kmsan_mark)
286#endif 286#endif
287 KMSAN_INIT_RET(4) 287 KMSAN_INIT_RET(4)
288 ret 288 ret
289END(rdmsr_safe) 289END(rdmsr_safe)
290 290
291ENTRY(msr_onfault) 291ENTRY(msr_onfault)
292 movq CPUVAR(CURLWP), %r8 292 movq CPUVAR(CURLWP), %r8
293 movq L_PCB(%r8), %r8 293 movq L_PCB(%r8), %r8
294 movq $0, PCB_ONFAULT(%r8) 294 movq $0, PCB_ONFAULT(%r8)
295 movl $EFAULT, %eax 295 movl $EFAULT, %eax
296 ret 296 ret
297END(msr_onfault) 297END(msr_onfault)
298 298
299ENTRY(breakpoint) 299ENTRY(breakpoint)
300 pushq %rbp 300 pushq %rbp
301 movq %rsp, %rbp 301 movq %rsp, %rbp
302 int $0x03 /* paranoid, not 'int3' */ 302 int $0x03 /* paranoid, not 'int3' */
303 leave 303 leave
304 ret 304 ret
305END(breakpoint) 305END(breakpoint)
306 306
307ENTRY(x86_curcpu) 307ENTRY(x86_curcpu)
308 movq %gs:(CPU_INFO_SELF), %rax 308 movq %gs:(CPU_INFO_SELF), %rax
309 KMSAN_INIT_RET(8) 309 KMSAN_INIT_RET(8)
310 ret 310 ret
311END(x86_curcpu) 311END(x86_curcpu)
312 312
313ENTRY(x86_curlwp) 313ENTRY(x86_curlwp)
314 movq %gs:(CPU_INFO_CURLWP), %rax 314 movq %gs:(CPU_INFO_CURLWP), %rax
315 KMSAN_INIT_RET(8) 315 KMSAN_INIT_RET(8)
316 ret 316 ret
317END(x86_curlwp) 317END(x86_curlwp)
318 318
319ENTRY(__byte_swap_u32_variable) 319ENTRY(__byte_swap_u32_variable)
320 movl %edi, %eax 320 movl %edi, %eax
321 bswapl %eax 321 bswapl %eax
322 KMSAN_INIT_RET(4) 322 KMSAN_INIT_RET(4)
323 ret 323 ret
324END(__byte_swap_u32_variable) 324END(__byte_swap_u32_variable)
325 325
326ENTRY(__byte_swap_u16_variable) 326ENTRY(__byte_swap_u16_variable)
327 movl %edi, %eax 327 movl %edi, %eax
328 xchgb %al, %ah 328 xchgb %al, %ah
329 KMSAN_INIT_RET(2) 329 KMSAN_INIT_RET(2)
330 ret 330 ret
331END(__byte_swap_u16_variable) 331END(__byte_swap_u16_variable)
332 332
333/* 333/*
334 * Reload segments after a GDT change. 334 * Reload segments after a GDT change.
335 */ 335 */
336ENTRY(lgdt_finish) 336ENTRY(lgdt_finish)
337 movl $GSEL(GDATA_SEL, SEL_KPL),%eax 337 movl $GSEL(GDATA_SEL, SEL_KPL),%eax
338 movl %eax,%ds 338 movl %eax,%ds
339 movl %eax,%es 339 movl %eax,%es
340 movl %eax,%ss 340 movl %eax,%ss
341 jmp _C_LABEL(x86_flush) 341 jmp _C_LABEL(x86_flush)
342END(lgdt_finish) 342END(lgdt_finish)
343 343
344/* 344/*
345 * Flush instruction pipelines by doing an intersegment (far) return. 345 * Flush instruction pipelines by doing an intersegment (far) return.
346 */ 346 */
347ENTRY(x86_flush) 347ENTRY(x86_flush)
348 popq %rax 348 popq %rax
349 pushq $GSEL(GCODE_SEL, SEL_KPL) 349 pushq $GSEL(GCODE_SEL, SEL_KPL)
350 pushq %rax 350 pushq %rax
351 lretq 351 lretq
352END(x86_flush) 352END(x86_flush)
353 353
354/* Waits - set up stack frame. */ 354/* Waits - set up stack frame. */
355ENTRY(x86_hlt) 355ENTRY(x86_hlt)
356 pushq %rbp 356 pushq %rbp
357 movq %rsp, %rbp 357 movq %rsp, %rbp
358 hlt 358 hlt
359 leave 359 leave
360 ret 360 ret
361END(x86_hlt) 361END(x86_hlt)
362 362
363/* Waits - set up stack frame. */ 363/* Waits - set up stack frame. */
364ENTRY(x86_stihlt) 364ENTRY(x86_stihlt)
365 pushq %rbp 365 pushq %rbp
366 movq %rsp, %rbp 366 movq %rsp, %rbp
367 sti 367 sti
368 hlt 368 hlt
369 leave 369 leave
370 ret 370 ret
371END(x86_stihlt) 371END(x86_stihlt)
372 372
373ENTRY(x86_monitor) 373ENTRY(x86_monitor)
374 movq %rdi, %rax 374 movq %rdi, %rax
375 movq %rsi, %rcx 375 movq %rsi, %rcx
376 monitor %rax, %rcx, %rdx 376 monitor %rax, %rcx, %rdx
377 ret 377 ret
378END(x86_monitor) 378END(x86_monitor)
379 379
380/* Waits - set up stack frame. */ 380/* Waits - set up stack frame. */
381ENTRY(x86_mwait) 381ENTRY(x86_mwait)
382 pushq %rbp 382 pushq %rbp
383 movq %rsp, %rbp 383 movq %rsp, %rbp
384 movq %rdi, %rax 384 movq %rdi, %rax
385 movq %rsi, %rcx 385 movq %rsi, %rcx
386 mwait %rax, %rcx 386 mwait %rax, %rcx
387 leave 387 leave
388 ret 388 ret
389END(x86_mwait) 389END(x86_mwait)
390 390
391ENTRY(stts) 391ENTRY(stts)
392 movq %cr0, %rax 392 movq %cr0, %rax
393 orq $CR0_TS, %rax 393 orq $CR0_TS, %rax
394 movq %rax, %cr0 394 movq %rax, %cr0
395 ret 395 ret
396END(stts) 396END(stts)
397 397
398ENTRY(fldummy) 398ENTRY(fldummy)
399 ffree %st(7) 399 ffree %st(7)
400 fldz 400 fldz
401 ret 401 ret
402END(fldummy) 402END(fldummy)
403 403
404ENTRY(inb) 404ENTRY(inb)
405 movq %rdi, %rdx 405 movq %rdi, %rdx
406 xorq %rax, %rax 406 xorq %rax, %rax
407 inb %dx, %al 407 inb %dx, %al
408 KMSAN_INIT_RET(1) 408 KMSAN_INIT_RET(1)
409 ret 409 ret
410END(inb) 410END(inb)
411 411
412ENTRY(inw) 412ENTRY(inw)
413 movq %rdi, %rdx 413 movq %rdi, %rdx
414 xorq %rax, %rax 414 xorq %rax, %rax
415 inw %dx, %ax 415 inw %dx, %ax
416 KMSAN_INIT_RET(2) 416 KMSAN_INIT_RET(2)
417 ret 417 ret
418END(inw) 418END(inw)
419 419
420ENTRY(inl) 420ENTRY(inl)
421 movq %rdi, %rdx 421 movq %rdi, %rdx
422 xorq %rax, %rax 422 xorq %rax, %rax
423 inl %dx, %eax 423 inl %dx, %eax
424 KMSAN_INIT_RET(4) 424 KMSAN_INIT_RET(4)
425 ret 425 ret
426END(inl) 426END(inl)
427 427
428ENTRY(outb) 428ENTRY(outb)
429 movq %rdi, %rdx 429 movq %rdi, %rdx
430 movq %rsi, %rax 430 movq %rsi, %rax
431 outb %al, %dx 431 outb %al, %dx
432 ret 432 ret
433END(outb) 433END(outb)
434 434
435ENTRY(outw) 435ENTRY(outw)
436 movq %rdi, %rdx 436 movq %rdi, %rdx
437 movq %rsi, %rax 437 movq %rsi, %rax
438 outw %ax, %dx 438 outw %ax, %dx
439 ret 439 ret
440END(outw) 440END(outw)
441 441
442ENTRY(outl) 442ENTRY(outl)
443 movq %rdi, %rdx 443 movq %rdi, %rdx
444 movq %rsi, %rax 444 movq %rsi, %rax
445 outl %eax, %dx 445 outl %eax, %dx
446 ret 446 ret
447END(outl) 447END(outl)
448 448
449ENTRY(x86_stos) 
450 movq %rsi,%rax 
451 movq %rdx,%rcx 
452 KMSAN_REP_STOS(8) 
453 rep 
454 stosq 
455 ret 
456END(x86_stos) 
457 
458ENTRY(x86_movs) 449ENTRY(x86_movs)
459 movq %rdx,%rcx 450 movq %rdx,%rcx
460 KMSAN_REP_STOS(8) 451 KMSAN_REP_STOS(8)
461 rep 452 rep
462 movsq 453 movsq
463 ret 454 ret
464END(x86_movs) 455END(x86_movs)
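
For contrast, x86_movs above is kept: it is the "rep movsq" copy helper
(%rdi = dst, %rsi = src, %rdx = count of 64-bit words), a forward-only,
memcpy-style copy with no overlap handling. A hypothetical C rendering,
again with illustrative naming only:

	#include <stddef.h>
	#include <stdint.h>

	/* Copy "cnt" 64-bit words from "src" to "dst", forward only. */
	static inline void
	x86_movs_equiv(uint64_t *dst, const uint64_t *src, size_t cnt)
	{
		size_t i;

		for (i = 0; i < cnt; i++)
			dst[i] = src[i];
	}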

cvs diff -r1.47 -r1.48 src/sys/arch/i386/i386/cpufunc.S

--- src/sys/arch/i386/i386/cpufunc.S 2020/06/15 09:09:23 1.47
+++ src/sys/arch/i386/i386/cpufunc.S 2020/06/24 18:09:37 1.48
@@ -1,387 +1,371 @@
1/* $NetBSD: cpufunc.S,v 1.47 2020/06/15 09:09:23 msaitoh Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.48 2020/06/24 18:09:37 maxv Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 * 34 *
35 * These are shared with NetBSD/xen. 35 * These are shared with NetBSD/xen.
36 */ 36 */
37 37
38#include <sys/errno.h> 38#include <sys/errno.h>
39 39
40#include <machine/asm.h> 40#include <machine/asm.h>
41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.47 2020/06/15 09:09:23 msaitoh Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.48 2020/06/24 18:09:37 maxv Exp $");
42 42
43#include "opt_xen.h" 43#include "opt_xen.h"
44 44
45#include <machine/specialreg.h> 45#include <machine/specialreg.h>
46#include <machine/segments.h> 46#include <machine/segments.h>
47 47
48#include "assym.h" 48#include "assym.h"
49 49
50ENTRY(x86_lfence) 50ENTRY(x86_lfence)
51 lock 51 lock
52 addl $0, -4(%esp) 52 addl $0, -4(%esp)
53 ret 53 ret
54END(x86_lfence) 54END(x86_lfence)
55 55
56ENTRY(x86_sfence) 56ENTRY(x86_sfence)
57 lock 57 lock
58 addl $0, -4(%esp) 58 addl $0, -4(%esp)
59 ret 59 ret
60END(x86_sfence) 60END(x86_sfence)
61 61
62ENTRY(x86_mfence) 62ENTRY(x86_mfence)
63 lock 63 lock
64 addl $0, -4(%esp) 64 addl $0, -4(%esp)
65 ret 65 ret
66END(x86_mfence) 66END(x86_mfence)
67 67
68#ifndef XENPV 68#ifndef XENPV
69ENTRY(lidt) 69ENTRY(lidt)
70 movl 4(%esp), %eax 70 movl 4(%esp), %eax
71 lidt (%eax) 71 lidt (%eax)
72 ret 72 ret
73END(lidt) 73END(lidt)
74 74
75ENTRY(x86_hotpatch) 75ENTRY(x86_hotpatch)
76 /* save EFLAGS, and disable intrs */ 76 /* save EFLAGS, and disable intrs */
77 pushfl 77 pushfl
78 cli 78 cli
79 79
80 /* save CR0, and disable WP */ 80 /* save CR0, and disable WP */
81 movl %cr0,%ecx 81 movl %cr0,%ecx
82 pushl %ecx 82 pushl %ecx
83 andl $~CR0_WP,%ecx 83 andl $~CR0_WP,%ecx
84 movl %ecx,%cr0 84 movl %ecx,%cr0
85 85
86 pushl 4*4(%esp) /* arg2 */ 86 pushl 4*4(%esp) /* arg2 */
87 pushl 4*4(%esp) /* arg1 */ 87 pushl 4*4(%esp) /* arg1 */
88 call _C_LABEL(x86_hotpatch_apply) 88 call _C_LABEL(x86_hotpatch_apply)
89 addl $2*4,%esp 89 addl $2*4,%esp
90 90
91 /* write back and invalidate cache */ 91 /* write back and invalidate cache */
92 wbinvd 92 wbinvd
93 93
94 /* restore CR0 */ 94 /* restore CR0 */
95 popl %ecx 95 popl %ecx
96 movl %ecx,%cr0 96 movl %ecx,%cr0
97 97
98 /* flush instruction pipeline */ 98 /* flush instruction pipeline */
99 pushl %eax 99 pushl %eax
100 call x86_flush 100 call x86_flush
101 popl %eax 101 popl %eax
102 102
103 /* clean up */ 103 /* clean up */
104 pushl %eax 104 pushl %eax
105 call _C_LABEL(x86_hotpatch_cleanup) 105 call _C_LABEL(x86_hotpatch_cleanup)
106 addl $4,%esp 106 addl $4,%esp
107 107
108 /* restore RFLAGS */ 108 /* restore RFLAGS */
109 popfl 109 popfl
110 ret 110 ret
111END(x86_hotpatch) 111END(x86_hotpatch)
112#endif /* XENPV */ 112#endif /* XENPV */
113 113
114ENTRY(x86_read_flags) 114ENTRY(x86_read_flags)
115 pushfl 115 pushfl
116 popl %eax 116 popl %eax
117 ret 117 ret
118END(x86_read_flags) 118END(x86_read_flags)
119 119
120ENTRY(x86_write_flags) 120ENTRY(x86_write_flags)
121 movl 4(%esp), %eax 121 movl 4(%esp), %eax
122 pushl %eax 122 pushl %eax
123 popfl 123 popfl
124 ret 124 ret
125END(x86_write_flags) 125END(x86_write_flags)
126 126
127#ifndef XENPV 127#ifndef XENPV
128STRONG_ALIAS(x86_write_psl,x86_write_flags) 128STRONG_ALIAS(x86_write_psl,x86_write_flags)
129STRONG_ALIAS(x86_read_psl,x86_read_flags) 129STRONG_ALIAS(x86_read_psl,x86_read_flags)
130#endif /* XENPV */ 130#endif /* XENPV */
131 131
132/* 132/*
133 * Support for reading MSRs in the safe manner (returns EFAULT on fault) 133 * Support for reading MSRs in the safe manner (returns EFAULT on fault)
134 */ 134 */
135/* int rdmsr_safe(u_int msr, uint64_t *data) */ 135/* int rdmsr_safe(u_int msr, uint64_t *data) */
136ENTRY(rdmsr_safe) 136ENTRY(rdmsr_safe)
137 movl CPUVAR(CURLWP), %ecx 137 movl CPUVAR(CURLWP), %ecx
138 movl L_PCB(%ecx), %ecx 138 movl L_PCB(%ecx), %ecx
139 movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx) 139 movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx)
140 140
141 movl 4(%esp), %ecx /* u_int msr */ 141 movl 4(%esp), %ecx /* u_int msr */
142 rdmsr 142 rdmsr
143 movl 8(%esp), %ecx /* *data */ 143 movl 8(%esp), %ecx /* *data */
144 movl %eax, (%ecx) /* low-order bits */ 144 movl %eax, (%ecx) /* low-order bits */
145 movl %edx, 4(%ecx) /* high-order bits */ 145 movl %edx, 4(%ecx) /* high-order bits */
146 xorl %eax, %eax /* "no error" */ 146 xorl %eax, %eax /* "no error" */
147 147
148 movl CPUVAR(CURLWP), %ecx 148 movl CPUVAR(CURLWP), %ecx
149 movl L_PCB(%ecx), %ecx 149 movl L_PCB(%ecx), %ecx
150 movl %eax, PCB_ONFAULT(%ecx) 150 movl %eax, PCB_ONFAULT(%ecx)
151 151
152 ret 152 ret
153END(rdmsr_safe) 153END(rdmsr_safe)
154 154
155/* 155/*
156 * MSR operations fault handler 156 * MSR operations fault handler
157 */ 157 */
158ENTRY(msr_onfault) 158ENTRY(msr_onfault)
159 movl CPUVAR(CURLWP), %ecx 159 movl CPUVAR(CURLWP), %ecx
160 movl L_PCB(%ecx), %ecx 160 movl L_PCB(%ecx), %ecx
161 movl $0, PCB_ONFAULT(%ecx) 161 movl $0, PCB_ONFAULT(%ecx)
162 movl $EFAULT, %eax 162 movl $EFAULT, %eax
163 ret 163 ret
164END(msr_onfault) 164END(msr_onfault)
165 165
166#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax 166#define ADD_counter32 addl CPUVAR(CC_SKEW), %eax
167#define ADD_counter ADD_counter32 ;\ 167#define ADD_counter ADD_counter32 ;\
168 adcl CPUVAR(CC_SKEW+4), %edx 168 adcl CPUVAR(CC_SKEW+4), %edx
169 169
170#define SERIALIZE_lfence lfence 170#define SERIALIZE_lfence lfence
171#define SERIALIZE_mfence mfence 171#define SERIALIZE_mfence mfence
172 172
173#define CPU_COUNTER_FENCE(counter, fence) \ 173#define CPU_COUNTER_FENCE(counter, fence) \
174ENTRY(cpu_ ## counter ## _ ## fence) ;\ 174ENTRY(cpu_ ## counter ## _ ## fence) ;\
175 pushl %ebx ;\ 175 pushl %ebx ;\
176 movl CPUVAR(CURLWP), %ecx ;\ 176 movl CPUVAR(CURLWP), %ecx ;\
1771: ;\ 1771: ;\
178 movl L_NCSW(%ecx), %ebx ;\ 178 movl L_NCSW(%ecx), %ebx ;\
179 SERIALIZE_ ## fence ;\ 179 SERIALIZE_ ## fence ;\
180 rdtsc ;\ 180 rdtsc ;\
181 ADD_ ## counter ;\ 181 ADD_ ## counter ;\
182 cmpl %ebx, L_NCSW(%ecx) ;\ 182 cmpl %ebx, L_NCSW(%ecx) ;\
183 jne 2f ;\ 183 jne 2f ;\
184 popl %ebx ;\ 184 popl %ebx ;\
185 ret ;\ 185 ret ;\
1862: ;\ 1862: ;\
187 jmp 1b ;\ 187 jmp 1b ;\
188END(cpu_ ## counter ## _ ## fence) 188END(cpu_ ## counter ## _ ## fence)
189 189
190CPU_COUNTER_FENCE(counter, lfence) 190CPU_COUNTER_FENCE(counter, lfence)
191CPU_COUNTER_FENCE(counter, mfence) 191CPU_COUNTER_FENCE(counter, mfence)
192CPU_COUNTER_FENCE(counter32, lfence) 192CPU_COUNTER_FENCE(counter32, lfence)
193CPU_COUNTER_FENCE(counter32, mfence) 193CPU_COUNTER_FENCE(counter32, mfence)
194 194
195#define CPU_COUNTER_CPUID(counter) \ 195#define CPU_COUNTER_CPUID(counter) \
196ENTRY(cpu_ ## counter ## _cpuid) ;\ 196ENTRY(cpu_ ## counter ## _cpuid) ;\
197 pushl %ebx ;\ 197 pushl %ebx ;\
198 pushl %esi ;\ 198 pushl %esi ;\
199 movl CPUVAR(CURLWP), %ecx ;\ 199 movl CPUVAR(CURLWP), %ecx ;\
2001: ;\ 2001: ;\
201 movl L_NCSW(%ecx), %esi ;\ 201 movl L_NCSW(%ecx), %esi ;\
202 pushl %ecx ;\ 202 pushl %ecx ;\
203 xor %eax, %eax ;\ 203 xor %eax, %eax ;\
204 cpuid ;\ 204 cpuid ;\
205 rdtsc ;\ 205 rdtsc ;\
206 ADD_ ## counter ;\ 206 ADD_ ## counter ;\
207 popl %ecx ;\ 207 popl %ecx ;\
208 cmpl %esi, L_NCSW(%ecx) ;\ 208 cmpl %esi, L_NCSW(%ecx) ;\
209 jne 2f ;\ 209 jne 2f ;\
210 popl %esi ;\ 210 popl %esi ;\
211 popl %ebx ;\ 211 popl %ebx ;\
212 ret ;\ 212 ret ;\
2132: ;\ 2132: ;\
214 jmp 1b ;\ 214 jmp 1b ;\
215END(cpu_ ## counter ##_cpuid) 215END(cpu_ ## counter ##_cpuid)
216 216
217CPU_COUNTER_CPUID(counter) 217CPU_COUNTER_CPUID(counter)
218CPU_COUNTER_CPUID(counter32) 218CPU_COUNTER_CPUID(counter32)
219 219
220ENTRY(breakpoint) 220ENTRY(breakpoint)
221 pushl %ebp 221 pushl %ebp
222 movl %esp, %ebp 222 movl %esp, %ebp
223 int $0x03 /* paranoid, not 'int3' */ 223 int $0x03 /* paranoid, not 'int3' */
224 popl %ebp 224 popl %ebp
225 ret 225 ret
226END(breakpoint) 226END(breakpoint)
227 227
228ENTRY(x86_curcpu) 228ENTRY(x86_curcpu)
229 movl %fs:(CPU_INFO_SELF), %eax 229 movl %fs:(CPU_INFO_SELF), %eax
230 ret 230 ret
231END(x86_curcpu) 231END(x86_curcpu)
232 232
233ENTRY(x86_curlwp) 233ENTRY(x86_curlwp)
234 movl %fs:(CPU_INFO_CURLWP), %eax 234 movl %fs:(CPU_INFO_CURLWP), %eax
235 ret 235 ret
236END(x86_curlwp) 236END(x86_curlwp)
237 237
238ENTRY(__byte_swap_u32_variable) 238ENTRY(__byte_swap_u32_variable)
239 movl 4(%esp), %eax 239 movl 4(%esp), %eax
240 bswapl %eax 240 bswapl %eax
241 ret 241 ret
242END(__byte_swap_u32_variable) 242END(__byte_swap_u32_variable)
243 243
244ENTRY(__byte_swap_u16_variable) 244ENTRY(__byte_swap_u16_variable)
245 movl 4(%esp), %eax 245 movl 4(%esp), %eax
246 xchgb %al, %ah 246 xchgb %al, %ah
247 ret 247 ret
248END(__byte_swap_u16_variable) 248END(__byte_swap_u16_variable)
249 249
250/* 250/*
251 * void x86_flush() 251 * void x86_flush()
252 * 252 *
253 * Flush instruction pipelines by doing an intersegment (far) return. 253 * Flush instruction pipelines by doing an intersegment (far) return.
254 */ 254 */
255ENTRY(x86_flush) 255ENTRY(x86_flush)
256 popl %eax 256 popl %eax
257 pushl $GSEL(GCODE_SEL, SEL_KPL) 257 pushl $GSEL(GCODE_SEL, SEL_KPL)
258 pushl %eax 258 pushl %eax
259 lret 259 lret
260END(x86_flush) 260END(x86_flush)
261 261
262/* Waits - set up stack frame. */ 262/* Waits - set up stack frame. */
263ENTRY(x86_hlt) 263ENTRY(x86_hlt)
264 pushl %ebp 264 pushl %ebp
265 movl %esp, %ebp 265 movl %esp, %ebp
266 hlt 266 hlt
267 leave 267 leave
268 ret 268 ret
269END(x86_hlt) 269END(x86_hlt)
270 270
271/* Waits - set up stack frame. */ 271/* Waits - set up stack frame. */
272ENTRY(x86_stihlt) 272ENTRY(x86_stihlt)
273 pushl %ebp 273 pushl %ebp
274 movl %esp, %ebp 274 movl %esp, %ebp
275 sti 275 sti
276 hlt 276 hlt
277 leave 277 leave
278 ret 278 ret
279END(x86_stihlt) 279END(x86_stihlt)
280 280
281ENTRY(x86_monitor) 281ENTRY(x86_monitor)
282 movl 4(%esp), %eax 282 movl 4(%esp), %eax
283 movl 8(%esp), %ecx 283 movl 8(%esp), %ecx
284 movl 12(%esp), %edx 284 movl 12(%esp), %edx
285 monitor %eax, %ecx, %edx 285 monitor %eax, %ecx, %edx
286 ret 286 ret
287END(x86_monitor) 287END(x86_monitor)
288 288
289/* Waits - set up stack frame. */ 289/* Waits - set up stack frame. */
290ENTRY(x86_mwait)  290ENTRY(x86_mwait)
291 pushl %ebp 291 pushl %ebp
292 movl %esp, %ebp 292 movl %esp, %ebp
293 movl 8(%ebp), %eax 293 movl 8(%ebp), %eax
294 movl 12(%ebp), %ecx 294 movl 12(%ebp), %ecx
295 mwait %eax, %ecx 295 mwait %eax, %ecx
296 leave 296 leave
297 ret 297 ret
298END(x86_mwait)  298END(x86_mwait)
299 299
300ENTRY(stts) 300ENTRY(stts)
301 movl %cr0, %eax 301 movl %cr0, %eax
302 testl $CR0_TS, %eax 302 testl $CR0_TS, %eax
303 jnz 1f 303 jnz 1f
304 orl $CR0_TS, %eax 304 orl $CR0_TS, %eax
305 movl %eax, %cr0 305 movl %eax, %cr0
3061: 3061:
307 ret 307 ret
308END(stts) 308END(stts)
309 309
310ENTRY(fldummy) 310ENTRY(fldummy)
311 ffree %st(7) 311 ffree %st(7)
312 fldz 312 fldz
313 ret 313 ret
314END(fldummy) 314END(fldummy)
315 315
316ENTRY(inb) 316ENTRY(inb)
317 movl 4(%esp), %edx 317 movl 4(%esp), %edx
318 xorl %eax, %eax 318 xorl %eax, %eax
319 inb %dx, %al 319 inb %dx, %al
320 ret 320 ret
321END(inb) 321END(inb)
322 322
323ENTRY(inw) 323ENTRY(inw)
324 movl 4(%esp), %edx 324 movl 4(%esp), %edx
325 xorl %eax, %eax 325 xorl %eax, %eax
326 inw %dx, %ax 326 inw %dx, %ax
327 ret 327 ret
328END(inw) 328END(inw)
329 329
330ENTRY(inl) 330ENTRY(inl)
331 movl 4(%esp), %edx 331 movl 4(%esp), %edx
332 inl %dx, %eax 332 inl %dx, %eax
333 ret 333 ret
334END(inl) 334END(inl)
335 335
336ENTRY(outb) 336ENTRY(outb)
337 movl 4(%esp), %edx 337 movl 4(%esp), %edx
338 movl 8(%esp), %eax 338 movl 8(%esp), %eax
339 outb %al, %dx 339 outb %al, %dx
340 ret 340 ret
341END(outb) 341END(outb)
342 342
343ENTRY(outw) 343ENTRY(outw)
344 movl 4(%esp), %edx 344 movl 4(%esp), %edx
345 movl 8(%esp), %eax 345 movl 8(%esp), %eax
346 outw %ax, %dx 346 outw %ax, %dx
347 ret 347 ret
348END(outw) 348END(outw)
349 349
350ENTRY(outl) 350ENTRY(outl)
351 movl 4(%esp), %edx 351 movl 4(%esp), %edx
352 movl 8(%esp), %eax 352 movl 8(%esp), %eax
353 outl %eax, %dx 353 outl %eax, %dx
354 ret 354 ret
355END(outl) 355END(outl)
356 356
357ENTRY(x86_stos) 
358 pushl %ebp 
359 movl %esp,%ebp 
360 pushl %edi 
361 pushl %esi 
362 movl 8(%ebp),%edi 
363 movl 12(%ebp),%eax 
364 movl 16(%ebp),%ecx 
365 rep 
366 stosl 
367 popl %esi 
368 popl %edi 
369 leave 
370 ret 
371END(x86_stos) 
372 
373ENTRY(x86_movs) 357ENTRY(x86_movs)
374 pushl %ebp 358 pushl %ebp
375 movl %esp,%ebp 359 movl %esp,%ebp
376 pushl %edi 360 pushl %edi
377 pushl %esi 361 pushl %esi
378 movl 8(%ebp),%edi 362 movl 8(%ebp),%edi
379 movl 12(%ebp),%esi 363 movl 12(%ebp),%esi
380 movl 16(%ebp),%ecx 364 movl 16(%ebp),%ecx
381 rep 365 rep
382 movsl 366 movsl
383 popl %esi 367 popl %esi
384 popl %edi 368 popl %edi
385 leave 369 leave
386 ret 370 ret
387END(x86_movs) 371END(x86_movs)

cvs diff -r1.122 -r1.123 src/sys/arch/x86/include/pmap.h

--- src/sys/arch/x86/include/pmap.h 2020/05/27 19:33:40 1.122
+++ src/sys/arch/x86/include/pmap.h 2020/06/24 18:09:37 1.123
@@ -1,620 +1,619 @@
1/* $NetBSD: pmap.h,v 1.122 2020/05/27 19:33:40 ad Exp $ */ 1/* $NetBSD: pmap.h,v 1.123 2020/06/24 18:09:37 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 4 * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 26 */
27 27
28/* 28/*
29 * Copyright (c) 2001 Wasabi Systems, Inc. 29 * Copyright (c) 2001 Wasabi Systems, Inc.
30 * All rights reserved. 30 * All rights reserved.
31 * 31 *
32 * Written by Frank van der Linden for Wasabi Systems, Inc. 32 * Written by Frank van der Linden for Wasabi Systems, Inc.
33 * 33 *
34 * Redistribution and use in source and binary forms, with or without 34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions 35 * modification, are permitted provided that the following conditions
36 * are met: 36 * are met:
37 * 1. Redistributions of source code must retain the above copyright 37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer. 38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright 39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the 40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution. 41 * documentation and/or other materials provided with the distribution.
42 * 3. All advertising materials mentioning features or use of this software 42 * 3. All advertising materials mentioning features or use of this software
43 * must display the following acknowledgement: 43 * must display the following acknowledgement:
44 * This product includes software developed for the NetBSD Project by 44 * This product includes software developed for the NetBSD Project by
45 * Wasabi Systems, Inc. 45 * Wasabi Systems, Inc.
46 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 46 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
47 * or promote products derived from this software without specific prior 47 * or promote products derived from this software without specific prior
48 * written permission. 48 * written permission.
49 * 49 *
50 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 50 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
52 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 52 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
53 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 53 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
54 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 54 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60 * POSSIBILITY OF SUCH DAMAGE. 60 * POSSIBILITY OF SUCH DAMAGE.
61 */ 61 */
62 62
63/* 63/*
64 * pmap.h: see pmap.c for the history of this pmap module. 64 * pmap.h: see pmap.c for the history of this pmap module.
65 */ 65 */
66 66
67#ifndef _X86_PMAP_H_ 67#ifndef _X86_PMAP_H_
68#define _X86_PMAP_H_ 68#define _X86_PMAP_H_
69 69
70/* 70/*
71 * pl*_pi: index in the ptp page for a pde mapping a VA. 71 * pl*_pi: index in the ptp page for a pde mapping a VA.
72 * (pl*_i below is the index in the virtual array of all pdes per level) 72 * (pl*_i below is the index in the virtual array of all pdes per level)
73 */ 73 */
74#define pl1_pi(VA) (((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT) 74#define pl1_pi(VA) (((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
75#define pl2_pi(VA) (((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT) 75#define pl2_pi(VA) (((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
76#define pl3_pi(VA) (((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT) 76#define pl3_pi(VA) (((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
77#define pl4_pi(VA) (((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT) 77#define pl4_pi(VA) (((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)
78#define pl_pi(va, lvl) \ 78#define pl_pi(va, lvl) \
79 (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1]) 79 (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
80 80
81/* 81/*
82 * pl*_i: generate index into pde/pte arrays in virtual space 82 * pl*_i: generate index into pde/pte arrays in virtual space
83 * 83 *
84 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X) 84 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
85 */ 85 */
86#define pl1_i(VA) (((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT) 86#define pl1_i(VA) (((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
87#define pl2_i(VA) (((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT) 87#define pl2_i(VA) (((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
88#define pl3_i(VA) (((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT) 88#define pl3_i(VA) (((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
89#define pl4_i(VA) (((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT) 89#define pl4_i(VA) (((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
90#define pl_i(va, lvl) \ 90#define pl_i(va, lvl) \
91 (((VA_SIGN_POS(va)) & ptp_frames[(lvl)-1]) >> ptp_shifts[(lvl)-1]) 91 (((VA_SIGN_POS(va)) & ptp_frames[(lvl)-1]) >> ptp_shifts[(lvl)-1])
92 92
93#define pl_i_roundup(va, lvl) pl_i((va)+ ~ptp_frames[(lvl)-1], (lvl)) 93#define pl_i_roundup(va, lvl) pl_i((va)+ ~ptp_frames[(lvl)-1], (lvl))
94 94
95/* 95/*
96 * PTP macros: 96 * PTP macros:
97 * a PTP's index is the PD index of the PDE that points to it 97 * a PTP's index is the PD index of the PDE that points to it
98 * a PTP's offset is the byte-offset in the PTE space that this PTP is at 98 * a PTP's offset is the byte-offset in the PTE space that this PTP is at
99 * a PTP's VA is the first VA mapped by that PTP 99 * a PTP's VA is the first VA mapped by that PTP
100 */ 100 */
101 101
102#define ptp_va2o(va, lvl) (pl_i(va, (lvl)+1) * PAGE_SIZE) 102#define ptp_va2o(va, lvl) (pl_i(va, (lvl)+1) * PAGE_SIZE)
103 103
104/* size of a PDP: usually one page, except for PAE */ 104/* size of a PDP: usually one page, except for PAE */
105#ifdef PAE 105#ifdef PAE
106#define PDP_SIZE 4 106#define PDP_SIZE 4
107#else 107#else
108#define PDP_SIZE 1 108#define PDP_SIZE 1
109#endif 109#endif
110 110
111 111
112#if defined(_KERNEL) 112#if defined(_KERNEL)
113#include <sys/kcpuset.h> 113#include <sys/kcpuset.h>
114#include <sys/rwlock.h> 114#include <sys/rwlock.h>
115#include <x86/pmap_pv.h> 115#include <x86/pmap_pv.h>
116#include <uvm/pmap/pmap_pvt.h> 116#include <uvm/pmap/pmap_pvt.h>
117 117
118#define PATENTRY(n, type) (type << ((n) * 8)) 118#define PATENTRY(n, type) (type << ((n) * 8))
119#define PAT_UC 0x0ULL 119#define PAT_UC 0x0ULL
120#define PAT_WC 0x1ULL 120#define PAT_WC 0x1ULL
121#define PAT_WT 0x4ULL 121#define PAT_WT 0x4ULL
122#define PAT_WP 0x5ULL 122#define PAT_WP 0x5ULL
123#define PAT_WB 0x6ULL 123#define PAT_WB 0x6ULL
124#define PAT_UCMINUS 0x7ULL 124#define PAT_UCMINUS 0x7ULL
125 125
126#define BTSEG_NONE 0 126#define BTSEG_NONE 0
127#define BTSEG_TEXT 1 127#define BTSEG_TEXT 1
128#define BTSEG_RODATA 2 128#define BTSEG_RODATA 2
129#define BTSEG_DATA 3 129#define BTSEG_DATA 3
130#define BTSPACE_NSEGS 64 130#define BTSPACE_NSEGS 64
131 131
132struct bootspace { 132struct bootspace {
133 struct { 133 struct {
134 vaddr_t va; 134 vaddr_t va;
135 paddr_t pa; 135 paddr_t pa;
136 size_t sz; 136 size_t sz;
137 } head; 137 } head;
138 138
139 /* Kernel segments. */ 139 /* Kernel segments. */
140 struct { 140 struct {
141 int type; 141 int type;
142 vaddr_t va; 142 vaddr_t va;
143 paddr_t pa; 143 paddr_t pa;
144 size_t sz; 144 size_t sz;
145 } segs[BTSPACE_NSEGS]; 145 } segs[BTSPACE_NSEGS];
146 146
147 /* 147 /*
148 * The area used by the early kernel bootstrap. It contains the kernel 148 * The area used by the early kernel bootstrap. It contains the kernel
149 * symbols, the preloaded modules, the bootstrap tables, and the ISA I/O 149 * symbols, the preloaded modules, the bootstrap tables, and the ISA I/O
150 * mem. 150 * mem.
151 */ 151 */
152 struct { 152 struct {
153 vaddr_t va; 153 vaddr_t va;
154 paddr_t pa; 154 paddr_t pa;
155 size_t sz; 155 size_t sz;
156 } boot; 156 } boot;
157 157
158 /* A magic VA usable by the bootstrap code. */ 158 /* A magic VA usable by the bootstrap code. */
159 vaddr_t spareva; 159 vaddr_t spareva;
160 160
161 /* Virtual address of the page directory. */ 161 /* Virtual address of the page directory. */
162 vaddr_t pdir; 162 vaddr_t pdir;
163 163
164 /* Area dedicated to kernel modules (amd64 only). */ 164 /* Area dedicated to kernel modules (amd64 only). */
165 vaddr_t smodule; 165 vaddr_t smodule;
166 vaddr_t emodule; 166 vaddr_t emodule;
167}; 167};
168 168
169#define SLAREA_USER 0 169#define SLAREA_USER 0
170#define SLAREA_PTE 1 170#define SLAREA_PTE 1
171#define SLAREA_MAIN 2 171#define SLAREA_MAIN 2
172#define SLAREA_PCPU 3 172#define SLAREA_PCPU 3
173#define SLAREA_DMAP 4 173#define SLAREA_DMAP 4
174#define SLAREA_HYPV 5 174#define SLAREA_HYPV 5
175#define SLAREA_ASAN 6 175#define SLAREA_ASAN 6
176#define SLAREA_MSAN 7 176#define SLAREA_MSAN 7
177#define SLAREA_KERN 8 177#define SLAREA_KERN 8
178#define SLSPACE_NAREAS 9 178#define SLSPACE_NAREAS 9
179 179
180struct slotspace { 180struct slotspace {
181 struct { 181 struct {
182 size_t sslot; /* start slot */ 182 size_t sslot; /* start slot */
183 size_t nslot; /* # of slots */ 183 size_t nslot; /* # of slots */
184 bool active; /* area is active */ 184 bool active; /* area is active */
185 } area[SLSPACE_NAREAS]; 185 } area[SLSPACE_NAREAS];
186}; 186};
187 187
188extern struct slotspace slotspace; 188extern struct slotspace slotspace;
189 189
190#ifndef MAXGDTSIZ 190#ifndef MAXGDTSIZ
191#define MAXGDTSIZ 65536 /* XXX */ 191#define MAXGDTSIZ 65536 /* XXX */
192#endif 192#endif
193 193
194#ifndef MAX_USERLDT_SIZE 194#ifndef MAX_USERLDT_SIZE
195#define MAX_USERLDT_SIZE PAGE_SIZE /* XXX */ 195#define MAX_USERLDT_SIZE PAGE_SIZE /* XXX */
196#endif 196#endif
197 197
198struct pcpu_entry { 198struct pcpu_entry {
199 uint8_t gdt[MAXGDTSIZ]; 199 uint8_t gdt[MAXGDTSIZ];
200 uint8_t ldt[MAX_USERLDT_SIZE]; 200 uint8_t ldt[MAX_USERLDT_SIZE];
201 uint8_t tss[PAGE_SIZE]; 201 uint8_t tss[PAGE_SIZE];
202 uint8_t ist0[PAGE_SIZE]; 202 uint8_t ist0[PAGE_SIZE];
203 uint8_t ist1[PAGE_SIZE]; 203 uint8_t ist1[PAGE_SIZE];
204 uint8_t ist2[PAGE_SIZE]; 204 uint8_t ist2[PAGE_SIZE];
205 uint8_t ist3[PAGE_SIZE]; 205 uint8_t ist3[PAGE_SIZE];
206 uint8_t rsp0[2 * PAGE_SIZE]; 206 uint8_t rsp0[2 * PAGE_SIZE];
207} __packed; 207} __packed;
208 208
209struct pcpu_area { 209struct pcpu_area {
210#ifdef SVS 210#ifdef SVS
211 uint8_t utls[PAGE_SIZE]; 211 uint8_t utls[PAGE_SIZE];
212#endif 212#endif
213 uint8_t idt[PAGE_SIZE]; 213 uint8_t idt[PAGE_SIZE];
214 uint8_t ldt[PAGE_SIZE]; 214 uint8_t ldt[PAGE_SIZE];
215 struct pcpu_entry ent[MAXCPUS]; 215 struct pcpu_entry ent[MAXCPUS];
216} __packed; 216} __packed;
217 217
218extern struct pcpu_area *pcpuarea; 218extern struct pcpu_area *pcpuarea;
219 219
220#define PMAP_PCID_KERN 0 220#define PMAP_PCID_KERN 0
221#define PMAP_PCID_USER 1 221#define PMAP_PCID_USER 1
222 222
223/* 223/*
224 * pmap data structures: see pmap.c for details of locking. 224 * pmap data structures: see pmap.c for details of locking.
225 */ 225 */
226 226
227/* 227/*
228 * we maintain a list of all non-kernel pmaps 228 * we maintain a list of all non-kernel pmaps
229 */ 229 */
230 230
231LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */ 231LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
232 232
233/* 233/*
234 * linked list of all non-kernel pmaps 234 * linked list of all non-kernel pmaps
235 */ 235 */
236extern struct pmap_head pmaps; 236extern struct pmap_head pmaps;
237extern kmutex_t pmaps_lock; /* protects pmaps */ 237extern kmutex_t pmaps_lock; /* protects pmaps */
238 238
239/* 239/*
240 * pool_cache(9) that pmaps are allocated from  240 * pool_cache(9) that pmaps are allocated from
241 */ 241 */
242extern struct pool_cache pmap_cache; 242extern struct pool_cache pmap_cache;
243 243
244/* 244/*
245 * the pmap structure 245 * the pmap structure
246 * 246 *
247 * note that the pm_obj contains the lock pointer, the reference count, 247 * note that the pm_obj contains the lock pointer, the reference count,
248 * page list, and number of PTPs within the pmap. 248 * page list, and number of PTPs within the pmap.
249 * 249 *
250 * pm_lock is the same as the lock for vm object 0. Changes to 250 * pm_lock is the same as the lock for vm object 0. Changes to
251 * the other objects may only be made if that lock has been taken 251 * the other objects may only be made if that lock has been taken
252 * (the other object locks are only used when uvm_pagealloc is called) 252 * (the other object locks are only used when uvm_pagealloc is called)
253 */ 253 */
254 254
255struct pv_page; 255struct pv_page;
256 256
257struct pmap { 257struct pmap {
258 struct uvm_object pm_obj[PTP_LEVELS-1];/* objects for lvl >= 1) */ 258 struct uvm_object pm_obj[PTP_LEVELS-1];/* objects for lvl >= 1) */
259 LIST_ENTRY(pmap) pm_list; /* list of all pmaps */ 259 LIST_ENTRY(pmap) pm_list; /* list of all pmaps */
260 pd_entry_t *pm_pdir; /* VA of PD */ 260 pd_entry_t *pm_pdir; /* VA of PD */
261 paddr_t pm_pdirpa[PDP_SIZE]; /* PA of PDs (read-only after create) */ 261 paddr_t pm_pdirpa[PDP_SIZE]; /* PA of PDs (read-only after create) */
262 struct vm_page *pm_ptphint[PTP_LEVELS-1]; 262 struct vm_page *pm_ptphint[PTP_LEVELS-1];
263 /* pointer to a PTP in our pmap */ 263 /* pointer to a PTP in our pmap */
264 struct pmap_statistics pm_stats; /* pmap stats */ 264 struct pmap_statistics pm_stats; /* pmap stats */
265 struct pv_entry *pm_pve; /* spare pv_entry */ 265 struct pv_entry *pm_pve; /* spare pv_entry */
266 LIST_HEAD(, pv_page) pm_pvp_part; 266 LIST_HEAD(, pv_page) pm_pvp_part;
267 LIST_HEAD(, pv_page) pm_pvp_empty; 267 LIST_HEAD(, pv_page) pm_pvp_empty;
268 LIST_HEAD(, pv_page) pm_pvp_full; 268 LIST_HEAD(, pv_page) pm_pvp_full;
269 269
270#if !defined(__x86_64__) 270#if !defined(__x86_64__)
271 vaddr_t pm_hiexec; /* highest executable mapping */ 271 vaddr_t pm_hiexec; /* highest executable mapping */
272#endif /* !defined(__x86_64__) */ 272#endif /* !defined(__x86_64__) */
273 273
274 union descriptor *pm_ldt; /* user-set LDT */ 274 union descriptor *pm_ldt; /* user-set LDT */
275 size_t pm_ldt_len; /* XXX unused, remove */ 275 size_t pm_ldt_len; /* XXX unused, remove */
276 int pm_ldt_sel; /* LDT selector */ 276 int pm_ldt_sel; /* LDT selector */
277 277
278 kcpuset_t *pm_cpus; /* mask of CPUs using pmap */ 278 kcpuset_t *pm_cpus; /* mask of CPUs using pmap */
279 kcpuset_t *pm_kernel_cpus; /* mask of CPUs using kernel part 279 kcpuset_t *pm_kernel_cpus; /* mask of CPUs using kernel part
280 of pmap */ 280 of pmap */
281 kcpuset_t *pm_xen_ptp_cpus; /* mask of CPUs which have this pmap's 281 kcpuset_t *pm_xen_ptp_cpus; /* mask of CPUs which have this pmap's
282 ptp mapped */ 282 ptp mapped */
283 uint64_t pm_ncsw; /* for assertions */ 283 uint64_t pm_ncsw; /* for assertions */
284 LIST_HEAD(,vm_page) pm_gc_ptp; /* PTPs queued for free */ 284 LIST_HEAD(,vm_page) pm_gc_ptp; /* PTPs queued for free */
285 285
286 /* Used by NVMM and Xen */ 286 /* Used by NVMM and Xen */
287 int (*pm_enter)(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int); 287 int (*pm_enter)(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int);
288 bool (*pm_extract)(struct pmap *, vaddr_t, paddr_t *); 288 bool (*pm_extract)(struct pmap *, vaddr_t, paddr_t *);
289 void (*pm_remove)(struct pmap *, vaddr_t, vaddr_t); 289 void (*pm_remove)(struct pmap *, vaddr_t, vaddr_t);
290 int (*pm_sync_pv)(struct vm_page *, vaddr_t, paddr_t, int, uint8_t *, 290 int (*pm_sync_pv)(struct vm_page *, vaddr_t, paddr_t, int, uint8_t *,
291 pt_entry_t *); 291 pt_entry_t *);
292 void (*pm_pp_remove_ent)(struct pmap *, struct vm_page *, pt_entry_t, 292 void (*pm_pp_remove_ent)(struct pmap *, struct vm_page *, pt_entry_t,
293 vaddr_t); 293 vaddr_t);
294 void (*pm_write_protect)(struct pmap *, vaddr_t, vaddr_t, vm_prot_t); 294 void (*pm_write_protect)(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
295 void (*pm_unwire)(struct pmap *, vaddr_t); 295 void (*pm_unwire)(struct pmap *, vaddr_t);
296 296
297 void (*pm_tlb_flush)(struct pmap *); 297 void (*pm_tlb_flush)(struct pmap *);
298 void *pm_data; 298 void *pm_data;
299 299
300 kmutex_t pm_lock /* locks for pm_objs */ 300 kmutex_t pm_lock /* locks for pm_objs */
301 __aligned(64); /* give lock own cache line */ 301 __aligned(64); /* give lock own cache line */
302 krwlock_t pm_dummy_lock; /* ugly hack for abusing uvm_object */ 302 krwlock_t pm_dummy_lock; /* ugly hack for abusing uvm_object */
303}; 303};
304 304
305/* macro to access pm_pdirpa slots */ 305/* macro to access pm_pdirpa slots */
306#ifdef PAE 306#ifdef PAE
307#define pmap_pdirpa(pmap, index) \ 307#define pmap_pdirpa(pmap, index) \
308 ((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t)) 308 ((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
309#else 309#else
310#define pmap_pdirpa(pmap, index) \ 310#define pmap_pdirpa(pmap, index) \
311 ((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t)) 311 ((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
312#endif 312#endif
313 313
314/* 314/*
315 * MD flags that we use for pmap_enter and pmap_kenter_pa: 315 * MD flags that we use for pmap_enter and pmap_kenter_pa:
316 */ 316 */
317 317
318/* 318/*
319 * global kernel variables 319 * global kernel variables
320 */ 320 */
321 321
322/* 322/*
323 * PDPpaddr is the physical address of the kernel's PDP. 323 * PDPpaddr is the physical address of the kernel's PDP.
324 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3 324 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
325 * value associated with the kernel process, proc0. 325 * value associated with the kernel process, proc0.
326 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to 326 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
327 * the L3 PD, it can no longer be considered the equivalent of a %cr3. 327 * the L3 PD, it can no longer be considered the equivalent of a %cr3.
328 * - Xen: it corresponds to the PFN of the kernel's PDP. 328 * - Xen: it corresponds to the PFN of the kernel's PDP.
329 */ 329 */
330extern u_long PDPpaddr; 330extern u_long PDPpaddr;
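A minimal illustrative sketch (not part of the header): on amd64 and non-PAE
i386, activating the kernel's page tables amounts to loading PDPpaddr into
%cr3, assuming the existing lcr3() helper from <machine/cpufunc.h>.  On Xen PV
this does not apply, since PDPpaddr is a PFN rather than a physical address.

	static inline void
	pmap_load_kernel_cr3_sketch(void)	/* hypothetical name */
	{
		/* Sketch only: not valid on Xen PV or with i386 PAE. */
		lcr3(PDPpaddr);
	}
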
331 331
332extern pd_entry_t pmap_pg_g; /* do we support PTE_G? */ 332extern pd_entry_t pmap_pg_g; /* do we support PTE_G? */
333extern pd_entry_t pmap_pg_nx; /* do we support PTE_NX? */ 333extern pd_entry_t pmap_pg_nx; /* do we support PTE_NX? */
334extern int pmap_largepages; 334extern int pmap_largepages;
335extern long nkptp[PTP_LEVELS]; 335extern long nkptp[PTP_LEVELS];
336 336
337/* 337/*
338 * macros 338 * macros
339 */ 339 */
340 340
341#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 341#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
342#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) 342#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
343 343
344#define pmap_clear_modify(pg) pmap_clear_attrs(pg, PP_ATTRS_D) 344#define pmap_clear_modify(pg) pmap_clear_attrs(pg, PP_ATTRS_D)
345#define pmap_clear_reference(pg) pmap_clear_attrs(pg, PP_ATTRS_A) 345#define pmap_clear_reference(pg) pmap_clear_attrs(pg, PP_ATTRS_A)
346#define pmap_copy(DP,SP,D,L,S) __USE(L) 346#define pmap_copy(DP,SP,D,L,S) __USE(L)
347#define pmap_is_modified(pg) pmap_test_attrs(pg, PP_ATTRS_D) 347#define pmap_is_modified(pg) pmap_test_attrs(pg, PP_ATTRS_D)
348#define pmap_is_referenced(pg) pmap_test_attrs(pg, PP_ATTRS_A) 348#define pmap_is_referenced(pg) pmap_test_attrs(pg, PP_ATTRS_A)
349#define pmap_move(DP,SP,D,L,S) 349#define pmap_move(DP,SP,D,L,S)
350#define pmap_phys_address(ppn) (x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK) 350#define pmap_phys_address(ppn) (x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
351#define pmap_mmap_flags(ppn) x86_mmap_flags(ppn) 351#define pmap_mmap_flags(ppn) x86_mmap_flags(ppn)
352#define pmap_valid_entry(E) ((E) & PTE_P) /* is PDE or PTE valid? */ 352#define pmap_valid_entry(E) ((E) & PTE_P) /* is PDE or PTE valid? */
353 353
354#if defined(__x86_64__) || defined(PAE) 354#if defined(__x86_64__) || defined(PAE)
355#define X86_MMAP_FLAG_SHIFT (64 - PGSHIFT) 355#define X86_MMAP_FLAG_SHIFT (64 - PGSHIFT)
356#else 356#else
357#define X86_MMAP_FLAG_SHIFT (32 - PGSHIFT) 357#define X86_MMAP_FLAG_SHIFT (32 - PGSHIFT)
358#endif 358#endif
359 359
360#define X86_MMAP_FLAG_MASK 0xf 360#define X86_MMAP_FLAG_MASK 0xf
361#define X86_MMAP_FLAG_PREFETCH 0x1 361#define X86_MMAP_FLAG_PREFETCH 0x1
362 362
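A hedged usage sketch (not part of the header): the X86_MMAP_FLAG_* bits ride
in the upper bits of the page-frame cookie returned by a driver's mmap routine;
pmap_mmap_flags() extracts them and pmap_phys_address() discards them when
forming the physical address.  The x86_btop() macro and the driver entry point
below are assumptions for illustration only.

	/* Sketch: return a cookie requesting a prefetchable mapping of pa. */
	static paddr_t
	mydrv_mmap_cookie(paddr_t pa)
	{
		return x86_btop(pa) |
		    ((paddr_t)X86_MMAP_FLAG_PREFETCH << X86_MMAP_FLAG_SHIFT);
	}
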
363/* 363/*
364 * prototypes 364 * prototypes
365 */ 365 */
366 366
367void pmap_activate(struct lwp *); 367void pmap_activate(struct lwp *);
368void pmap_bootstrap(vaddr_t); 368void pmap_bootstrap(vaddr_t);
369bool pmap_clear_attrs(struct vm_page *, unsigned); 369bool pmap_clear_attrs(struct vm_page *, unsigned);
370bool pmap_pv_clear_attrs(paddr_t, unsigned); 370bool pmap_pv_clear_attrs(paddr_t, unsigned);
371void pmap_deactivate(struct lwp *); 371void pmap_deactivate(struct lwp *);
372void pmap_page_remove(struct vm_page *); 372void pmap_page_remove(struct vm_page *);
373void pmap_pv_remove(paddr_t); 373void pmap_pv_remove(paddr_t);
374void pmap_remove(struct pmap *, vaddr_t, vaddr_t); 374void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
375bool pmap_test_attrs(struct vm_page *, unsigned); 375bool pmap_test_attrs(struct vm_page *, unsigned);
376void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t); 376void pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
377void pmap_load(void); 377void pmap_load(void);
378paddr_t pmap_init_tmp_pgtbl(paddr_t); 378paddr_t pmap_init_tmp_pgtbl(paddr_t);
379bool pmap_remove_all(struct pmap *); 379bool pmap_remove_all(struct pmap *);
380void pmap_ldt_cleanup(struct lwp *); 380void pmap_ldt_cleanup(struct lwp *);
381void pmap_ldt_sync(struct pmap *); 381void pmap_ldt_sync(struct pmap *);
382void pmap_kremove_local(vaddr_t, vsize_t); 382void pmap_kremove_local(vaddr_t, vsize_t);
383 383
384#define __HAVE_PMAP_PV_TRACK 1 384#define __HAVE_PMAP_PV_TRACK 1
385void pmap_pv_init(void); 385void pmap_pv_init(void);
386void pmap_pv_track(paddr_t, psize_t); 386void pmap_pv_track(paddr_t, psize_t);
387void pmap_pv_untrack(paddr_t, psize_t); 387void pmap_pv_untrack(paddr_t, psize_t);
388 388
389void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **, 389void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
390 pd_entry_t * const **); 390 pd_entry_t * const **);
391void pmap_unmap_ptes(struct pmap *, struct pmap *); 391void pmap_unmap_ptes(struct pmap *, struct pmap *);
392 392
393bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *, 393bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
394 int *lastlvl); 394 int *lastlvl);
395 395
396u_int x86_mmap_flags(paddr_t); 396u_int x86_mmap_flags(paddr_t);
397 397
398bool pmap_is_curpmap(struct pmap *); 398bool pmap_is_curpmap(struct pmap *);
399 399
400void pmap_ept_transform(struct pmap *); 400void pmap_ept_transform(struct pmap *);
401 401
402#ifndef __HAVE_DIRECT_MAP 402#ifndef __HAVE_DIRECT_MAP
403void pmap_vpage_cpu_init(struct cpu_info *); 403void pmap_vpage_cpu_init(struct cpu_info *);
404#endif 404#endif
405vaddr_t slotspace_rand(int, size_t, size_t, size_t, vaddr_t); 405vaddr_t slotspace_rand(int, size_t, size_t, size_t, vaddr_t);
406 406
407vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */ 407vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
408 408
409typedef enum tlbwhy { 409typedef enum tlbwhy {
410 TLBSHOOT_REMOVE_ALL, 410 TLBSHOOT_REMOVE_ALL,
411 TLBSHOOT_KENTER, 411 TLBSHOOT_KENTER,
412 TLBSHOOT_KREMOVE, 412 TLBSHOOT_KREMOVE,
413 TLBSHOOT_FREE_PTP, 413 TLBSHOOT_FREE_PTP,
414 TLBSHOOT_REMOVE_PTE, 414 TLBSHOOT_REMOVE_PTE,
415 TLBSHOOT_SYNC_PV, 415 TLBSHOOT_SYNC_PV,
416 TLBSHOOT_WRITE_PROTECT, 416 TLBSHOOT_WRITE_PROTECT,
417 TLBSHOOT_ENTER, 417 TLBSHOOT_ENTER,
418 TLBSHOOT_NVMM, 418 TLBSHOOT_NVMM,
419 TLBSHOOT_BUS_DMA, 419 TLBSHOOT_BUS_DMA,
420 TLBSHOOT_BUS_SPACE, 420 TLBSHOOT_BUS_SPACE,
421 TLBSHOOT__MAX, 421 TLBSHOOT__MAX,
422} tlbwhy_t; 422} tlbwhy_t;
423 423
424void pmap_tlb_init(void); 424void pmap_tlb_init(void);
425void pmap_tlb_cpu_init(struct cpu_info *); 425void pmap_tlb_cpu_init(struct cpu_info *);
426void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t); 426void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
427void pmap_tlb_shootnow(void); 427void pmap_tlb_shootnow(void);
428void pmap_tlb_intr(void); 428void pmap_tlb_intr(void);
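A hedged usage sketch of the shootdown interface declared above: after a PTE
has been changed or removed, the caller queues a TLB invalidation for that VA,
tagged with a tlbwhy_t reason, and later forces the queued work out with
pmap_tlb_shootnow().  The previous PTE value is passed so the implementation
can decide whether remote invalidation is actually needed.  The variable names
below are placeholders.

	/* Sketch: opte is the PTE value read before it was modified. */
	pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_REMOVE_PTE);
	/* ... possibly more updates ... */
	pmap_tlb_shootnow();
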
429 429
430#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */ 430#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
431#define PMAP_FORK /* turn on pmap_fork interface */ 431#define PMAP_FORK /* turn on pmap_fork interface */
432 432
433/* 433/*
434 * inline functions 434 * inline functions
435 */ 435 */
436 436
437/* 437/*
438 * pmap_update_pg: flush one page from the TLB (or flush the whole thing 438 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
439 * if hardware doesn't support one-page flushing) 439 * if hardware doesn't support one-page flushing)
440 */ 440 */
441 441
442__inline static void __unused 442__inline static void __unused
443pmap_update_pg(vaddr_t va) 443pmap_update_pg(vaddr_t va)
444{ 444{
445 invlpg(va); 445 invlpg(va);
446} 446}
447 447
448/* 448/*
449 * pmap_page_protect: change the protection of all recorded mappings 449 * pmap_page_protect: change the protection of all recorded mappings
450 * of a managed page 450 * of a managed page
451 * 451 *
452 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs 452 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
453 * => we only have to worry about making the page more protected. 453 * => we only have to worry about making the page more protected.
454 * unprotecting a page is done on-demand at fault time. 454 * unprotecting a page is done on-demand at fault time.
455 */ 455 */
456 456
457__inline static void __unused 457__inline static void __unused
458pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 458pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
459{ 459{
460 if ((prot & VM_PROT_WRITE) == 0) { 460 if ((prot & VM_PROT_WRITE) == 0) {
461 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 461 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
462 (void)pmap_clear_attrs(pg, PP_ATTRS_W); 462 (void)pmap_clear_attrs(pg, PP_ATTRS_W);
463 } else { 463 } else {
464 pmap_page_remove(pg); 464 pmap_page_remove(pg);
465 } 465 }
466 } 466 }
467} 467}
468 468
469/* 469/*
470 * pmap_pv_protect: change the protection of all recorded mappings 470 * pmap_pv_protect: change the protection of all recorded mappings
471 * of an unmanaged page 471 * of an unmanaged page
472 */ 472 */
473 473
474__inline static void __unused 474__inline static void __unused
475pmap_pv_protect(paddr_t pa, vm_prot_t prot) 475pmap_pv_protect(paddr_t pa, vm_prot_t prot)
476{ 476{
477 if ((prot & VM_PROT_WRITE) == 0) { 477 if ((prot & VM_PROT_WRITE) == 0) {
478 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 478 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
479 (void)pmap_pv_clear_attrs(pa, PP_ATTRS_W); 479 (void)pmap_pv_clear_attrs(pa, PP_ATTRS_W);
480 } else { 480 } else {
481 pmap_pv_remove(pa); 481 pmap_pv_remove(pa);
482 } 482 }
483 } 483 }
484} 484}
485 485
486/* 486/*
487 * pmap_protect: change the protection of pages in a pmap 487 * pmap_protect: change the protection of pages in a pmap
488 * 488 *
489 * => this function is a frontend for pmap_remove/pmap_write_protect 489 * => this function is a frontend for pmap_remove/pmap_write_protect
490 * => we only have to worry about making the page more protected. 490 * => we only have to worry about making the page more protected.
491 * unprotecting a page is done on-demand at fault time. 491 * unprotecting a page is done on-demand at fault time.
492 */ 492 */
493 493
494__inline static void __unused 494__inline static void __unused
495pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 495pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
496{ 496{
497 if ((prot & VM_PROT_WRITE) == 0) { 497 if ((prot & VM_PROT_WRITE) == 0) {
498 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) { 498 if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
499 pmap_write_protect(pmap, sva, eva, prot); 499 pmap_write_protect(pmap, sva, eva, prot);
500 } else { 500 } else {
501 pmap_remove(pmap, sva, eva); 501 pmap_remove(pmap, sva, eva);
502 } 502 }
503 } 503 }
504} 504}
505 505
506/* 506/*
507 * various address inlines 507 * various address inlines
508 * 508 *
509 * vtopte: return a pointer to the PTE mapping a VA, works only for 509 * vtopte: return a pointer to the PTE mapping a VA, works only for
510 * user and PT addresses 510 * user and PT addresses
511 * 511 *
512 * kvtopte: return a pointer to the PTE mapping a kernel VA 512 * kvtopte: return a pointer to the PTE mapping a kernel VA
513 */ 513 */
514 514
515#include <lib/libkern/libkern.h> 515#include <lib/libkern/libkern.h>
516 516
517static __inline pt_entry_t * __unused 517static __inline pt_entry_t * __unused
518vtopte(vaddr_t va) 518vtopte(vaddr_t va)
519{ 519{
520 520
521 KASSERT(va < VM_MIN_KERNEL_ADDRESS); 521 KASSERT(va < VM_MIN_KERNEL_ADDRESS);
522 522
523 return (PTE_BASE + pl1_i(va)); 523 return (PTE_BASE + pl1_i(va));
524} 524}
525 525
526static __inline pt_entry_t * __unused 526static __inline pt_entry_t * __unused
527kvtopte(vaddr_t va) 527kvtopte(vaddr_t va)
528{ 528{
529 pd_entry_t *pde; 529 pd_entry_t *pde;
530 530
531 KASSERT(va >= VM_MIN_KERNEL_ADDRESS); 531 KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
532 532
533 pde = L2_BASE + pl2_i(va); 533 pde = L2_BASE + pl2_i(va);
534 if (*pde & PTE_PS) 534 if (*pde & PTE_PS)
535 return ((pt_entry_t *)pde); 535 return ((pt_entry_t *)pde);
536 536
537 return (PTE_BASE + pl1_i(va)); 537 return (PTE_BASE + pl1_i(va));
538} 538}
539 539
540paddr_t vtophys(vaddr_t); 540paddr_t vtophys(vaddr_t);
541vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t); 541vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
542void pmap_cpu_init_late(struct cpu_info *); 542void pmap_cpu_init_late(struct cpu_info *);
543 543
544#ifdef XENPV 544#ifdef XENPV
545#include <sys/bitops.h> 545#include <sys/bitops.h>
546 546
547#define XPTE_MASK L1_FRAME 547#define XPTE_MASK L1_FRAME
548/* Selects the index of a PTE in (A)PTE_BASE */ 548/* Selects the index of a PTE in (A)PTE_BASE */
549#define XPTE_SHIFT (L1_SHIFT - ilog2(sizeof(pt_entry_t))) 549#define XPTE_SHIFT (L1_SHIFT - ilog2(sizeof(pt_entry_t)))
550 550
551/* PTE access inline functions */ 551/* PTE access inline functions */
552 552
553/* 553/*
554 * Get the machine address of the PTE pointed to. 554 * Get the machine address of the PTE pointed to.
555 * We use the hardware MMU to read the value, so this works only for levels 1-3. 555 * We use the hardware MMU to read the value, so this works only for levels 1-3.
556 */ 556 */
557 557
558static __inline paddr_t 558static __inline paddr_t
559xpmap_ptetomach(pt_entry_t *pte) 559xpmap_ptetomach(pt_entry_t *pte)
560{ 560{
561 pt_entry_t *up_pte; 561 pt_entry_t *up_pte;
562 vaddr_t va = (vaddr_t) pte; 562 vaddr_t va = (vaddr_t) pte;
563 563
564 va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE; 564 va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
565 up_pte = (pt_entry_t *) va; 565 up_pte = (pt_entry_t *) va;
566 566
567 return (paddr_t) (((*up_pte) & PTE_FRAME) + (((vaddr_t) pte) & (~PTE_FRAME & ~VA_SIGN_MASK))); 567 return (paddr_t) (((*up_pte) & PTE_FRAME) + (((vaddr_t) pte) & (~PTE_FRAME & ~VA_SIGN_MASK)));
568} 568}
569 569
570/* Xen helpers to change bits of a pte */ 570/* Xen helpers to change bits of a pte */
571#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */ 571#define XPMAP_UPDATE_DIRECT 1 /* Update direct map entry flags too */
572 572
573paddr_t vtomach(vaddr_t); 573paddr_t vtomach(vaddr_t);
574#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT) 574#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
575#endif /* XENPV */ 575#endif /* XENPV */
576 576
577/* pmap functions with machine addresses */ 577/* pmap functions with machine addresses */
578void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int); 578void pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
579int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t, 579int pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
580 vm_prot_t, u_int, int); 580 vm_prot_t, u_int, int);
581bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *); 581bool pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
582 582
583paddr_t pmap_get_physpage(void); 583paddr_t pmap_get_physpage(void);
584 584
585/* 585/*
586 * Hooks for the pool allocator. 586 * Hooks for the pool allocator.
587 */ 587 */
588#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va)) 588#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
589 589
590#ifdef __HAVE_PCPU_AREA 590#ifdef __HAVE_PCPU_AREA
591extern struct pcpu_area *pcpuarea; 591extern struct pcpu_area *pcpuarea;
592#define PDIR_SLOT_PCPU 510 592#define PDIR_SLOT_PCPU 510
593#define PMAP_PCPU_BASE (VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4))) 593#define PMAP_PCPU_BASE (VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
594#endif 594#endif
595 595
596#ifdef __HAVE_DIRECT_MAP 596#ifdef __HAVE_DIRECT_MAP
597 597
598extern vaddr_t pmap_direct_base; 598extern vaddr_t pmap_direct_base;
599extern vaddr_t pmap_direct_end; 599extern vaddr_t pmap_direct_end;
600 600
601#define PMAP_DIRECT_BASE pmap_direct_base 601#define PMAP_DIRECT_BASE pmap_direct_base
602#define PMAP_DIRECT_END pmap_direct_end 602#define PMAP_DIRECT_END pmap_direct_end
603 603
604#define PMAP_DIRECT_MAP(pa) ((vaddr_t)PMAP_DIRECT_BASE + (pa)) 604#define PMAP_DIRECT_MAP(pa) ((vaddr_t)PMAP_DIRECT_BASE + (pa))
605#define PMAP_DIRECT_UNMAP(va) ((paddr_t)(va) - PMAP_DIRECT_BASE) 605#define PMAP_DIRECT_UNMAP(va) ((paddr_t)(va) - PMAP_DIRECT_BASE)
606 606
607/* 607/*
608 * Alternate mapping hooks for pool pages. 608 * Alternate mapping hooks for pool pages.
609 */ 609 */
610#define PMAP_MAP_POOLPAGE(pa) PMAP_DIRECT_MAP((pa)) 610#define PMAP_MAP_POOLPAGE(pa) PMAP_DIRECT_MAP((pa))
611#define PMAP_UNMAP_POOLPAGE(va) PMAP_DIRECT_UNMAP((va)) 611#define PMAP_UNMAP_POOLPAGE(va) PMAP_DIRECT_UNMAP((va))
612 612
613#endif /* __HAVE_DIRECT_MAP */ 613#endif /* __HAVE_DIRECT_MAP */
614 614
615void x86_stos(void *, long, long); 
616void x86_movs(void *, void *, long); 615void x86_movs(void *, void *, long);
617 616
618#endif /* _KERNEL */ 617#endif /* _KERNEL */
619 618
620#endif /* _X86_PMAP_H_ */ 619#endif /* _X86_PMAP_H_ */