Sat Dec 22 21:27:22 2018 UTC
Introduce a weak alias method of exporting different implementations
of the same API.

For example, the amd64 native implementation of invlpg() now becomes
amd64_invlpg(), with a weak symbol export of invlpg(), while the XEN
implementation becomes xen_invlpg(), also weakly exported as invlpg().

Note that linking both implementations together without a strong
override function named invlpg() would be a mistake: we have limited
control over which of the two weak symbols the linker finally exports
as invlpg(), so the wrong function could end up being exported. This
change avoids that situation.

In that case we should instead provide an override function invlpg()
that dispatches the call at runtime to the appropriate backing
function (amd64_invlpg() when running natively, xen_invlpg() when
running under XEN virtualisation).
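
Purely as an illustration (no such function is added by this change),
such an override might look roughly like the C sketch below. The
detection helper vm_guest_is_xen() is a made-up placeholder for
whatever PVHVM detection mechanism is eventually used:

    /*
     * Hypothetical sketch, not part of this commit.  The strong
     * symbol invlpg() overrides both weak aliases and picks a
     * backing implementation at runtime.
     */
    void amd64_invlpg(vaddr_t);   /* native backend, weak alias invlpg() */
    void xen_invlpg(vaddr_t);     /* XEN PV backend, weak alias invlpg() */

    void
    invlpg(vaddr_t va)
    {
            if (vm_guest_is_xen())  /* assumed runtime check */
                    xen_invlpg(va);
            else
                    amd64_invlpg(va);
    }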

This change does not introduce such a function and therefore does not
alter builds to include native as well as XEN implementations in the
same binary. This will be done later, with the introduction of XEN
PVHVM mode, where precisely such a runtime switch is required.

No operational changes are introduced by this commit.


(cherry)
diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S
diff -r1.25 -r1.26 src/sys/arch/i386/i386/cpufunc.S
diff -r1.18 -r1.19 src/sys/arch/i386/i386/i386func.S
diff -r1.22 -r1.23 src/sys/arch/xen/x86/xenfunc.c

cvs diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S

--- src/sys/arch/amd64/amd64/cpufunc.S 2018/07/21 06:09:13 1.33
+++ src/sys/arch/amd64/amd64/cpufunc.S 2018/12/22 21:27:22 1.34
@@ -1,766 +1,801 @@
1/* $NetBSD: cpufunc.S,v 1.33 2018/07/21 06:09:13 maxv Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 */ 34 */
35 35
36#include <sys/errno.h> 36#include <sys/errno.h>
37 37
38#include <machine/asm.h> 38#include <machine/asm.h>
39#include <machine/frameasm.h> 39#include <machine/frameasm.h>
40#include <machine/specialreg.h> 40#include <machine/specialreg.h>
41#include <machine/segments.h> 41#include <machine/segments.h>
42 42
43#include "opt_xen.h" 43#include "opt_xen.h"
44 44
45#include "assym.h" 45#include "assym.h"
46 46
47/* Small and slow, so align less. */ 47/* Small and slow, so align less. */
48#undef _ALIGN_TEXT 48#undef _ALIGN_TEXT
49#define _ALIGN_TEXT .align 8 49#define _ALIGN_TEXT .align 8
50 50
51ENTRY(x86_lfence) 51ENTRY(x86_lfence)
52 lfence 52 lfence
53 ret 53 ret
54END(x86_lfence) 54END(x86_lfence)
55 55
56ENTRY(x86_sfence) 56ENTRY(x86_sfence)
57 sfence 57 sfence
58 ret 58 ret
59END(x86_sfence) 59END(x86_sfence)
60 60
61ENTRY(x86_mfence) 61ENTRY(x86_mfence)
62 mfence 62 mfence
63 ret 63 ret
64END(x86_mfence) 64END(x86_mfence)
65 65
 66/*
 67 * These functions below should always be accessed via the corresponding wrapper
 68 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 69 *
 70 * We use this rather roundabout method so that a runtime wrapper function may
 71 * be made available for PVHVM, which could override both native and PV aliases
 72 * and decide which to invoke at run time.
 73 */
 74
 75WEAK_ALIAS(invlpg, amd64_invlpg)
 76WEAK_ALIAS(lidt, amd64_lidt)
 77WEAK_ALIAS(lldt, amd64_lldt)
 78WEAK_ALIAS(ltr, amd64_ltr)
 79WEAK_ALIAS(lcr0, amd64_lcr0)
 80WEAK_ALIAS(rcr0, amd64_rcr0)
 81WEAK_ALIAS(rcr2, amd64_rcr2)
 82WEAK_ALIAS(lcr2, amd64_lcr2)
 83WEAK_ALIAS(rcr3, amd64_rcr3)
 84WEAK_ALIAS(lcr3, amd64_lcr3)
 85WEAK_ALIAS(tlbflush, amd64_tlbflush)
 86WEAK_ALIAS(tlbflushg, amd64_tlbflushg)
 87WEAK_ALIAS(rdr0, amd64_rdr0)
 88WEAK_ALIAS(ldr0, amd64_ldr0)
 89WEAK_ALIAS(rdr1, amd64_rdr1)
 90WEAK_ALIAS(ldr1, amd64_ldr1)
 91WEAK_ALIAS(rdr2, amd64_rdr2)
 92WEAK_ALIAS(ldr2, amd64_ldr2)
 93WEAK_ALIAS(rdr3, amd64_rdr3)
 94WEAK_ALIAS(ldr3, amd64_ldr3)
 95WEAK_ALIAS(rdr6, amd64_rdr6)
 96WEAK_ALIAS(ldr6, amd64_ldr6)
 97WEAK_ALIAS(rdr7, amd64_rdr7)
 98WEAK_ALIAS(ldr7, amd64_ldr7)
 99WEAK_ALIAS(wbinvd, amd64_wbinvd)
 100
66#ifndef XEN 101#ifndef XEN
67ENTRY(invlpg) 102ENTRY(amd64_invlpg)
68 invlpg (%rdi) 103 invlpg (%rdi)
69 ret 104 ret
70END(invlpg) 105END(amd64_invlpg)
71 106
72ENTRY(lidt) 107ENTRY(amd64_lidt)
73 lidt (%rdi) 108 lidt (%rdi)
74 ret 109 ret
75END(lidt) 110END(amd64_lidt)
76 111
77ENTRY(lldt) 112ENTRY(amd64_lldt)
78 cmpl %edi, CPUVAR(CURLDT) 113 cmpl %edi, CPUVAR(CURLDT)
79 jne 1f 114 jne 1f
80 ret 115 ret
811: 1161:
82 movl %edi, CPUVAR(CURLDT) 117 movl %edi, CPUVAR(CURLDT)
83 lldt %di 118 lldt %di
84 ret 119 ret
85END(lldt) 120END(amd64_lldt)
86 121
87ENTRY(ltr) 122ENTRY(amd64_ltr)
88 ltr %di 123 ltr %di
89 ret 124 ret
90END(ltr) 125END(amd64_ltr)
91 126
92ENTRY(lcr0) 127ENTRY(amd64_lcr0)
93 movq %rdi, %cr0 128 movq %rdi, %cr0
94 ret 129 ret
95END(lcr0) 130END(amd64_lcr0)
96 131
97ENTRY(rcr0) 132ENTRY(amd64_rcr0)
98 movq %cr0, %rax 133 movq %cr0, %rax
99 ret 134 ret
100END(rcr0) 135END(amd64_rcr0)
101 136
102ENTRY(lcr2) 137ENTRY(amd64_lcr2)
103 movq %rdi, %cr2 138 movq %rdi, %cr2
104 ret 139 ret
105END(lcr2) 140END(amd64_lcr2)
106 141
107ENTRY(rcr2) 142ENTRY(amd64_rcr2)
108 movq %cr2, %rax 143 movq %cr2, %rax
109 ret 144 ret
110END(rcr2) 145END(amd64_rcr2)
111 146
112ENTRY(lcr3) 147ENTRY(amd64_lcr3)
113 movq %rdi, %cr3 148 movq %rdi, %cr3
114 ret 149 ret
115END(lcr3) 150END(amd64_lcr3)
116 151
117ENTRY(rcr3) 152ENTRY(amd64_rcr3)
118 movq %cr3, %rax 153 movq %cr3, %rax
119 ret 154 ret
120END(rcr3) 155END(amd64_rcr3)
121#endif 156#endif
122 157
123ENTRY(lcr4) 158ENTRY(lcr4)
124 movq %rdi, %cr4 159 movq %rdi, %cr4
125 ret 160 ret
126END(lcr4) 161END(lcr4)
127 162
128ENTRY(rcr4) 163ENTRY(rcr4)
129 movq %cr4, %rax 164 movq %cr4, %rax
130 ret 165 ret
131END(rcr4) 166END(rcr4)
132 167
133ENTRY(lcr8) 168ENTRY(lcr8)
134 movq %rdi, %cr8 169 movq %rdi, %cr8
135 ret 170 ret
136END(lcr8) 171END(lcr8)
137 172
138ENTRY(rcr8) 173ENTRY(rcr8)
139 movq %cr8, %rax 174 movq %cr8, %rax
140 ret 175 ret
141END(rcr8) 176END(rcr8)
142 177
143/* 178/*
144 * Big hammer: flush all TLB entries, including ones from PTE's 179 * Big hammer: flush all TLB entries, including ones from PTE's
145 * with the G bit set. This should only be necessary if TLB 180 * with the G bit set. This should only be necessary if TLB
146 * shootdown falls far behind. 181 * shootdown falls far behind.
147 * 182 *
148 * Intel Architecture Software Developer's Manual, Volume 3, 183 * Intel Architecture Software Developer's Manual, Volume 3,
149 * System Programming, section 9.10, "Invalidating the 184 * System Programming, section 9.10, "Invalidating the
150 * Translation Lookaside Buffers (TLBS)": 185 * Translation Lookaside Buffers (TLBS)":
151 * "The following operations invalidate all TLB entries, irrespective 186 * "The following operations invalidate all TLB entries, irrespective
152 * of the setting of the G flag: 187 * of the setting of the G flag:
153 * ... 188 * ...
154 * "(P6 family processors only): Writing to control register CR4 to 189 * "(P6 family processors only): Writing to control register CR4 to
155 * modify the PSE, PGE, or PAE flag." 190 * modify the PSE, PGE, or PAE flag."
156 * 191 *
157 * (the alternatives not quoted above are not an option here.) 192 * (the alternatives not quoted above are not an option here.)
158 * 193 *
159 * If PGE is not in use, we reload CR3. 194 * If PGE is not in use, we reload CR3.
160 */ 195 */
161#ifndef XEN 196#ifndef XEN
162ENTRY(tlbflushg) 197ENTRY(amd64_tlbflushg)
163 movq %cr4, %rax 198 movq %cr4, %rax
164 testq $CR4_PGE, %rax 199 testq $CR4_PGE, %rax
165 jz 1f 200 jz 1f
166 movq %rax, %rdx 201 movq %rax, %rdx
167 andq $~CR4_PGE, %rdx 202 andq $~CR4_PGE, %rdx
168 movq %rdx, %cr4 203 movq %rdx, %cr4
169 movq %rax, %cr4 204 movq %rax, %cr4
170 ret 205 ret
171END(tlbflushg) 206END(amd64_tlbflushg)
172 207
173ENTRY(tlbflush) 208ENTRY(amd64_tlbflush)
1741: 2091:
175 movq %cr3, %rax 210 movq %cr3, %rax
176 movq %rax, %cr3 211 movq %rax, %cr3
177 ret 212 ret
178END(tlbflush) 213END(amd64_tlbflush)
179 214
180ENTRY(ldr0) 215ENTRY(amd64_ldr0)
181 movq %rdi, %dr0 216 movq %rdi, %dr0
182 ret 217 ret
183END(ldr0) 218END(amd64_ldr0)
184 219
185ENTRY(rdr0) 220ENTRY(amd64_rdr0)
186 movq %dr0, %rax 221 movq %dr0, %rax
187 ret 222 ret
188END(rdr0) 223END(amd64_rdr0)
189 224
190ENTRY(ldr1) 225ENTRY(amd64_ldr1)
191 movq %rdi, %dr1 226 movq %rdi, %dr1
192 ret 227 ret
193END(ldr1) 228END(amd64_ldr1)
194 229
195ENTRY(rdr1) 230ENTRY(amd64_rdr1)
196 movq %dr1, %rax 231 movq %dr1, %rax
197 ret 232 ret
198END(rdr1) 233END(amd64_rdr1)
199 234
200ENTRY(ldr2) 235ENTRY(amd64_ldr2)
201 movq %rdi, %dr2 236 movq %rdi, %dr2
202 ret 237 ret
203END(ldr2) 238END(amd64_ldr2)
204 239
205ENTRY(rdr2) 240ENTRY(amd64_rdr2)
206 movq %dr2, %rax 241 movq %dr2, %rax
207 ret 242 ret
208END(rdr2) 243END(amd64_rdr2)
209 244
210ENTRY(ldr3) 245ENTRY(amd64_ldr3)
211 movq %rdi, %dr3 246 movq %rdi, %dr3
212 ret 247 ret
213END(ldr3) 248END(amd64_ldr3)
214 249
215ENTRY(rdr3) 250ENTRY(amd64_rdr3)
216 movq %dr3, %rax 251 movq %dr3, %rax
217 ret 252 ret
218END(rdr3) 253END(amd64_rdr3)
219 254
220ENTRY(ldr6) 255ENTRY(amd64_ldr6)
221 movq %rdi, %dr6 256 movq %rdi, %dr6
222 ret 257 ret
223END(ldr6) 258END(amd64_ldr6)
224 259
225ENTRY(rdr6) 260ENTRY(amd64_rdr6)
226 movq %dr6, %rax 261 movq %dr6, %rax
227 ret 262 ret
228END(rdr6) 263END(amd64_rdr6)
229 264
230ENTRY(ldr7) 265ENTRY(amd64_ldr7)
231 movq %rdi, %dr7 266 movq %rdi, %dr7
232 ret 267 ret
233END(ldr7) 268END(amd64_ldr7)
234 269
235ENTRY(rdr7) 270ENTRY(amd64_rdr7)
236 movq %dr7, %rax 271 movq %dr7, %rax
237 ret 272 ret
238END(rdr7) 273END(amd64_rdr7)
239 274
240ENTRY(x86_disable_intr) 275ENTRY(x86_disable_intr)
241 cli 276 cli
242 ret 277 ret
243END(x86_disable_intr) 278END(x86_disable_intr)
244 279
245ENTRY(x86_enable_intr) 280ENTRY(x86_enable_intr)
246 sti 281 sti
247 ret 282 ret
248END(x86_enable_intr) 283END(x86_enable_intr)
249 284
250ENTRY(x86_read_flags) 285ENTRY(x86_read_flags)
251 pushfq 286 pushfq
252 popq %rax 287 popq %rax
253 ret 288 ret
254END(x86_read_flags) 289END(x86_read_flags)
255 290
256STRONG_ALIAS(x86_read_psl,x86_read_flags) 291STRONG_ALIAS(x86_read_psl,x86_read_flags)
257 292
258ENTRY(x86_write_flags) 293ENTRY(x86_write_flags)
259 pushq %rdi 294 pushq %rdi
260 popfq 295 popfq
261 ret 296 ret
262END(x86_write_flags) 297END(x86_write_flags)
263 298
264STRONG_ALIAS(x86_write_psl,x86_write_flags) 299STRONG_ALIAS(x86_write_psl,x86_write_flags)
265#endif /* XEN */ 300#endif /* XEN */
266 301
267ENTRY(rdmsr) 302ENTRY(rdmsr)
268 movq %rdi, %rcx 303 movq %rdi, %rcx
269 xorq %rax, %rax 304 xorq %rax, %rax
270 rdmsr 305 rdmsr
271 shlq $32, %rdx 306 shlq $32, %rdx
272 orq %rdx, %rax 307 orq %rdx, %rax
273 ret 308 ret
274END(rdmsr) 309END(rdmsr)
275 310
276ENTRY(wrmsr) 311ENTRY(wrmsr)
277 movq %rdi, %rcx 312 movq %rdi, %rcx
278 movq %rsi, %rax 313 movq %rsi, %rax
279 movq %rsi, %rdx 314 movq %rsi, %rdx
280 shrq $32, %rdx 315 shrq $32, %rdx
281 wrmsr 316 wrmsr
282 ret 317 ret
283END(wrmsr) 318END(wrmsr)
284 319
285ENTRY(rdmsr_locked) 320ENTRY(rdmsr_locked)
286 movq %rdi, %rcx 321 movq %rdi, %rcx
287 xorq %rax, %rax 322 xorq %rax, %rax
288 movl $OPTERON_MSR_PASSCODE, %edi 323 movl $OPTERON_MSR_PASSCODE, %edi
289 rdmsr 324 rdmsr
290 shlq $32, %rdx 325 shlq $32, %rdx
291 orq %rdx, %rax 326 orq %rdx, %rax
292 ret 327 ret
293END(rdmsr_locked) 328END(rdmsr_locked)
294 329
295ENTRY(wrmsr_locked) 330ENTRY(wrmsr_locked)
296 movq %rdi, %rcx 331 movq %rdi, %rcx
297 movq %rsi, %rax 332 movq %rsi, %rax
298 movq %rsi, %rdx 333 movq %rsi, %rdx
299 shrq $32, %rdx 334 shrq $32, %rdx
300 movl $OPTERON_MSR_PASSCODE, %edi 335 movl $OPTERON_MSR_PASSCODE, %edi
301 wrmsr 336 wrmsr
302 ret 337 ret
303END(wrmsr_locked) 338END(wrmsr_locked)
304 339
305/* 340/*
306 * Support for reading MSRs in the safe manner (returns EFAULT on fault) 341 * Support for reading MSRs in the safe manner (returns EFAULT on fault)
307 */ 342 */
308/* int rdmsr_safe(u_int msr, uint64_t *data) */ 343/* int rdmsr_safe(u_int msr, uint64_t *data) */
309ENTRY(rdmsr_safe) 344ENTRY(rdmsr_safe)
310 movq CPUVAR(CURLWP), %r8 345 movq CPUVAR(CURLWP), %r8
311 movq L_PCB(%r8), %r8 346 movq L_PCB(%r8), %r8
312 movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8) 347 movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8)
313 348
314 movl %edi, %ecx /* u_int msr */ 349 movl %edi, %ecx /* u_int msr */
315 rdmsr /* Read MSR pointed by %ecx. Returns 350 rdmsr /* Read MSR pointed by %ecx. Returns
316 hi byte in edx, lo in %eax */ 351 hi byte in edx, lo in %eax */
317 salq $32, %rdx /* sign-shift %rdx left */ 352 salq $32, %rdx /* sign-shift %rdx left */
318 movl %eax, %eax /* zero-extend %eax -> %rax */ 353 movl %eax, %eax /* zero-extend %eax -> %rax */
319 orq %rdx, %rax 354 orq %rdx, %rax
320 movq %rax, (%rsi) /* *data */ 355 movq %rax, (%rsi) /* *data */
321 xorq %rax, %rax /* "no error" */ 356 xorq %rax, %rax /* "no error" */
322 357
323 movq %rax, PCB_ONFAULT(%r8) 358 movq %rax, PCB_ONFAULT(%r8)
324 ret 359 ret
325END(rdmsr_safe) 360END(rdmsr_safe)
326 361
327ENTRY(rdxcr) 362ENTRY(rdxcr)
328 movq %rdi, %rcx 363 movq %rdi, %rcx
329 xgetbv 364 xgetbv
330 shlq $32, %rdx 365 shlq $32, %rdx
331 orq %rdx, %rax 366 orq %rdx, %rax
332 ret 367 ret
333END(rdxcr) 368END(rdxcr)
334 369
335ENTRY(wrxcr) 370ENTRY(wrxcr)
336 movq %rdi, %rcx 371 movq %rdi, %rcx
337 movq %rsi, %rax 372 movq %rsi, %rax
338 movq %rsi, %rdx 373 movq %rsi, %rdx
339 shrq $32, %rdx 374 shrq $32, %rdx
340 xsetbv 375 xsetbv
341 ret 376 ret
342END(wrxcr) 377END(wrxcr)
343 378
344/* 379/*
345 * MSR operations fault handler 380 * MSR operations fault handler
346 */ 381 */
347ENTRY(msr_onfault) 382ENTRY(msr_onfault)
348 movq CPUVAR(CURLWP), %r8 383 movq CPUVAR(CURLWP), %r8
349 movq L_PCB(%r8), %r8 384 movq L_PCB(%r8), %r8
350 movq $0, PCB_ONFAULT(%r8) 385 movq $0, PCB_ONFAULT(%r8)
351 movl $EFAULT, %eax 386 movl $EFAULT, %eax
352 ret 387 ret
353END(msr_onfault) 388END(msr_onfault)
354 389
355#ifndef XEN 390#ifndef XEN
356ENTRY(wbinvd) 391ENTRY(wbinvd)
357 wbinvd 392 wbinvd
358 ret 393 ret
359END(wbinvd) 394END(wbinvd)
360#endif 395#endif
361 396
362ENTRY(cpu_counter) 397ENTRY(cpu_counter)
363 xorq %rax, %rax 398 xorq %rax, %rax
364 rdtsc 399 rdtsc
365 shlq $32, %rdx 400 shlq $32, %rdx
366 orq %rdx, %rax 401 orq %rdx, %rax
367 addq CPUVAR(CC_SKEW), %rax 402 addq CPUVAR(CC_SKEW), %rax
368 ret 403 ret
369END(cpu_counter) 404END(cpu_counter)
370 405
371ENTRY(cpu_counter32) 406ENTRY(cpu_counter32)
372 rdtsc 407 rdtsc
373 addl CPUVAR(CC_SKEW), %eax 408 addl CPUVAR(CC_SKEW), %eax
374 ret 409 ret
375END(cpu_counter32) 410END(cpu_counter32)
376 411
377ENTRY(rdpmc) 412ENTRY(rdpmc)
378 movq %rdi, %rcx 413 movq %rdi, %rcx
379 xorq %rax, %rax 414 xorq %rax, %rax
380 rdpmc 415 rdpmc
381 shlq $32, %rdx 416 shlq $32, %rdx
382 orq %rdx, %rax 417 orq %rdx, %rax
383 ret 418 ret
384END(rdpmc) 419END(rdpmc)
385 420
386ENTRY(rdtsc) 421ENTRY(rdtsc)
387 xorq %rax,%rax 422 xorq %rax,%rax
388 rdtsc 423 rdtsc
389 shlq $32,%rdx 424 shlq $32,%rdx
390 orq %rdx,%rax 425 orq %rdx,%rax
391 ret 426 ret
392END(rdtsc) 427END(rdtsc)
393 428
394ENTRY(breakpoint) 429ENTRY(breakpoint)
395 pushq %rbp 430 pushq %rbp
396 movq %rsp, %rbp 431 movq %rsp, %rbp
397 int $0x03 /* paranoid, not 'int3' */ 432 int $0x03 /* paranoid, not 'int3' */
398 leave 433 leave
399 ret 434 ret
400END(breakpoint) 435END(breakpoint)
401 436
402ENTRY(x86_curcpu) 437ENTRY(x86_curcpu)
403 movq %gs:(CPU_INFO_SELF), %rax 438 movq %gs:(CPU_INFO_SELF), %rax
404 ret 439 ret
405END(x86_curcpu) 440END(x86_curcpu)
406 441
407ENTRY(x86_curlwp) 442ENTRY(x86_curlwp)
408 movq %gs:(CPU_INFO_CURLWP), %rax 443 movq %gs:(CPU_INFO_CURLWP), %rax
409 ret 444 ret
410END(x86_curlwp) 445END(x86_curlwp)
411 446
412ENTRY(cpu_set_curpri) 447ENTRY(cpu_set_curpri)
413 movl %edi, %gs:(CPU_INFO_CURPRIORITY) 448 movl %edi, %gs:(CPU_INFO_CURPRIORITY)
414 ret 449 ret
415END(cpu_set_curpri) 450END(cpu_set_curpri)
416 451
417ENTRY(__byte_swap_u32_variable) 452ENTRY(__byte_swap_u32_variable)
418 movl %edi, %eax 453 movl %edi, %eax
419 bswapl %eax 454 bswapl %eax
420 ret 455 ret
421END(__byte_swap_u32_variable) 456END(__byte_swap_u32_variable)
422 457
423ENTRY(__byte_swap_u16_variable) 458ENTRY(__byte_swap_u16_variable)
424 movl %edi, %eax 459 movl %edi, %eax
425 xchgb %al, %ah 460 xchgb %al, %ah
426 ret 461 ret
427END(__byte_swap_u16_variable) 462END(__byte_swap_u16_variable)
428 463
429/* 464/*
430 * void lgdt(struct region_descriptor *rdp); 465 * void lgdt(struct region_descriptor *rdp);
431 * 466 *
432 * Load a new GDT pointer (and do any necessary cleanup). 467 * Load a new GDT pointer (and do any necessary cleanup).
433 * XXX It's somewhat questionable whether reloading all the segment registers 468 * XXX It's somewhat questionable whether reloading all the segment registers
434 * is necessary, since the actual descriptor data is not changed except by 469 * is necessary, since the actual descriptor data is not changed except by
435 * process creation and exit, both of which clean up via task switches. 470 * process creation and exit, both of which clean up via task switches.
436 */ 471 */
437#ifndef XEN 472#ifndef XEN
438ENTRY(lgdt) 473ENTRY(lgdt)
439 /* Reload the descriptor table. */ 474 /* Reload the descriptor table. */
440 movq %rdi,%rax 475 movq %rdi,%rax
441 lgdt (%rax) 476 lgdt (%rax)
442 /* Flush the prefetch q. */ 477 /* Flush the prefetch q. */
443 jmp 1f 478 jmp 1f
444 nop 479 nop
4451: jmp _C_LABEL(lgdt_finish) 4801: jmp _C_LABEL(lgdt_finish)
446END(lgdt) 481END(lgdt)
447#endif 482#endif
448 483
449/* 484/*
450 * void lgdt_finish(void); 485 * void lgdt_finish(void);
451 * Reload segments after a GDT change 486 * Reload segments after a GDT change
452 */ 487 */
453ENTRY(lgdt_finish) 488ENTRY(lgdt_finish)
454 movl $GSEL(GDATA_SEL, SEL_KPL),%eax 489 movl $GSEL(GDATA_SEL, SEL_KPL),%eax
455 movl %eax,%ds 490 movl %eax,%ds
456 movl %eax,%es 491 movl %eax,%es
457 movl %eax,%ss 492 movl %eax,%ss
458 jmp _C_LABEL(x86_flush) 493 jmp _C_LABEL(x86_flush)
459END(lgdt_finish) 494END(lgdt_finish)
460 495
461/* 496/*
462 * void x86_flush() 497 * void x86_flush()
463 * 498 *
464 * Flush instruction pipelines by doing an intersegment (far) return. 499 * Flush instruction pipelines by doing an intersegment (far) return.
465 */ 500 */
466ENTRY(x86_flush) 501ENTRY(x86_flush)
467 popq %rax 502 popq %rax
468 pushq $GSEL(GCODE_SEL, SEL_KPL) 503 pushq $GSEL(GCODE_SEL, SEL_KPL)
469 pushq %rax 504 pushq %rax
470 lretq 505 lretq
471END(x86_flush) 506END(x86_flush)
472 507
473/* Waits - set up stack frame. */ 508/* Waits - set up stack frame. */
474ENTRY(x86_hlt) 509ENTRY(x86_hlt)
475 pushq %rbp 510 pushq %rbp
476 movq %rsp, %rbp 511 movq %rsp, %rbp
477 hlt 512 hlt
478 leave 513 leave
479 ret 514 ret
480END(x86_hlt) 515END(x86_hlt)
481 516
482/* Waits - set up stack frame. */ 517/* Waits - set up stack frame. */
483ENTRY(x86_stihlt) 518ENTRY(x86_stihlt)
484 pushq %rbp 519 pushq %rbp
485 movq %rsp, %rbp 520 movq %rsp, %rbp
486 sti 521 sti
487 hlt 522 hlt
488 leave 523 leave
489 ret 524 ret
490END(x86_stihlt) 525END(x86_stihlt)
491 526
492ENTRY(x86_monitor) 527ENTRY(x86_monitor)
493 movq %rdi, %rax 528 movq %rdi, %rax
494 movq %rsi, %rcx 529 movq %rsi, %rcx
495 monitor %rax, %rcx, %rdx 530 monitor %rax, %rcx, %rdx
496 ret 531 ret
497END(x86_monitor) 532END(x86_monitor)
498 533
499/* Waits - set up stack frame. */ 534/* Waits - set up stack frame. */
500ENTRY(x86_mwait) 535ENTRY(x86_mwait)
501 pushq %rbp 536 pushq %rbp
502 movq %rsp, %rbp 537 movq %rsp, %rbp
503 movq %rdi, %rax 538 movq %rdi, %rax
504 movq %rsi, %rcx 539 movq %rsi, %rcx
505 mwait %rax, %rcx 540 mwait %rax, %rcx
506 leave 541 leave
507 ret 542 ret
508END(x86_mwait) 543END(x86_mwait)
509 544
510ENTRY(x86_pause) 545ENTRY(x86_pause)
511 pause 546 pause
512 ret 547 ret
513END(x86_pause) 548END(x86_pause)
514 549
515ENTRY(x86_cpuid2) 550ENTRY(x86_cpuid2)
516 movq %rbx, %r8 551 movq %rbx, %r8
517 movq %rdi, %rax 552 movq %rdi, %rax
518 movq %rsi, %rcx 553 movq %rsi, %rcx
519 movq %rdx, %rsi 554 movq %rdx, %rsi
520 cpuid 555 cpuid
521 movl %eax, 0(%rsi) 556 movl %eax, 0(%rsi)
522 movl %ebx, 4(%rsi) 557 movl %ebx, 4(%rsi)
523 movl %ecx, 8(%rsi) 558 movl %ecx, 8(%rsi)
524 movl %edx, 12(%rsi) 559 movl %edx, 12(%rsi)
525 movq %r8, %rbx 560 movq %r8, %rbx
526 ret 561 ret
527END(x86_cpuid2) 562END(x86_cpuid2)
528 563
529ENTRY(x86_getss) 564ENTRY(x86_getss)
530 movl %ss, %eax 565 movl %ss, %eax
531 ret 566 ret
532END(x86_getss) 567END(x86_getss)
533 568
534ENTRY(fldcw) 569ENTRY(fldcw)
535 fldcw (%rdi) 570 fldcw (%rdi)
536 ret 571 ret
537END(fldcw) 572END(fldcw)
538 573
539ENTRY(fnclex) 574ENTRY(fnclex)
540 fnclex 575 fnclex
541 ret 576 ret
542END(fnclex) 577END(fnclex)
543 578
544ENTRY(fninit) 579ENTRY(fninit)
545 fninit 580 fninit
546 ret 581 ret
547END(fninit) 582END(fninit)
548 583
549ENTRY(fnsave) 584ENTRY(fnsave)
550 fnsave (%rdi) 585 fnsave (%rdi)
551 ret 586 ret
552END(fnsave) 587END(fnsave)
553 588
554ENTRY(fnstcw) 589ENTRY(fnstcw)
555 fnstcw (%rdi) 590 fnstcw (%rdi)
556 ret 591 ret
557END(fnstcw) 592END(fnstcw)
558 593
559ENTRY(fngetsw) 594ENTRY(fngetsw)
560 fnstsw %ax 595 fnstsw %ax
561 ret 596 ret
562END(fngetsw) 597END(fngetsw)
563 598
564ENTRY(fnstsw) 599ENTRY(fnstsw)
565 fnstsw (%rdi) 600 fnstsw (%rdi)
566 ret 601 ret
567END(fnstsw) 602END(fnstsw)
568 603
569ENTRY(fp_divide_by_0) 604ENTRY(fp_divide_by_0)
570 fldz 605 fldz
571 fld1 606 fld1
572 fdiv %st, %st(1) 607 fdiv %st, %st(1)
573 fwait 608 fwait
574 ret 609 ret
575END(fp_divide_by_0) 610END(fp_divide_by_0)
576 611
577ENTRY(frstor) 612ENTRY(frstor)
578 frstor (%rdi) 613 frstor (%rdi)
579 ret 614 ret
580END(frstor) 615END(frstor)
581 616
582ENTRY(fwait) 617ENTRY(fwait)
583 fwait 618 fwait
584 ret 619 ret
585END(fwait) 620END(fwait)
586 621
587ENTRY(clts) 622ENTRY(clts)
588 clts 623 clts
589 ret 624 ret
590END(clts) 625END(clts)
591 626
592ENTRY(stts) 627ENTRY(stts)
593 movq %cr0, %rax 628 movq %cr0, %rax
594 orq $CR0_TS, %rax 629 orq $CR0_TS, %rax
595 movq %rax, %cr0 630 movq %rax, %cr0
596 ret 631 ret
597END(stts) 632END(stts)
598 633
599ENTRY(fxsave) 634ENTRY(fxsave)
600 fxsave (%rdi) 635 fxsave (%rdi)
601 ret 636 ret
602END(fxsave) 637END(fxsave)
603 638
604ENTRY(fxrstor) 639ENTRY(fxrstor)
605 fxrstor (%rdi) 640 fxrstor (%rdi)
606 ret 641 ret
607END(fxrstor) 642END(fxrstor)
608 643
609ENTRY(fldummy) 644ENTRY(fldummy)
610 ffree %st(7) 645 ffree %st(7)
611 fldz 646 fldz
612 ret 647 ret
613END(fldummy) 648END(fldummy)
614 649
615ENTRY(xsave) 650ENTRY(xsave)
616 movq %rsi, %rax 651 movq %rsi, %rax
617 movq %rsi, %rdx 652 movq %rsi, %rdx
618 shrq $32, %rdx 653 shrq $32, %rdx
619 xsave (%rdi) 654 xsave (%rdi)
620 ret 655 ret
621END(xsave) 656END(xsave)
622 657
623ENTRY(xsaveopt) 658ENTRY(xsaveopt)
624 movq %rsi, %rax 659 movq %rsi, %rax
625 movq %rsi, %rdx 660 movq %rsi, %rdx
626 shrq $32, %rdx 661 shrq $32, %rdx
627 xsaveopt (%rdi) 662 xsaveopt (%rdi)
628 ret 663 ret
629END(xsaveopt) 664END(xsaveopt)
630 665
631ENTRY(xrstor) 666ENTRY(xrstor)
632 movq %rsi, %rax 667 movq %rsi, %rax
633 movq %rsi, %rdx 668 movq %rsi, %rdx
634 shrq $32, %rdx 669 shrq $32, %rdx
635 xrstor (%rdi) 670 xrstor (%rdi)
636 ret 671 ret
637END(xrstor) 672END(xrstor)
638 673
639ENTRY(x86_stmxcsr) 674ENTRY(x86_stmxcsr)
640 stmxcsr (%rdi) 675 stmxcsr (%rdi)
641 ret 676 ret
642END(x86_stmxcsr) 677END(x86_stmxcsr)
643 678
644ENTRY(x86_ldmxcsr) 679ENTRY(x86_ldmxcsr)
645 ldmxcsr (%rdi) 680 ldmxcsr (%rdi)
646 ret 681 ret
647END(x86_ldmxcsr) 682END(x86_ldmxcsr)
648 683
649ENTRY(inb) 684ENTRY(inb)
650 movq %rdi, %rdx 685 movq %rdi, %rdx
651 xorq %rax, %rax 686 xorq %rax, %rax
652 inb %dx, %al 687 inb %dx, %al
653 ret 688 ret
654END(inb) 689END(inb)
655 690
656ENTRY(insb) 691ENTRY(insb)
657 movl %edx, %ecx 692 movl %edx, %ecx
658 movl %edi, %edx 693 movl %edi, %edx
659 movq %rsi, %rdi 694 movq %rsi, %rdi
660 rep 695 rep
661 insb 696 insb
662 ret 697 ret
663END(insb) 698END(insb)
664 699
665ENTRY(inw) 700ENTRY(inw)
666 movq %rdi, %rdx 701 movq %rdi, %rdx
667 xorq %rax, %rax 702 xorq %rax, %rax
668 inw %dx, %ax 703 inw %dx, %ax
669 ret 704 ret
670END(inw) 705END(inw)
671 706
672ENTRY(insw) 707ENTRY(insw)
673 movl %edx, %ecx 708 movl %edx, %ecx
674 movl %edi, %edx 709 movl %edi, %edx
675 movq %rsi, %rdi 710 movq %rsi, %rdi
676 rep 711 rep
677 insw 712 insw
678 ret 713 ret
679END(insw) 714END(insw)
680 715
681ENTRY(inl) 716ENTRY(inl)
682 movq %rdi, %rdx 717 movq %rdi, %rdx
683 xorq %rax, %rax 718 xorq %rax, %rax
684 inl %dx, %eax 719 inl %dx, %eax
685 ret 720 ret
686END(inl) 721END(inl)
687 722
688ENTRY(insl) 723ENTRY(insl)
689 movl %edx, %ecx 724 movl %edx, %ecx
690 movl %edi, %edx 725 movl %edi, %edx
691 movq %rsi, %rdi 726 movq %rsi, %rdi
692 rep 727 rep
693 insl 728 insl
694 ret 729 ret
695END(insl) 730END(insl)
696 731
697ENTRY(outb) 732ENTRY(outb)
698 movq %rdi, %rdx 733 movq %rdi, %rdx
699 movq %rsi, %rax 734 movq %rsi, %rax
700 outb %al, %dx 735 outb %al, %dx
701 ret 736 ret
702END(outb) 737END(outb)
703 738
704ENTRY(outsb) 739ENTRY(outsb)
705 movl %edx, %ecx 740 movl %edx, %ecx
706 movl %edi, %edx 741 movl %edi, %edx
707 rep 742 rep
708 outsb 743 outsb
709 ret 744 ret
710END(outsb) 745END(outsb)
711 746
712ENTRY(outw) 747ENTRY(outw)
713 movq %rdi, %rdx 748 movq %rdi, %rdx
714 movq %rsi, %rax 749 movq %rsi, %rax
715 outw %ax, %dx 750 outw %ax, %dx
716 ret 751 ret
717END(outw) 752END(outw)
718 753
719ENTRY(outsw) 754ENTRY(outsw)
720 movl %edx, %ecx 755 movl %edx, %ecx
721 movl %edi, %edx 756 movl %edi, %edx
722 rep 757 rep
723 outsw 758 outsw
724 ret 759 ret
725END(outsw) 760END(outsw)
726 761
727ENTRY(outl) 762ENTRY(outl)
728 movq %rdi, %rdx 763 movq %rdi, %rdx
729 movq %rsi, %rax 764 movq %rsi, %rax
730 outl %eax, %dx 765 outl %eax, %dx
731 ret 766 ret
732END(outl) 767END(outl)
733 768
734ENTRY(outsl) 769ENTRY(outsl)
735 movl %edx, %ecx 770 movl %edx, %ecx
736 movl %edi, %edx 771 movl %edi, %edx
737 rep 772 rep
738 outsl 773 outsl
739 ret 774 ret
740END(outsl) 775END(outsl)
741 776
742ENTRY(setds) 777ENTRY(setds)
743 movw %di, %ds 778 movw %di, %ds
744 ret 779 ret
745END(setds) 780END(setds)
746 781
747ENTRY(setes) 782ENTRY(setes)
748 movw %di, %es 783 movw %di, %es
749 ret 784 ret
750END(setes) 785END(setes)
751 786
752ENTRY(setfs) 787ENTRY(setfs)
753 movw %di, %fs 788 movw %di, %fs
754 ret 789 ret
755END(setfs) 790END(setfs)
756 791
757#ifndef XEN 792#ifndef XEN
758ENTRY(setusergs) 793ENTRY(setusergs)
759 CLI(ax) 794 CLI(ax)
760 swapgs 795 swapgs
761 movw %di, %gs 796 movw %di, %gs
762 swapgs 797 swapgs
763 STI(ax) 798 STI(ax)
764 ret 799 ret
765END(setusergs) 800END(setusergs)
766#endif 801#endif

cvs diff -r1.25 -r1.26 src/sys/arch/i386/i386/cpufunc.S

--- src/sys/arch/i386/i386/cpufunc.S 2018/10/18 04:11:14 1.25
+++ src/sys/arch/i386/i386/cpufunc.S 2018/12/22 21:27:22 1.26
@@ -1,563 +1,575 @@
1/* $NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 * 34 *
35 * These are shared with NetBSD/xen. 35 * These are shared with NetBSD/xen.
36 */ 36 */
37 37
38#include <sys/errno.h> 38#include <sys/errno.h>
39 39
40#include <machine/asm.h> 40#include <machine/asm.h>
41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $");
42 42
43#include "opt_xen.h" 43#include "opt_xen.h"
44 44
45#include <machine/specialreg.h> 45#include <machine/specialreg.h>
46#include <machine/segments.h> 46#include <machine/segments.h>
47 47
48#include "assym.h" 48#include "assym.h"
49 49
 50/*
 51 * These functions below should always be accessed via the corresponding wrapper
 52 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 53 *
 54 * We use this rather roundabout method so that a runtime wrapper function may
 55 * be made available for PVHVM, which could override both native and PV aliases
 56 * and decide which to invoke at run time.
 57 */
 58
 59WEAK_ALIAS(lidt, i386_lidt)
 60WEAK_ALIAS(rcr3, i386_rcr3)
 61
50ENTRY(x86_lfence) 62ENTRY(x86_lfence)
51 lock 63 lock
52 addl $0, -4(%esp) 64 addl $0, -4(%esp)
53 ret 65 ret
54END(x86_lfence) 66END(x86_lfence)
55 67
56ENTRY(x86_sfence) 68ENTRY(x86_sfence)
57 lock 69 lock
58 addl $0, -4(%esp) 70 addl $0, -4(%esp)
59 ret 71 ret
60END(x86_sfence) 72END(x86_sfence)
61 73
62ENTRY(x86_mfence) 74ENTRY(x86_mfence)
63 lock 75 lock
64 addl $0, -4(%esp) 76 addl $0, -4(%esp)
65 ret 77 ret
66END(x86_mfence) 78END(x86_mfence)
67 79
68#ifndef XEN 80#ifndef XEN
69ENTRY(lidt) 81ENTRY(i386_lidt)
70 movl 4(%esp), %eax 82 movl 4(%esp), %eax
71 lidt (%eax) 83 lidt (%eax)
72 ret 84 ret
73END(lidt) 85END(i386_lidt)
74#endif /* XEN */ 86#endif /* XEN */
75 87
76ENTRY(rcr3) 88ENTRY(i386_rcr3)
77 movl %cr3, %eax 89 movl %cr3, %eax
78 ret 90 ret
79END(rcr3) 91END(i386_rcr3)
80 92
81ENTRY(lcr4) 93ENTRY(lcr4)
82 movl 4(%esp), %eax 94 movl 4(%esp), %eax
83 movl %eax, %cr4 95 movl %eax, %cr4
84 ret 96 ret
85END(lcr4) 97END(lcr4)
86 98
87ENTRY(rcr4) 99ENTRY(rcr4)
88 movl %cr4, %eax 100 movl %cr4, %eax
89 ret 101 ret
90END(rcr4) 102END(rcr4)
91 103
92ENTRY(x86_read_flags) 104ENTRY(x86_read_flags)
93 pushfl 105 pushfl
94 popl %eax 106 popl %eax
95 ret 107 ret
96END(x86_read_flags) 108END(x86_read_flags)
97 109
98ENTRY(x86_write_flags) 110ENTRY(x86_write_flags)
99 movl 4(%esp), %eax 111 movl 4(%esp), %eax
100 pushl %eax 112 pushl %eax
101 popfl 113 popfl
102 ret 114 ret
103END(x86_write_flags) 115END(x86_write_flags)
104 116
105#ifndef XEN 117#ifndef XEN
106STRONG_ALIAS(x86_write_psl,x86_write_flags) 118STRONG_ALIAS(x86_write_psl,x86_write_flags)
107STRONG_ALIAS(x86_read_psl,x86_read_flags) 119STRONG_ALIAS(x86_read_psl,x86_read_flags)
108#endif /* XEN */ 120#endif /* XEN */
109 121
110ENTRY(rdmsr) 122ENTRY(rdmsr)
111 movl 4(%esp), %ecx 123 movl 4(%esp), %ecx
112 rdmsr 124 rdmsr
113 ret 125 ret
114END(rdmsr) 126END(rdmsr)
115 127
116ENTRY(wrmsr) 128ENTRY(wrmsr)
117 movl 4(%esp), %ecx 129 movl 4(%esp), %ecx
118 movl 8(%esp), %eax 130 movl 8(%esp), %eax
119 movl 12(%esp), %edx 131 movl 12(%esp), %edx
120 wrmsr 132 wrmsr
121 ret 133 ret
122END(wrmsr) 134END(wrmsr)
123 135
124ENTRY(rdmsr_locked) 136ENTRY(rdmsr_locked)
125 movl 4(%esp), %ecx 137 movl 4(%esp), %ecx
126 pushl %edi 138 pushl %edi
127 movl $OPTERON_MSR_PASSCODE, %edi 139 movl $OPTERON_MSR_PASSCODE, %edi
128 rdmsr 140 rdmsr
129 popl %edi 141 popl %edi
130 ret 142 ret
131END(rdmsr_locked) 143END(rdmsr_locked)
132 144
133ENTRY(wrmsr_locked) 145ENTRY(wrmsr_locked)
134 movl 4(%esp), %ecx 146 movl 4(%esp), %ecx
135 movl 8(%esp), %eax 147 movl 8(%esp), %eax
136 movl 12(%esp), %edx 148 movl 12(%esp), %edx
137 pushl %edi 149 pushl %edi
138 movl $OPTERON_MSR_PASSCODE, %edi 150 movl $OPTERON_MSR_PASSCODE, %edi
139 wrmsr 151 wrmsr
140 popl %edi 152 popl %edi
141 ret 153 ret
142END(wrmsr_locked) 154END(wrmsr_locked)
143 155
144/* 156/*
145 * Support for reading MSRs in the safe manner (returns EFAULT on fault) 157 * Support for reading MSRs in the safe manner (returns EFAULT on fault)
146 */ 158 */
147/* int rdmsr_safe(u_int msr, uint64_t *data) */ 159/* int rdmsr_safe(u_int msr, uint64_t *data) */
148ENTRY(rdmsr_safe) 160ENTRY(rdmsr_safe)
149 movl CPUVAR(CURLWP), %ecx 161 movl CPUVAR(CURLWP), %ecx
150 movl L_PCB(%ecx), %ecx 162 movl L_PCB(%ecx), %ecx
151 movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx) 163 movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx)
152 164
153 movl 4(%esp), %ecx /* u_int msr */ 165 movl 4(%esp), %ecx /* u_int msr */
154 rdmsr 166 rdmsr
155 movl 8(%esp), %ecx /* *data */ 167 movl 8(%esp), %ecx /* *data */
156 movl %eax, (%ecx) /* low-order bits */ 168 movl %eax, (%ecx) /* low-order bits */
157 movl %edx, 4(%ecx) /* high-order bits */ 169 movl %edx, 4(%ecx) /* high-order bits */
158 xorl %eax, %eax /* "no error" */ 170 xorl %eax, %eax /* "no error" */
159 171
160 movl CPUVAR(CURLWP), %ecx 172 movl CPUVAR(CURLWP), %ecx
161 movl L_PCB(%ecx), %ecx 173 movl L_PCB(%ecx), %ecx
162 movl %eax, PCB_ONFAULT(%ecx) 174 movl %eax, PCB_ONFAULT(%ecx)
163 175
164 ret 176 ret
165END(rdmsr_safe) 177END(rdmsr_safe)
166 178
167/* uint64_t rdxcr(uint32_t) */ 179/* uint64_t rdxcr(uint32_t) */
168ENTRY(rdxcr) 180ENTRY(rdxcr)
169 movl 4(%esp), %ecx /* extended control reg number */ 181 movl 4(%esp), %ecx /* extended control reg number */
170 xgetbv /* Read to %edx:%eax */ 182 xgetbv /* Read to %edx:%eax */
171 ret 183 ret
172END(rdxcr) 184END(rdxcr)
173 185
174/* void wrxcr(uint32_t, uint64_t) */ 186/* void wrxcr(uint32_t, uint64_t) */
175ENTRY(wrxcr) 187ENTRY(wrxcr)
176 movl 4(%esp), %ecx /* extended control reg number */ 188 movl 4(%esp), %ecx /* extended control reg number */
177 movl 8(%esp), %eax /* feature mask bits */ 189 movl 8(%esp), %eax /* feature mask bits */
178 movl 12(%esp), %edx 190 movl 12(%esp), %edx
179 xsetbv 191 xsetbv
180 ret 192 ret
181END(wrxcr) 193END(wrxcr)
182  194
183 195
184/* 196/*
185 * MSR operations fault handler 197 * MSR operations fault handler
186 */ 198 */
187ENTRY(msr_onfault) 199ENTRY(msr_onfault)
188 movl CPUVAR(CURLWP), %ecx 200 movl CPUVAR(CURLWP), %ecx
189 movl L_PCB(%ecx), %ecx 201 movl L_PCB(%ecx), %ecx
190 movl $0, PCB_ONFAULT(%ecx) 202 movl $0, PCB_ONFAULT(%ecx)
191 movl $EFAULT, %eax 203 movl $EFAULT, %eax
192 ret 204 ret
193END(msr_onfault) 205END(msr_onfault)
194 206
195ENTRY(cpu_counter) 207ENTRY(cpu_counter)
196 rdtsc 208 rdtsc
197 addl CPUVAR(CC_SKEW), %eax 209 addl CPUVAR(CC_SKEW), %eax
198 adcl CPUVAR(CC_SKEW+4), %edx 210 adcl CPUVAR(CC_SKEW+4), %edx
199 ret 211 ret
200END(cpu_counter) 212END(cpu_counter)
201 213
202ENTRY(cpu_counter32) 214ENTRY(cpu_counter32)
203 rdtsc 215 rdtsc
204 addl CPUVAR(CC_SKEW), %eax 216 addl CPUVAR(CC_SKEW), %eax
205 ret 217 ret
206END(cpu_counter32) 218END(cpu_counter32)
207 219
208ENTRY(rdpmc) 220ENTRY(rdpmc)
209 movl 4(%esp), %ecx 221 movl 4(%esp), %ecx
210 rdpmc 222 rdpmc
211 ret 223 ret
212END(rdpmc) 224END(rdpmc)
213 225
214ENTRY(rdtsc) 226ENTRY(rdtsc)
215 rdtsc 227 rdtsc
216 ret 228 ret
217END(rdtsc) 229END(rdtsc)
218 230
219ENTRY(breakpoint) 231ENTRY(breakpoint)
220 pushl %ebp 232 pushl %ebp
221 movl %esp, %ebp 233 movl %esp, %ebp
222 int $0x03 /* paranoid, not 'int3' */ 234 int $0x03 /* paranoid, not 'int3' */
223 popl %ebp 235 popl %ebp
224 ret 236 ret
225END(breakpoint) 237END(breakpoint)
226 238
227ENTRY(x86_curcpu) 239ENTRY(x86_curcpu)
228 movl %fs:(CPU_INFO_SELF), %eax 240 movl %fs:(CPU_INFO_SELF), %eax
229 ret 241 ret
230END(x86_curcpu) 242END(x86_curcpu)
231 243
232ENTRY(x86_curlwp) 244ENTRY(x86_curlwp)
233 movl %fs:(CPU_INFO_CURLWP), %eax 245 movl %fs:(CPU_INFO_CURLWP), %eax
234 ret 246 ret
235END(x86_curlwp) 247END(x86_curlwp)
236 248
237ENTRY(cpu_set_curpri) 249ENTRY(cpu_set_curpri)
238 movl 4(%esp), %eax 250 movl 4(%esp), %eax
239 movl %eax, %fs:(CPU_INFO_CURPRIORITY) 251 movl %eax, %fs:(CPU_INFO_CURPRIORITY)
240 ret 252 ret
241END(cpu_set_curpri) 253END(cpu_set_curpri)
242 254
243ENTRY(__byte_swap_u32_variable) 255ENTRY(__byte_swap_u32_variable)
244 movl 4(%esp), %eax 256 movl 4(%esp), %eax
245 bswapl %eax 257 bswapl %eax
246 ret 258 ret
247END(__byte_swap_u32_variable) 259END(__byte_swap_u32_variable)
248 260
249ENTRY(__byte_swap_u16_variable) 261ENTRY(__byte_swap_u16_variable)
250 movl 4(%esp), %eax 262 movl 4(%esp), %eax
251 xchgb %al, %ah 263 xchgb %al, %ah
252 ret 264 ret
253END(__byte_swap_u16_variable) 265END(__byte_swap_u16_variable)
254 266
255/* 267/*
256 * void x86_flush() 268 * void x86_flush()
257 * 269 *
258 * Flush instruction pipelines by doing an intersegment (far) return. 270 * Flush instruction pipelines by doing an intersegment (far) return.
259 */ 271 */
260ENTRY(x86_flush) 272ENTRY(x86_flush)
261 popl %eax 273 popl %eax
262 pushl $GSEL(GCODE_SEL, SEL_KPL) 274 pushl $GSEL(GCODE_SEL, SEL_KPL)
263 pushl %eax 275 pushl %eax
264 lret 276 lret
265END(x86_flush) 277END(x86_flush)
266 278
267/* Waits - set up stack frame. */ 279/* Waits - set up stack frame. */
268ENTRY(x86_hlt) 280ENTRY(x86_hlt)
269 pushl %ebp 281 pushl %ebp
270 movl %esp, %ebp 282 movl %esp, %ebp
271 hlt 283 hlt
272 leave 284 leave
273 ret 285 ret
274END(x86_hlt) 286END(x86_hlt)
275 287
276/* Waits - set up stack frame. */ 288/* Waits - set up stack frame. */
277ENTRY(x86_stihlt) 289ENTRY(x86_stihlt)
278 pushl %ebp 290 pushl %ebp
279 movl %esp, %ebp 291 movl %esp, %ebp
280 sti 292 sti
281 hlt 293 hlt
282 leave 294 leave
283 ret 295 ret
284END(x86_stihlt) 296END(x86_stihlt)
285 297
286ENTRY(x86_monitor) 298ENTRY(x86_monitor)
287 movl 4(%esp), %eax 299 movl 4(%esp), %eax
288 movl 8(%esp), %ecx 300 movl 8(%esp), %ecx
289 movl 12(%esp), %edx 301 movl 12(%esp), %edx
290 monitor %eax, %ecx, %edx 302 monitor %eax, %ecx, %edx
291 ret 303 ret
292END(x86_monitor) 304END(x86_monitor)
293 305
294/* Waits - set up stack frame. */ 306/* Waits - set up stack frame. */
295ENTRY(x86_mwait)  307ENTRY(x86_mwait)
296 pushl %ebp 308 pushl %ebp
297 movl %esp, %ebp 309 movl %esp, %ebp
298 movl 8(%ebp), %eax 310 movl 8(%ebp), %eax
299 movl 12(%ebp), %ecx 311 movl 12(%ebp), %ecx
300 mwait %eax, %ecx 312 mwait %eax, %ecx
301 leave 313 leave
302 ret 314 ret
303END(x86_mwait)  315END(x86_mwait)
304 316
305ENTRY(x86_pause) 317ENTRY(x86_pause)
306 pause 318 pause
307 ret 319 ret
308END(x86_pause) 320END(x86_pause)
309 321
310ENTRY(x86_cpuid2) 322ENTRY(x86_cpuid2)
311 pushl %ebx 323 pushl %ebx
312 pushl %edi 324 pushl %edi
313 movl 12(%esp), %eax 325 movl 12(%esp), %eax
314 movl 16(%esp), %ecx 326 movl 16(%esp), %ecx
315 movl 20(%esp), %edi 327 movl 20(%esp), %edi
316 cpuid 328 cpuid
317 movl %eax, 0(%edi) 329 movl %eax, 0(%edi)
318 movl %ebx, 4(%edi) 330 movl %ebx, 4(%edi)
319 movl %ecx, 8(%edi) 331 movl %ecx, 8(%edi)
320 movl %edx, 12(%edi) 332 movl %edx, 12(%edi)
321 popl %edi 333 popl %edi
322 popl %ebx 334 popl %ebx
323 ret 335 ret
324END(x86_cpuid2) 336END(x86_cpuid2)
325 337
326ENTRY(x86_getss) 338ENTRY(x86_getss)
327 movl %ss, %eax 339 movl %ss, %eax
328 ret 340 ret
329END(x86_getss) 341END(x86_getss)
330 342
331ENTRY(fldcw) 343ENTRY(fldcw)
332 movl 4(%esp), %eax 344 movl 4(%esp), %eax
333 fldcw (%eax) 345 fldcw (%eax)
334 ret 346 ret
335END(fldcw) 347END(fldcw)
336 348
337ENTRY(fnclex)  349ENTRY(fnclex)
338 fnclex 350 fnclex
339 ret 351 ret
340END(fnclex)  352END(fnclex)
341 353
342ENTRY(fninit) 354ENTRY(fninit)
343 fninit 355 fninit
344 ret 356 ret
345END(fninit) 357END(fninit)
346 358
347ENTRY(fnsave) 359ENTRY(fnsave)
348 movl 4(%esp), %eax 360 movl 4(%esp), %eax
349 fnsave (%eax) 361 fnsave (%eax)
350 ret 362 ret
351END(fnsave) 363END(fnsave)
352 364
353ENTRY(fnstcw) 365ENTRY(fnstcw)
354 movl 4(%esp), %eax 366 movl 4(%esp), %eax
355 fnstcw (%eax) 367 fnstcw (%eax)
356 ret 368 ret
357END(fnstcw) 369END(fnstcw)
358 370
359ENTRY(fngetsw) 371ENTRY(fngetsw)
360 fnstsw %ax 372 fnstsw %ax
361 ret 373 ret
362END(fngetsw) 374END(fngetsw)
363 375
364ENTRY(fnstsw) 376ENTRY(fnstsw)
365 movl 4(%esp), %eax 377 movl 4(%esp), %eax
366 fnstsw (%eax) 378 fnstsw (%eax)
367 ret 379 ret
368END(fnstsw) 380END(fnstsw)
369 381
370ENTRY(fp_divide_by_0) 382ENTRY(fp_divide_by_0)
371 fldz 383 fldz
372 fld1 384 fld1
373 fdiv %st, %st(1) 385 fdiv %st, %st(1)
374 fwait 386 fwait
375 ret 387 ret
376END(fp_divide_by_0) 388END(fp_divide_by_0)
377 389
378ENTRY(frstor) 390ENTRY(frstor)
379 movl 4(%esp), %eax 391 movl 4(%esp), %eax
380 frstor (%eax) 392 frstor (%eax)
381 ret 393 ret
382END(frstor) 394END(frstor)
383 395
384ENTRY(fwait) 396ENTRY(fwait)
385 fwait 397 fwait
386 ret 398 ret
387END(fwait) 399END(fwait)
388 400
389ENTRY(clts) 401ENTRY(clts)
390 clts 402 clts
391 ret 403 ret
392END(clts) 404END(clts)
393 405
394ENTRY(stts) 406ENTRY(stts)
395 movl %cr0, %eax 407 movl %cr0, %eax
396 testl $CR0_TS, %eax 408 testl $CR0_TS, %eax
397 jnz 1f 409 jnz 1f
398 orl $CR0_TS, %eax 410 orl $CR0_TS, %eax
399 movl %eax, %cr0 411 movl %eax, %cr0
4001: 4121:
401 ret 413 ret
402END(stts) 414END(stts)
403 415
404ENTRY(fxsave) 416ENTRY(fxsave)
405 movl 4(%esp), %eax 417 movl 4(%esp), %eax
406 fxsave (%eax) 418 fxsave (%eax)
407 ret 419 ret
408END(fxsave) 420END(fxsave)
409 421
410ENTRY(fxrstor) 422ENTRY(fxrstor)
411 movl 4(%esp), %eax 423 movl 4(%esp), %eax
412 fxrstor (%eax) 424 fxrstor (%eax)
413 ret 425 ret
414END(fxrstor) 426END(fxrstor)
415 427
416ENTRY(xsave) 428ENTRY(xsave)
417 movl 4(%esp), %ecx 429 movl 4(%esp), %ecx
418 movl 8(%esp), %eax /* feature mask bits */ 430 movl 8(%esp), %eax /* feature mask bits */
419 movl 12(%esp), %edx 431 movl 12(%esp), %edx
420 xsave (%ecx) 432 xsave (%ecx)
421 ret 433 ret
422END(xsave) 434END(xsave)
423 435
424ENTRY(xsaveopt) 436ENTRY(xsaveopt)
425 movl 4(%esp), %ecx 437 movl 4(%esp), %ecx
426 movl 8(%esp), %eax /* feature mask bits */ 438 movl 8(%esp), %eax /* feature mask bits */
427 movl 12(%esp), %edx 439 movl 12(%esp), %edx
428 xsaveopt (%ecx) 440 xsaveopt (%ecx)
429 ret 441 ret
430END(xsaveopt) 442END(xsaveopt)
431 443
432ENTRY(xrstor) 444ENTRY(xrstor)
433 movl 4(%esp), %ecx 445 movl 4(%esp), %ecx
434 movl 8(%esp), %eax /* feature mask bits */ 446 movl 8(%esp), %eax /* feature mask bits */
435 movl 12(%esp), %edx 447 movl 12(%esp), %edx
436 xrstor (%ecx) 448 xrstor (%ecx)
437 ret 449 ret
438END(xrstor) 450END(xrstor)
439 451
440ENTRY(x86_stmxcsr) 452ENTRY(x86_stmxcsr)
441 movl 4(%esp), %eax 453 movl 4(%esp), %eax
442 stmxcsr (%eax) 454 stmxcsr (%eax)
443 ret 455 ret
444END(x86_stmxcsr) 456END(x86_stmxcsr)
445 457
446ENTRY(x86_ldmxcsr) 458ENTRY(x86_ldmxcsr)
447 movl 4(%esp), %eax 459 movl 4(%esp), %eax
448 ldmxcsr (%eax) 460 ldmxcsr (%eax)
449 ret 461 ret
450END(x86_ldmxcsr) 462END(x86_ldmxcsr)
451 463
452ENTRY(fldummy) 464ENTRY(fldummy)
453 ffree %st(7) 465 ffree %st(7)
454 fldz 466 fldz
455 ret 467 ret
456END(fldummy) 468END(fldummy)
457 469
458ENTRY(inb) 470ENTRY(inb)
459 movl 4(%esp), %edx 471 movl 4(%esp), %edx
460 xorl %eax, %eax 472 xorl %eax, %eax
461 inb %dx, %al 473 inb %dx, %al
462 ret 474 ret
463END(inb) 475END(inb)
464 476
465ENTRY(insb) 477ENTRY(insb)
466 pushl %edi 478 pushl %edi
467 movl 8(%esp), %edx 479 movl 8(%esp), %edx
468 movl 12(%esp), %edi 480 movl 12(%esp), %edi
469 movl 16(%esp), %ecx 481 movl 16(%esp), %ecx
470 rep 482 rep
471 insb 483 insb
472 popl %edi 484 popl %edi
473 ret 485 ret
474END(insb) 486END(insb)
475 487
476ENTRY(inw) 488ENTRY(inw)
477 movl 4(%esp), %edx 489 movl 4(%esp), %edx
478 xorl %eax, %eax 490 xorl %eax, %eax
479 inw %dx, %ax 491 inw %dx, %ax
480 ret 492 ret
481END(inw) 493END(inw)
482 494
483ENTRY(insw) 495ENTRY(insw)
484 pushl %edi 496 pushl %edi
485 movl 8(%esp), %edx 497 movl 8(%esp), %edx
486 movl 12(%esp), %edi 498 movl 12(%esp), %edi
487 movl 16(%esp), %ecx 499 movl 16(%esp), %ecx
488 rep 500 rep
489 insw 501 insw
490 popl %edi 502 popl %edi
491 ret 503 ret
492END(insw) 504END(insw)
493 505
494ENTRY(inl) 506ENTRY(inl)
495 movl 4(%esp), %edx 507 movl 4(%esp), %edx
496 inl %dx, %eax 508 inl %dx, %eax
497 ret 509 ret
498END(inl) 510END(inl)
499 511
500ENTRY(insl) 512ENTRY(insl)
501 pushl %edi 513 pushl %edi
502 movl 8(%esp), %edx 514 movl 8(%esp), %edx
503 movl 12(%esp), %edi 515 movl 12(%esp), %edi
504 movl 16(%esp), %ecx 516 movl 16(%esp), %ecx
505 rep 517 rep
506 insl 518 insl
507 popl %edi 519 popl %edi
508 ret 520 ret
509END(insl) 521END(insl)
510 522
511ENTRY(outb) 523ENTRY(outb)
512 movl 4(%esp), %edx 524 movl 4(%esp), %edx
513 movl 8(%esp), %eax 525 movl 8(%esp), %eax
514 outb %al, %dx 526 outb %al, %dx
515 ret 527 ret
516END(outb) 528END(outb)
517 529
518ENTRY(outsb) 530ENTRY(outsb)
519 pushl %esi 531 pushl %esi
520 movl 8(%esp), %edx 532 movl 8(%esp), %edx
521 movl 12(%esp), %esi 533 movl 12(%esp), %esi
522 movl 16(%esp), %ecx 534 movl 16(%esp), %ecx
523 rep 535 rep
524 outsb 536 outsb
525 popl %esi 537 popl %esi
526 ret 538 ret
527END(outsb) 539END(outsb)
528 540
529ENTRY(outw) 541ENTRY(outw)
530 movl 4(%esp), %edx 542 movl 4(%esp), %edx
531 movl 8(%esp), %eax 543 movl 8(%esp), %eax
532 outw %ax, %dx 544 outw %ax, %dx
533 ret 545 ret
534END(outw) 546END(outw)
535 547
536ENTRY(outsw) 548ENTRY(outsw)
537 pushl %esi 549 pushl %esi
538 movl 8(%esp), %edx 550 movl 8(%esp), %edx
539 movl 12(%esp), %esi 551 movl 12(%esp), %esi
540 movl 16(%esp), %ecx 552 movl 16(%esp), %ecx
541 rep 553 rep
542 outsw 554 outsw
543 popl %esi 555 popl %esi
544 ret 556 ret
545END(outsw) 557END(outsw)
546 558
547ENTRY(outl) 559ENTRY(outl)
548 movl 4(%esp), %edx 560 movl 4(%esp), %edx
549 movl 8(%esp), %eax 561 movl 8(%esp), %eax
550 outl %eax, %dx 562 outl %eax, %dx
551 ret 563 ret
552END(outl) 564END(outl)
553 565
554ENTRY(outsl) 566ENTRY(outsl)
555 pushl %esi 567 pushl %esi
556 movl 8(%esp), %edx 568 movl 8(%esp), %edx
557 movl 12(%esp), %esi 569 movl 12(%esp), %esi
558 movl 16(%esp), %ecx 570 movl 16(%esp), %ecx
559 rep 571 rep
560 outsl 572 outsl
561 popl %esi 573 popl %esi
562 ret 574 ret
563END(outsl) 575END(outsl)

cvs diff -r1.18 -r1.19 src/sys/arch/i386/i386/i386func.S

--- src/sys/arch/i386/i386/i386func.S 2016/11/27 14:49:21 1.18
+++ src/sys/arch/i386/i386/i386func.S 2018/12/22 21:27:22 1.19
@@ -1,257 +1,290 @@
1/* $NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $ */ 1/* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 * 34 *
35 * These are _not_ shared with NetBSD/xen. 35 * These are _not_ shared with NetBSD/xen.
36 */ 36 */
37 37
38#include <machine/asm.h> 38#include <machine/asm.h>
39__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $");
40 40
41#include <machine/specialreg.h> 41#include <machine/specialreg.h>
42#include <machine/segments.h> 42#include <machine/segments.h>
43 43
44#include "assym.h" 44#include "assym.h"
45 45
46ENTRY(invlpg) 46/*
 47 * These functions below should always be accessed via the corresponding wrapper
 48 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 49 *
 50 * We use this rather roundabout method so that a runtime wrapper function may
 51 * be made available for PVHVM, which could override both native and PV aliases
 52 * and decide which to invoke at run time.
 53 */
 54
 55WEAK_ALIAS(invlpg, i386_invlpg)
 56WEAK_ALIAS(lldt, i386_lldt)
 57WEAK_ALIAS(ltr, i386_ltr)
 58WEAK_ALIAS(lcr0, i386_lcr0)
 59WEAK_ALIAS(rcr0, i386_rcr0)
 60WEAK_ALIAS(lcr3, i386_lcr3)
 61WEAK_ALIAS(tlbflush, i386_tlbflush)
 62WEAK_ALIAS(tlbflushg, i386_tlbflushg)
 63WEAK_ALIAS(rdr0, i386_rdr0)
 64WEAK_ALIAS(ldr0, i386_ldr0)
 65WEAK_ALIAS(rdr1, i386_rdr1)
 66WEAK_ALIAS(ldr1, i386_ldr1)
 67WEAK_ALIAS(rdr2, i386_rdr2)
 68WEAK_ALIAS(ldr2, i386_ldr2)
 69WEAK_ALIAS(rdr3, i386_rdr3)
 70WEAK_ALIAS(ldr3, i386_ldr3)
 71WEAK_ALIAS(rdr6, i386_rdr6)
 72WEAK_ALIAS(ldr6, i386_ldr6)
 73WEAK_ALIAS(rdr7, i386_rdr7)
 74WEAK_ALIAS(ldr7, i386_ldr7)
 75WEAK_ALIAS(rcr2, i386_rcr2)
 76WEAK_ALIAS(lcr2, i386_lcr2)
 77WEAK_ALIAS(wbinvd, i386_wbinvd)
 78
 79ENTRY(i386_invlpg)
47 movl 4(%esp), %eax 80 movl 4(%esp), %eax
48 invlpg (%eax) 81 invlpg (%eax)
49 ret 82 ret
50END(invlpg) 83END(i386_invlpg)
51 84
52ENTRY(lldt) 85ENTRY(i386_lldt)
53 movl 4(%esp), %eax 86 movl 4(%esp), %eax
54 cmpl %eax, CPUVAR(CURLDT) 87 cmpl %eax, CPUVAR(CURLDT)
55 jne 1f 88 jne 1f
56 ret 89 ret
571: 901:
58 movl %eax, CPUVAR(CURLDT) 91 movl %eax, CPUVAR(CURLDT)
59 lldt %ax 92 lldt %ax
60 ret 93 ret
61END(lldt) 94END(i386_lldt)
62 95
63ENTRY(ltr) 96ENTRY(i386_ltr)
64 movl 4(%esp), %eax 97 movl 4(%esp), %eax
65 ltr %ax 98 ltr %ax
66 ret 99 ret
67END(ltr) 100END(i386_ltr)
68 101
69ENTRY(lcr0) 102ENTRY(i386_lcr0)
70 movl 4(%esp), %eax 103 movl 4(%esp), %eax
71 movl %eax, %cr0 104 movl %eax, %cr0
72 ret 105 ret
73END(lcr0) 106END(i386_lcr0)
74 107
75ENTRY(rcr0) 108ENTRY(i386_rcr0)
76 movl %cr0, %eax 109 movl %cr0, %eax
77 ret 110 ret
78END(rcr0) 111END(i386_rcr0)
79 112
80ENTRY(lcr3) 113ENTRY(i386_lcr3)
81 movl 4(%esp), %eax 114 movl 4(%esp), %eax
82 movl %eax, %cr3 115 movl %eax, %cr3
83 ret 116 ret
84END(lcr3) 117END(i386_lcr3)
85 118
86/* 119/*
87 * Big hammer: flush all TLB entries, including ones from PTE's 120 * Big hammer: flush all TLB entries, including ones from PTE's
88 * with the G bit set. This should only be necessary if TLB 121 * with the G bit set. This should only be necessary if TLB
89 * shootdown falls far behind. 122 * shootdown falls far behind.
90 * 123 *
91 * Intel Architecture Software Developer's Manual, Volume 3, 124 * Intel Architecture Software Developer's Manual, Volume 3,
92 * System Programming, section 9.10, "Invalidating the 125 * System Programming, section 9.10, "Invalidating the
93 * Translation Lookaside Buffers (TLBS)": 126 * Translation Lookaside Buffers (TLBS)":
94 * "The following operations invalidate all TLB entries, irrespective 127 * "The following operations invalidate all TLB entries, irrespective
95 * of the setting of the G flag: 128 * of the setting of the G flag:
96 * ... 129 * ...
97 * "(P6 family processors only): Writing to control register CR4 to 130 * "(P6 family processors only): Writing to control register CR4 to
98 * modify the PSE, PGE, or PAE flag." 131 * modify the PSE, PGE, or PAE flag."
99 * 132 *
100 * (the alternatives not quoted above are not an option here.) 133 * (the alternatives not quoted above are not an option here.)
101 * 134 *
102 * If PGE is not in use, we reload CR3. Check for the PGE feature 135 * If PGE is not in use, we reload CR3. Check for the PGE feature
103 * first since i486 does not have CR4. Note: the feature flag may 136 * first since i486 does not have CR4. Note: the feature flag may
104 * be present while the actual PGE functionality not yet enabled. 137 * be present while the actual PGE functionality not yet enabled.
105 */ 138 */
106ENTRY(tlbflushg) 139ENTRY(i386_tlbflushg)
107 testl $CPUID_PGE, _C_LABEL(cpu_feature) 140 testl $CPUID_PGE, _C_LABEL(cpu_feature)
108 jz 1f 141 jz 1f
109 movl %cr4, %eax 142 movl %cr4, %eax
110 testl $CR4_PGE, %eax 143 testl $CR4_PGE, %eax
111 jz 1f 144 jz 1f
112 movl %eax, %edx 145 movl %eax, %edx
113 andl $~CR4_PGE, %edx 146 andl $~CR4_PGE, %edx
114 movl %edx, %cr4 147 movl %edx, %cr4
115 movl %eax, %cr4 148 movl %eax, %cr4
116 ret 149 ret
117END(tlbflushg) 150END(i386_tlbflushg)
118 151
119ENTRY(tlbflush) 152ENTRY(i386_tlbflush)
1201: 1531:
121 movl %cr3, %eax 154 movl %cr3, %eax
122 movl %eax, %cr3 155 movl %eax, %cr3
123 ret 156 ret
124END(tlbflush) 157END(i386_tlbflush)
125 158
126ENTRY(ldr0) 159ENTRY(i386_ldr0)
127 movl 4(%esp), %eax 160 movl 4(%esp), %eax
128 movl %eax, %dr0 161 movl %eax, %dr0
129 ret 162 ret
130END(ldr0) 163END(i386_ldr0)
131 164
132ENTRY(rdr0) 165ENTRY(i386_rdr0)
133 movl %dr0, %eax 166 movl %dr0, %eax
134 ret 167 ret
135END(rdr0) 168END(i386_rdr0)
136 169
137ENTRY(ldr1) 170ENTRY(i386_ldr1)
138 movl 4(%esp), %eax 171 movl 4(%esp), %eax
139 movl %eax, %dr1 172 movl %eax, %dr1
140 ret 173 ret
141END(ldr1) 174END(i386_ldr1)
142 175
143ENTRY(rdr1) 176ENTRY(i386_rdr1)
144 movl %dr1, %eax 177 movl %dr1, %eax
145 ret 178 ret
146END(rdr1) 179END(i386_rdr1)
147 180
148ENTRY(ldr2) 181ENTRY(i386_ldr2)
149 movl 4(%esp), %eax 182 movl 4(%esp), %eax
150 movl %eax, %dr2 183 movl %eax, %dr2
151 ret 184 ret
152END(ldr2) 185END(i386_ldr2)
153 186
154ENTRY(rdr2) 187ENTRY(i386_rdr2)
155 movl %dr2, %eax 188 movl %dr2, %eax
156 ret 189 ret
157END(rdr2) 190END(i386_rdr2)
158 191
159ENTRY(ldr3) 192ENTRY(i386_ldr3)
160 movl 4(%esp), %eax 193 movl 4(%esp), %eax
161 movl %eax, %dr3 194 movl %eax, %dr3
162 ret 195 ret
163END(ldr3) 196END(i386_ldr3)
164 197
165ENTRY(rdr3) 198ENTRY(i386_rdr3)
166 movl %dr3, %eax 199 movl %dr3, %eax
167 ret 200 ret
168END(rdr3) 201END(i386_rdr3)
169 202
170ENTRY(ldr6) 203ENTRY(i386_ldr6)
171 movl 4(%esp), %eax 204 movl 4(%esp), %eax
172 movl %eax, %dr6 205 movl %eax, %dr6
173 ret 206 ret
174END(ldr6) 207END(i386_ldr6)
175 208
176ENTRY(rdr6) 209ENTRY(i386_rdr6)
177 movl %dr6, %eax 210 movl %dr6, %eax
178 ret 211 ret
179END(rdr6) 212END(i386_rdr6)
180 213
181ENTRY(ldr7) 214ENTRY(i386_ldr7)
182 movl 4(%esp), %eax 215 movl 4(%esp), %eax
183 movl %eax, %dr7 216 movl %eax, %dr7
184 ret 217 ret
185END(ldr7) 218END(i386_ldr7)
186 219
187ENTRY(rdr7) 220ENTRY(i386_rdr7)
188 movl %dr7, %eax 221 movl %dr7, %eax
189 ret 222 ret
190END(rdr7) 223END(i386_rdr7)
191 224
192ENTRY(rcr2) 225ENTRY(i386_rcr2)
193 movl %cr2, %eax 226 movl %cr2, %eax
194 ret 227 ret
195END(rcr2) 228END(i386_rcr2)
196 229
197ENTRY(lcr2) 230ENTRY(i386_lcr2)
198 movl 4(%esp), %eax 231 movl 4(%esp), %eax
199 movl %eax, %cr2 232 movl %eax, %cr2
200 ret 233 ret
201END(lcr2) 234END(i386_lcr2)
202 235
203ENTRY(wbinvd) 236ENTRY(i386_wbinvd)
204 wbinvd 237 wbinvd
205 ret 238 ret
206END(wbinvd) 239END(i386_wbinvd)
207 240
208ENTRY(x86_disable_intr) 241ENTRY(x86_disable_intr)
209 cli 242 cli
210 ret 243 ret
211END(x86_disable_intr) 244END(x86_disable_intr)
212 245
213ENTRY(x86_enable_intr) 246ENTRY(x86_enable_intr)
214 sti 247 sti
215 ret 248 ret
216END(x86_enable_intr) 249END(x86_enable_intr)
217 250
218/* 251/*
219 * void lgdt(struct region_descriptor *rdp); 252 * void lgdt(struct region_descriptor *rdp);
220 * 253 *
221 * Load a new GDT pointer (and do any necessary cleanup). 254 * Load a new GDT pointer (and do any necessary cleanup).
222 * XXX It's somewhat questionable whether reloading all the segment registers 255 * XXX It's somewhat questionable whether reloading all the segment registers
223 * is necessary, since the actual descriptor data is not changed except by 256 * is necessary, since the actual descriptor data is not changed except by
224 * process creation and exit, both of which clean up via task switches. OTOH, 257 * process creation and exit, both of which clean up via task switches. OTOH,
225 * this only happens at run time when the GDT is resized. 258 * this only happens at run time when the GDT is resized.
226 */ 259 */
227ENTRY(lgdt) 260ENTRY(lgdt)
228 /* Reload the descriptor table. */ 261 /* Reload the descriptor table. */
229 movl 4(%esp), %eax 262 movl 4(%esp), %eax
230 lgdt (%eax) 263 lgdt (%eax)
231 /* Flush the prefetch queue. */ 264 /* Flush the prefetch queue. */
232 jmp 1f 265 jmp 1f
233 nop 266 nop
2341: /* Reload "stale" selectors. */ 2671: /* Reload "stale" selectors. */
235 movl $GSEL(GDATA_SEL, SEL_KPL), %eax 268 movl $GSEL(GDATA_SEL, SEL_KPL), %eax
236 movl %eax, %ds 269 movl %eax, %ds
237 movl %eax, %es 270 movl %eax, %es
238 movl %eax, %gs 271 movl %eax, %gs
239 movl %eax, %ss 272 movl %eax, %ss
240 movl $GSEL(GCPU_SEL, SEL_KPL), %eax 273 movl $GSEL(GCPU_SEL, SEL_KPL), %eax
241 movl %eax, %fs 274 movl %eax, %fs
242 jmp _C_LABEL(x86_flush) 275 jmp _C_LABEL(x86_flush)
243END(lgdt) 276END(lgdt)
244 277
245ENTRY(tsc_get_timecount) 278ENTRY(tsc_get_timecount)
246 movl CPUVAR(CURLWP), %ecx 279 movl CPUVAR(CURLWP), %ecx
2471: 2801:
248 pushl L_NCSW(%ecx) 281 pushl L_NCSW(%ecx)
249 rdtsc 282 rdtsc
250 addl CPUVAR(CC_SKEW), %eax 283 addl CPUVAR(CC_SKEW), %eax
251 popl %edx 284 popl %edx
252 cmpl %edx, L_NCSW(%ecx) 285 cmpl %edx, L_NCSW(%ecx)
253 jne 2f 286 jne 2f
254 ret 287 ret
2552: 2882:
256 jmp 1b 289 jmp 1b
257END(tsc_get_timecount) 290END(tsc_get_timecount)
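
The comment added to i386func.S above describes a runtime wrapper that a
future PVHVM kernel could provide; no such function exists in this change.
The following is only a minimal sketch of what it might look like, assuming a
kernel build context (vaddr_t and bool available via <sys/param.h>) and a
hypothetical boot-time flag named running_under_xen, which is not an existing
symbol:

	#include <sys/param.h>		/* vaddr_t, bool (kernel build assumed) */

	void	i386_invlpg(vaddr_t);	/* native backing function, from this diff */
	void	xen_invlpg(vaddr_t);	/* Xen PV backing function, from this diff */

	/* Hypothetical flag set during early boot; name is made up here. */
	extern bool running_under_xen;

	/*
	 * A strong definition of invlpg() overrides both weak aliases and
	 * dispatches to the appropriate backing function at run time.
	 */
	void
	invlpg(vaddr_t va)
	{
		if (running_under_xen)
			xen_invlpg(va);
		else
			i386_invlpg(va);
	}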

cvs diff -r1.22 -r1.23 src/sys/arch/xen/x86/xenfunc.c

--- src/sys/arch/xen/x86/xenfunc.c 2018/10/18 04:17:18 1.22
+++ src/sys/arch/xen/x86/xenfunc.c 2018/12/22 21:27:22 1.23
@@ -1,265 +1,331 @@ @@ -1,265 +1,331 @@
1/* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */ 1/* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2004 Christian Limpach. 4 * Copyright (c) 2004 Christian Limpach.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $");
30 30
31#include <sys/param.h> 31#include <sys/param.h>
32 32
33#include <uvm/uvm_extern.h> 33#include <uvm/uvm_extern.h>
34 34
35#include <machine/intr.h> 35#include <machine/intr.h>
36#include <machine/vmparam.h> 36#include <machine/vmparam.h>
37#include <machine/pmap.h> 37#include <machine/pmap.h>
38#include <xen/xen.h> 38#include <xen/xen.h>
39#include <xen/hypervisor.h> 39#include <xen/hypervisor.h>
40//#include <xen/evtchn.h> 40//#include <xen/evtchn.h>
41#include <xen/xenpmap.h> 41#include <xen/xenpmap.h>
42#include <machine/pte.h> 42#include <machine/pte.h>
43 43
44#define MAX_XEN_IDT 128 44#define MAX_XEN_IDT 128
45 45
46void xen_set_ldt(vaddr_t, uint32_t); 46void xen_set_ldt(vaddr_t, uint32_t);
47 47
 48/*
 49 * We don't need to export these declarations, since they are used via
 50 * linker aliasing. They should always be accessed via the
 51 * corresponding wrapper function names defined in
 52 * x86/include/cpufunc.h and exported as __weak_alias()
 53 *
 54 * We use this rather roundabout method so that a runtime wrapper
 55 * function may be made available for PVHVM, which could override both
 56 * native and PV aliases and decide which to invoke at run time.
 57 */
 58
 59void xen_invlpg(vaddr_t);
 60void xen_lidt(struct region_descriptor *);
 61void xen_lldt(u_short);
 62void xen_ltr(u_short);
 63void xen_lcr0(u_long);
 64u_long xen_rcr0(void);
 65void xen_tlbflush(void);
 66void xen_tlbflushg(void);
 67register_t xen_rdr0(void);
 68void xen_ldr0(register_t);
 69register_t xen_rdr1(void);
 70void xen_ldr1(register_t);
 71register_t xen_rdr2(void);
 72void xen_ldr2(register_t);
 73register_t xen_rdr3(void);
 74void xen_ldr3(register_t);
 75register_t xen_rdr6(void);
 76void xen_ldr6(register_t);
 77register_t xen_rdr7(void);
 78void xen_ldr7(register_t);
 79void xen_wbinvd(void);
 80vaddr_t xen_rcr2(void);
 81
 82__weak_alias(invlpg, xen_invlpg);
 83__weak_alias(lidt, xen_lidt);
 84__weak_alias(lldt, xen_lldt);
 85__weak_alias(ltr, xen_ltr);
 86__weak_alias(lcr0, xen_lcr0);
 87__weak_alias(rcr0, xen_rcr0);
 88__weak_alias(tlbflush, xen_tlbflush);
 89__weak_alias(tlbflushg, xen_tlbflushg);
 90__weak_alias(rdr0, xen_rdr0);
 91__weak_alias(ldr0, xen_ldr0);
 92__weak_alias(rdr1, xen_rdr1);
 93__weak_alias(ldr1, xen_ldr1);
 94__weak_alias(rdr2, xen_rdr2);
 95__weak_alias(ldr2, xen_ldr2);
 96__weak_alias(rdr3, xen_rdr3);
 97__weak_alias(ldr3, xen_ldr3);
 98__weak_alias(rdr6, xen_rdr6);
 99__weak_alias(ldr6, xen_ldr6);
 100__weak_alias(rdr7, xen_rdr7);
 101__weak_alias(ldr7, xen_ldr7);
 102__weak_alias(wbinvd, xen_wbinvd);
 103__weak_alias(rcr2, xen_rcr2);
 104
 105#ifdef __x86_64__
 106void xen_setusergs(int);
 107__weak_alias(setusergs, xen_setusergs);
 108#else
 109void xen_lcr3(vaddr_t);
 110__weak_alias(lcr3, xen_lcr3);
 111
 112#endif
 113
48void  114void
49invlpg(vaddr_t addr) 115xen_invlpg(vaddr_t addr)
50{ 116{
51 int s = splvm(); /* XXXSMP */ 117 int s = splvm(); /* XXXSMP */
52 xpq_queue_invlpg(addr); 118 xpq_queue_invlpg(addr);
53 splx(s); 119 splx(s);
54}  120}
55 121
56void 122void
57lidt(struct region_descriptor *rd) 123xen_lidt(struct region_descriptor *rd)
58{ 124{
59 /*  125 /*
60 * We need to do this because we can't assume kmem_alloc(9) 126 * We need to do this because we can't assume kmem_alloc(9)
61 * will be available at the boot stage when this is called. 127 * will be available at the boot stage when this is called.
62 */ 128 */
63 static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); 129 static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));
64 memset(xen_idt_page, 0, PAGE_SIZE); 130 memset(xen_idt_page, 0, PAGE_SIZE);
65  131
66 struct trap_info *xen_idt = (void * )xen_idt_page; 132 struct trap_info *xen_idt = (void * )xen_idt_page;
67 int xen_idt_idx = 0; 133 int xen_idt_idx = 0;
68  134
69 struct trap_info * idd = (void *) rd->rd_base; 135 struct trap_info * idd = (void *) rd->rd_base;
70 const int nidt = rd->rd_limit / (sizeof *idd);  136 const int nidt = rd->rd_limit / (sizeof *idd);
71 137
72 int i; 138 int i;
73 139
74 /* 140 /*
75 * Sweep in all initialised entries, consolidate them back to 141 * Sweep in all initialised entries, consolidate them back to
76 * back in the requestor array. 142 * back in the requestor array.
77 */ 143 */
78 for (i = 0; i < nidt; i++) { 144 for (i = 0; i < nidt; i++) {
79 if (idd[i].address == 0) /* Skip gap */ 145 if (idd[i].address == 0) /* Skip gap */
80 continue; 146 continue;
81 KASSERT(xen_idt_idx < MAX_XEN_IDT); 147 KASSERT(xen_idt_idx < MAX_XEN_IDT);
82 /* Copy over entry */ 148 /* Copy over entry */
83 xen_idt[xen_idt_idx++] = idd[i]; 149 xen_idt[xen_idt_idx++] = idd[i];
84 } 150 }
85 151
86#if defined(__x86_64__) 152#if defined(__x86_64__)
87 /* page needs to be r/o */ 153 /* page needs to be r/o */
88 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ); 154 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ);
89#endif /* __x86_64 */ 155#endif /* __x86_64 */
90 156
91 /* Hook it up in the hypervisor */ 157 /* Hook it up in the hypervisor */
92 if (HYPERVISOR_set_trap_table(xen_idt)) 158 if (HYPERVISOR_set_trap_table(xen_idt))
93 panic("HYPERVISOR_set_trap_table() failed"); 159 panic("HYPERVISOR_set_trap_table() failed");
94 160
95#if defined(__x86_64__) 161#if defined(__x86_64__)
96 /* reset */ 162 /* reset */
97 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); 163 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE);
98#endif /* __x86_64 */ 164#endif /* __x86_64 */
99} 165}
100 166
101void 167void
102lldt(u_short sel) 168xen_lldt(u_short sel)
103{ 169{
104#ifndef __x86_64__ 170#ifndef __x86_64__
105 struct cpu_info *ci; 171 struct cpu_info *ci;
106 172
107 ci = curcpu(); 173 ci = curcpu();
108 174
109 if (ci->ci_curldt == sel) 175 if (ci->ci_curldt == sel)
110 return; 176 return;
111 if (sel == GSEL(GLDT_SEL, SEL_KPL)) 177 if (sel == GSEL(GLDT_SEL, SEL_KPL))
112 xen_set_ldt((vaddr_t)ldtstore, NLDT); 178 xen_set_ldt((vaddr_t)ldtstore, NLDT);
113 else 179 else
114 xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, 180 xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
115 ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); 181 ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
116 ci->ci_curldt = sel; 182 ci->ci_curldt = sel;
117#endif 183#endif
118} 184}
119 185
120void 186void
121ltr(u_short sel) 187xen_ltr(u_short sel)
122{ 188{
123 panic("XXX ltr not supported\n"); 189 panic("XXX ltr not supported\n");
124} 190}
125 191
126void 192void
127lcr0(u_long val) 193xen_lcr0(u_long val)
128{ 194{
129 panic("XXX lcr0 not supported\n"); 195 panic("XXX lcr0 not supported\n");
130} 196}
131 197
132u_long 198u_long
133rcr0(void) 199xen_rcr0(void)
134{ 200{
135 /* XXX: handle X86_CR0_TS ? */ 201 /* XXX: handle X86_CR0_TS ? */
136 return 0; 202 return 0;
137} 203}
138 204
139#ifndef __x86_64__ 205#ifndef __x86_64__
140void 206void
141lcr3(vaddr_t val) 207xen_lcr3(vaddr_t val)
142{ 208{
143 int s = splvm(); /* XXXSMP */ 209 int s = splvm(); /* XXXSMP */
144 xpq_queue_pt_switch(xpmap_ptom_masked(val)); 210 xpq_queue_pt_switch(xpmap_ptom_masked(val));
145 splx(s); 211 splx(s);
146} 212}
147#endif 213#endif
148 214
149void 215void
150tlbflush(void) 216xen_tlbflush(void)
151{ 217{
152 int s = splvm(); /* XXXSMP */ 218 int s = splvm(); /* XXXSMP */
153 xpq_queue_tlb_flush(); 219 xpq_queue_tlb_flush();
154 splx(s); 220 splx(s);
155} 221}
156 222
157void 223void
158tlbflushg(void) 224xen_tlbflushg(void)
159{ 225{
160 tlbflush(); 226 tlbflush();
161} 227}
162 228
163register_t 229register_t
164rdr0(void) 230xen_rdr0(void)
165{ 231{
166 232
167 return HYPERVISOR_get_debugreg(0); 233 return HYPERVISOR_get_debugreg(0);
168} 234}
169 235
170void 236void
171ldr0(register_t val) 237xen_ldr0(register_t val)
172{ 238{
173 239
174 HYPERVISOR_set_debugreg(0, val); 240 HYPERVISOR_set_debugreg(0, val);
175} 241}
176 242
177register_t 243register_t
178rdr1(void) 244xen_rdr1(void)
179{ 245{
180 246
181 return HYPERVISOR_get_debugreg(1); 247 return HYPERVISOR_get_debugreg(1);
182} 248}
183 249
184void 250void
185ldr1(register_t val) 251xen_ldr1(register_t val)
186{ 252{
187 253
188 HYPERVISOR_set_debugreg(1, val); 254 HYPERVISOR_set_debugreg(1, val);
189} 255}
190 256
191register_t 257register_t
192rdr2(void) 258xen_rdr2(void)
193{ 259{
194 260
195 return HYPERVISOR_get_debugreg(2); 261 return HYPERVISOR_get_debugreg(2);
196} 262}
197 263
198void 264void
199ldr2(register_t val) 265xen_ldr2(register_t val)
200{ 266{
201 267
202 HYPERVISOR_set_debugreg(2, val); 268 HYPERVISOR_set_debugreg(2, val);
203} 269}
204 270
205register_t 271register_t
206rdr3(void) 272xen_rdr3(void)
207{ 273{
208 274
209 return HYPERVISOR_get_debugreg(3); 275 return HYPERVISOR_get_debugreg(3);
210} 276}
211 277
212void 278void
213ldr3(register_t val) 279xen_ldr3(register_t val)
214{ 280{
215 281
216 HYPERVISOR_set_debugreg(3, val); 282 HYPERVISOR_set_debugreg(3, val);
217} 283}
218register_t 284register_t
219rdr6(void) 285xen_rdr6(void)
220{ 286{
221 287
222 return HYPERVISOR_get_debugreg(6); 288 return HYPERVISOR_get_debugreg(6);
223} 289}
224 290
225void 291void
226ldr6(register_t val) 292xen_ldr6(register_t val)
227{ 293{
228 294
229 HYPERVISOR_set_debugreg(6, val); 295 HYPERVISOR_set_debugreg(6, val);
230} 296}
231 297
232register_t 298register_t
233rdr7(void) 299xen_rdr7(void)
234{ 300{
235 301
236 return HYPERVISOR_get_debugreg(7); 302 return HYPERVISOR_get_debugreg(7);
237} 303}
238 304
239void 305void
240ldr7(register_t val) 306xen_ldr7(register_t val)
241{ 307{
242 308
243 HYPERVISOR_set_debugreg(7, val); 309 HYPERVISOR_set_debugreg(7, val);
244} 310}
245 311
246void 312void
247wbinvd(void) 313xen_wbinvd(void)
248{ 314{
249 315
250 xpq_flush_cache(); 316 xpq_flush_cache();
251} 317}
252 318
253vaddr_t 319vaddr_t
254rcr2(void) 320xen_rcr2(void)
255{ 321{
256 return curcpu()->ci_vcpu->arch.cr2; 322 return curcpu()->ci_vcpu->arch.cr2;
257} 323}
258 324
259#ifdef __x86_64__ 325#ifdef __x86_64__
260void 326void
261setusergs(int gssel) 327xen_setusergs(int gssel)
262{ 328{
263 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); 329 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
264} 330}
265#endif 331#endif
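
Both files rely on the same linker behaviour noted in their comments: each
backing implementation is published under the generic name only as a weak
symbol, so a strong definition of that name, if one is ever linked in, takes
precedence. A minimal user-space illustration of that pattern (GCC/ELF
toolchain assumed; the demo_api/demo_backend names are made up for this
example and do not appear in the source above):

	#include <stdio.h>

	/* Backing implementation, in the role of i386_invlpg()/xen_invlpg(). */
	void
	demo_backend(void)
	{
		printf("backend implementation called\n");
	}

	/* Weak alias, in the role of WEAK_ALIAS()/__weak_alias(). */
	void demo_api(void) __attribute__((weak, alias("demo_backend")));

	int
	main(void)
	{
		/*
		 * With no strong demo_api() defined anywhere else, the call
		 * binds to demo_backend(); a strong demo_api() in another
		 * object file would win at link time instead.
		 */
		demo_api();
		return 0;
	}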