Sat Dec 22 21:27:22 2018 UTC
Introduce a weak-alias method of exporting different implementations
of the same API.

For example, the native amd64 implementation of invlpg() now becomes
amd64_invlpg() with a weak symbol export of invlpg(), while the XEN
implementation becomes xen_invlpg(), also weakly exported as invlpg().
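
On the assembly side this is done with the WEAK_ALIAS() macro from
<machine/asm.h> (the C sources use __weak_alias() from <sys/cdefs.h>).
As a rough illustration of the mechanism, the macro boils down to
something like:

	/*
	 * Approximate expansion, for illustration only; see the
	 * machine-dependent <machine/asm.h> for the real definition.
	 */
	#define WEAK_ALIAS(alias, sym)	\
		.weak alias;		\
		alias = sym

i.e. the wrapper name is emitted as a weak symbol bound to the real,
strongly named implementation.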

Note that linking in both implementations together without a strong
override function named invlpg() would be a mistake: we have limited
control over which of the two weak symbols would emerge as the finally
exported invlpg(), so the wrong function could end up being exported.
This change avoids that situation.
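
To illustrate the hazard, consider a hypothetical two-file sketch (not
code from this commit) in which both weak definitions are linked in:

	/* native.c */
	#include <sys/cdefs.h>
	#include <sys/types.h>

	void
	amd64_invlpg(vaddr_t va)
	{
		/* native implementation */
	}
	__weak_alias(invlpg, amd64_invlpg);

	/* xen.c */
	#include <sys/cdefs.h>
	#include <sys/types.h>

	void
	xen_invlpg(vaddr_t va)
	{
		/* PV implementation */
	}
	__weak_alias(invlpg, xen_invlpg);

Linking native.o and xen.o together is silently accepted: with no
strong definition of invlpg() present, the linker binds invlpg() to
whichever weak definition it happens to see first, and neither choice
is diagnosed as a conflict.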

In that case we should, however, include a strong override function
invlpg() that can pass the call on to the appropriate backing function
(amd64_invlpg() in the native case, xen_invlpg() under XEN
virtualisation) at run time, as sketched below.
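
A minimal sketch of what such a runtime override could look like (a
hypothetical example, not code from this commit; the boot-time flag
xen_pvhvm_active is an assumed placeholder, not an existing kernel
variable):

	/*
	 * Strong definition of invlpg(): a strong symbol always wins
	 * over weak ones, so this overrides both weak aliases.
	 */
	extern bool xen_pvhvm_active;	/* assumed runtime flag */
	void amd64_invlpg(vaddr_t);	/* native backing function */
	void xen_invlpg(vaddr_t);	/* PV backing function */

	void
	invlpg(vaddr_t va)
	{
		if (xen_pvhvm_active)
			xen_invlpg(va);
		else
			amd64_invlpg(va);
	}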

This change does not introduce such a function and therefore does not
alter builds to include native as well as XEN implementations in the
same binary. This will be done later, with the introduction of XEN
PVHVM mode, where precisely such a runtime switch is required.

There are no operational changes introduced by this commit.


(cherry)
diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S
diff -r1.25 -r1.26 src/sys/arch/i386/i386/cpufunc.S
diff -r1.18 -r1.19 src/sys/arch/i386/i386/i386func.S
diff -r1.22 -r1.23 src/sys/arch/xen/x86/xenfunc.c

cvs diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S

--- src/sys/arch/amd64/amd64/cpufunc.S 2018/07/21 06:09:13 1.33
+++ src/sys/arch/amd64/amd64/cpufunc.S 2018/12/22 21:27:22 1.34
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpufunc.S,v 1.33 2018/07/21 06:09:13 maxv Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -53,81 +53,116 @@ ENTRY(x86_lfence) @@ -53,81 +53,116 @@ ENTRY(x86_lfence)
53 ret 53 ret
54END(x86_lfence) 54END(x86_lfence)
55 55
56ENTRY(x86_sfence) 56ENTRY(x86_sfence)
57 sfence 57 sfence
58 ret 58 ret
59END(x86_sfence) 59END(x86_sfence)
60 60
61ENTRY(x86_mfence) 61ENTRY(x86_mfence)
62 mfence 62 mfence
63 ret 63 ret
64END(x86_mfence) 64END(x86_mfence)
65 65
 66/*
 67 * These functions below should always be accessed via the corresponding wrapper
 68 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 69 *
 70 * We use this rather roundabout method so that a runtime wrapper function may
 71 * be made available for PVHVM, which could override both native and PV aliases
 72 * and decide which to invoke at run time.
 73 */
 74
 75WEAK_ALIAS(invlpg, amd64_invlpg)
 76WEAK_ALIAS(lidt, amd64_lidt)
 77WEAK_ALIAS(lldt, amd64_lldt)
 78WEAK_ALIAS(ltr, amd64_ltr)
 79WEAK_ALIAS(lcr0, amd64_lcr0)
 80WEAK_ALIAS(rcr0, amd64_rcr0)
 81WEAK_ALIAS(rcr2, amd64_rcr2)
 82WEAK_ALIAS(lcr2, amd64_lcr2)
 83WEAK_ALIAS(rcr3, amd64_rcr3)
 84WEAK_ALIAS(lcr3, amd64_lcr3)
 85WEAK_ALIAS(tlbflush, amd64_tlbflush)
 86WEAK_ALIAS(tlbflushg, amd64_tlbflushg)
 87WEAK_ALIAS(rdr0, amd64_rdr0)
 88WEAK_ALIAS(ldr0, amd64_ldr0)
 89WEAK_ALIAS(rdr1, amd64_rdr1)
 90WEAK_ALIAS(ldr1, amd64_ldr1)
 91WEAK_ALIAS(rdr2, amd64_rdr2)
 92WEAK_ALIAS(ldr2, amd64_ldr2)
 93WEAK_ALIAS(rdr3, amd64_rdr3)
 94WEAK_ALIAS(ldr3, amd64_ldr3)
 95WEAK_ALIAS(rdr6, amd64_rdr6)
 96WEAK_ALIAS(ldr6, amd64_ldr6)
 97WEAK_ALIAS(rdr7, amd64_rdr7)
 98WEAK_ALIAS(ldr7, amd64_ldr7)
 99WEAK_ALIAS(wbinvd, amd64_wbinvd)
 100
66#ifndef XEN 101#ifndef XEN
67ENTRY(invlpg) 102ENTRY(amd64_invlpg)
68 invlpg (%rdi) 103 invlpg (%rdi)
69 ret 104 ret
70END(invlpg) 105END(amd64_invlpg)
71 106
72ENTRY(lidt) 107ENTRY(amd64_lidt)
73 lidt (%rdi) 108 lidt (%rdi)
74 ret 109 ret
75END(lidt) 110END(amd64_lidt)
76 111
77ENTRY(lldt) 112ENTRY(amd64_lldt)
78 cmpl %edi, CPUVAR(CURLDT) 113 cmpl %edi, CPUVAR(CURLDT)
79 jne 1f 114 jne 1f
80 ret 115 ret
811: 1161:
82 movl %edi, CPUVAR(CURLDT) 117 movl %edi, CPUVAR(CURLDT)
83 lldt %di 118 lldt %di
84 ret 119 ret
85END(lldt) 120END(amd64_lldt)
86 121
87ENTRY(ltr) 122ENTRY(amd64_ltr)
88 ltr %di 123 ltr %di
89 ret 124 ret
90END(ltr) 125END(amd64_ltr)
91 126
92ENTRY(lcr0) 127ENTRY(amd64_lcr0)
93 movq %rdi, %cr0 128 movq %rdi, %cr0
94 ret 129 ret
95END(lcr0) 130END(amd64_lcr0)
96 131
97ENTRY(rcr0) 132ENTRY(amd64_rcr0)
98 movq %cr0, %rax 133 movq %cr0, %rax
99 ret 134 ret
100END(rcr0) 135END(amd64_rcr0)
101 136
102ENTRY(lcr2) 137ENTRY(amd64_lcr2)
103 movq %rdi, %cr2 138 movq %rdi, %cr2
104 ret 139 ret
105END(lcr2) 140END(amd64_lcr2)
106 141
107ENTRY(rcr2) 142ENTRY(amd64_rcr2)
108 movq %cr2, %rax 143 movq %cr2, %rax
109 ret 144 ret
110END(rcr2) 145END(amd64_rcr2)
111 146
112ENTRY(lcr3) 147ENTRY(amd64_lcr3)
113 movq %rdi, %cr3 148 movq %rdi, %cr3
114 ret 149 ret
115END(lcr3) 150END(amd64_lcr3)
116 151
117ENTRY(rcr3) 152ENTRY(amd64_rcr3)
118 movq %cr3, %rax 153 movq %cr3, %rax
119 ret 154 ret
120END(rcr3) 155END(amd64_rcr3)
121#endif 156#endif
122 157
123ENTRY(lcr4) 158ENTRY(lcr4)
124 movq %rdi, %cr4 159 movq %rdi, %cr4
125 ret 160 ret
126END(lcr4) 161END(lcr4)
127 162
128ENTRY(rcr4) 163ENTRY(rcr4)
129 movq %cr4, %rax 164 movq %cr4, %rax
130 ret 165 ret
131END(rcr4) 166END(rcr4)
132 167
133ENTRY(lcr8) 168ENTRY(lcr8)
@@ -149,103 +184,103 @@ END(rcr8) @@ -149,103 +184,103 @@ END(rcr8)
149 * System Programming, section 9.10, "Invalidating the 184 * System Programming, section 9.10, "Invalidating the
150 * Translation Lookaside Buffers (TLBS)": 185 * Translation Lookaside Buffers (TLBS)":
151 * "The following operations invalidate all TLB entries, irrespective 186 * "The following operations invalidate all TLB entries, irrespective
152 * of the setting of the G flag: 187 * of the setting of the G flag:
153 * ... 188 * ...
154 * "(P6 family processors only): Writing to control register CR4 to 189 * "(P6 family processors only): Writing to control register CR4 to
155 * modify the PSE, PGE, or PAE flag." 190 * modify the PSE, PGE, or PAE flag."
156 * 191 *
157 * (the alternatives not quoted above are not an option here.) 192 * (the alternatives not quoted above are not an option here.)
158 * 193 *
159 * If PGE is not in use, we reload CR3. 194 * If PGE is not in use, we reload CR3.
160 */ 195 */
161#ifndef XEN 196#ifndef XEN
162ENTRY(tlbflushg) 197ENTRY(amd64_tlbflushg)
163 movq %cr4, %rax 198 movq %cr4, %rax
164 testq $CR4_PGE, %rax 199 testq $CR4_PGE, %rax
165 jz 1f 200 jz 1f
166 movq %rax, %rdx 201 movq %rax, %rdx
167 andq $~CR4_PGE, %rdx 202 andq $~CR4_PGE, %rdx
168 movq %rdx, %cr4 203 movq %rdx, %cr4
169 movq %rax, %cr4 204 movq %rax, %cr4
170 ret 205 ret
171END(tlbflushg) 206END(amd64_tlbflushg)
172 207
173ENTRY(tlbflush) 208ENTRY(amd64_tlbflush)
1741: 2091:
175 movq %cr3, %rax 210 movq %cr3, %rax
176 movq %rax, %cr3 211 movq %rax, %cr3
177 ret 212 ret
178END(tlbflush) 213END(amd64_tlbflush)
179 214
180ENTRY(ldr0) 215ENTRY(amd64_ldr0)
181 movq %rdi, %dr0 216 movq %rdi, %dr0
182 ret 217 ret
183END(ldr0) 218END(amd64_ldr0)
184 219
185ENTRY(rdr0) 220ENTRY(amd64_rdr0)
186 movq %dr0, %rax 221 movq %dr0, %rax
187 ret 222 ret
188END(rdr0) 223END(amd64_rdr0)
189 224
190ENTRY(ldr1) 225ENTRY(amd64_ldr1)
191 movq %rdi, %dr1 226 movq %rdi, %dr1
192 ret 227 ret
193END(ldr1) 228END(amd64_ldr1)
194 229
195ENTRY(rdr1) 230ENTRY(amd64_rdr1)
196 movq %dr1, %rax 231 movq %dr1, %rax
197 ret 232 ret
198END(rdr1) 233END(amd64_rdr1)
199 234
200ENTRY(ldr2) 235ENTRY(amd64_ldr2)
201 movq %rdi, %dr2 236 movq %rdi, %dr2
202 ret 237 ret
203END(ldr2) 238END(amd64_ldr2)
204 239
205ENTRY(rdr2) 240ENTRY(amd64_rdr2)
206 movq %dr2, %rax 241 movq %dr2, %rax
207 ret 242 ret
208END(rdr2) 243END(amd64_rdr2)
209 244
210ENTRY(ldr3) 245ENTRY(amd64_ldr3)
211 movq %rdi, %dr3 246 movq %rdi, %dr3
212 ret 247 ret
213END(ldr3) 248END(amd64_ldr3)
214 249
215ENTRY(rdr3) 250ENTRY(amd64_rdr3)
216 movq %dr3, %rax 251 movq %dr3, %rax
217 ret 252 ret
218END(rdr3) 253END(amd64_rdr3)
219 254
220ENTRY(ldr6) 255ENTRY(amd64_ldr6)
221 movq %rdi, %dr6 256 movq %rdi, %dr6
222 ret 257 ret
223END(ldr6) 258END(amd64_ldr6)
224 259
225ENTRY(rdr6) 260ENTRY(amd64_rdr6)
226 movq %dr6, %rax 261 movq %dr6, %rax
227 ret 262 ret
228END(rdr6) 263END(amd64_rdr6)
229 264
230ENTRY(ldr7) 265ENTRY(amd64_ldr7)
231 movq %rdi, %dr7 266 movq %rdi, %dr7
232 ret 267 ret
233END(ldr7) 268END(amd64_ldr7)
234 269
235ENTRY(rdr7) 270ENTRY(amd64_rdr7)
236 movq %dr7, %rax 271 movq %dr7, %rax
237 ret 272 ret
238END(rdr7) 273END(amd64_rdr7)
239 274
240ENTRY(x86_disable_intr) 275ENTRY(x86_disable_intr)
241 cli 276 cli
242 ret 277 ret
243END(x86_disable_intr) 278END(x86_disable_intr)
244 279
245ENTRY(x86_enable_intr) 280ENTRY(x86_enable_intr)
246 sti 281 sti
247 ret 282 ret
248END(x86_enable_intr) 283END(x86_enable_intr)
249 284
250ENTRY(x86_read_flags) 285ENTRY(x86_read_flags)
251 pushfq 286 pushfq

cvs diff -r1.25 -r1.26 src/sys/arch/i386/i386/cpufunc.S

--- src/sys/arch/i386/i386/cpufunc.S 2018/10/18 04:11:14 1.25
+++ src/sys/arch/i386/i386/cpufunc.S 2018/12/22 21:27:22 1.26
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $ */ 1/* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -28,65 +28,77 @@ @@ -28,65 +28,77 @@
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 * 34 *
35 * These are shared with NetBSD/xen. 35 * These are shared with NetBSD/xen.
36 */ 36 */
37 37
38#include <sys/errno.h> 38#include <sys/errno.h>
39 39
40#include <machine/asm.h> 40#include <machine/asm.h>
41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $");
42 42
43#include "opt_xen.h" 43#include "opt_xen.h"
44 44
45#include <machine/specialreg.h> 45#include <machine/specialreg.h>
46#include <machine/segments.h> 46#include <machine/segments.h>
47 47
48#include "assym.h" 48#include "assym.h"
49 49
 50/*
 51 * These functions below should always be accessed via the corresponding wrapper
 52 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 53 *
 54 * We use this rather roundabout method so that a runtime wrapper function may
 55 * be made available for PVHVM, which could override both native and PV aliases
 56 * and decide which to invoke at run time.
 57 */
 58
 59WEAK_ALIAS(lidt, i386_lidt)
 60WEAK_ALIAS(rcr3, i386_rcr3)
 61
50ENTRY(x86_lfence) 62ENTRY(x86_lfence)
51 lock 63 lock
52 addl $0, -4(%esp) 64 addl $0, -4(%esp)
53 ret 65 ret
54END(x86_lfence) 66END(x86_lfence)
55 67
56ENTRY(x86_sfence) 68ENTRY(x86_sfence)
57 lock 69 lock
58 addl $0, -4(%esp) 70 addl $0, -4(%esp)
59 ret 71 ret
60END(x86_sfence) 72END(x86_sfence)
61 73
62ENTRY(x86_mfence) 74ENTRY(x86_mfence)
63 lock 75 lock
64 addl $0, -4(%esp) 76 addl $0, -4(%esp)
65 ret 77 ret
66END(x86_mfence) 78END(x86_mfence)
67 79
68#ifndef XEN 80#ifndef XEN
69ENTRY(lidt) 81ENTRY(i386_lidt)
70 movl 4(%esp), %eax 82 movl 4(%esp), %eax
71 lidt (%eax) 83 lidt (%eax)
72 ret 84 ret
73END(lidt) 85END(i386_lidt)
74#endif /* XEN */ 86#endif /* XEN */
75 87
76ENTRY(rcr3) 88ENTRY(i386_rcr3)
77 movl %cr3, %eax 89 movl %cr3, %eax
78 ret 90 ret
79END(rcr3) 91END(i386_rcr3)
80 92
81ENTRY(lcr4) 93ENTRY(lcr4)
82 movl 4(%esp), %eax 94 movl 4(%esp), %eax
83 movl %eax, %cr4 95 movl %eax, %cr4
84 ret 96 ret
85END(lcr4) 97END(lcr4)
86 98
87ENTRY(rcr4) 99ENTRY(rcr4)
88 movl %cr4, %eax 100 movl %cr4, %eax
89 ret 101 ret
90END(rcr4) 102END(rcr4)
91 103
92ENTRY(x86_read_flags) 104ENTRY(x86_read_flags)

cvs diff -r1.18 -r1.19 src/sys/arch/i386/i386/i386func.S

--- src/sys/arch/i386/i386/i386func.S 2016/11/27 14:49:21 1.18
+++ src/sys/arch/i386/i386/i386func.S 2018/12/22 21:27:22 1.19
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $ */ 1/* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum, and by Andrew Doran. 8 * by Charles M. Hannum, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -26,194 +26,227 @@ @@ -26,194 +26,227 @@
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Functions to provide access to i386-specific instructions. 33 * Functions to provide access to i386-specific instructions.
34 * 34 *
35 * These are _not_ shared with NetBSD/xen. 35 * These are _not_ shared with NetBSD/xen.
36 */ 36 */
37 37
38#include <machine/asm.h> 38#include <machine/asm.h>
39__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $");
40 40
41#include <machine/specialreg.h> 41#include <machine/specialreg.h>
42#include <machine/segments.h> 42#include <machine/segments.h>
43 43
44#include "assym.h" 44#include "assym.h"
45 45
46ENTRY(invlpg) 46/*
 47 * These functions below should always be accessed via the corresponding wrapper
 48 * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS()
 49 *
 50 * We use this rather roundabout method so that a runtime wrapper function may
 51 * be made available for PVHVM, which could override both native and PV aliases
 52 * and decide which to invoke at run time.
 53 */
 54
 55WEAK_ALIAS(invlpg, i386_invlpg)
 56WEAK_ALIAS(lldt, i386_lldt)
 57WEAK_ALIAS(ltr, i386_ltr)
 58WEAK_ALIAS(lcr0, i386_lcr0)
 59WEAK_ALIAS(rcr0, i386_rcr0)
 60WEAK_ALIAS(lcr3, i386_lcr3)
 61WEAK_ALIAS(tlbflush, i386_tlbflush)
 62WEAK_ALIAS(tlbflushg, i386_tlbflushg)
 63WEAK_ALIAS(rdr0, i386_rdr0)
 64WEAK_ALIAS(ldr0, i386_ldr0)
 65WEAK_ALIAS(rdr1, i386_rdr1)
 66WEAK_ALIAS(ldr1, i386_ldr1)
 67WEAK_ALIAS(rdr2, i386_rdr2)
 68WEAK_ALIAS(ldr2, i386_ldr2)
 69WEAK_ALIAS(rdr3, i386_rdr3)
 70WEAK_ALIAS(ldr3, i386_ldr3)
 71WEAK_ALIAS(rdr6, i386_rdr6)
 72WEAK_ALIAS(ldr6, i386_ldr6)
 73WEAK_ALIAS(rdr7, i386_rdr7)
 74WEAK_ALIAS(ldr7, i386_ldr7)
 75WEAK_ALIAS(rcr2, i386_rcr2)
 76WEAK_ALIAS(lcr2, i386_lcr2)
 77WEAK_ALIAS(wbinvd, i386_wbinvd)
 78
 79ENTRY(i386_invlpg)
47 movl 4(%esp), %eax 80 movl 4(%esp), %eax
48 invlpg (%eax) 81 invlpg (%eax)
49 ret 82 ret
50END(invlpg) 83END(i386_invlpg)
51 84
52ENTRY(lldt) 85ENTRY(i386_lldt)
53 movl 4(%esp), %eax 86 movl 4(%esp), %eax
54 cmpl %eax, CPUVAR(CURLDT) 87 cmpl %eax, CPUVAR(CURLDT)
55 jne 1f 88 jne 1f
56 ret 89 ret
571: 901:
58 movl %eax, CPUVAR(CURLDT) 91 movl %eax, CPUVAR(CURLDT)
59 lldt %ax 92 lldt %ax
60 ret 93 ret
61END(lldt) 94END(i386_lldt)
62 95
63ENTRY(ltr) 96ENTRY(i386_ltr)
64 movl 4(%esp), %eax 97 movl 4(%esp), %eax
65 ltr %ax 98 ltr %ax
66 ret 99 ret
67END(ltr) 100END(i386_ltr)
68 101
69ENTRY(lcr0) 102ENTRY(i386_lcr0)
70 movl 4(%esp), %eax 103 movl 4(%esp), %eax
71 movl %eax, %cr0 104 movl %eax, %cr0
72 ret 105 ret
73END(lcr0) 106END(i386_lcr0)
74 107
75ENTRY(rcr0) 108ENTRY(i386_rcr0)
76 movl %cr0, %eax 109 movl %cr0, %eax
77 ret 110 ret
78END(rcr0) 111END(i386_rcr0)
79 112
80ENTRY(lcr3) 113ENTRY(i386_lcr3)
81 movl 4(%esp), %eax 114 movl 4(%esp), %eax
82 movl %eax, %cr3 115 movl %eax, %cr3
83 ret 116 ret
84END(lcr3) 117END(i386_lcr3)
85 118
86/* 119/*
87 * Big hammer: flush all TLB entries, including ones from PTE's 120 * Big hammer: flush all TLB entries, including ones from PTE's
88 * with the G bit set. This should only be necessary if TLB 121 * with the G bit set. This should only be necessary if TLB
89 * shootdown falls far behind. 122 * shootdown falls far behind.
90 * 123 *
91 * Intel Architecture Software Developer's Manual, Volume 3, 124 * Intel Architecture Software Developer's Manual, Volume 3,
92 * System Programming, section 9.10, "Invalidating the 125 * System Programming, section 9.10, "Invalidating the
93 * Translation Lookaside Buffers (TLBS)": 126 * Translation Lookaside Buffers (TLBS)":
94 * "The following operations invalidate all TLB entries, irrespective 127 * "The following operations invalidate all TLB entries, irrespective
95 * of the setting of the G flag: 128 * of the setting of the G flag:
96 * ... 129 * ...
97 * "(P6 family processors only): Writing to control register CR4 to 130 * "(P6 family processors only): Writing to control register CR4 to
98 * modify the PSE, PGE, or PAE flag." 131 * modify the PSE, PGE, or PAE flag."
99 * 132 *
100 * (the alternatives not quoted above are not an option here.) 133 * (the alternatives not quoted above are not an option here.)
101 * 134 *
102 * If PGE is not in use, we reload CR3. Check for the PGE feature 135 * If PGE is not in use, we reload CR3. Check for the PGE feature
103 * first since i486 does not have CR4. Note: the feature flag may 136 * first since i486 does not have CR4. Note: the feature flag may
104 * be present while the actual PGE functionality not yet enabled. 137 * be present while the actual PGE functionality not yet enabled.
105 */ 138 */
106ENTRY(tlbflushg) 139ENTRY(i386_tlbflushg)
107 testl $CPUID_PGE, _C_LABEL(cpu_feature) 140 testl $CPUID_PGE, _C_LABEL(cpu_feature)
108 jz 1f 141 jz 1f
109 movl %cr4, %eax 142 movl %cr4, %eax
110 testl $CR4_PGE, %eax 143 testl $CR4_PGE, %eax
111 jz 1f 144 jz 1f
112 movl %eax, %edx 145 movl %eax, %edx
113 andl $~CR4_PGE, %edx 146 andl $~CR4_PGE, %edx
114 movl %edx, %cr4 147 movl %edx, %cr4
115 movl %eax, %cr4 148 movl %eax, %cr4
116 ret 149 ret
117END(tlbflushg) 150END(i386_tlbflushg)
118 151
119ENTRY(tlbflush) 152ENTRY(i386_tlbflush)
1201: 1531:
121 movl %cr3, %eax 154 movl %cr3, %eax
122 movl %eax, %cr3 155 movl %eax, %cr3
123 ret 156 ret
124END(tlbflush) 157END(i386_tlbflush)
125 158
126ENTRY(ldr0) 159ENTRY(i386_ldr0)
127 movl 4(%esp), %eax 160 movl 4(%esp), %eax
128 movl %eax, %dr0 161 movl %eax, %dr0
129 ret 162 ret
130END(ldr0) 163END(i386_ldr0)
131 164
132ENTRY(rdr0) 165ENTRY(i386_rdr0)
133 movl %dr0, %eax 166 movl %dr0, %eax
134 ret 167 ret
135END(rdr0) 168END(i386_rdr0)
136 169
137ENTRY(ldr1) 170ENTRY(i386_ldr1)
138 movl 4(%esp), %eax 171 movl 4(%esp), %eax
139 movl %eax, %dr1 172 movl %eax, %dr1
140 ret 173 ret
141END(ldr1) 174END(i386_ldr1)
142 175
143ENTRY(rdr1) 176ENTRY(i386_rdr1)
144 movl %dr1, %eax 177 movl %dr1, %eax
145 ret 178 ret
146END(rdr1) 179END(i386_rdr1)
147 180
148ENTRY(ldr2) 181ENTRY(i386_ldr2)
149 movl 4(%esp), %eax 182 movl 4(%esp), %eax
150 movl %eax, %dr2 183 movl %eax, %dr2
151 ret 184 ret
152END(ldr2) 185END(i386_ldr2)
153 186
154ENTRY(rdr2) 187ENTRY(i386_rdr2)
155 movl %dr2, %eax 188 movl %dr2, %eax
156 ret 189 ret
157END(rdr2) 190END(i386_rdr2)
158 191
159ENTRY(ldr3) 192ENTRY(i386_ldr3)
160 movl 4(%esp), %eax 193 movl 4(%esp), %eax
161 movl %eax, %dr3 194 movl %eax, %dr3
162 ret 195 ret
163END(ldr3) 196END(i386_ldr3)
164 197
165ENTRY(rdr3) 198ENTRY(i386_rdr3)
166 movl %dr3, %eax 199 movl %dr3, %eax
167 ret 200 ret
168END(rdr3) 201END(i386_rdr3)
169 202
170ENTRY(ldr6) 203ENTRY(i386_ldr6)
171 movl 4(%esp), %eax 204 movl 4(%esp), %eax
172 movl %eax, %dr6 205 movl %eax, %dr6
173 ret 206 ret
174END(ldr6) 207END(i386_ldr6)
175 208
176ENTRY(rdr6) 209ENTRY(i386_rdr6)
177 movl %dr6, %eax 210 movl %dr6, %eax
178 ret 211 ret
179END(rdr6) 212END(i386_rdr6)
180 213
181ENTRY(ldr7) 214ENTRY(i386_ldr7)
182 movl 4(%esp), %eax 215 movl 4(%esp), %eax
183 movl %eax, %dr7 216 movl %eax, %dr7
184 ret 217 ret
185END(ldr7) 218END(i386_ldr7)
186 219
187ENTRY(rdr7) 220ENTRY(i386_rdr7)
188 movl %dr7, %eax 221 movl %dr7, %eax
189 ret 222 ret
190END(rdr7) 223END(i386_rdr7)
191 224
192ENTRY(rcr2) 225ENTRY(i386_rcr2)
193 movl %cr2, %eax 226 movl %cr2, %eax
194 ret 227 ret
195END(rcr2) 228END(i386_rcr2)
196 229
197ENTRY(lcr2) 230ENTRY(i386_lcr2)
198 movl 4(%esp), %eax 231 movl 4(%esp), %eax
199 movl %eax, %cr2 232 movl %eax, %cr2
200 ret 233 ret
201END(lcr2) 234END(i386_lcr2)
202 235
203ENTRY(wbinvd) 236ENTRY(i386_wbinvd)
204 wbinvd 237 wbinvd
205 ret 238 ret
206END(wbinvd) 239END(i386_wbinvd)
207 240
208ENTRY(x86_disable_intr) 241ENTRY(x86_disable_intr)
209 cli 242 cli
210 ret 243 ret
211END(x86_disable_intr) 244END(x86_disable_intr)
212 245
213ENTRY(x86_enable_intr) 246ENTRY(x86_enable_intr)
214 sti 247 sti
215 ret 248 ret
216END(x86_enable_intr) 249END(x86_enable_intr)
217 250
218/* 251/*
219 * void lgdt(struct region_descriptor *rdp); 252 * void lgdt(struct region_descriptor *rdp);

cvs diff -r1.22 -r1.23 src/sys/arch/xen/x86/xenfunc.c

--- src/sys/arch/xen/x86/xenfunc.c 2018/10/18 04:17:18 1.22
+++ src/sys/arch/xen/x86/xenfunc.c 2018/12/22 21:27:22 1.23
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */ 1/* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2004 Christian Limpach. 4 * Copyright (c) 2004 Christian Limpach.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -16,55 +16,121 @@ @@ -16,55 +16,121 @@
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $");
30 30
31#include <sys/param.h> 31#include <sys/param.h>
32 32
33#include <uvm/uvm_extern.h> 33#include <uvm/uvm_extern.h>
34 34
35#include <machine/intr.h> 35#include <machine/intr.h>
36#include <machine/vmparam.h> 36#include <machine/vmparam.h>
37#include <machine/pmap.h> 37#include <machine/pmap.h>
38#include <xen/xen.h> 38#include <xen/xen.h>
39#include <xen/hypervisor.h> 39#include <xen/hypervisor.h>
40//#include <xen/evtchn.h> 40//#include <xen/evtchn.h>
41#include <xen/xenpmap.h> 41#include <xen/xenpmap.h>
42#include <machine/pte.h> 42#include <machine/pte.h>
43 43
44#define MAX_XEN_IDT 128 44#define MAX_XEN_IDT 128
45 45
46void xen_set_ldt(vaddr_t, uint32_t); 46void xen_set_ldt(vaddr_t, uint32_t);
47 47
 48/*
 49 * We don't need to export these declarations, since they are used via
 50 * linker aliasing. They should always be accessed via the
 51 * corresponding wrapper function names defined in
 52 * x86/include/cpufunc.h and exported as __weak_alias()
 53 *
 54 * We use this rather roundabout method so that a runtime wrapper
 55 * function may be made available for PVHVM, which could override both
 56 * native and PV aliases and decide which to invoke at run time.
 57 */
 58
 59void xen_invlpg(vaddr_t);
 60void xen_lidt(struct region_descriptor *);
 61void xen_lldt(u_short);
 62void xen_ltr(u_short);
 63void xen_lcr0(u_long);
 64u_long xen_rcr0(void);
 65void xen_tlbflush(void);
 66void xen_tlbflushg(void);
 67register_t xen_rdr0(void);
 68void xen_ldr0(register_t);
 69register_t xen_rdr1(void);
 70void xen_ldr1(register_t);
 71register_t xen_rdr2(void);
 72void xen_ldr2(register_t);
 73register_t xen_rdr3(void);
 74void xen_ldr3(register_t);
 75register_t xen_rdr6(void);
 76void xen_ldr6(register_t);
 77register_t xen_rdr7(void);
 78void xen_ldr7(register_t);
 79void xen_wbinvd(void);
 80vaddr_t xen_rcr2(void);
 81
 82__weak_alias(invlpg, xen_invlpg);
 83__weak_alias(lidt, xen_lidt);
 84__weak_alias(lldt, xen_lldt);
 85__weak_alias(ltr, xen_ltr);
 86__weak_alias(lcr0, xen_lcr0);
 87__weak_alias(rcr0, xen_rcr0);
 88__weak_alias(tlbflush, xen_tlbflush);
 89__weak_alias(tlbflushg, xen_tlbflushg);
 90__weak_alias(rdr0, xen_rdr0);
 91__weak_alias(ldr0, xen_ldr0);
 92__weak_alias(rdr1, xen_rdr1);
 93__weak_alias(ldr1, xen_ldr1);
 94__weak_alias(rdr2, xen_rdr2);
 95__weak_alias(ldr2, xen_ldr2);
 96__weak_alias(rdr3, xen_rdr3);
 97__weak_alias(ldr3, xen_ldr3);
 98__weak_alias(rdr6, xen_rdr6);
 99__weak_alias(ldr6, xen_ldr6);
 100__weak_alias(rdr7, xen_rdr7);
 101__weak_alias(ldr7, xen_ldr7);
 102__weak_alias(wbinvd, xen_wbinvd);
 103__weak_alias(rcr2, xen_rcr2);
 104
 105#ifdef __x86_64__
 106void xen_setusergs(int);
 107__weak_alias(setusergs, xen_setusergs);
 108#else
 109void xen_lcr3(vaddr_t);
 110__weak_alias(lcr3, xen_lcr3);
 111
 112#endif
 113
48void  114void
49invlpg(vaddr_t addr) 115xen_invlpg(vaddr_t addr)
50{ 116{
51 int s = splvm(); /* XXXSMP */ 117 int s = splvm(); /* XXXSMP */
52 xpq_queue_invlpg(addr); 118 xpq_queue_invlpg(addr);
53 splx(s); 119 splx(s);
54}  120}
55 121
56void 122void
57lidt(struct region_descriptor *rd) 123xen_lidt(struct region_descriptor *rd)
58{ 124{
59 /*  125 /*
60 * We need to do this because we can't assume kmem_alloc(9) 126 * We need to do this because we can't assume kmem_alloc(9)
61 * will be available at the boot stage when this is called. 127 * will be available at the boot stage when this is called.
62 */ 128 */
63 static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); 129 static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));
64 memset(xen_idt_page, 0, PAGE_SIZE); 130 memset(xen_idt_page, 0, PAGE_SIZE);
65  131
66 struct trap_info *xen_idt = (void * )xen_idt_page; 132 struct trap_info *xen_idt = (void * )xen_idt_page;
67 int xen_idt_idx = 0; 133 int xen_idt_idx = 0;
68  134
69 struct trap_info * idd = (void *) rd->rd_base; 135 struct trap_info * idd = (void *) rd->rd_base;
70 const int nidt = rd->rd_limit / (sizeof *idd);  136 const int nidt = rd->rd_limit / (sizeof *idd);
@@ -89,177 +155,177 @@ lidt(struct region_descriptor *rd) @@ -89,177 +155,177 @@ lidt(struct region_descriptor *rd)
89#endif /* __x86_64 */ 155#endif /* __x86_64 */
90 156
91 /* Hook it up in the hypervisor */ 157 /* Hook it up in the hypervisor */
92 if (HYPERVISOR_set_trap_table(xen_idt)) 158 if (HYPERVISOR_set_trap_table(xen_idt))
93 panic("HYPERVISOR_set_trap_table() failed"); 159 panic("HYPERVISOR_set_trap_table() failed");
94 160
95#if defined(__x86_64__) 161#if defined(__x86_64__)
96 /* reset */ 162 /* reset */
97 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); 163 pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE);
98#endif /* __x86_64 */ 164#endif /* __x86_64 */
99} 165}
100 166
101void 167void
102lldt(u_short sel) 168xen_lldt(u_short sel)
103{ 169{
104#ifndef __x86_64__ 170#ifndef __x86_64__
105 struct cpu_info *ci; 171 struct cpu_info *ci;
106 172
107 ci = curcpu(); 173 ci = curcpu();
108 174
109 if (ci->ci_curldt == sel) 175 if (ci->ci_curldt == sel)
110 return; 176 return;
111 if (sel == GSEL(GLDT_SEL, SEL_KPL)) 177 if (sel == GSEL(GLDT_SEL, SEL_KPL))
112 xen_set_ldt((vaddr_t)ldtstore, NLDT); 178 xen_set_ldt((vaddr_t)ldtstore, NLDT);
113 else 179 else
114 xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, 180 xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
115 ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); 181 ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
116 ci->ci_curldt = sel; 182 ci->ci_curldt = sel;
117#endif 183#endif
118} 184}
119 185
120void 186void
121ltr(u_short sel) 187xen_ltr(u_short sel)
122{ 188{
123 panic("XXX ltr not supported\n"); 189 panic("XXX ltr not supported\n");
124} 190}
125 191
126void 192void
127lcr0(u_long val) 193xen_lcr0(u_long val)
128{ 194{
129 panic("XXX lcr0 not supported\n"); 195 panic("XXX lcr0 not supported\n");
130} 196}
131 197
132u_long 198u_long
133rcr0(void) 199xen_rcr0(void)
134{ 200{
135 /* XXX: handle X86_CR0_TS ? */ 201 /* XXX: handle X86_CR0_TS ? */
136 return 0; 202 return 0;
137} 203}
138 204
139#ifndef __x86_64__ 205#ifndef __x86_64__
140void 206void
141lcr3(vaddr_t val) 207xen_lcr3(vaddr_t val)
142{ 208{
143 int s = splvm(); /* XXXSMP */ 209 int s = splvm(); /* XXXSMP */
144 xpq_queue_pt_switch(xpmap_ptom_masked(val)); 210 xpq_queue_pt_switch(xpmap_ptom_masked(val));
145 splx(s); 211 splx(s);
146} 212}
147#endif 213#endif
148 214
149void 215void
150tlbflush(void) 216xen_tlbflush(void)
151{ 217{
152 int s = splvm(); /* XXXSMP */ 218 int s = splvm(); /* XXXSMP */
153 xpq_queue_tlb_flush(); 219 xpq_queue_tlb_flush();
154 splx(s); 220 splx(s);
155} 221}
156 222
157void 223void
158tlbflushg(void) 224xen_tlbflushg(void)
159{ 225{
160 tlbflush(); 226 tlbflush();
161} 227}
162 228
163register_t 229register_t
164rdr0(void) 230xen_rdr0(void)
165{ 231{
166 232
167 return HYPERVISOR_get_debugreg(0); 233 return HYPERVISOR_get_debugreg(0);
168} 234}
169 235
170void 236void
171ldr0(register_t val) 237xen_ldr0(register_t val)
172{ 238{
173 239
174 HYPERVISOR_set_debugreg(0, val); 240 HYPERVISOR_set_debugreg(0, val);
175} 241}
176 242
177register_t 243register_t
178rdr1(void) 244xen_rdr1(void)
179{ 245{
180 246
181 return HYPERVISOR_get_debugreg(1); 247 return HYPERVISOR_get_debugreg(1);
182} 248}
183 249
184void 250void
185ldr1(register_t val) 251xen_ldr1(register_t val)
186{ 252{
187 253
188 HYPERVISOR_set_debugreg(1, val); 254 HYPERVISOR_set_debugreg(1, val);
189} 255}
190 256
191register_t 257register_t
192rdr2(void) 258xen_rdr2(void)
193{ 259{
194 260
195 return HYPERVISOR_get_debugreg(2); 261 return HYPERVISOR_get_debugreg(2);
196} 262}
197 263
198void 264void
199ldr2(register_t val) 265xen_ldr2(register_t val)
200{ 266{
201 267
202 HYPERVISOR_set_debugreg(2, val); 268 HYPERVISOR_set_debugreg(2, val);
203} 269}
204 270
205register_t 271register_t
206rdr3(void) 272xen_rdr3(void)
207{ 273{
208 274
209 return HYPERVISOR_get_debugreg(3); 275 return HYPERVISOR_get_debugreg(3);
210} 276}
211 277
212void 278void
213ldr3(register_t val) 279xen_ldr3(register_t val)
214{ 280{
215 281
216 HYPERVISOR_set_debugreg(3, val); 282 HYPERVISOR_set_debugreg(3, val);
217} 283}
218register_t 284register_t
219rdr6(void) 285xen_rdr6(void)
220{ 286{
221 287
222 return HYPERVISOR_get_debugreg(6); 288 return HYPERVISOR_get_debugreg(6);
223} 289}
224 290
225void 291void
226ldr6(register_t val) 292xen_ldr6(register_t val)
227{ 293{
228 294
229 HYPERVISOR_set_debugreg(6, val); 295 HYPERVISOR_set_debugreg(6, val);
230} 296}
231 297
232register_t 298register_t
233rdr7(void) 299xen_rdr7(void)
234{ 300{
235 301
236 return HYPERVISOR_get_debugreg(7); 302 return HYPERVISOR_get_debugreg(7);
237} 303}
238 304
239void 305void
240ldr7(register_t val) 306xen_ldr7(register_t val)
241{ 307{
242 308
243 HYPERVISOR_set_debugreg(7, val); 309 HYPERVISOR_set_debugreg(7, val);
244} 310}
245 311
246void 312void
247wbinvd(void) 313xen_wbinvd(void)
248{ 314{
249 315
250 xpq_flush_cache(); 316 xpq_flush_cache();
251} 317}
252 318
253vaddr_t 319vaddr_t
254rcr2(void) 320xen_rcr2(void)
255{ 321{
256 return curcpu()->ci_vcpu->arch.cr2; 322 return curcpu()->ci_vcpu->arch.cr2;
257} 323}
258 324
259#ifdef __x86_64__ 325#ifdef __x86_64__
260void 326void
261setusergs(int gssel) 327xen_setusergs(int gssel)
262{ 328{
263 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); 329 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
264} 330}
265#endif 331#endif