Introduce a weak alias method of exporting different implementations of the same API. For example: the amd64 native implementation of invlpg() now becomes amd64_invlpg() with a weak symbol export of invlpg(), while the XEN implementation becomes xen_invlpg(), also weakly exported as invlpg(). Note that linking in both together without having an override function named invlpg() would be a mistake, as we have limited control over which of the two options would emerge as the finally exported invlpg(), resulting in a potential situation where the wrong function is finally exported. This change avoids this situation. We should, however, include an override function invlpg() in that case, such that it is able to then pass on the call to the appropriate backing function (amd64_invlpg() in the case of native, and xen_invlpg() in the case of XEN virtualisation) at runtime. This change does not introduce such a function and therefore does not alter builds to include native as well as XEN implementations in the same binary. This will be done later, with the introduction of XEN PVHVM mode, where precisely such a runtime switch is required. There are no operational changes introduced by this change.

diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S
(cherry)
--- src/sys/arch/amd64/amd64/cpufunc.S 2018/07/21 06:09:13 1.33
+++ src/sys/arch/amd64/amd64/cpufunc.S 2018/12/22 21:27:22 1.34
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: cpufunc.S,v 1.33 2018/07/21 06:09:13 maxv Exp $ */ | 1 | /* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
@@ -53,81 +53,116 @@ ENTRY(x86_lfence) | @@ -53,81 +53,116 @@ ENTRY(x86_lfence) | |||
53 | ret | 53 | ret | |
54 | END(x86_lfence) | 54 | END(x86_lfence) | |
55 | 55 | |||
56 | ENTRY(x86_sfence) | 56 | ENTRY(x86_sfence) | |
57 | sfence | 57 | sfence | |
58 | ret | 58 | ret | |
59 | END(x86_sfence) | 59 | END(x86_sfence) | |
60 | 60 | |||
61 | ENTRY(x86_mfence) | 61 | ENTRY(x86_mfence) | |
62 | mfence | 62 | mfence | |
63 | ret | 63 | ret | |
64 | END(x86_mfence) | 64 | END(x86_mfence) | |
65 | 65 | |||
66 | /* | |||
67 | * These functions below should always be accessed via the corresponding wrapper | |||
68 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
69 | * | |||
70 | * We use this rather roundabout method so that a runtime wrapper function may | |||
71 | * be made available for PVHVM, which could override both native and PV aliases | |||
72 | * and decide which to invoke at run time. | |||
73 | */ | |||
74 | ||||
75 | WEAK_ALIAS(invlpg, amd64_invlpg) | |||
76 | WEAK_ALIAS(lidt, amd64_lidt) | |||
77 | WEAK_ALIAS(lldt, amd64_lldt) | |||
78 | WEAK_ALIAS(ltr, amd64_ltr) | |||
79 | WEAK_ALIAS(lcr0, amd64_lcr0) | |||
80 | WEAK_ALIAS(rcr0, amd64_rcr0) | |||
81 | WEAK_ALIAS(rcr2, amd64_rcr2) | |||
82 | WEAK_ALIAS(lcr2, amd64_lcr2) | |||
83 | WEAK_ALIAS(rcr3, amd64_rcr3) | |||
84 | WEAK_ALIAS(lcr3, amd64_lcr3) | |||
85 | WEAK_ALIAS(tlbflush, amd64_tlbflush) | |||
86 | WEAK_ALIAS(tlbflushg, amd64_tlbflushg) | |||
87 | WEAK_ALIAS(rdr0, amd64_rdr0) | |||
88 | WEAK_ALIAS(ldr0, amd64_ldr0) | |||
89 | WEAK_ALIAS(rdr1, amd64_rdr1) | |||
90 | WEAK_ALIAS(ldr1, amd64_ldr1) | |||
91 | WEAK_ALIAS(rdr2, amd64_rdr2) | |||
92 | WEAK_ALIAS(ldr2, amd64_ldr2) | |||
93 | WEAK_ALIAS(rdr3, amd64_rdr3) | |||
94 | WEAK_ALIAS(ldr3, amd64_ldr3) | |||
95 | WEAK_ALIAS(rdr6, amd64_rdr6) | |||
96 | WEAK_ALIAS(ldr6, amd64_ldr6) | |||
97 | WEAK_ALIAS(rdr7, amd64_rdr7) | |||
98 | WEAK_ALIAS(ldr7, amd64_ldr7) | |||
99 | WEAK_ALIAS(wbinvd, amd64_wbinvd) | |||
100 | ||||
66 | #ifndef XEN | 101 | #ifndef XEN | |
67 | ENTRY(invlpg) | 102 | ENTRY(amd64_invlpg) | |
68 | invlpg (%rdi) | 103 | invlpg (%rdi) | |
69 | ret | 104 | ret | |
70 | END(invlpg) | 105 | END(amd64_invlpg) | |
71 | 106 | |||
72 | ENTRY(lidt) | 107 | ENTRY(amd64_lidt) | |
73 | lidt (%rdi) | 108 | lidt (%rdi) | |
74 | ret | 109 | ret | |
75 | END(lidt) | 110 | END(amd64_lidt) | |
76 | 111 | |||
77 | ENTRY(lldt) | 112 | ENTRY(amd64_lldt) | |
78 | cmpl %edi, CPUVAR(CURLDT) | 113 | cmpl %edi, CPUVAR(CURLDT) | |
79 | jne 1f | 114 | jne 1f | |
80 | ret | 115 | ret | |
81 | 1: | 116 | 1: | |
82 | movl %edi, CPUVAR(CURLDT) | 117 | movl %edi, CPUVAR(CURLDT) | |
83 | lldt %di | 118 | lldt %di | |
84 | ret | 119 | ret | |
85 | END(lldt) | 120 | END(amd64_lldt) | |
86 | 121 | |||
87 | ENTRY(ltr) | 122 | ENTRY(amd64_ltr) | |
88 | ltr %di | 123 | ltr %di | |
89 | ret | 124 | ret | |
90 | END(ltr) | 125 | END(amd64_ltr) | |
91 | 126 | |||
92 | ENTRY(lcr0) | 127 | ENTRY(amd64_lcr0) | |
93 | movq %rdi, %cr0 | 128 | movq %rdi, %cr0 | |
94 | ret | 129 | ret | |
95 | END(lcr0) | 130 | END(amd64_lcr0) | |
96 | 131 | |||
97 | ENTRY(rcr0) | 132 | ENTRY(amd64_rcr0) | |
98 | movq %cr0, %rax | 133 | movq %cr0, %rax | |
99 | ret | 134 | ret | |
100 | END(rcr0) | 135 | END(amd64_rcr0) | |
101 | 136 | |||
102 | ENTRY(lcr2) | 137 | ENTRY(amd64_lcr2) | |
103 | movq %rdi, %cr2 | 138 | movq %rdi, %cr2 | |
104 | ret | 139 | ret | |
105 | END(lcr2) | 140 | END(amd64_lcr2) | |
106 | 141 | |||
107 | ENTRY(rcr2) | 142 | ENTRY(amd64_rcr2) | |
108 | movq %cr2, %rax | 143 | movq %cr2, %rax | |
109 | ret | 144 | ret | |
110 | END(rcr2) | 145 | END(amd64_rcr2) | |
111 | 146 | |||
112 | ENTRY(lcr3) | 147 | ENTRY(amd64_lcr3) | |
113 | movq %rdi, %cr3 | 148 | movq %rdi, %cr3 | |
114 | ret | 149 | ret | |
115 | END(lcr3) | 150 | END(amd64_lcr3) | |
116 | 151 | |||
117 | ENTRY(rcr3) | 152 | ENTRY(amd64_rcr3) | |
118 | movq %cr3, %rax | 153 | movq %cr3, %rax | |
119 | ret | 154 | ret | |
120 | END(rcr3) | 155 | END(amd64_rcr3) | |
121 | #endif | 156 | #endif | |
122 | 157 | |||
123 | ENTRY(lcr4) | 158 | ENTRY(lcr4) | |
124 | movq %rdi, %cr4 | 159 | movq %rdi, %cr4 | |
125 | ret | 160 | ret | |
126 | END(lcr4) | 161 | END(lcr4) | |
127 | 162 | |||
128 | ENTRY(rcr4) | 163 | ENTRY(rcr4) | |
129 | movq %cr4, %rax | 164 | movq %cr4, %rax | |
130 | ret | 165 | ret | |
131 | END(rcr4) | 166 | END(rcr4) | |
132 | 167 | |||
133 | ENTRY(lcr8) | 168 | ENTRY(lcr8) | |
@@ -149,103 +184,103 @@ END(rcr8) | @@ -149,103 +184,103 @@ END(rcr8) | |||
149 | * System Programming, section 9.10, "Invalidating the | 184 | * System Programming, section 9.10, "Invalidating the | |
150 | * Translation Lookaside Buffers (TLBS)": | 185 | * Translation Lookaside Buffers (TLBS)": | |
151 | * "The following operations invalidate all TLB entries, irrespective | 186 | * "The following operations invalidate all TLB entries, irrespective | |
152 | * of the setting of the G flag: | 187 | * of the setting of the G flag: | |
153 | * ... | 188 | * ... | |
154 | * "(P6 family processors only): Writing to control register CR4 to | 189 | * "(P6 family processors only): Writing to control register CR4 to | |
155 | * modify the PSE, PGE, or PAE flag." | 190 | * modify the PSE, PGE, or PAE flag." | |
156 | * | 191 | * | |
157 | * (the alternatives not quoted above are not an option here.) | 192 | * (the alternatives not quoted above are not an option here.) | |
158 | * | 193 | * | |
159 | * If PGE is not in use, we reload CR3. | 194 | * If PGE is not in use, we reload CR3. | |
160 | */ | 195 | */ | |
161 | #ifndef XEN | 196 | #ifndef XEN | |
162 | ENTRY(tlbflushg) | 197 | ENTRY(amd64_tlbflushg) | |
163 | movq %cr4, %rax | 198 | movq %cr4, %rax | |
164 | testq $CR4_PGE, %rax | 199 | testq $CR4_PGE, %rax | |
165 | jz 1f | 200 | jz 1f | |
166 | movq %rax, %rdx | 201 | movq %rax, %rdx | |
167 | andq $~CR4_PGE, %rdx | 202 | andq $~CR4_PGE, %rdx | |
168 | movq %rdx, %cr4 | 203 | movq %rdx, %cr4 | |
169 | movq %rax, %cr4 | 204 | movq %rax, %cr4 | |
170 | ret | 205 | ret | |
171 | END(tlbflushg) | 206 | END(amd64_tlbflushg) | |
172 | 207 | |||
173 | ENTRY(tlbflush) | 208 | ENTRY(amd64_tlbflush) | |
174 | 1: | 209 | 1: | |
175 | movq %cr3, %rax | 210 | movq %cr3, %rax | |
176 | movq %rax, %cr3 | 211 | movq %rax, %cr3 | |
177 | ret | 212 | ret | |
178 | END(tlbflush) | 213 | END(amd64_tlbflush) | |
179 | 214 | |||
180 | ENTRY(ldr0) | 215 | ENTRY(amd64_ldr0) | |
181 | movq %rdi, %dr0 | 216 | movq %rdi, %dr0 | |
182 | ret | 217 | ret | |
183 | END(ldr0) | 218 | END(amd64_ldr0) | |
184 | 219 | |||
185 | ENTRY(rdr0) | 220 | ENTRY(amd64_rdr0) | |
186 | movq %dr0, %rax | 221 | movq %dr0, %rax | |
187 | ret | 222 | ret | |
188 | END(rdr0) | 223 | END(amd64_rdr0) | |
189 | 224 | |||
190 | ENTRY(ldr1) | 225 | ENTRY(amd64_ldr1) | |
191 | movq %rdi, %dr1 | 226 | movq %rdi, %dr1 | |
192 | ret | 227 | ret | |
193 | END(ldr1) | 228 | END(amd64_ldr1) | |
194 | 229 | |||
195 | ENTRY(rdr1) | 230 | ENTRY(amd64_rdr1) | |
196 | movq %dr1, %rax | 231 | movq %dr1, %rax | |
197 | ret | 232 | ret | |
198 | END(rdr1) | 233 | END(amd64_rdr1) | |
199 | 234 | |||
200 | ENTRY(ldr2) | 235 | ENTRY(amd64_ldr2) | |
201 | movq %rdi, %dr2 | 236 | movq %rdi, %dr2 | |
202 | ret | 237 | ret | |
203 | END(ldr2) | 238 | END(amd64_ldr2) | |
204 | 239 | |||
205 | ENTRY(rdr2) | 240 | ENTRY(amd64_rdr2) | |
206 | movq %dr2, %rax | 241 | movq %dr2, %rax | |
207 | ret | 242 | ret | |
208 | END(rdr2) | 243 | END(amd64_rdr2) | |
209 | 244 | |||
210 | ENTRY(ldr3) | 245 | ENTRY(amd64_ldr3) | |
211 | movq %rdi, %dr3 | 246 | movq %rdi, %dr3 | |
212 | ret | 247 | ret | |
213 | END(ldr3) | 248 | END(amd64_ldr3) | |
214 | 249 | |||
215 | ENTRY(rdr3) | 250 | ENTRY(amd64_rdr3) | |
216 | movq %dr3, %rax | 251 | movq %dr3, %rax | |
217 | ret | 252 | ret | |
218 | END(rdr3) | 253 | END(amd64_rdr3) | |
219 | 254 | |||
220 | ENTRY(ldr6) | 255 | ENTRY(amd64_ldr6) | |
221 | movq %rdi, %dr6 | 256 | movq %rdi, %dr6 | |
222 | ret | 257 | ret | |
223 | END(ldr6) | 258 | END(amd64_ldr6) | |
224 | 259 | |||
225 | ENTRY(rdr6) | 260 | ENTRY(amd64_rdr6) | |
226 | movq %dr6, %rax | 261 | movq %dr6, %rax | |
227 | ret | 262 | ret | |
228 | END(rdr6) | 263 | END(amd64_rdr6) | |
229 | 264 | |||
230 | ENTRY(ldr7) | 265 | ENTRY(amd64_ldr7) | |
231 | movq %rdi, %dr7 | 266 | movq %rdi, %dr7 | |
232 | ret | 267 | ret | |
233 | END(ldr7) | 268 | END(amd64_ldr7) | |
234 | 269 | |||
235 | ENTRY(rdr7) | 270 | ENTRY(amd64_rdr7) | |
236 | movq %dr7, %rax | 271 | movq %dr7, %rax | |
237 | ret | 272 | ret | |
238 | END(rdr7) | 273 | END(amd64_rdr7) | |
239 | 274 | |||
240 | ENTRY(x86_disable_intr) | 275 | ENTRY(x86_disable_intr) | |
241 | cli | 276 | cli | |
242 | ret | 277 | ret | |
243 | END(x86_disable_intr) | 278 | END(x86_disable_intr) | |
244 | 279 | |||
245 | ENTRY(x86_enable_intr) | 280 | ENTRY(x86_enable_intr) | |
246 | sti | 281 | sti | |
247 | ret | 282 | ret | |
248 | END(x86_enable_intr) | 283 | END(x86_enable_intr) | |
249 | 284 | |||
250 | ENTRY(x86_read_flags) | 285 | ENTRY(x86_read_flags) | |
251 | pushfq | 286 | pushfq |
--- src/sys/arch/i386/i386/cpufunc.S 2018/10/18 04:11:14 1.25
+++ src/sys/arch/i386/i386/cpufunc.S 2018/12/22 21:27:22 1.26
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $ */ | 1 | /* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
@@ -28,65 +28,77 @@ | @@ -28,65 +28,77 @@ | |||
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Functions to provide access to i386-specific instructions. | 33 | * Functions to provide access to i386-specific instructions. | |
34 | * | 34 | * | |
35 | * These are shared with NetBSD/xen. | 35 | * These are shared with NetBSD/xen. | |
36 | */ | 36 | */ | |
37 | 37 | |||
38 | #include <sys/errno.h> | 38 | #include <sys/errno.h> | |
39 | 39 | |||
40 | #include <machine/asm.h> | 40 | #include <machine/asm.h> | |
41 | __KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $"); | 41 | __KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $"); | |
42 | 42 | |||
43 | #include "opt_xen.h" | 43 | #include "opt_xen.h" | |
44 | 44 | |||
45 | #include <machine/specialreg.h> | 45 | #include <machine/specialreg.h> | |
46 | #include <machine/segments.h> | 46 | #include <machine/segments.h> | |
47 | 47 | |||
48 | #include "assym.h" | 48 | #include "assym.h" | |
49 | 49 | |||
50 | /* | |||
51 | * These functions below should always be accessed via the corresponding wrapper | |||
52 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
53 | * | |||
54 | * We use this rather roundabout method so that a runtime wrapper function may | |||
55 | * be made available for PVHVM, which could override both native and PV aliases | |||
56 | * and decide which to invoke at run time. | |||
57 | */ | |||
58 | ||||
59 | WEAK_ALIAS(lidt, i386_lidt) | |||
60 | WEAK_ALIAS(rcr3, i386_rcr3) | |||
61 | ||||
50 | ENTRY(x86_lfence) | 62 | ENTRY(x86_lfence) | |
51 | lock | 63 | lock | |
52 | addl $0, -4(%esp) | 64 | addl $0, -4(%esp) | |
53 | ret | 65 | ret | |
54 | END(x86_lfence) | 66 | END(x86_lfence) | |
55 | 67 | |||
56 | ENTRY(x86_sfence) | 68 | ENTRY(x86_sfence) | |
57 | lock | 69 | lock | |
58 | addl $0, -4(%esp) | 70 | addl $0, -4(%esp) | |
59 | ret | 71 | ret | |
60 | END(x86_sfence) | 72 | END(x86_sfence) | |
61 | 73 | |||
62 | ENTRY(x86_mfence) | 74 | ENTRY(x86_mfence) | |
63 | lock | 75 | lock | |
64 | addl $0, -4(%esp) | 76 | addl $0, -4(%esp) | |
65 | ret | 77 | ret | |
66 | END(x86_mfence) | 78 | END(x86_mfence) | |
67 | 79 | |||
68 | #ifndef XEN | 80 | #ifndef XEN | |
69 | ENTRY(lidt) | 81 | ENTRY(i386_lidt) | |
70 | movl 4(%esp), %eax | 82 | movl 4(%esp), %eax | |
71 | lidt (%eax) | 83 | lidt (%eax) | |
72 | ret | 84 | ret | |
73 | END(lidt) | 85 | END(i386_lidt) | |
74 | #endif /* XEN */ | 86 | #endif /* XEN */ | |
75 | 87 | |||
76 | ENTRY(rcr3) | 88 | ENTRY(i386_rcr3) | |
77 | movl %cr3, %eax | 89 | movl %cr3, %eax | |
78 | ret | 90 | ret | |
79 | END(rcr3) | 91 | END(i386_rcr3) | |
80 | 92 | |||
81 | ENTRY(lcr4) | 93 | ENTRY(lcr4) | |
82 | movl 4(%esp), %eax | 94 | movl 4(%esp), %eax | |
83 | movl %eax, %cr4 | 95 | movl %eax, %cr4 | |
84 | ret | 96 | ret | |
85 | END(lcr4) | 97 | END(lcr4) | |
86 | 98 | |||
87 | ENTRY(rcr4) | 99 | ENTRY(rcr4) | |
88 | movl %cr4, %eax | 100 | movl %cr4, %eax | |
89 | ret | 101 | ret | |
90 | END(rcr4) | 102 | END(rcr4) | |
91 | 103 | |||
92 | ENTRY(x86_read_flags) | 104 | ENTRY(x86_read_flags) |
--- src/sys/arch/i386/i386/i386func.S 2016/11/27 14:49:21 1.18
+++ src/sys/arch/i386/i386/i386func.S 2018/12/22 21:27:22 1.19
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $ */ | 1 | /* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
@@ -26,194 +26,227 @@ | @@ -26,194 +26,227 @@ | |||
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Functions to provide access to i386-specific instructions. | 33 | * Functions to provide access to i386-specific instructions. | |
34 | * | 34 | * | |
35 | * These are _not_ shared with NetBSD/xen. | 35 | * These are _not_ shared with NetBSD/xen. | |
36 | */ | 36 | */ | |
37 | 37 | |||
38 | #include <machine/asm.h> | 38 | #include <machine/asm.h> | |
39 | __KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $"); | 39 | __KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $"); | |
40 | 40 | |||
41 | #include <machine/specialreg.h> | 41 | #include <machine/specialreg.h> | |
42 | #include <machine/segments.h> | 42 | #include <machine/segments.h> | |
43 | 43 | |||
44 | #include "assym.h" | 44 | #include "assym.h" | |
45 | 45 | |||
46 | ENTRY(invlpg) | 46 | /* | |
47 | * These functions below should always be accessed via the corresponding wrapper | |||
48 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
49 | * | |||
50 | * We use this rather roundabout method so that a runtime wrapper function may | |||
51 | * be made available for PVHVM, which could override both native and PV aliases | |||
52 | * and decide which to invoke at run time. | |||
53 | */ | |||
54 | ||||
55 | WEAK_ALIAS(invlpg, i386_invlpg) | |||
56 | WEAK_ALIAS(lldt, i386_lldt) | |||
57 | WEAK_ALIAS(ltr, i386_ltr) | |||
58 | WEAK_ALIAS(lcr0, i386_lcr0) | |||
59 | WEAK_ALIAS(rcr0, i386_rcr0) | |||
60 | WEAK_ALIAS(lcr3, i386_lcr3) | |||
61 | WEAK_ALIAS(tlbflush, i386_tlbflush) | |||
62 | WEAK_ALIAS(tlbflushg, i386_tlbflushg) | |||
63 | WEAK_ALIAS(rdr0, i386_rdr0) | |||
64 | WEAK_ALIAS(ldr0, i386_ldr0) | |||
65 | WEAK_ALIAS(rdr1, i386_rdr1) | |||
66 | WEAK_ALIAS(ldr1, i386_ldr1) | |||
67 | WEAK_ALIAS(rdr2, i386_rdr2) | |||
68 | WEAK_ALIAS(ldr2, i386_ldr2) | |||
69 | WEAK_ALIAS(rdr3, i386_rdr3) | |||
70 | WEAK_ALIAS(ldr3, i386_ldr3) | |||
71 | WEAK_ALIAS(rdr6, i386_rdr6) | |||
72 | WEAK_ALIAS(ldr6, i386_ldr6) | |||
73 | WEAK_ALIAS(rdr7, i386_rdr7) | |||
74 | WEAK_ALIAS(ldr7, i386_ldr7) | |||
75 | WEAK_ALIAS(rcr2, i386_rcr2) | |||
76 | WEAK_ALIAS(lcr2, i386_lcr2) | |||
77 | WEAK_ALIAS(wbinvd, i386_wbinvd) | |||
78 | ||||
79 | ENTRY(i386_invlpg) | |||
47 | movl 4(%esp), %eax | 80 | movl 4(%esp), %eax | |
48 | invlpg (%eax) | 81 | invlpg (%eax) | |
49 | ret | 82 | ret | |
50 | END(invlpg) | 83 | END(i386_invlpg) | |
51 | 84 | |||
52 | ENTRY(lldt) | 85 | ENTRY(i386_lldt) | |
53 | movl 4(%esp), %eax | 86 | movl 4(%esp), %eax | |
54 | cmpl %eax, CPUVAR(CURLDT) | 87 | cmpl %eax, CPUVAR(CURLDT) | |
55 | jne 1f | 88 | jne 1f | |
56 | ret | 89 | ret | |
57 | 1: | 90 | 1: | |
58 | movl %eax, CPUVAR(CURLDT) | 91 | movl %eax, CPUVAR(CURLDT) | |
59 | lldt %ax | 92 | lldt %ax | |
60 | ret | 93 | ret | |
61 | END(lldt) | 94 | END(i386_lldt) | |
62 | 95 | |||
63 | ENTRY(ltr) | 96 | ENTRY(i386_ltr) | |
64 | movl 4(%esp), %eax | 97 | movl 4(%esp), %eax | |
65 | ltr %ax | 98 | ltr %ax | |
66 | ret | 99 | ret | |
67 | END(ltr) | 100 | END(i386_ltr) | |
68 | 101 | |||
69 | ENTRY(lcr0) | 102 | ENTRY(i386_lcr0) | |
70 | movl 4(%esp), %eax | 103 | movl 4(%esp), %eax | |
71 | movl %eax, %cr0 | 104 | movl %eax, %cr0 | |
72 | ret | 105 | ret | |
73 | END(lcr0) | 106 | END(i386_lcr0) | |
74 | 107 | |||
75 | ENTRY(rcr0) | 108 | ENTRY(i386_rcr0) | |
76 | movl %cr0, %eax | 109 | movl %cr0, %eax | |
77 | ret | 110 | ret | |
78 | END(rcr0) | 111 | END(i386_rcr0) | |
79 | 112 | |||
80 | ENTRY(lcr3) | 113 | ENTRY(i386_lcr3) | |
81 | movl 4(%esp), %eax | 114 | movl 4(%esp), %eax | |
82 | movl %eax, %cr3 | 115 | movl %eax, %cr3 | |
83 | ret | 116 | ret | |
84 | END(lcr3) | 117 | END(i386_lcr3) | |
85 | 118 | |||
86 | /* | 119 | /* | |
87 | * Big hammer: flush all TLB entries, including ones from PTE's | 120 | * Big hammer: flush all TLB entries, including ones from PTE's | |
88 | * with the G bit set. This should only be necessary if TLB | 121 | * with the G bit set. This should only be necessary if TLB | |
89 | * shootdown falls far behind. | 122 | * shootdown falls far behind. | |
90 | * | 123 | * | |
91 | * Intel Architecture Software Developer's Manual, Volume 3, | 124 | * Intel Architecture Software Developer's Manual, Volume 3, | |
92 | * System Programming, section 9.10, "Invalidating the | 125 | * System Programming, section 9.10, "Invalidating the | |
93 | * Translation Lookaside Buffers (TLBS)": | 126 | * Translation Lookaside Buffers (TLBS)": | |
94 | * "The following operations invalidate all TLB entries, irrespective | 127 | * "The following operations invalidate all TLB entries, irrespective | |
95 | * of the setting of the G flag: | 128 | * of the setting of the G flag: | |
96 | * ... | 129 | * ... | |
97 | * "(P6 family processors only): Writing to control register CR4 to | 130 | * "(P6 family processors only): Writing to control register CR4 to | |
98 | * modify the PSE, PGE, or PAE flag." | 131 | * modify the PSE, PGE, or PAE flag." | |
99 | * | 132 | * | |
100 | * (the alternatives not quoted above are not an option here.) | 133 | * (the alternatives not quoted above are not an option here.) | |
101 | * | 134 | * | |
102 | * If PGE is not in use, we reload CR3. Check for the PGE feature | 135 | * If PGE is not in use, we reload CR3. Check for the PGE feature | |
103 | * first since i486 does not have CR4. Note: the feature flag may | 136 | * first since i486 does not have CR4. Note: the feature flag may | |
104 | * be present while the actual PGE functionality not yet enabled. | 137 | * be present while the actual PGE functionality not yet enabled. | |
105 | */ | 138 | */ | |
106 | ENTRY(tlbflushg) | 139 | ENTRY(i386_tlbflushg) | |
107 | testl $CPUID_PGE, _C_LABEL(cpu_feature) | 140 | testl $CPUID_PGE, _C_LABEL(cpu_feature) | |
108 | jz 1f | 141 | jz 1f | |
109 | movl %cr4, %eax | 142 | movl %cr4, %eax | |
110 | testl $CR4_PGE, %eax | 143 | testl $CR4_PGE, %eax | |
111 | jz 1f | 144 | jz 1f | |
112 | movl %eax, %edx | 145 | movl %eax, %edx | |
113 | andl $~CR4_PGE, %edx | 146 | andl $~CR4_PGE, %edx | |
114 | movl %edx, %cr4 | 147 | movl %edx, %cr4 | |
115 | movl %eax, %cr4 | 148 | movl %eax, %cr4 | |
116 | ret | 149 | ret | |
117 | END(tlbflushg) | 150 | END(i386_tlbflushg) | |
118 | 151 | |||
119 | ENTRY(tlbflush) | 152 | ENTRY(i386_tlbflush) | |
120 | 1: | 153 | 1: | |
121 | movl %cr3, %eax | 154 | movl %cr3, %eax | |
122 | movl %eax, %cr3 | 155 | movl %eax, %cr3 | |
123 | ret | 156 | ret | |
124 | END(tlbflush) | 157 | END(i386_tlbflush) | |
125 | 158 | |||
126 | ENTRY(ldr0) | 159 | ENTRY(i386_ldr0) | |
127 | movl 4(%esp), %eax | 160 | movl 4(%esp), %eax | |
128 | movl %eax, %dr0 | 161 | movl %eax, %dr0 | |
129 | ret | 162 | ret | |
130 | END(ldr0) | 163 | END(i386_ldr0) | |
131 | 164 | |||
132 | ENTRY(rdr0) | 165 | ENTRY(i386_rdr0) | |
133 | movl %dr0, %eax | 166 | movl %dr0, %eax | |
134 | ret | 167 | ret | |
135 | END(rdr0) | 168 | END(i386_rdr0) | |
136 | 169 | |||
137 | ENTRY(ldr1) | 170 | ENTRY(i386_ldr1) | |
138 | movl 4(%esp), %eax | 171 | movl 4(%esp), %eax | |
139 | movl %eax, %dr1 | 172 | movl %eax, %dr1 | |
140 | ret | 173 | ret | |
141 | END(ldr1) | 174 | END(i386_ldr1) | |
142 | 175 | |||
143 | ENTRY(rdr1) | 176 | ENTRY(i386_rdr1) | |
144 | movl %dr1, %eax | 177 | movl %dr1, %eax | |
145 | ret | 178 | ret | |
146 | END(rdr1) | 179 | END(i386_rdr1) | |
147 | 180 | |||
148 | ENTRY(ldr2) | 181 | ENTRY(i386_ldr2) | |
149 | movl 4(%esp), %eax | 182 | movl 4(%esp), %eax | |
150 | movl %eax, %dr2 | 183 | movl %eax, %dr2 | |
151 | ret | 184 | ret | |
152 | END(ldr2) | 185 | END(i386_ldr2) | |
153 | 186 | |||
154 | ENTRY(rdr2) | 187 | ENTRY(i386_rdr2) | |
155 | movl %dr2, %eax | 188 | movl %dr2, %eax | |
156 | ret | 189 | ret | |
157 | END(rdr2) | 190 | END(i386_rdr2) | |
158 | 191 | |||
159 | ENTRY(ldr3) | 192 | ENTRY(i386_ldr3) | |
160 | movl 4(%esp), %eax | 193 | movl 4(%esp), %eax | |
161 | movl %eax, %dr3 | 194 | movl %eax, %dr3 | |
162 | ret | 195 | ret | |
163 | END(ldr3) | 196 | END(i386_ldr3) | |
164 | 197 | |||
165 | ENTRY(rdr3) | 198 | ENTRY(i386_rdr3) | |
166 | movl %dr3, %eax | 199 | movl %dr3, %eax | |
167 | ret | 200 | ret | |
168 | END(rdr3) | 201 | END(i386_rdr3) | |
169 | 202 | |||
170 | ENTRY(ldr6) | 203 | ENTRY(i386_ldr6) | |
171 | movl 4(%esp), %eax | 204 | movl 4(%esp), %eax | |
172 | movl %eax, %dr6 | 205 | movl %eax, %dr6 | |
173 | ret | 206 | ret | |
174 | END(ldr6) | 207 | END(i386_ldr6) | |
175 | 208 | |||
176 | ENTRY(rdr6) | 209 | ENTRY(i386_rdr6) | |
177 | movl %dr6, %eax | 210 | movl %dr6, %eax | |
178 | ret | 211 | ret | |
179 | END(rdr6) | 212 | END(i386_rdr6) | |
180 | 213 | |||
181 | ENTRY(ldr7) | 214 | ENTRY(i386_ldr7) | |
182 | movl 4(%esp), %eax | 215 | movl 4(%esp), %eax | |
183 | movl %eax, %dr7 | 216 | movl %eax, %dr7 | |
184 | ret | 217 | ret | |
185 | END(ldr7) | 218 | END(i386_ldr7) | |
186 | 219 | |||
187 | ENTRY(rdr7) | 220 | ENTRY(i386_rdr7) | |
188 | movl %dr7, %eax | 221 | movl %dr7, %eax | |
189 | ret | 222 | ret | |
190 | END(rdr7) | 223 | END(i386_rdr7) | |
191 | 224 | |||
192 | ENTRY(rcr2) | 225 | ENTRY(i386_rcr2) | |
193 | movl %cr2, %eax | 226 | movl %cr2, %eax | |
194 | ret | 227 | ret | |
195 | END(rcr2) | 228 | END(i386_rcr2) | |
196 | 229 | |||
197 | ENTRY(lcr2) | 230 | ENTRY(i386_lcr2) | |
198 | movl 4(%esp), %eax | 231 | movl 4(%esp), %eax | |
199 | movl %eax, %cr2 | 232 | movl %eax, %cr2 | |
200 | ret | 233 | ret | |
201 | END(lcr2) | 234 | END(i386_lcr2) | |
202 | 235 | |||
203 | ENTRY(wbinvd) | 236 | ENTRY(i386_wbinvd) | |
204 | wbinvd | 237 | wbinvd | |
205 | ret | 238 | ret | |
206 | END(wbinvd) | 239 | END(i386_wbinvd) | |
207 | 240 | |||
208 | ENTRY(x86_disable_intr) | 241 | ENTRY(x86_disable_intr) | |
209 | cli | 242 | cli | |
210 | ret | 243 | ret | |
211 | END(x86_disable_intr) | 244 | END(x86_disable_intr) | |
212 | 245 | |||
213 | ENTRY(x86_enable_intr) | 246 | ENTRY(x86_enable_intr) | |
214 | sti | 247 | sti | |
215 | ret | 248 | ret | |
216 | END(x86_enable_intr) | 249 | END(x86_enable_intr) | |
217 | 250 | |||
218 | /* | 251 | /* | |
219 | * void lgdt(struct region_descriptor *rdp); | 252 | * void lgdt(struct region_descriptor *rdp); |
--- src/sys/arch/xen/x86/xenfunc.c 2018/10/18 04:17:18 1.22
+++ src/sys/arch/xen/x86/xenfunc.c 2018/12/22 21:27:22 1.23
@@ -1,14 +1,14 @@ | @@ -1,14 +1,14 @@ | |||
1 | /* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */ | 1 | /* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 2004 Christian Limpach. | 4 | * Copyright (c) 2004 Christian Limpach. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | 7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | 8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | 9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | 10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer. | 11 | * notice, this list of conditions and the following disclaimer. | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | 12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in the | 13 | * notice, this list of conditions and the following disclaimer in the | |
14 | * documentation and/or other materials provided with the distribution. | 14 | * documentation and/or other materials provided with the distribution. | |
@@ -16,55 +16,121 @@ | @@ -16,55 +16,121 @@ | |||
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 | */ | 26 | */ | |
27 | 27 | |||
28 | #include <sys/cdefs.h> | 28 | #include <sys/cdefs.h> | |
29 | __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $"); | 29 | __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $"); | |
30 | 30 | |||
31 | #include <sys/param.h> | 31 | #include <sys/param.h> | |
32 | 32 | |||
33 | #include <uvm/uvm_extern.h> | 33 | #include <uvm/uvm_extern.h> | |
34 | 34 | |||
35 | #include <machine/intr.h> | 35 | #include <machine/intr.h> | |
36 | #include <machine/vmparam.h> | 36 | #include <machine/vmparam.h> | |
37 | #include <machine/pmap.h> | 37 | #include <machine/pmap.h> | |
38 | #include <xen/xen.h> | 38 | #include <xen/xen.h> | |
39 | #include <xen/hypervisor.h> | 39 | #include <xen/hypervisor.h> | |
40 | //#include <xen/evtchn.h> | 40 | //#include <xen/evtchn.h> | |
41 | #include <xen/xenpmap.h> | 41 | #include <xen/xenpmap.h> | |
42 | #include <machine/pte.h> | 42 | #include <machine/pte.h> | |
43 | 43 | |||
44 | #define MAX_XEN_IDT 128 | 44 | #define MAX_XEN_IDT 128 | |
45 | 45 | |||
46 | void xen_set_ldt(vaddr_t, uint32_t); | 46 | void xen_set_ldt(vaddr_t, uint32_t); | |
47 | 47 | |||
48 | /* | |||
49 | * We don't need to export these declarations, since they are used via | |||
50 | * linker aliasing. They should always be accessed via the | |||
51 | * corresponding wrapper function names defined in | |||
52 | * x86/include/cpufunc.h and exported as __weak_alias() | |||
53 | * | |||
54 | * We use this rather roundabout method so that a runtime wrapper | |||
55 | * function may be made available for PVHVM, which could override both | |||
56 | * native and PV aliases and decide which to invoke at run time. | |||
57 | */ | |||
58 | ||||
59 | void xen_invlpg(vaddr_t); | |||
60 | void xen_lidt(struct region_descriptor *); | |||
61 | void xen_lldt(u_short); | |||
62 | void xen_ltr(u_short); | |||
63 | void xen_lcr0(u_long); | |||
64 | u_long xen_rcr0(void); | |||
65 | void xen_tlbflush(void); | |||
66 | void xen_tlbflushg(void); | |||
67 | register_t xen_rdr0(void); | |||
68 | void xen_ldr0(register_t); | |||
69 | register_t xen_rdr1(void); | |||
70 | void xen_ldr1(register_t); | |||
71 | register_t xen_rdr2(void); | |||
72 | void xen_ldr2(register_t); | |||
73 | register_t xen_rdr3(void); | |||
74 | void xen_ldr3(register_t); | |||
75 | register_t xen_rdr6(void); | |||
76 | void xen_ldr6(register_t); | |||
77 | register_t xen_rdr7(void); | |||
78 | void xen_ldr7(register_t); | |||
79 | void xen_wbinvd(void); | |||
80 | vaddr_t xen_rcr2(void); | |||
81 | ||||
82 | __weak_alias(invlpg, xen_invlpg); | |||
83 | __weak_alias(lidt, xen_lidt); | |||
84 | __weak_alias(lldt, xen_lldt); | |||
85 | __weak_alias(ltr, xen_ltr); | |||
86 | __weak_alias(lcr0, xen_lcr0); | |||
87 | __weak_alias(rcr0, xen_rcr0); | |||
88 | __weak_alias(tlbflush, xen_tlbflush); | |||
89 | __weak_alias(tlbflushg, xen_tlbflushg); | |||
90 | __weak_alias(rdr0, xen_rdr0); | |||
91 | __weak_alias(ldr0, xen_ldr0); | |||
92 | __weak_alias(rdr1, xen_rdr1); | |||
93 | __weak_alias(ldr1, xen_ldr1); | |||
94 | __weak_alias(rdr2, xen_rdr2); | |||
95 | __weak_alias(ldr2, xen_ldr2); | |||
96 | __weak_alias(rdr3, xen_rdr3); | |||
97 | __weak_alias(ldr3, xen_ldr3); | |||
98 | __weak_alias(rdr6, xen_rdr6); | |||
99 | __weak_alias(ldr6, xen_ldr6); | |||
100 | __weak_alias(rdr7, xen_rdr7); | |||
101 | __weak_alias(ldr7, xen_ldr7); | |||
102 | __weak_alias(wbinvd, xen_wbinvd); | |||
103 | __weak_alias(rcr2, xen_rcr2); | |||
104 | ||||
105 | #ifdef __x86_64__ | |||
106 | void xen_setusergs(int); | |||
107 | __weak_alias(setusergs, xen_setusergs); | |||
108 | #else | |||
109 | void xen_lcr3(vaddr_t); | |||
110 | __weak_alias(lcr3, xen_lcr3); | |||
111 | ||||
112 | #endif | |||
113 | ||||
48 | void | 114 | void | |
49 | invlpg(vaddr_t addr) | 115 | xen_invlpg(vaddr_t addr) | |
50 | { | 116 | { | |
51 | int s = splvm(); /* XXXSMP */ | 117 | int s = splvm(); /* XXXSMP */ | |
52 | xpq_queue_invlpg(addr); | 118 | xpq_queue_invlpg(addr); | |
53 | splx(s); | 119 | splx(s); | |
54 | } | 120 | } | |
55 | 121 | |||
56 | void | 122 | void | |
57 | lidt(struct region_descriptor *rd) | 123 | xen_lidt(struct region_descriptor *rd) | |
58 | { | 124 | { | |
59 | /* | 125 | /* | |
60 | * We need to do this because we can't assume kmem_alloc(9) | 126 | * We need to do this because we can't assume kmem_alloc(9) | |
61 | * will be available at the boot stage when this is called. | 127 | * will be available at the boot stage when this is called. | |
62 | */ | 128 | */ | |
63 | static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); | 129 | static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); | |
64 | memset(xen_idt_page, 0, PAGE_SIZE); | 130 | memset(xen_idt_page, 0, PAGE_SIZE); | |
65 | 131 | |||
66 | struct trap_info *xen_idt = (void * )xen_idt_page; | 132 | struct trap_info *xen_idt = (void * )xen_idt_page; | |
67 | int xen_idt_idx = 0; | 133 | int xen_idt_idx = 0; | |
68 | 134 | |||
69 | struct trap_info * idd = (void *) rd->rd_base; | 135 | struct trap_info * idd = (void *) rd->rd_base; | |
70 | const int nidt = rd->rd_limit / (sizeof *idd); | 136 | const int nidt = rd->rd_limit / (sizeof *idd); | |
@@ -89,177 +155,177 @@ lidt(struct region_descriptor *rd) | @@ -89,177 +155,177 @@ lidt(struct region_descriptor *rd) | |||
89 | #endif /* __x86_64 */ | 155 | #endif /* __x86_64 */ | |
90 | 156 | |||
91 | /* Hook it up in the hypervisor */ | 157 | /* Hook it up in the hypervisor */ | |
92 | if (HYPERVISOR_set_trap_table(xen_idt)) | 158 | if (HYPERVISOR_set_trap_table(xen_idt)) | |
93 | panic("HYPERVISOR_set_trap_table() failed"); | 159 | panic("HYPERVISOR_set_trap_table() failed"); | |
94 | 160 | |||
95 | #if defined(__x86_64__) | 161 | #if defined(__x86_64__) | |
96 | /* reset */ | 162 | /* reset */ | |
97 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); | 163 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); | |
98 | #endif /* __x86_64 */ | 164 | #endif /* __x86_64 */ | |
99 | } | 165 | } | |
100 | 166 | |||
101 | void | 167 | void | |
102 | lldt(u_short sel) | 168 | xen_lldt(u_short sel) | |
103 | { | 169 | { | |
104 | #ifndef __x86_64__ | 170 | #ifndef __x86_64__ | |
105 | struct cpu_info *ci; | 171 | struct cpu_info *ci; | |
106 | 172 | |||
107 | ci = curcpu(); | 173 | ci = curcpu(); | |
108 | 174 | |||
109 | if (ci->ci_curldt == sel) | 175 | if (ci->ci_curldt == sel) | |
110 | return; | 176 | return; | |
111 | if (sel == GSEL(GLDT_SEL, SEL_KPL)) | 177 | if (sel == GSEL(GLDT_SEL, SEL_KPL)) | |
112 | xen_set_ldt((vaddr_t)ldtstore, NLDT); | 178 | xen_set_ldt((vaddr_t)ldtstore, NLDT); | |
113 | else | 179 | else | |
114 | xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, | 180 | xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, | |
115 | ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); | 181 | ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); | |
116 | ci->ci_curldt = sel; | 182 | ci->ci_curldt = sel; | |
117 | #endif | 183 | #endif | |
118 | } | 184 | } | |
119 | 185 | |||
120 | void | 186 | void | |
121 | ltr(u_short sel) | 187 | xen_ltr(u_short sel) | |
122 | { | 188 | { | |
123 | panic("XXX ltr not supported\n"); | 189 | panic("XXX ltr not supported\n"); | |
124 | } | 190 | } | |
125 | 191 | |||
126 | void | 192 | void | |
127 | lcr0(u_long val) | 193 | xen_lcr0(u_long val) | |
128 | { | 194 | { | |
129 | panic("XXX lcr0 not supported\n"); | 195 | panic("XXX lcr0 not supported\n"); | |
130 | } | 196 | } | |
131 | 197 | |||
132 | u_long | 198 | u_long | |
133 | rcr0(void) | 199 | xen_rcr0(void) | |
134 | { | 200 | { | |
135 | /* XXX: handle X86_CR0_TS ? */ | 201 | /* XXX: handle X86_CR0_TS ? */ | |
136 | return 0; | 202 | return 0; | |
137 | } | 203 | } | |
138 | 204 | |||
139 | #ifndef __x86_64__ | 205 | #ifndef __x86_64__ | |
140 | void | 206 | void | |
141 | lcr3(vaddr_t val) | 207 | xen_lcr3(vaddr_t val) | |
142 | { | 208 | { | |
143 | int s = splvm(); /* XXXSMP */ | 209 | int s = splvm(); /* XXXSMP */ | |
144 | xpq_queue_pt_switch(xpmap_ptom_masked(val)); | 210 | xpq_queue_pt_switch(xpmap_ptom_masked(val)); | |
145 | splx(s); | 211 | splx(s); | |
146 | } | 212 | } | |
147 | #endif | 213 | #endif | |
148 | 214 | |||
149 | void | 215 | void | |
150 | tlbflush(void) | 216 | xen_tlbflush(void) | |
151 | { | 217 | { | |
152 | int s = splvm(); /* XXXSMP */ | 218 | int s = splvm(); /* XXXSMP */ | |
153 | xpq_queue_tlb_flush(); | 219 | xpq_queue_tlb_flush(); | |
154 | splx(s); | 220 | splx(s); | |
155 | } | 221 | } | |
156 | 222 | |||
157 | void | 223 | void | |
158 | tlbflushg(void) | 224 | xen_tlbflushg(void) | |
159 | { | 225 | { | |
160 | tlbflush(); | 226 | tlbflush(); | |
161 | } | 227 | } | |
162 | 228 | |||
163 | register_t | 229 | register_t | |
164 | rdr0(void) | 230 | xen_rdr0(void) | |
165 | { | 231 | { | |
166 | 232 | |||
167 | return HYPERVISOR_get_debugreg(0); | 233 | return HYPERVISOR_get_debugreg(0); | |
168 | } | 234 | } | |
169 | 235 | |||
170 | void | 236 | void | |
171 | ldr0(register_t val) | 237 | xen_ldr0(register_t val) | |
172 | { | 238 | { | |
173 | 239 | |||
174 | HYPERVISOR_set_debugreg(0, val); | 240 | HYPERVISOR_set_debugreg(0, val); | |
175 | } | 241 | } | |
176 | 242 | |||
177 | register_t | 243 | register_t | |
178 | rdr1(void) | 244 | xen_rdr1(void) | |
179 | { | 245 | { | |
180 | 246 | |||
181 | return HYPERVISOR_get_debugreg(1); | 247 | return HYPERVISOR_get_debugreg(1); | |
182 | } | 248 | } | |
183 | 249 | |||
184 | void | 250 | void | |
185 | ldr1(register_t val) | 251 | xen_ldr1(register_t val) | |
186 | { | 252 | { | |
187 | 253 | |||
188 | HYPERVISOR_set_debugreg(1, val); | 254 | HYPERVISOR_set_debugreg(1, val); | |
189 | } | 255 | } | |
190 | 256 | |||
191 | register_t | 257 | register_t | |
192 | rdr2(void) | 258 | xen_rdr2(void) | |
193 | { | 259 | { | |
194 | 260 | |||
195 | return HYPERVISOR_get_debugreg(2); | 261 | return HYPERVISOR_get_debugreg(2); | |
196 | } | 262 | } | |
197 | 263 | |||
198 | void | 264 | void | |
199 | ldr2(register_t val) | 265 | xen_ldr2(register_t val) | |
200 | { | 266 | { | |
201 | 267 | |||
202 | HYPERVISOR_set_debugreg(2, val); | 268 | HYPERVISOR_set_debugreg(2, val); | |
203 | } | 269 | } | |
204 | 270 | |||
205 | register_t | 271 | register_t | |
206 | rdr3(void) | 272 | xen_rdr3(void) | |
207 | { | 273 | { | |
208 | 274 | |||
209 | return HYPERVISOR_get_debugreg(3); | 275 | return HYPERVISOR_get_debugreg(3); | |
210 | } | 276 | } | |
211 | 277 | |||
212 | void | 278 | void | |
213 | ldr3(register_t val) | 279 | xen_ldr3(register_t val) | |
214 | { | 280 | { | |
215 | 281 | |||
216 | HYPERVISOR_set_debugreg(3, val); | 282 | HYPERVISOR_set_debugreg(3, val); | |
217 | } | 283 | } | |
218 | register_t | 284 | register_t | |
219 | rdr6(void) | 285 | xen_rdr6(void) | |
220 | { | 286 | { | |
221 | 287 | |||
222 | return HYPERVISOR_get_debugreg(6); | 288 | return HYPERVISOR_get_debugreg(6); | |
223 | } | 289 | } | |
224 | 290 | |||
225 | void | 291 | void | |
226 | ldr6(register_t val) | 292 | xen_ldr6(register_t val) | |
227 | { | 293 | { | |
228 | 294 | |||
229 | HYPERVISOR_set_debugreg(6, val); | 295 | HYPERVISOR_set_debugreg(6, val); | |
230 | } | 296 | } | |
231 | 297 | |||
232 | register_t | 298 | register_t | |
233 | rdr7(void) | 299 | xen_rdr7(void) | |
234 | { | 300 | { | |
235 | 301 | |||
236 | return HYPERVISOR_get_debugreg(7); | 302 | return HYPERVISOR_get_debugreg(7); | |
237 | } | 303 | } | |
238 | 304 | |||
239 | void | 305 | void | |
240 | ldr7(register_t val) | 306 | xen_ldr7(register_t val) | |
241 | { | 307 | { | |
242 | 308 | |||
243 | HYPERVISOR_set_debugreg(7, val); | 309 | HYPERVISOR_set_debugreg(7, val); | |
244 | } | 310 | } | |
245 | 311 | |||
246 | void | 312 | void | |
247 | wbinvd(void) | 313 | xen_wbinvd(void) | |
248 | { | 314 | { | |
249 | 315 | |||
250 | xpq_flush_cache(); | 316 | xpq_flush_cache(); | |
251 | } | 317 | } | |
252 | 318 | |||
253 | vaddr_t | 319 | vaddr_t | |
254 | rcr2(void) | 320 | xen_rcr2(void) | |
255 | { | 321 | { | |
256 | return curcpu()->ci_vcpu->arch.cr2; | 322 | return curcpu()->ci_vcpu->arch.cr2; | |
257 | } | 323 | } | |
258 | 324 | |||
259 | #ifdef __x86_64__ | 325 | #ifdef __x86_64__ | |
260 | void | 326 | void | |
261 | setusergs(int gssel) | 327 | xen_setusergs(int gssel) | |
262 | { | 328 | { | |
263 | HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); | 329 | HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); | |
264 | } | 330 | } | |
265 | #endif | 331 | #endif |