Introduce a weak alias method of exporting different implementations of the same API.

For example, the amd64 native implementation of invlpg() now becomes amd64_invlpg(), with a weak symbol export of invlpg(); the XEN implementation becomes xen_invlpg(), also weakly exported as invlpg().

Note that linking both in together without an override function named invlpg() would be a mistake: we have limited control over which of the two weak symbols would emerge as the finally exported invlpg(), so the wrong function could end up being exported. This change avoids that situation. A kernel that links both implementations would instead have to provide an override function invlpg() which passes the call on to the appropriate backing function (amd64_invlpg() when running natively, xen_invlpg() when running under XEN virtualisation) at run time.

This change does not introduce such a function, and therefore does not alter builds to include both the native and the XEN implementations in the same binary. That will be done later, with the introduction of XEN PVHVM mode, where precisely such a runtime switch is required.

There are no operational changes introduced by this change.
(cherry)
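As a hedged illustration of the scheme described above: the weak exports themselves are created in cpufunc.S with the existing WEAK_ALIAS() macro from <machine/asm.h>, e.g. WEAK_ALIAS(invlpg, amd64_invlpg). The C sketch below shows what a future PVHVM-style strong override of invlpg() could look like; it is not part of this commit, it assumes kernel context for vaddr_t, and the mode flag running_xen_pv is a hypothetical name used purely for illustration.

#include <sys/types.h>			/* vaddr_t (kernel context assumed) */

void	amd64_invlpg(vaddr_t);		/* native backend, weakly aliased as invlpg() */
void	xen_invlpg(vaddr_t);		/* XEN PV backend, weakly aliased as invlpg() */

extern int running_xen_pv;		/* hypothetical mode flag set at boot */

/*
 * Strong invlpg(): overrides both weak aliases when the native and XEN
 * implementations are linked into the same binary, and forwards the
 * call to the appropriate backing function at run time.
 */
void
invlpg(vaddr_t va)
{
	if (running_xen_pv)
		xen_invlpg(va);
	else
		amd64_invlpg(va);
}

Since the invlpg() exported by each backend is only a weak alias, a strong definition like this one takes precedence at link time.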
diff -r1.33 -r1.34 src/sys/arch/amd64/amd64/cpufunc.S
--- src/sys/arch/amd64/amd64/cpufunc.S 2018/07/21 06:09:13 1.33
+++ src/sys/arch/amd64/amd64/cpufunc.S 2018/12/22 21:27:22 1.34
@@ -1,766 +1,801 @@
1 | /* $NetBSD: cpufunc.S,v 1.33 2018/07/21 06:09:13 maxv Exp $ */ | 1 | /* $NetBSD: cpufunc.S,v 1.34 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Functions to provide access to i386-specific instructions. | 33 | * Functions to provide access to i386-specific instructions. | |
34 | */ | 34 | */ | |
35 | 35 | |||
36 | #include <sys/errno.h> | 36 | #include <sys/errno.h> | |
37 | 37 | |||
38 | #include <machine/asm.h> | 38 | #include <machine/asm.h> | |
39 | #include <machine/frameasm.h> | 39 | #include <machine/frameasm.h> | |
40 | #include <machine/specialreg.h> | 40 | #include <machine/specialreg.h> | |
41 | #include <machine/segments.h> | 41 | #include <machine/segments.h> | |
42 | 42 | |||
43 | #include "opt_xen.h" | 43 | #include "opt_xen.h" | |
44 | 44 | |||
45 | #include "assym.h" | 45 | #include "assym.h" | |
46 | 46 | |||
47 | /* Small and slow, so align less. */ | 47 | /* Small and slow, so align less. */ | |
48 | #undef _ALIGN_TEXT | 48 | #undef _ALIGN_TEXT | |
49 | #define _ALIGN_TEXT .align 8 | 49 | #define _ALIGN_TEXT .align 8 | |
50 | 50 | |||
51 | ENTRY(x86_lfence) | 51 | ENTRY(x86_lfence) | |
52 | lfence | 52 | lfence | |
53 | ret | 53 | ret | |
54 | END(x86_lfence) | 54 | END(x86_lfence) | |
55 | 55 | |||
56 | ENTRY(x86_sfence) | 56 | ENTRY(x86_sfence) | |
57 | sfence | 57 | sfence | |
58 | ret | 58 | ret | |
59 | END(x86_sfence) | 59 | END(x86_sfence) | |
60 | 60 | |||
61 | ENTRY(x86_mfence) | 61 | ENTRY(x86_mfence) | |
62 | mfence | 62 | mfence | |
63 | ret | 63 | ret | |
64 | END(x86_mfence) | 64 | END(x86_mfence) | |
65 | 65 | |||
66 | /* | |||
67 | * These functions below should always be accessed via the corresponding wrapper | |||
68 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
69 | * | |||
70 | * We use this rather roundabout method so that a runtime wrapper function may | |||
71 | * be made available for PVHVM, which could override both native and PV aliases | |||
72 | * and decide which to invoke at run time. | |||
73 | */ | |||
74 | ||||
75 | WEAK_ALIAS(invlpg, amd64_invlpg) | |||
76 | WEAK_ALIAS(lidt, amd64_lidt) | |||
77 | WEAK_ALIAS(lldt, amd64_lldt) | |||
78 | WEAK_ALIAS(ltr, amd64_ltr) | |||
79 | WEAK_ALIAS(lcr0, amd64_lcr0) | |||
80 | WEAK_ALIAS(rcr0, amd64_rcr0) | |||
81 | WEAK_ALIAS(rcr2, amd64_rcr2) | |||
82 | WEAK_ALIAS(lcr2, amd64_lcr2) | |||
83 | WEAK_ALIAS(rcr3, amd64_rcr3) | |||
84 | WEAK_ALIAS(lcr3, amd64_lcr3) | |||
85 | WEAK_ALIAS(tlbflush, amd64_tlbflush) | |||
86 | WEAK_ALIAS(tlbflushg, amd64_tlbflushg) | |||
87 | WEAK_ALIAS(rdr0, amd64_rdr0) | |||
88 | WEAK_ALIAS(ldr0, amd64_ldr0) | |||
89 | WEAK_ALIAS(rdr1, amd64_rdr1) | |||
90 | WEAK_ALIAS(ldr1, amd64_ldr1) | |||
91 | WEAK_ALIAS(rdr2, amd64_rdr2) | |||
92 | WEAK_ALIAS(ldr2, amd64_ldr2) | |||
93 | WEAK_ALIAS(rdr3, amd64_rdr3) | |||
94 | WEAK_ALIAS(ldr3, amd64_ldr3) | |||
95 | WEAK_ALIAS(rdr6, amd64_rdr6) | |||
96 | WEAK_ALIAS(ldr6, amd64_ldr6) | |||
97 | WEAK_ALIAS(rdr7, amd64_rdr7) | |||
98 | WEAK_ALIAS(ldr7, amd64_ldr7) | |||
99 | WEAK_ALIAS(wbinvd, amd64_wbinvd) | |||
100 | ||||
66 | #ifndef XEN | 101 | #ifndef XEN | |
67 | ENTRY(invlpg) | 102 | ENTRY(amd64_invlpg) | |
68 | invlpg (%rdi) | 103 | invlpg (%rdi) | |
69 | ret | 104 | ret | |
70 | END(invlpg) | 105 | END(amd64_invlpg) | |
71 | 106 | |||
72 | ENTRY(lidt) | 107 | ENTRY(amd64_lidt) | |
73 | lidt (%rdi) | 108 | lidt (%rdi) | |
74 | ret | 109 | ret | |
75 | END(lidt) | 110 | END(amd64_lidt) | |
76 | 111 | |||
77 | ENTRY(lldt) | 112 | ENTRY(amd64_lldt) | |
78 | cmpl %edi, CPUVAR(CURLDT) | 113 | cmpl %edi, CPUVAR(CURLDT) | |
79 | jne 1f | 114 | jne 1f | |
80 | ret | 115 | ret | |
81 | 1: | 116 | 1: | |
82 | movl %edi, CPUVAR(CURLDT) | 117 | movl %edi, CPUVAR(CURLDT) | |
83 | lldt %di | 118 | lldt %di | |
84 | ret | 119 | ret | |
85 | END(lldt) | 120 | END(amd64_lldt) | |
86 | 121 | |||
87 | ENTRY(ltr) | 122 | ENTRY(amd64_ltr) | |
88 | ltr %di | 123 | ltr %di | |
89 | ret | 124 | ret | |
90 | END(ltr) | 125 | END(amd64_ltr) | |
91 | 126 | |||
92 | ENTRY(lcr0) | 127 | ENTRY(amd64_lcr0) | |
93 | movq %rdi, %cr0 | 128 | movq %rdi, %cr0 | |
94 | ret | 129 | ret | |
95 | END(lcr0) | 130 | END(amd64_lcr0) | |
96 | 131 | |||
97 | ENTRY(rcr0) | 132 | ENTRY(amd64_rcr0) | |
98 | movq %cr0, %rax | 133 | movq %cr0, %rax | |
99 | ret | 134 | ret | |
100 | END(rcr0) | 135 | END(amd64_rcr0) | |
101 | 136 | |||
102 | ENTRY(lcr2) | 137 | ENTRY(amd64_lcr2) | |
103 | movq %rdi, %cr2 | 138 | movq %rdi, %cr2 | |
104 | ret | 139 | ret | |
105 | END(lcr2) | 140 | END(amd64_lcr2) | |
106 | 141 | |||
107 | ENTRY(rcr2) | 142 | ENTRY(amd64_rcr2) | |
108 | movq %cr2, %rax | 143 | movq %cr2, %rax | |
109 | ret | 144 | ret | |
110 | END(rcr2) | 145 | END(amd64_rcr2) | |
111 | 146 | |||
112 | ENTRY(lcr3) | 147 | ENTRY(amd64_lcr3) | |
113 | movq %rdi, %cr3 | 148 | movq %rdi, %cr3 | |
114 | ret | 149 | ret | |
115 | END(lcr3) | 150 | END(amd64_lcr3) | |
116 | 151 | |||
117 | ENTRY(rcr3) | 152 | ENTRY(amd64_rcr3) | |
118 | movq %cr3, %rax | 153 | movq %cr3, %rax | |
119 | ret | 154 | ret | |
120 | END(rcr3) | 155 | END(amd64_rcr3) | |
121 | #endif | 156 | #endif | |
122 | 157 | |||
123 | ENTRY(lcr4) | 158 | ENTRY(lcr4) | |
124 | movq %rdi, %cr4 | 159 | movq %rdi, %cr4 | |
125 | ret | 160 | ret | |
126 | END(lcr4) | 161 | END(lcr4) | |
127 | 162 | |||
128 | ENTRY(rcr4) | 163 | ENTRY(rcr4) | |
129 | movq %cr4, %rax | 164 | movq %cr4, %rax | |
130 | ret | 165 | ret | |
131 | END(rcr4) | 166 | END(rcr4) | |
132 | 167 | |||
133 | ENTRY(lcr8) | 168 | ENTRY(lcr8) | |
134 | movq %rdi, %cr8 | 169 | movq %rdi, %cr8 | |
135 | ret | 170 | ret | |
136 | END(lcr8) | 171 | END(lcr8) | |
137 | 172 | |||
138 | ENTRY(rcr8) | 173 | ENTRY(rcr8) | |
139 | movq %cr8, %rax | 174 | movq %cr8, %rax | |
140 | ret | 175 | ret | |
141 | END(rcr8) | 176 | END(rcr8) | |
142 | 177 | |||
143 | /* | 178 | /* | |
144 | * Big hammer: flush all TLB entries, including ones from PTE's | 179 | * Big hammer: flush all TLB entries, including ones from PTE's | |
145 | * with the G bit set. This should only be necessary if TLB | 180 | * with the G bit set. This should only be necessary if TLB | |
146 | * shootdown falls far behind. | 181 | * shootdown falls far behind. | |
147 | * | 182 | * | |
148 | * Intel Architecture Software Developer's Manual, Volume 3, | 183 | * Intel Architecture Software Developer's Manual, Volume 3, | |
149 | * System Programming, section 9.10, "Invalidating the | 184 | * System Programming, section 9.10, "Invalidating the | |
150 | * Translation Lookaside Buffers (TLBS)": | 185 | * Translation Lookaside Buffers (TLBS)": | |
151 | * "The following operations invalidate all TLB entries, irrespective | 186 | * "The following operations invalidate all TLB entries, irrespective | |
152 | * of the setting of the G flag: | 187 | * of the setting of the G flag: | |
153 | * ... | 188 | * ... | |
154 | * "(P6 family processors only): Writing to control register CR4 to | 189 | * "(P6 family processors only): Writing to control register CR4 to | |
155 | * modify the PSE, PGE, or PAE flag." | 190 | * modify the PSE, PGE, or PAE flag." | |
156 | * | 191 | * | |
157 | * (the alternatives not quoted above are not an option here.) | 192 | * (the alternatives not quoted above are not an option here.) | |
158 | * | 193 | * | |
159 | * If PGE is not in use, we reload CR3. | 194 | * If PGE is not in use, we reload CR3. | |
160 | */ | 195 | */ | |
161 | #ifndef XEN | 196 | #ifndef XEN | |
162 | ENTRY(tlbflushg) | 197 | ENTRY(amd64_tlbflushg) | |
163 | movq %cr4, %rax | 198 | movq %cr4, %rax | |
164 | testq $CR4_PGE, %rax | 199 | testq $CR4_PGE, %rax | |
165 | jz 1f | 200 | jz 1f | |
166 | movq %rax, %rdx | 201 | movq %rax, %rdx | |
167 | andq $~CR4_PGE, %rdx | 202 | andq $~CR4_PGE, %rdx | |
168 | movq %rdx, %cr4 | 203 | movq %rdx, %cr4 | |
169 | movq %rax, %cr4 | 204 | movq %rax, %cr4 | |
170 | ret | 205 | ret | |
171 | END(tlbflushg) | 206 | END(amd64_tlbflushg) | |
172 | 207 | |||
173 | ENTRY(tlbflush) | 208 | ENTRY(amd64_tlbflush) | |
174 | 1: | 209 | 1: | |
175 | movq %cr3, %rax | 210 | movq %cr3, %rax | |
176 | movq %rax, %cr3 | 211 | movq %rax, %cr3 | |
177 | ret | 212 | ret | |
178 | END(tlbflush) | 213 | END(amd64_tlbflush) | |
179 | 214 | |||
180 | ENTRY(ldr0) | 215 | ENTRY(amd64_ldr0) | |
181 | movq %rdi, %dr0 | 216 | movq %rdi, %dr0 | |
182 | ret | 217 | ret | |
183 | END(ldr0) | 218 | END(amd64_ldr0) | |
184 | 219 | |||
185 | ENTRY(rdr0) | 220 | ENTRY(amd64_rdr0) | |
186 | movq %dr0, %rax | 221 | movq %dr0, %rax | |
187 | ret | 222 | ret | |
188 | END(rdr0) | 223 | END(amd64_rdr0) | |
189 | 224 | |||
190 | ENTRY(ldr1) | 225 | ENTRY(amd64_ldr1) | |
191 | movq %rdi, %dr1 | 226 | movq %rdi, %dr1 | |
192 | ret | 227 | ret | |
193 | END(ldr1) | 228 | END(amd64_ldr1) | |
194 | 229 | |||
195 | ENTRY(rdr1) | 230 | ENTRY(amd64_rdr1) | |
196 | movq %dr1, %rax | 231 | movq %dr1, %rax | |
197 | ret | 232 | ret | |
198 | END(rdr1) | 233 | END(amd64_rdr1) | |
199 | 234 | |||
200 | ENTRY(ldr2) | 235 | ENTRY(amd64_ldr2) | |
201 | movq %rdi, %dr2 | 236 | movq %rdi, %dr2 | |
202 | ret | 237 | ret | |
203 | END(ldr2) | 238 | END(amd64_ldr2) | |
204 | 239 | |||
205 | ENTRY(rdr2) | 240 | ENTRY(amd64_rdr2) | |
206 | movq %dr2, %rax | 241 | movq %dr2, %rax | |
207 | ret | 242 | ret | |
208 | END(rdr2) | 243 | END(amd64_rdr2) | |
209 | 244 | |||
210 | ENTRY(ldr3) | 245 | ENTRY(amd64_ldr3) | |
211 | movq %rdi, %dr3 | 246 | movq %rdi, %dr3 | |
212 | ret | 247 | ret | |
213 | END(ldr3) | 248 | END(amd64_ldr3) | |
214 | 249 | |||
215 | ENTRY(rdr3) | 250 | ENTRY(amd64_rdr3) | |
216 | movq %dr3, %rax | 251 | movq %dr3, %rax | |
217 | ret | 252 | ret | |
218 | END(rdr3) | 253 | END(amd64_rdr3) | |
219 | 254 | |||
220 | ENTRY(ldr6) | 255 | ENTRY(amd64_ldr6) | |
221 | movq %rdi, %dr6 | 256 | movq %rdi, %dr6 | |
222 | ret | 257 | ret | |
223 | END(ldr6) | 258 | END(amd64_ldr6) | |
224 | 259 | |||
225 | ENTRY(rdr6) | 260 | ENTRY(amd64_rdr6) | |
226 | movq %dr6, %rax | 261 | movq %dr6, %rax | |
227 | ret | 262 | ret | |
228 | END(rdr6) | 263 | END(amd64_rdr6) | |
229 | 264 | |||
230 | ENTRY(ldr7) | 265 | ENTRY(amd64_ldr7) | |
231 | movq %rdi, %dr7 | 266 | movq %rdi, %dr7 | |
232 | ret | 267 | ret | |
233 | END(ldr7) | 268 | END(amd64_ldr7) | |
234 | 269 | |||
235 | ENTRY(rdr7) | 270 | ENTRY(amd64_rdr7) | |
236 | movq %dr7, %rax | 271 | movq %dr7, %rax | |
237 | ret | 272 | ret | |
238 | END(rdr7) | 273 | END(amd64_rdr7) | |
239 | 274 | |||
240 | ENTRY(x86_disable_intr) | 275 | ENTRY(x86_disable_intr) | |
241 | cli | 276 | cli | |
242 | ret | 277 | ret | |
243 | END(x86_disable_intr) | 278 | END(x86_disable_intr) | |
244 | 279 | |||
245 | ENTRY(x86_enable_intr) | 280 | ENTRY(x86_enable_intr) | |
246 | sti | 281 | sti | |
247 | ret | 282 | ret | |
248 | END(x86_enable_intr) | 283 | END(x86_enable_intr) | |
249 | 284 | |||
250 | ENTRY(x86_read_flags) | 285 | ENTRY(x86_read_flags) | |
251 | pushfq | 286 | pushfq | |
252 | popq %rax | 287 | popq %rax | |
253 | ret | 288 | ret | |
254 | END(x86_read_flags) | 289 | END(x86_read_flags) | |
255 | 290 | |||
256 | STRONG_ALIAS(x86_read_psl,x86_read_flags) | 291 | STRONG_ALIAS(x86_read_psl,x86_read_flags) | |
257 | 292 | |||
258 | ENTRY(x86_write_flags) | 293 | ENTRY(x86_write_flags) | |
259 | pushq %rdi | 294 | pushq %rdi | |
260 | popfq | 295 | popfq | |
261 | ret | 296 | ret | |
262 | END(x86_write_flags) | 297 | END(x86_write_flags) | |
263 | 298 | |||
264 | STRONG_ALIAS(x86_write_psl,x86_write_flags) | 299 | STRONG_ALIAS(x86_write_psl,x86_write_flags) | |
265 | #endif /* XEN */ | 300 | #endif /* XEN */ | |
266 | 301 | |||
267 | ENTRY(rdmsr) | 302 | ENTRY(rdmsr) | |
268 | movq %rdi, %rcx | 303 | movq %rdi, %rcx | |
269 | xorq %rax, %rax | 304 | xorq %rax, %rax | |
270 | rdmsr | 305 | rdmsr | |
271 | shlq $32, %rdx | 306 | shlq $32, %rdx | |
272 | orq %rdx, %rax | 307 | orq %rdx, %rax | |
273 | ret | 308 | ret | |
274 | END(rdmsr) | 309 | END(rdmsr) | |
275 | 310 | |||
276 | ENTRY(wrmsr) | 311 | ENTRY(wrmsr) | |
277 | movq %rdi, %rcx | 312 | movq %rdi, %rcx | |
278 | movq %rsi, %rax | 313 | movq %rsi, %rax | |
279 | movq %rsi, %rdx | 314 | movq %rsi, %rdx | |
280 | shrq $32, %rdx | 315 | shrq $32, %rdx | |
281 | wrmsr | 316 | wrmsr | |
282 | ret | 317 | ret | |
283 | END(wrmsr) | 318 | END(wrmsr) | |
284 | 319 | |||
285 | ENTRY(rdmsr_locked) | 320 | ENTRY(rdmsr_locked) | |
286 | movq %rdi, %rcx | 321 | movq %rdi, %rcx | |
287 | xorq %rax, %rax | 322 | xorq %rax, %rax | |
288 | movl $OPTERON_MSR_PASSCODE, %edi | 323 | movl $OPTERON_MSR_PASSCODE, %edi | |
289 | rdmsr | 324 | rdmsr | |
290 | shlq $32, %rdx | 325 | shlq $32, %rdx | |
291 | orq %rdx, %rax | 326 | orq %rdx, %rax | |
292 | ret | 327 | ret | |
293 | END(rdmsr_locked) | 328 | END(rdmsr_locked) | |
294 | 329 | |||
295 | ENTRY(wrmsr_locked) | 330 | ENTRY(wrmsr_locked) | |
296 | movq %rdi, %rcx | 331 | movq %rdi, %rcx | |
297 | movq %rsi, %rax | 332 | movq %rsi, %rax | |
298 | movq %rsi, %rdx | 333 | movq %rsi, %rdx | |
299 | shrq $32, %rdx | 334 | shrq $32, %rdx | |
300 | movl $OPTERON_MSR_PASSCODE, %edi | 335 | movl $OPTERON_MSR_PASSCODE, %edi | |
301 | wrmsr | 336 | wrmsr | |
302 | ret | 337 | ret | |
303 | END(wrmsr_locked) | 338 | END(wrmsr_locked) | |
304 | 339 | |||
305 | /* | 340 | /* | |
306 | * Support for reading MSRs in the safe manner (returns EFAULT on fault) | 341 | * Support for reading MSRs in the safe manner (returns EFAULT on fault) | |
307 | */ | 342 | */ | |
308 | /* int rdmsr_safe(u_int msr, uint64_t *data) */ | 343 | /* int rdmsr_safe(u_int msr, uint64_t *data) */ | |
309 | ENTRY(rdmsr_safe) | 344 | ENTRY(rdmsr_safe) | |
310 | movq CPUVAR(CURLWP), %r8 | 345 | movq CPUVAR(CURLWP), %r8 | |
311 | movq L_PCB(%r8), %r8 | 346 | movq L_PCB(%r8), %r8 | |
312 | movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8) | 347 | movq $_C_LABEL(msr_onfault), PCB_ONFAULT(%r8) | |
313 | 348 | |||
314 | movl %edi, %ecx /* u_int msr */ | 349 | movl %edi, %ecx /* u_int msr */ | |
315 | rdmsr /* Read MSR pointed by %ecx. Returns | 350 | rdmsr /* Read MSR pointed by %ecx. Returns | |
316 | hi byte in edx, lo in %eax */ | 351 | hi byte in edx, lo in %eax */ | |
317 | salq $32, %rdx /* sign-shift %rdx left */ | 352 | salq $32, %rdx /* sign-shift %rdx left */ | |
318 | movl %eax, %eax /* zero-extend %eax -> %rax */ | 353 | movl %eax, %eax /* zero-extend %eax -> %rax */ | |
319 | orq %rdx, %rax | 354 | orq %rdx, %rax | |
320 | movq %rax, (%rsi) /* *data */ | 355 | movq %rax, (%rsi) /* *data */ | |
321 | xorq %rax, %rax /* "no error" */ | 356 | xorq %rax, %rax /* "no error" */ | |
322 | 357 | |||
323 | movq %rax, PCB_ONFAULT(%r8) | 358 | movq %rax, PCB_ONFAULT(%r8) | |
324 | ret | 359 | ret | |
325 | END(rdmsr_safe) | 360 | END(rdmsr_safe) | |
326 | 361 | |||
327 | ENTRY(rdxcr) | 362 | ENTRY(rdxcr) | |
328 | movq %rdi, %rcx | 363 | movq %rdi, %rcx | |
329 | xgetbv | 364 | xgetbv | |
330 | shlq $32, %rdx | 365 | shlq $32, %rdx | |
331 | orq %rdx, %rax | 366 | orq %rdx, %rax | |
332 | ret | 367 | ret | |
333 | END(rdxcr) | 368 | END(rdxcr) | |
334 | 369 | |||
335 | ENTRY(wrxcr) | 370 | ENTRY(wrxcr) | |
336 | movq %rdi, %rcx | 371 | movq %rdi, %rcx | |
337 | movq %rsi, %rax | 372 | movq %rsi, %rax | |
338 | movq %rsi, %rdx | 373 | movq %rsi, %rdx | |
339 | shrq $32, %rdx | 374 | shrq $32, %rdx | |
340 | xsetbv | 375 | xsetbv | |
341 | ret | 376 | ret | |
342 | END(wrxcr) | 377 | END(wrxcr) | |
343 | 378 | |||
344 | /* | 379 | /* | |
345 | * MSR operations fault handler | 380 | * MSR operations fault handler | |
346 | */ | 381 | */ | |
347 | ENTRY(msr_onfault) | 382 | ENTRY(msr_onfault) | |
348 | movq CPUVAR(CURLWP), %r8 | 383 | movq CPUVAR(CURLWP), %r8 | |
349 | movq L_PCB(%r8), %r8 | 384 | movq L_PCB(%r8), %r8 | |
350 | movq $0, PCB_ONFAULT(%r8) | 385 | movq $0, PCB_ONFAULT(%r8) | |
351 | movl $EFAULT, %eax | 386 | movl $EFAULT, %eax | |
352 | ret | 387 | ret | |
353 | END(msr_onfault) | 388 | END(msr_onfault) | |
354 | 389 | |||
355 | #ifndef XEN | 390 | #ifndef XEN | |
356 | ENTRY(wbinvd) | 391 | ENTRY(wbinvd) | |
357 | wbinvd | 392 | wbinvd | |
358 | ret | 393 | ret | |
359 | END(wbinvd) | 394 | END(wbinvd) | |
360 | #endif | 395 | #endif | |
361 | 396 | |||
362 | ENTRY(cpu_counter) | 397 | ENTRY(cpu_counter) | |
363 | xorq %rax, %rax | 398 | xorq %rax, %rax | |
364 | rdtsc | 399 | rdtsc | |
365 | shlq $32, %rdx | 400 | shlq $32, %rdx | |
366 | orq %rdx, %rax | 401 | orq %rdx, %rax | |
367 | addq CPUVAR(CC_SKEW), %rax | 402 | addq CPUVAR(CC_SKEW), %rax | |
368 | ret | 403 | ret | |
369 | END(cpu_counter) | 404 | END(cpu_counter) | |
370 | 405 | |||
371 | ENTRY(cpu_counter32) | 406 | ENTRY(cpu_counter32) | |
372 | rdtsc | 407 | rdtsc | |
373 | addl CPUVAR(CC_SKEW), %eax | 408 | addl CPUVAR(CC_SKEW), %eax | |
374 | ret | 409 | ret | |
375 | END(cpu_counter32) | 410 | END(cpu_counter32) | |
376 | 411 | |||
377 | ENTRY(rdpmc) | 412 | ENTRY(rdpmc) | |
378 | movq %rdi, %rcx | 413 | movq %rdi, %rcx | |
379 | xorq %rax, %rax | 414 | xorq %rax, %rax | |
380 | rdpmc | 415 | rdpmc | |
381 | shlq $32, %rdx | 416 | shlq $32, %rdx | |
382 | orq %rdx, %rax | 417 | orq %rdx, %rax | |
383 | ret | 418 | ret | |
384 | END(rdpmc) | 419 | END(rdpmc) | |
385 | 420 | |||
386 | ENTRY(rdtsc) | 421 | ENTRY(rdtsc) | |
387 | xorq %rax,%rax | 422 | xorq %rax,%rax | |
388 | rdtsc | 423 | rdtsc | |
389 | shlq $32,%rdx | 424 | shlq $32,%rdx | |
390 | orq %rdx,%rax | 425 | orq %rdx,%rax | |
391 | ret | 426 | ret | |
392 | END(rdtsc) | 427 | END(rdtsc) | |
393 | 428 | |||
394 | ENTRY(breakpoint) | 429 | ENTRY(breakpoint) | |
395 | pushq %rbp | 430 | pushq %rbp | |
396 | movq %rsp, %rbp | 431 | movq %rsp, %rbp | |
397 | int $0x03 /* paranoid, not 'int3' */ | 432 | int $0x03 /* paranoid, not 'int3' */ | |
398 | leave | 433 | leave | |
399 | ret | 434 | ret | |
400 | END(breakpoint) | 435 | END(breakpoint) | |
401 | 436 | |||
402 | ENTRY(x86_curcpu) | 437 | ENTRY(x86_curcpu) | |
403 | movq %gs:(CPU_INFO_SELF), %rax | 438 | movq %gs:(CPU_INFO_SELF), %rax | |
404 | ret | 439 | ret | |
405 | END(x86_curcpu) | 440 | END(x86_curcpu) | |
406 | 441 | |||
407 | ENTRY(x86_curlwp) | 442 | ENTRY(x86_curlwp) | |
408 | movq %gs:(CPU_INFO_CURLWP), %rax | 443 | movq %gs:(CPU_INFO_CURLWP), %rax | |
409 | ret | 444 | ret | |
410 | END(x86_curlwp) | 445 | END(x86_curlwp) | |
411 | 446 | |||
412 | ENTRY(cpu_set_curpri) | 447 | ENTRY(cpu_set_curpri) | |
413 | movl %edi, %gs:(CPU_INFO_CURPRIORITY) | 448 | movl %edi, %gs:(CPU_INFO_CURPRIORITY) | |
414 | ret | 449 | ret | |
415 | END(cpu_set_curpri) | 450 | END(cpu_set_curpri) | |
416 | 451 | |||
417 | ENTRY(__byte_swap_u32_variable) | 452 | ENTRY(__byte_swap_u32_variable) | |
418 | movl %edi, %eax | 453 | movl %edi, %eax | |
419 | bswapl %eax | 454 | bswapl %eax | |
420 | ret | 455 | ret | |
421 | END(__byte_swap_u32_variable) | 456 | END(__byte_swap_u32_variable) | |
422 | 457 | |||
423 | ENTRY(__byte_swap_u16_variable) | 458 | ENTRY(__byte_swap_u16_variable) | |
424 | movl %edi, %eax | 459 | movl %edi, %eax | |
425 | xchgb %al, %ah | 460 | xchgb %al, %ah | |
426 | ret | 461 | ret | |
427 | END(__byte_swap_u16_variable) | 462 | END(__byte_swap_u16_variable) | |
428 | 463 | |||
429 | /* | 464 | /* | |
430 | * void lgdt(struct region_descriptor *rdp); | 465 | * void lgdt(struct region_descriptor *rdp); | |
431 | * | 466 | * | |
432 | * Load a new GDT pointer (and do any necessary cleanup). | 467 | * Load a new GDT pointer (and do any necessary cleanup). | |
433 | * XXX It's somewhat questionable whether reloading all the segment registers | 468 | * XXX It's somewhat questionable whether reloading all the segment registers | |
434 | * is necessary, since the actual descriptor data is not changed except by | 469 | * is necessary, since the actual descriptor data is not changed except by | |
435 | * process creation and exit, both of which clean up via task switches. | 470 | * process creation and exit, both of which clean up via task switches. | |
436 | */ | 471 | */ | |
437 | #ifndef XEN | 472 | #ifndef XEN | |
438 | ENTRY(lgdt) | 473 | ENTRY(lgdt) | |
439 | /* Reload the descriptor table. */ | 474 | /* Reload the descriptor table. */ | |
440 | movq %rdi,%rax | 475 | movq %rdi,%rax | |
441 | lgdt (%rax) | 476 | lgdt (%rax) | |
442 | /* Flush the prefetch q. */ | 477 | /* Flush the prefetch q. */ | |
443 | jmp 1f | 478 | jmp 1f | |
444 | nop | 479 | nop | |
445 | 1: jmp _C_LABEL(lgdt_finish) | 480 | 1: jmp _C_LABEL(lgdt_finish) | |
446 | END(lgdt) | 481 | END(lgdt) | |
447 | #endif | 482 | #endif | |
448 | 483 | |||
449 | /* | 484 | /* | |
450 | * void lgdt_finish(void); | 485 | * void lgdt_finish(void); | |
451 | * Reload segments after a GDT change | 486 | * Reload segments after a GDT change | |
452 | */ | 487 | */ | |
453 | ENTRY(lgdt_finish) | 488 | ENTRY(lgdt_finish) | |
454 | movl $GSEL(GDATA_SEL, SEL_KPL),%eax | 489 | movl $GSEL(GDATA_SEL, SEL_KPL),%eax | |
455 | movl %eax,%ds | 490 | movl %eax,%ds | |
456 | movl %eax,%es | 491 | movl %eax,%es | |
457 | movl %eax,%ss | 492 | movl %eax,%ss | |
458 | jmp _C_LABEL(x86_flush) | 493 | jmp _C_LABEL(x86_flush) | |
459 | END(lgdt_finish) | 494 | END(lgdt_finish) | |
460 | 495 | |||
461 | /* | 496 | /* | |
462 | * void x86_flush() | 497 | * void x86_flush() | |
463 | * | 498 | * | |
464 | * Flush instruction pipelines by doing an intersegment (far) return. | 499 | * Flush instruction pipelines by doing an intersegment (far) return. | |
465 | */ | 500 | */ | |
466 | ENTRY(x86_flush) | 501 | ENTRY(x86_flush) | |
467 | popq %rax | 502 | popq %rax | |
468 | pushq $GSEL(GCODE_SEL, SEL_KPL) | 503 | pushq $GSEL(GCODE_SEL, SEL_KPL) | |
469 | pushq %rax | 504 | pushq %rax | |
470 | lretq | 505 | lretq | |
471 | END(x86_flush) | 506 | END(x86_flush) | |
472 | 507 | |||
473 | /* Waits - set up stack frame. */ | 508 | /* Waits - set up stack frame. */ | |
474 | ENTRY(x86_hlt) | 509 | ENTRY(x86_hlt) | |
475 | pushq %rbp | 510 | pushq %rbp | |
476 | movq %rsp, %rbp | 511 | movq %rsp, %rbp | |
477 | hlt | 512 | hlt | |
478 | leave | 513 | leave | |
479 | ret | 514 | ret | |
480 | END(x86_hlt) | 515 | END(x86_hlt) | |
481 | 516 | |||
482 | /* Waits - set up stack frame. */ | 517 | /* Waits - set up stack frame. */ | |
483 | ENTRY(x86_stihlt) | 518 | ENTRY(x86_stihlt) | |
484 | pushq %rbp | 519 | pushq %rbp | |
485 | movq %rsp, %rbp | 520 | movq %rsp, %rbp | |
486 | sti | 521 | sti | |
487 | hlt | 522 | hlt | |
488 | leave | 523 | leave | |
489 | ret | 524 | ret | |
490 | END(x86_stihlt) | 525 | END(x86_stihlt) | |
491 | 526 | |||
492 | ENTRY(x86_monitor) | 527 | ENTRY(x86_monitor) | |
493 | movq %rdi, %rax | 528 | movq %rdi, %rax | |
494 | movq %rsi, %rcx | 529 | movq %rsi, %rcx | |
495 | monitor %rax, %rcx, %rdx | 530 | monitor %rax, %rcx, %rdx | |
496 | ret | 531 | ret | |
497 | END(x86_monitor) | 532 | END(x86_monitor) | |
498 | 533 | |||
499 | /* Waits - set up stack frame. */ | 534 | /* Waits - set up stack frame. */ | |
500 | ENTRY(x86_mwait) | 535 | ENTRY(x86_mwait) | |
501 | pushq %rbp | 536 | pushq %rbp | |
502 | movq %rsp, %rbp | 537 | movq %rsp, %rbp | |
503 | movq %rdi, %rax | 538 | movq %rdi, %rax | |
504 | movq %rsi, %rcx | 539 | movq %rsi, %rcx | |
505 | mwait %rax, %rcx | 540 | mwait %rax, %rcx | |
506 | leave | 541 | leave | |
507 | ret | 542 | ret | |
508 | END(x86_mwait) | 543 | END(x86_mwait) | |
509 | 544 | |||
510 | ENTRY(x86_pause) | 545 | ENTRY(x86_pause) | |
511 | pause | 546 | pause | |
512 | ret | 547 | ret | |
513 | END(x86_pause) | 548 | END(x86_pause) | |
514 | 549 | |||
515 | ENTRY(x86_cpuid2) | 550 | ENTRY(x86_cpuid2) | |
516 | movq %rbx, %r8 | 551 | movq %rbx, %r8 | |
517 | movq %rdi, %rax | 552 | movq %rdi, %rax | |
518 | movq %rsi, %rcx | 553 | movq %rsi, %rcx | |
519 | movq %rdx, %rsi | 554 | movq %rdx, %rsi | |
520 | cpuid | 555 | cpuid | |
521 | movl %eax, 0(%rsi) | 556 | movl %eax, 0(%rsi) | |
522 | movl %ebx, 4(%rsi) | 557 | movl %ebx, 4(%rsi) | |
523 | movl %ecx, 8(%rsi) | 558 | movl %ecx, 8(%rsi) | |
524 | movl %edx, 12(%rsi) | 559 | movl %edx, 12(%rsi) | |
525 | movq %r8, %rbx | 560 | movq %r8, %rbx | |
526 | ret | 561 | ret | |
527 | END(x86_cpuid2) | 562 | END(x86_cpuid2) | |
528 | 563 | |||
529 | ENTRY(x86_getss) | 564 | ENTRY(x86_getss) | |
530 | movl %ss, %eax | 565 | movl %ss, %eax | |
531 | ret | 566 | ret | |
532 | END(x86_getss) | 567 | END(x86_getss) | |
533 | 568 | |||
534 | ENTRY(fldcw) | 569 | ENTRY(fldcw) | |
535 | fldcw (%rdi) | 570 | fldcw (%rdi) | |
536 | ret | 571 | ret | |
537 | END(fldcw) | 572 | END(fldcw) | |
538 | 573 | |||
539 | ENTRY(fnclex) | 574 | ENTRY(fnclex) | |
540 | fnclex | 575 | fnclex | |
541 | ret | 576 | ret | |
542 | END(fnclex) | 577 | END(fnclex) | |
543 | 578 | |||
544 | ENTRY(fninit) | 579 | ENTRY(fninit) | |
545 | fninit | 580 | fninit | |
546 | ret | 581 | ret | |
547 | END(fninit) | 582 | END(fninit) | |
548 | 583 | |||
549 | ENTRY(fnsave) | 584 | ENTRY(fnsave) | |
550 | fnsave (%rdi) | 585 | fnsave (%rdi) | |
551 | ret | 586 | ret | |
552 | END(fnsave) | 587 | END(fnsave) | |
553 | 588 | |||
554 | ENTRY(fnstcw) | 589 | ENTRY(fnstcw) | |
555 | fnstcw (%rdi) | 590 | fnstcw (%rdi) | |
556 | ret | 591 | ret | |
557 | END(fnstcw) | 592 | END(fnstcw) | |
558 | 593 | |||
559 | ENTRY(fngetsw) | 594 | ENTRY(fngetsw) | |
560 | fnstsw %ax | 595 | fnstsw %ax | |
561 | ret | 596 | ret | |
562 | END(fngetsw) | 597 | END(fngetsw) | |
563 | 598 | |||
564 | ENTRY(fnstsw) | 599 | ENTRY(fnstsw) | |
565 | fnstsw (%rdi) | 600 | fnstsw (%rdi) | |
566 | ret | 601 | ret | |
567 | END(fnstsw) | 602 | END(fnstsw) | |
568 | 603 | |||
569 | ENTRY(fp_divide_by_0) | 604 | ENTRY(fp_divide_by_0) | |
570 | fldz | 605 | fldz | |
571 | fld1 | 606 | fld1 | |
572 | fdiv %st, %st(1) | 607 | fdiv %st, %st(1) | |
573 | fwait | 608 | fwait | |
574 | ret | 609 | ret | |
575 | END(fp_divide_by_0) | 610 | END(fp_divide_by_0) | |
576 | 611 | |||
577 | ENTRY(frstor) | 612 | ENTRY(frstor) | |
578 | frstor (%rdi) | 613 | frstor (%rdi) | |
579 | ret | 614 | ret | |
580 | END(frstor) | 615 | END(frstor) | |
581 | 616 | |||
582 | ENTRY(fwait) | 617 | ENTRY(fwait) | |
583 | fwait | 618 | fwait | |
584 | ret | 619 | ret | |
585 | END(fwait) | 620 | END(fwait) | |
586 | 621 | |||
587 | ENTRY(clts) | 622 | ENTRY(clts) | |
588 | clts | 623 | clts | |
589 | ret | 624 | ret | |
590 | END(clts) | 625 | END(clts) | |
591 | 626 | |||
592 | ENTRY(stts) | 627 | ENTRY(stts) | |
593 | movq %cr0, %rax | 628 | movq %cr0, %rax | |
594 | orq $CR0_TS, %rax | 629 | orq $CR0_TS, %rax | |
595 | movq %rax, %cr0 | 630 | movq %rax, %cr0 | |
596 | ret | 631 | ret | |
597 | END(stts) | 632 | END(stts) | |
598 | 633 | |||
599 | ENTRY(fxsave) | 634 | ENTRY(fxsave) | |
600 | fxsave (%rdi) | 635 | fxsave (%rdi) | |
601 | ret | 636 | ret | |
602 | END(fxsave) | 637 | END(fxsave) | |
603 | 638 | |||
604 | ENTRY(fxrstor) | 639 | ENTRY(fxrstor) | |
605 | fxrstor (%rdi) | 640 | fxrstor (%rdi) | |
606 | ret | 641 | ret | |
607 | END(fxrstor) | 642 | END(fxrstor) | |
608 | 643 | |||
609 | ENTRY(fldummy) | 644 | ENTRY(fldummy) | |
610 | ffree %st(7) | 645 | ffree %st(7) | |
611 | fldz | 646 | fldz | |
612 | ret | 647 | ret | |
613 | END(fldummy) | 648 | END(fldummy) | |
614 | 649 | |||
615 | ENTRY(xsave) | 650 | ENTRY(xsave) | |
616 | movq %rsi, %rax | 651 | movq %rsi, %rax | |
617 | movq %rsi, %rdx | 652 | movq %rsi, %rdx | |
618 | shrq $32, %rdx | 653 | shrq $32, %rdx | |
619 | xsave (%rdi) | 654 | xsave (%rdi) | |
620 | ret | 655 | ret | |
621 | END(xsave) | 656 | END(xsave) | |
622 | 657 | |||
623 | ENTRY(xsaveopt) | 658 | ENTRY(xsaveopt) | |
624 | movq %rsi, %rax | 659 | movq %rsi, %rax | |
625 | movq %rsi, %rdx | 660 | movq %rsi, %rdx | |
626 | shrq $32, %rdx | 661 | shrq $32, %rdx | |
627 | xsaveopt (%rdi) | 662 | xsaveopt (%rdi) | |
628 | ret | 663 | ret | |
629 | END(xsaveopt) | 664 | END(xsaveopt) | |
630 | 665 | |||
631 | ENTRY(xrstor) | 666 | ENTRY(xrstor) | |
632 | movq %rsi, %rax | 667 | movq %rsi, %rax | |
633 | movq %rsi, %rdx | 668 | movq %rsi, %rdx | |
634 | shrq $32, %rdx | 669 | shrq $32, %rdx | |
635 | xrstor (%rdi) | 670 | xrstor (%rdi) | |
636 | ret | 671 | ret | |
637 | END(xrstor) | 672 | END(xrstor) | |
638 | 673 | |||
639 | ENTRY(x86_stmxcsr) | 674 | ENTRY(x86_stmxcsr) | |
640 | stmxcsr (%rdi) | 675 | stmxcsr (%rdi) | |
641 | ret | 676 | ret | |
642 | END(x86_stmxcsr) | 677 | END(x86_stmxcsr) | |
643 | 678 | |||
644 | ENTRY(x86_ldmxcsr) | 679 | ENTRY(x86_ldmxcsr) | |
645 | ldmxcsr (%rdi) | 680 | ldmxcsr (%rdi) | |
646 | ret | 681 | ret | |
647 | END(x86_ldmxcsr) | 682 | END(x86_ldmxcsr) | |
648 | 683 | |||
649 | ENTRY(inb) | 684 | ENTRY(inb) | |
650 | movq %rdi, %rdx | 685 | movq %rdi, %rdx | |
651 | xorq %rax, %rax | 686 | xorq %rax, %rax | |
652 | inb %dx, %al | 687 | inb %dx, %al | |
653 | ret | 688 | ret | |
654 | END(inb) | 689 | END(inb) | |
655 | 690 | |||
656 | ENTRY(insb) | 691 | ENTRY(insb) | |
657 | movl %edx, %ecx | 692 | movl %edx, %ecx | |
658 | movl %edi, %edx | 693 | movl %edi, %edx | |
659 | movq %rsi, %rdi | 694 | movq %rsi, %rdi | |
660 | rep | 695 | rep | |
661 | insb | 696 | insb | |
662 | ret | 697 | ret | |
663 | END(insb) | 698 | END(insb) | |
664 | 699 | |||
665 | ENTRY(inw) | 700 | ENTRY(inw) | |
666 | movq %rdi, %rdx | 701 | movq %rdi, %rdx | |
667 | xorq %rax, %rax | 702 | xorq %rax, %rax | |
668 | inw %dx, %ax | 703 | inw %dx, %ax | |
669 | ret | 704 | ret | |
670 | END(inw) | 705 | END(inw) | |
671 | 706 | |||
672 | ENTRY(insw) | 707 | ENTRY(insw) | |
673 | movl %edx, %ecx | 708 | movl %edx, %ecx | |
674 | movl %edi, %edx | 709 | movl %edi, %edx | |
675 | movq %rsi, %rdi | 710 | movq %rsi, %rdi | |
676 | rep | 711 | rep | |
677 | insw | 712 | insw | |
678 | ret | 713 | ret | |
679 | END(insw) | 714 | END(insw) | |
680 | 715 | |||
681 | ENTRY(inl) | 716 | ENTRY(inl) | |
682 | movq %rdi, %rdx | 717 | movq %rdi, %rdx | |
683 | xorq %rax, %rax | 718 | xorq %rax, %rax | |
684 | inl %dx, %eax | 719 | inl %dx, %eax | |
685 | ret | 720 | ret | |
686 | END(inl) | 721 | END(inl) | |
687 | 722 | |||
688 | ENTRY(insl) | 723 | ENTRY(insl) | |
689 | movl %edx, %ecx | 724 | movl %edx, %ecx | |
690 | movl %edi, %edx | 725 | movl %edi, %edx | |
691 | movq %rsi, %rdi | 726 | movq %rsi, %rdi | |
692 | rep | 727 | rep | |
693 | insl | 728 | insl | |
694 | ret | 729 | ret | |
695 | END(insl) | 730 | END(insl) | |
696 | 731 | |||
697 | ENTRY(outb) | 732 | ENTRY(outb) | |
698 | movq %rdi, %rdx | 733 | movq %rdi, %rdx | |
699 | movq %rsi, %rax | 734 | movq %rsi, %rax | |
700 | outb %al, %dx | 735 | outb %al, %dx | |
701 | ret | 736 | ret | |
702 | END(outb) | 737 | END(outb) | |
703 | 738 | |||
704 | ENTRY(outsb) | 739 | ENTRY(outsb) | |
705 | movl %edx, %ecx | 740 | movl %edx, %ecx | |
706 | movl %edi, %edx | 741 | movl %edi, %edx | |
707 | rep | 742 | rep | |
708 | outsb | 743 | outsb | |
709 | ret | 744 | ret | |
710 | END(outsb) | 745 | END(outsb) | |
711 | 746 | |||
712 | ENTRY(outw) | 747 | ENTRY(outw) | |
713 | movq %rdi, %rdx | 748 | movq %rdi, %rdx | |
714 | movq %rsi, %rax | 749 | movq %rsi, %rax | |
715 | outw %ax, %dx | 750 | outw %ax, %dx | |
716 | ret | 751 | ret | |
717 | END(outw) | 752 | END(outw) | |
718 | 753 | |||
719 | ENTRY(outsw) | 754 | ENTRY(outsw) | |
720 | movl %edx, %ecx | 755 | movl %edx, %ecx | |
721 | movl %edi, %edx | 756 | movl %edi, %edx | |
722 | rep | 757 | rep | |
723 | outsw | 758 | outsw | |
724 | ret | 759 | ret | |
725 | END(outsw) | 760 | END(outsw) | |
726 | 761 | |||
727 | ENTRY(outl) | 762 | ENTRY(outl) | |
728 | movq %rdi, %rdx | 763 | movq %rdi, %rdx | |
729 | movq %rsi, %rax | 764 | movq %rsi, %rax | |
730 | outl %eax, %dx | 765 | outl %eax, %dx | |
731 | ret | 766 | ret | |
732 | END(outl) | 767 | END(outl) | |
733 | 768 | |||
734 | ENTRY(outsl) | 769 | ENTRY(outsl) | |
735 | movl %edx, %ecx | 770 | movl %edx, %ecx | |
736 | movl %edi, %edx | 771 | movl %edi, %edx | |
737 | rep | 772 | rep | |
738 | outsl | 773 | outsl | |
739 | ret | 774 | ret | |
740 | END(outsl) | 775 | END(outsl) | |
741 | 776 | |||
742 | ENTRY(setds) | 777 | ENTRY(setds) | |
743 | movw %di, %ds | 778 | movw %di, %ds | |
744 | ret | 779 | ret | |
745 | END(setds) | 780 | END(setds) | |
746 | 781 | |||
747 | ENTRY(setes) | 782 | ENTRY(setes) | |
748 | movw %di, %es | 783 | movw %di, %es | |
749 | ret | 784 | ret | |
750 | END(setes) | 785 | END(setes) | |
751 | 786 | |||
752 | ENTRY(setfs) | 787 | ENTRY(setfs) | |
753 | movw %di, %fs | 788 | movw %di, %fs | |
754 | ret | 789 | ret | |
755 | END(setfs) | 790 | END(setfs) | |
756 | 791 | |||
757 | #ifndef XEN | 792 | #ifndef XEN | |
758 | ENTRY(setusergs) | 793 | ENTRY(setusergs) | |
759 | CLI(ax) | 794 | CLI(ax) | |
760 | swapgs | 795 | swapgs | |
761 | movw %di, %gs | 796 | movw %di, %gs | |
762 | swapgs | 797 | swapgs | |
763 | STI(ax) | 798 | STI(ax) | |
764 | ret | 799 | ret | |
765 | END(setusergs) | 800 | END(setusergs) | |
766 | #endif | 801 | #endif |
--- src/sys/arch/i386/i386/cpufunc.S 2018/10/18 04:11:14 1.25
+++ src/sys/arch/i386/i386/cpufunc.S 2018/12/22 21:27:22 1.26
@@ -1,563 +1,575 @@
1 | /* $NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $ */ | 1 | /* $NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Functions to provide access to i386-specific instructions. | 33 | * Functions to provide access to i386-specific instructions. | |
34 | * | 34 | * | |
35 | * These are shared with NetBSD/xen. | 35 | * These are shared with NetBSD/xen. | |
36 | */ | 36 | */ | |
37 | 37 | |||
38 | #include <sys/errno.h> | 38 | #include <sys/errno.h> | |
39 | 39 | |||
40 | #include <machine/asm.h> | 40 | #include <machine/asm.h> | |
41 | __KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.25 2018/10/18 04:11:14 cherry Exp $"); | 41 | __KERNEL_RCSID(0, "$NetBSD: cpufunc.S,v 1.26 2018/12/22 21:27:22 cherry Exp $"); | |
42 | 42 | |||
43 | #include "opt_xen.h" | 43 | #include "opt_xen.h" | |
44 | 44 | |||
45 | #include <machine/specialreg.h> | 45 | #include <machine/specialreg.h> | |
46 | #include <machine/segments.h> | 46 | #include <machine/segments.h> | |
47 | 47 | |||
48 | #include "assym.h" | 48 | #include "assym.h" | |
49 | 49 | |||
50 | /* | |||
51 | * These functions below should always be accessed via the corresponding wrapper | |||
52 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
53 | * | |||
54 | * We use this rather roundabout method so that a runtime wrapper function may | |||
55 | * be made available for PVHVM, which could override both native and PV aliases | |||
56 | * and decide which to invoke at run time. | |||
57 | */ | |||
58 | ||||
59 | WEAK_ALIAS(lidt, i386_lidt) | |||
60 | WEAK_ALIAS(rcr3, i386_rcr3) | |||
61 | ||||
50 | ENTRY(x86_lfence) | 62 | ENTRY(x86_lfence) | |
51 | lock | 63 | lock | |
52 | addl $0, -4(%esp) | 64 | addl $0, -4(%esp) | |
53 | ret | 65 | ret | |
54 | END(x86_lfence) | 66 | END(x86_lfence) | |
55 | 67 | |||
56 | ENTRY(x86_sfence) | 68 | ENTRY(x86_sfence) | |
57 | lock | 69 | lock | |
58 | addl $0, -4(%esp) | 70 | addl $0, -4(%esp) | |
59 | ret | 71 | ret | |
60 | END(x86_sfence) | 72 | END(x86_sfence) | |
61 | 73 | |||
62 | ENTRY(x86_mfence) | 74 | ENTRY(x86_mfence) | |
63 | lock | 75 | lock | |
64 | addl $0, -4(%esp) | 76 | addl $0, -4(%esp) | |
65 | ret | 77 | ret | |
66 | END(x86_mfence) | 78 | END(x86_mfence) | |
67 | 79 | |||
68 | #ifndef XEN | 80 | #ifndef XEN | |
69 | ENTRY(lidt) | 81 | ENTRY(i386_lidt) | |
70 | movl 4(%esp), %eax | 82 | movl 4(%esp), %eax | |
71 | lidt (%eax) | 83 | lidt (%eax) | |
72 | ret | 84 | ret | |
73 | END(lidt) | 85 | END(i386_lidt) | |
74 | #endif /* XEN */ | 86 | #endif /* XEN */ | |
75 | 87 | |||
76 | ENTRY(rcr3) | 88 | ENTRY(i386_rcr3) | |
77 | movl %cr3, %eax | 89 | movl %cr3, %eax | |
78 | ret | 90 | ret | |
79 | END(rcr3) | 91 | END(i386_rcr3) | |
80 | 92 | |||
81 | ENTRY(lcr4) | 93 | ENTRY(lcr4) | |
82 | movl 4(%esp), %eax | 94 | movl 4(%esp), %eax | |
83 | movl %eax, %cr4 | 95 | movl %eax, %cr4 | |
84 | ret | 96 | ret | |
85 | END(lcr4) | 97 | END(lcr4) | |
86 | 98 | |||
87 | ENTRY(rcr4) | 99 | ENTRY(rcr4) | |
88 | movl %cr4, %eax | 100 | movl %cr4, %eax | |
89 | ret | 101 | ret | |
90 | END(rcr4) | 102 | END(rcr4) | |
91 | 103 | |||
92 | ENTRY(x86_read_flags) | 104 | ENTRY(x86_read_flags) | |
93 | pushfl | 105 | pushfl | |
94 | popl %eax | 106 | popl %eax | |
95 | ret | 107 | ret | |
96 | END(x86_read_flags) | 108 | END(x86_read_flags) | |
97 | 109 | |||
98 | ENTRY(x86_write_flags) | 110 | ENTRY(x86_write_flags) | |
99 | movl 4(%esp), %eax | 111 | movl 4(%esp), %eax | |
100 | pushl %eax | 112 | pushl %eax | |
101 | popfl | 113 | popfl | |
102 | ret | 114 | ret | |
103 | END(x86_write_flags) | 115 | END(x86_write_flags) | |
104 | 116 | |||
105 | #ifndef XEN | 117 | #ifndef XEN | |
106 | STRONG_ALIAS(x86_write_psl,x86_write_flags) | 118 | STRONG_ALIAS(x86_write_psl,x86_write_flags) | |
107 | STRONG_ALIAS(x86_read_psl,x86_read_flags) | 119 | STRONG_ALIAS(x86_read_psl,x86_read_flags) | |
108 | #endif /* XEN */ | 120 | #endif /* XEN */ | |
109 | 121 | |||
110 | ENTRY(rdmsr) | 122 | ENTRY(rdmsr) | |
111 | movl 4(%esp), %ecx | 123 | movl 4(%esp), %ecx | |
112 | rdmsr | 124 | rdmsr | |
113 | ret | 125 | ret | |
114 | END(rdmsr) | 126 | END(rdmsr) | |
115 | 127 | |||
116 | ENTRY(wrmsr) | 128 | ENTRY(wrmsr) | |
117 | movl 4(%esp), %ecx | 129 | movl 4(%esp), %ecx | |
118 | movl 8(%esp), %eax | 130 | movl 8(%esp), %eax | |
119 | movl 12(%esp), %edx | 131 | movl 12(%esp), %edx | |
120 | wrmsr | 132 | wrmsr | |
121 | ret | 133 | ret | |
122 | END(wrmsr) | 134 | END(wrmsr) | |
123 | 135 | |||
124 | ENTRY(rdmsr_locked) | 136 | ENTRY(rdmsr_locked) | |
125 | movl 4(%esp), %ecx | 137 | movl 4(%esp), %ecx | |
126 | pushl %edi | 138 | pushl %edi | |
127 | movl $OPTERON_MSR_PASSCODE, %edi | 139 | movl $OPTERON_MSR_PASSCODE, %edi | |
128 | rdmsr | 140 | rdmsr | |
129 | popl %edi | 141 | popl %edi | |
130 | ret | 142 | ret | |
131 | END(rdmsr_locked) | 143 | END(rdmsr_locked) | |
132 | 144 | |||
133 | ENTRY(wrmsr_locked) | 145 | ENTRY(wrmsr_locked) | |
134 | movl 4(%esp), %ecx | 146 | movl 4(%esp), %ecx | |
135 | movl 8(%esp), %eax | 147 | movl 8(%esp), %eax | |
136 | movl 12(%esp), %edx | 148 | movl 12(%esp), %edx | |
137 | pushl %edi | 149 | pushl %edi | |
138 | movl $OPTERON_MSR_PASSCODE, %edi | 150 | movl $OPTERON_MSR_PASSCODE, %edi | |
139 | wrmsr | 151 | wrmsr | |
140 | popl %edi | 152 | popl %edi | |
141 | ret | 153 | ret | |
142 | END(wrmsr_locked) | 154 | END(wrmsr_locked) | |
143 | 155 | |||
144 | /* | 156 | /* | |
145 | * Support for reading MSRs in the safe manner (returns EFAULT on fault) | 157 | * Support for reading MSRs in the safe manner (returns EFAULT on fault) | |
146 | */ | 158 | */ | |
147 | /* int rdmsr_safe(u_int msr, uint64_t *data) */ | 159 | /* int rdmsr_safe(u_int msr, uint64_t *data) */ | |
148 | ENTRY(rdmsr_safe) | 160 | ENTRY(rdmsr_safe) | |
149 | movl CPUVAR(CURLWP), %ecx | 161 | movl CPUVAR(CURLWP), %ecx | |
150 | movl L_PCB(%ecx), %ecx | 162 | movl L_PCB(%ecx), %ecx | |
151 | movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx) | 163 | movl $_C_LABEL(msr_onfault), PCB_ONFAULT(%ecx) | |
152 | 164 | |||
153 | movl 4(%esp), %ecx /* u_int msr */ | 165 | movl 4(%esp), %ecx /* u_int msr */ | |
154 | rdmsr | 166 | rdmsr | |
155 | movl 8(%esp), %ecx /* *data */ | 167 | movl 8(%esp), %ecx /* *data */ | |
156 | movl %eax, (%ecx) /* low-order bits */ | 168 | movl %eax, (%ecx) /* low-order bits */ | |
157 | movl %edx, 4(%ecx) /* high-order bits */ | 169 | movl %edx, 4(%ecx) /* high-order bits */ | |
158 | xorl %eax, %eax /* "no error" */ | 170 | xorl %eax, %eax /* "no error" */ | |
159 | 171 | |||
160 | movl CPUVAR(CURLWP), %ecx | 172 | movl CPUVAR(CURLWP), %ecx | |
161 | movl L_PCB(%ecx), %ecx | 173 | movl L_PCB(%ecx), %ecx | |
162 | movl %eax, PCB_ONFAULT(%ecx) | 174 | movl %eax, PCB_ONFAULT(%ecx) | |
163 | 175 | |||
164 | ret | 176 | ret | |
165 | END(rdmsr_safe) | 177 | END(rdmsr_safe) | |
166 | 178 | |||
167 | /* uint64_t rdxcr(uint32_t) */ | 179 | /* uint64_t rdxcr(uint32_t) */ | |
168 | ENTRY(rdxcr) | 180 | ENTRY(rdxcr) | |
169 | movl 4(%esp), %ecx /* extended control reg number */ | 181 | movl 4(%esp), %ecx /* extended control reg number */ | |
170 | xgetbv /* Read to %edx:%eax */ | 182 | xgetbv /* Read to %edx:%eax */ | |
171 | ret | 183 | ret | |
172 | END(rdxcr) | 184 | END(rdxcr) | |
173 | 185 | |||
174 | /* void wrxcr(uint32_t, uint64_t) */ | 186 | /* void wrxcr(uint32_t, uint64_t) */ | |
175 | ENTRY(wrxcr) | 187 | ENTRY(wrxcr) | |
176 | movl 4(%esp), %ecx /* extended control reg number */ | 188 | movl 4(%esp), %ecx /* extended control reg number */ | |
177 | movl 8(%esp), %eax /* feature mask bits */ | 189 | movl 8(%esp), %eax /* feature mask bits */ | |
178 | movl 12(%esp), %edx | 190 | movl 12(%esp), %edx | |
179 | xsetbv | 191 | xsetbv | |
180 | ret | 192 | ret | |
181 | END(wrxcr) | 193 | END(wrxcr) | |
182 | 194 | |||
183 | 195 | |||
184 | /* | 196 | /* | |
185 | * MSR operations fault handler | 197 | * MSR operations fault handler | |
186 | */ | 198 | */ | |
187 | ENTRY(msr_onfault) | 199 | ENTRY(msr_onfault) | |
188 | movl CPUVAR(CURLWP), %ecx | 200 | movl CPUVAR(CURLWP), %ecx | |
189 | movl L_PCB(%ecx), %ecx | 201 | movl L_PCB(%ecx), %ecx | |
190 | movl $0, PCB_ONFAULT(%ecx) | 202 | movl $0, PCB_ONFAULT(%ecx) | |
191 | movl $EFAULT, %eax | 203 | movl $EFAULT, %eax | |
192 | ret | 204 | ret | |
193 | END(msr_onfault) | 205 | END(msr_onfault) | |
194 | 206 | |||
195 | ENTRY(cpu_counter) | 207 | ENTRY(cpu_counter) | |
196 | rdtsc | 208 | rdtsc | |
197 | addl CPUVAR(CC_SKEW), %eax | 209 | addl CPUVAR(CC_SKEW), %eax | |
198 | adcl CPUVAR(CC_SKEW+4), %edx | 210 | adcl CPUVAR(CC_SKEW+4), %edx | |
199 | ret | 211 | ret | |
200 | END(cpu_counter) | 212 | END(cpu_counter) | |
201 | 213 | |||
202 | ENTRY(cpu_counter32) | 214 | ENTRY(cpu_counter32) | |
203 | rdtsc | 215 | rdtsc | |
204 | addl CPUVAR(CC_SKEW), %eax | 216 | addl CPUVAR(CC_SKEW), %eax | |
205 | ret | 217 | ret | |
206 | END(cpu_counter32) | 218 | END(cpu_counter32) | |
207 | 219 | |||
208 | ENTRY(rdpmc) | 220 | ENTRY(rdpmc) | |
209 | movl 4(%esp), %ecx | 221 | movl 4(%esp), %ecx | |
210 | rdpmc | 222 | rdpmc | |
211 | ret | 223 | ret | |
212 | END(rdpmc) | 224 | END(rdpmc) | |
213 | 225 | |||
214 | ENTRY(rdtsc) | 226 | ENTRY(rdtsc) | |
215 | rdtsc | 227 | rdtsc | |
216 | ret | 228 | ret | |
217 | END(rdtsc) | 229 | END(rdtsc) | |
218 | 230 | |||
219 | ENTRY(breakpoint) | 231 | ENTRY(breakpoint) | |
220 | pushl %ebp | 232 | pushl %ebp | |
221 | movl %esp, %ebp | 233 | movl %esp, %ebp | |
222 | int $0x03 /* paranoid, not 'int3' */ | 234 | int $0x03 /* paranoid, not 'int3' */ | |
223 | popl %ebp | 235 | popl %ebp | |
224 | ret | 236 | ret | |
225 | END(breakpoint) | 237 | END(breakpoint) | |
226 | 238 | |||
227 | ENTRY(x86_curcpu) | 239 | ENTRY(x86_curcpu) | |
228 | movl %fs:(CPU_INFO_SELF), %eax | 240 | movl %fs:(CPU_INFO_SELF), %eax | |
229 | ret | 241 | ret | |
230 | END(x86_curcpu) | 242 | END(x86_curcpu) | |
231 | 243 | |||
232 | ENTRY(x86_curlwp) | 244 | ENTRY(x86_curlwp) | |
233 | movl %fs:(CPU_INFO_CURLWP), %eax | 245 | movl %fs:(CPU_INFO_CURLWP), %eax | |
234 | ret | 246 | ret | |
235 | END(x86_curlwp) | 247 | END(x86_curlwp) | |
236 | 248 | |||
237 | ENTRY(cpu_set_curpri) | 249 | ENTRY(cpu_set_curpri) | |
238 | movl 4(%esp), %eax | 250 | movl 4(%esp), %eax | |
239 | movl %eax, %fs:(CPU_INFO_CURPRIORITY) | 251 | movl %eax, %fs:(CPU_INFO_CURPRIORITY) | |
240 | ret | 252 | ret | |
241 | END(cpu_set_curpri) | 253 | END(cpu_set_curpri) | |
242 | 254 | |||
243 | ENTRY(__byte_swap_u32_variable) | 255 | ENTRY(__byte_swap_u32_variable) | |
244 | movl 4(%esp), %eax | 256 | movl 4(%esp), %eax | |
245 | bswapl %eax | 257 | bswapl %eax | |
246 | ret | 258 | ret | |
247 | END(__byte_swap_u32_variable) | 259 | END(__byte_swap_u32_variable) | |
248 | 260 | |||
249 | ENTRY(__byte_swap_u16_variable) | 261 | ENTRY(__byte_swap_u16_variable) | |
250 | movl 4(%esp), %eax | 262 | movl 4(%esp), %eax | |
251 | xchgb %al, %ah | 263 | xchgb %al, %ah | |
252 | ret | 264 | ret | |
253 | END(__byte_swap_u16_variable) | 265 | END(__byte_swap_u16_variable) | |
254 | 266 | |||
255 | /* | 267 | /* | |
256 | * void x86_flush() | 268 | * void x86_flush() | |
257 | * | 269 | * | |
258 | * Flush instruction pipelines by doing an intersegment (far) return. | 270 | * Flush instruction pipelines by doing an intersegment (far) return. | |
259 | */ | 271 | */ | |
260 | ENTRY(x86_flush) | 272 | ENTRY(x86_flush) | |
261 | popl %eax | 273 | popl %eax | |
262 | pushl $GSEL(GCODE_SEL, SEL_KPL) | 274 | pushl $GSEL(GCODE_SEL, SEL_KPL) | |
263 | pushl %eax | 275 | pushl %eax | |
264 | lret | 276 | lret | |
265 | END(x86_flush) | 277 | END(x86_flush) | |
266 | 278 | |||
267 | /* Waits - set up stack frame. */ | 279 | /* Waits - set up stack frame. */ | |
268 | ENTRY(x86_hlt) | 280 | ENTRY(x86_hlt) | |
269 | pushl %ebp | 281 | pushl %ebp | |
270 | movl %esp, %ebp | 282 | movl %esp, %ebp | |
271 | hlt | 283 | hlt | |
272 | leave | 284 | leave | |
273 | ret | 285 | ret | |
274 | END(x86_hlt) | 286 | END(x86_hlt) | |
275 | 287 | |||
276 | /* Waits - set up stack frame. */ | 288 | /* Waits - set up stack frame. */ | |
277 | ENTRY(x86_stihlt) | 289 | ENTRY(x86_stihlt) | |
278 | pushl %ebp | 290 | pushl %ebp | |
279 | movl %esp, %ebp | 291 | movl %esp, %ebp | |
280 | sti | 292 | sti | |
281 | hlt | 293 | hlt | |
282 | leave | 294 | leave | |
283 | ret | 295 | ret | |
284 | END(x86_stihlt) | 296 | END(x86_stihlt) | |
285 | 297 | |||
286 | ENTRY(x86_monitor) | 298 | ENTRY(x86_monitor) | |
287 | movl 4(%esp), %eax | 299 | movl 4(%esp), %eax | |
288 | movl 8(%esp), %ecx | 300 | movl 8(%esp), %ecx | |
289 | movl 12(%esp), %edx | 301 | movl 12(%esp), %edx | |
290 | monitor %eax, %ecx, %edx | 302 | monitor %eax, %ecx, %edx | |
291 | ret | 303 | ret | |
292 | END(x86_monitor) | 304 | END(x86_monitor) | |
293 | 305 | |||
294 | /* Waits - set up stack frame. */ | 306 | /* Waits - set up stack frame. */ | |
295 | ENTRY(x86_mwait) | 307 | ENTRY(x86_mwait) | |
296 | pushl %ebp | 308 | pushl %ebp | |
297 | movl %esp, %ebp | 309 | movl %esp, %ebp | |
298 | movl 8(%ebp), %eax | 310 | movl 8(%ebp), %eax | |
299 | movl 12(%ebp), %ecx | 311 | movl 12(%ebp), %ecx | |
300 | mwait %eax, %ecx | 312 | mwait %eax, %ecx | |
301 | leave | 313 | leave | |
302 | ret | 314 | ret | |
303 | END(x86_mwait) | 315 | END(x86_mwait) | |
304 | 316 | |||
305 | ENTRY(x86_pause) | 317 | ENTRY(x86_pause) | |
306 | pause | 318 | pause | |
307 | ret | 319 | ret | |
308 | END(x86_pause) | 320 | END(x86_pause) | |
309 | 321 | |||
310 | ENTRY(x86_cpuid2) | 322 | ENTRY(x86_cpuid2) | |
311 | pushl %ebx | 323 | pushl %ebx | |
312 | pushl %edi | 324 | pushl %edi | |
313 | movl 12(%esp), %eax | 325 | movl 12(%esp), %eax | |
314 | movl 16(%esp), %ecx | 326 | movl 16(%esp), %ecx | |
315 | movl 20(%esp), %edi | 327 | movl 20(%esp), %edi | |
316 | cpuid | 328 | cpuid | |
317 | movl %eax, 0(%edi) | 329 | movl %eax, 0(%edi) | |
318 | movl %ebx, 4(%edi) | 330 | movl %ebx, 4(%edi) | |
319 | movl %ecx, 8(%edi) | 331 | movl %ecx, 8(%edi) | |
320 | movl %edx, 12(%edi) | 332 | movl %edx, 12(%edi) | |
321 | popl %edi | 333 | popl %edi | |
322 | popl %ebx | 334 | popl %ebx | |
323 | ret | 335 | ret | |
324 | END(x86_cpuid2) | 336 | END(x86_cpuid2) | |
325 | 337 | |||
326 | ENTRY(x86_getss) | 338 | ENTRY(x86_getss) | |
327 | movl %ss, %eax | 339 | movl %ss, %eax | |
328 | ret | 340 | ret | |
329 | END(x86_getss) | 341 | END(x86_getss) | |
330 | 342 | |||
331 | ENTRY(fldcw) | 343 | ENTRY(fldcw) | |
332 | movl 4(%esp), %eax | 344 | movl 4(%esp), %eax | |
333 | fldcw (%eax) | 345 | fldcw (%eax) | |
334 | ret | 346 | ret | |
335 | END(fldcw) | 347 | END(fldcw) | |
336 | 348 | |||
337 | ENTRY(fnclex) | 349 | ENTRY(fnclex) | |
338 | fnclex | 350 | fnclex | |
339 | ret | 351 | ret | |
340 | END(fnclex) | 352 | END(fnclex) | |
341 | 353 | |||
342 | ENTRY(fninit) | 354 | ENTRY(fninit) | |
343 | fninit | 355 | fninit | |
344 | ret | 356 | ret | |
345 | END(fninit) | 357 | END(fninit) | |
346 | 358 | |||
347 | ENTRY(fnsave) | 359 | ENTRY(fnsave) | |
348 | movl 4(%esp), %eax | 360 | movl 4(%esp), %eax | |
349 | fnsave (%eax) | 361 | fnsave (%eax) | |
350 | ret | 362 | ret | |
351 | END(fnsave) | 363 | END(fnsave) | |
352 | 364 | |||
353 | ENTRY(fnstcw) | 365 | ENTRY(fnstcw) | |
354 | movl 4(%esp), %eax | 366 | movl 4(%esp), %eax | |
355 | fnstcw (%eax) | 367 | fnstcw (%eax) | |
356 | ret | 368 | ret | |
357 | END(fnstcw) | 369 | END(fnstcw) | |
358 | 370 | |||
359 | ENTRY(fngetsw) | 371 | ENTRY(fngetsw) | |
360 | fnstsw %ax | 372 | fnstsw %ax | |
361 | ret | 373 | ret | |
362 | END(fngetsw) | 374 | END(fngetsw) | |
363 | 375 | |||
364 | ENTRY(fnstsw) | 376 | ENTRY(fnstsw) | |
365 | movl 4(%esp), %eax | 377 | movl 4(%esp), %eax | |
366 | fnstsw (%eax) | 378 | fnstsw (%eax) | |
367 | ret | 379 | ret | |
368 | END(fnstsw) | 380 | END(fnstsw) | |
369 | 381 | |||
370 | ENTRY(fp_divide_by_0) | 382 | ENTRY(fp_divide_by_0) | |
371 | fldz | 383 | fldz | |
372 | fld1 | 384 | fld1 | |
373 | fdiv %st, %st(1) | 385 | fdiv %st, %st(1) | |
374 | fwait | 386 | fwait | |
375 | ret | 387 | ret | |
376 | END(fp_divide_by_0) | 388 | END(fp_divide_by_0) | |
377 | 389 | |||
378 | ENTRY(frstor) | 390 | ENTRY(frstor) | |
379 | movl 4(%esp), %eax | 391 | movl 4(%esp), %eax | |
380 | frstor (%eax) | 392 | frstor (%eax) | |
381 | ret | 393 | ret | |
382 | END(frstor) | 394 | END(frstor) | |
383 | 395 | |||
384 | ENTRY(fwait) | 396 | ENTRY(fwait) | |
385 | fwait | 397 | fwait | |
386 | ret | 398 | ret | |
387 | END(fwait) | 399 | END(fwait) | |
388 | 400 | |||
389 | ENTRY(clts) | 401 | ENTRY(clts) | |
390 | clts | 402 | clts | |
391 | ret | 403 | ret | |
392 | END(clts) | 404 | END(clts) | |
393 | 405 | |||
394 | ENTRY(stts) | 406 | ENTRY(stts) | |
395 | movl %cr0, %eax | 407 | movl %cr0, %eax | |
396 | testl $CR0_TS, %eax | 408 | testl $CR0_TS, %eax | |
397 | jnz 1f | 409 | jnz 1f | |
398 | orl $CR0_TS, %eax | 410 | orl $CR0_TS, %eax | |
399 | movl %eax, %cr0 | 411 | movl %eax, %cr0 | |
400 | 1: | 412 | 1: | |
401 | ret | 413 | ret | |
402 | END(stts) | 414 | END(stts) | |
403 | 415 | |||
404 | ENTRY(fxsave) | 416 | ENTRY(fxsave) | |
405 | movl 4(%esp), %eax | 417 | movl 4(%esp), %eax | |
406 | fxsave (%eax) | 418 | fxsave (%eax) | |
407 | ret | 419 | ret | |
408 | END(fxsave) | 420 | END(fxsave) | |
409 | 421 | |||
410 | ENTRY(fxrstor) | 422 | ENTRY(fxrstor) | |
411 | movl 4(%esp), %eax | 423 | movl 4(%esp), %eax | |
412 | fxrstor (%eax) | 424 | fxrstor (%eax) | |
413 | ret | 425 | ret | |
414 | END(fxrstor) | 426 | END(fxrstor) | |
415 | 427 | |||
416 | ENTRY(xsave) | 428 | ENTRY(xsave) | |
417 | movl 4(%esp), %ecx | 429 | movl 4(%esp), %ecx | |
418 | movl 8(%esp), %eax /* feature mask bits */ | 430 | movl 8(%esp), %eax /* feature mask bits */ | |
419 | movl 12(%esp), %edx | 431 | movl 12(%esp), %edx | |
420 | xsave (%ecx) | 432 | xsave (%ecx) | |
421 | ret | 433 | ret | |
422 | END(xsave) | 434 | END(xsave) | |
423 | 435 | |||
424 | ENTRY(xsaveopt) | 436 | ENTRY(xsaveopt) | |
425 | movl 4(%esp), %ecx | 437 | movl 4(%esp), %ecx | |
426 | movl 8(%esp), %eax /* feature mask bits */ | 438 | movl 8(%esp), %eax /* feature mask bits */ | |
427 | movl 12(%esp), %edx | 439 | movl 12(%esp), %edx | |
428 | xsaveopt (%ecx) | 440 | xsaveopt (%ecx) | |
429 | ret | 441 | ret | |
430 | END(xsaveopt) | 442 | END(xsaveopt) | |
431 | 443 | |||
432 | ENTRY(xrstor) | 444 | ENTRY(xrstor) | |
433 | movl 4(%esp), %ecx | 445 | movl 4(%esp), %ecx | |
434 | movl 8(%esp), %eax /* feature mask bits */ | 446 | movl 8(%esp), %eax /* feature mask bits */ | |
435 | movl 12(%esp), %edx | 447 | movl 12(%esp), %edx | |
436 | xrstor (%ecx) | 448 | xrstor (%ecx) | |
437 | ret | 449 | ret | |
438 | END(xrstor) | 450 | END(xrstor) | |
439 | 451 | |||
440 | ENTRY(x86_stmxcsr) | 452 | ENTRY(x86_stmxcsr) | |
441 | movl 4(%esp), %eax | 453 | movl 4(%esp), %eax | |
442 | stmxcsr (%eax) | 454 | stmxcsr (%eax) | |
443 | ret | 455 | ret | |
444 | END(x86_stmxcsr) | 456 | END(x86_stmxcsr) | |
445 | 457 | |||
446 | ENTRY(x86_ldmxcsr) | 458 | ENTRY(x86_ldmxcsr) | |
447 | movl 4(%esp), %eax | 459 | movl 4(%esp), %eax | |
448 | ldmxcsr (%eax) | 460 | ldmxcsr (%eax) | |
449 | ret | 461 | ret | |
450 | END(x86_ldmxcsr) | 462 | END(x86_ldmxcsr) | |
451 | 463 | |||
452 | ENTRY(fldummy) | 464 | ENTRY(fldummy) | |
453 | ffree %st(7) | 465 | ffree %st(7) | |
454 | fldz | 466 | fldz | |
455 | ret | 467 | ret | |
456 | END(fldummy) | 468 | END(fldummy) | |
457 | 469 | |||
458 | ENTRY(inb) | 470 | ENTRY(inb) | |
459 | movl 4(%esp), %edx | 471 | movl 4(%esp), %edx | |
460 | xorl %eax, %eax | 472 | xorl %eax, %eax | |
461 | inb %dx, %al | 473 | inb %dx, %al | |
462 | ret | 474 | ret | |
463 | END(inb) | 475 | END(inb) | |
464 | 476 | |||
465 | ENTRY(insb) | 477 | ENTRY(insb) | |
466 | pushl %edi | 478 | pushl %edi | |
467 | movl 8(%esp), %edx | 479 | movl 8(%esp), %edx | |
468 | movl 12(%esp), %edi | 480 | movl 12(%esp), %edi | |
469 | movl 16(%esp), %ecx | 481 | movl 16(%esp), %ecx | |
470 | rep | 482 | rep | |
471 | insb | 483 | insb | |
472 | popl %edi | 484 | popl %edi | |
473 | ret | 485 | ret | |
474 | END(insb) | 486 | END(insb) | |
475 | 487 | |||
476 | ENTRY(inw) | 488 | ENTRY(inw) | |
477 | movl 4(%esp), %edx | 489 | movl 4(%esp), %edx | |
478 | xorl %eax, %eax | 490 | xorl %eax, %eax | |
479 | inw %dx, %ax | 491 | inw %dx, %ax | |
480 | ret | 492 | ret | |
481 | END(inw) | 493 | END(inw) | |
482 | 494 | |||
483 | ENTRY(insw) | 495 | ENTRY(insw) | |
484 | pushl %edi | 496 | pushl %edi | |
485 | movl 8(%esp), %edx | 497 | movl 8(%esp), %edx | |
486 | movl 12(%esp), %edi | 498 | movl 12(%esp), %edi | |
487 | movl 16(%esp), %ecx | 499 | movl 16(%esp), %ecx | |
488 | rep | 500 | rep | |
489 | insw | 501 | insw | |
490 | popl %edi | 502 | popl %edi | |
491 | ret | 503 | ret | |
492 | END(insw) | 504 | END(insw) | |
493 | 505 | |||
494 | ENTRY(inl) | 506 | ENTRY(inl) | |
495 | movl 4(%esp), %edx | 507 | movl 4(%esp), %edx | |
496 | inl %dx, %eax | 508 | inl %dx, %eax | |
497 | ret | 509 | ret | |
498 | END(inl) | 510 | END(inl) | |
499 | 511 | |||
500 | ENTRY(insl) | 512 | ENTRY(insl) | |
501 | pushl %edi | 513 | pushl %edi | |
502 | movl 8(%esp), %edx | 514 | movl 8(%esp), %edx | |
503 | movl 12(%esp), %edi | 515 | movl 12(%esp), %edi | |
504 | movl 16(%esp), %ecx | 516 | movl 16(%esp), %ecx | |
505 | rep | 517 | rep | |
506 | insl | 518 | insl | |
507 | popl %edi | 519 | popl %edi | |
508 | ret | 520 | ret | |
509 | END(insl) | 521 | END(insl) | |
510 | 522 | |||
511 | ENTRY(outb) | 523 | ENTRY(outb) | |
512 | movl 4(%esp), %edx | 524 | movl 4(%esp), %edx | |
513 | movl 8(%esp), %eax | 525 | movl 8(%esp), %eax | |
514 | outb %al, %dx | 526 | outb %al, %dx | |
515 | ret | 527 | ret | |
516 | END(outb) | 528 | END(outb) | |
517 | 529 | |||
518 | ENTRY(outsb) | 530 | ENTRY(outsb) | |
519 | pushl %esi | 531 | pushl %esi | |
520 | movl 8(%esp), %edx | 532 | movl 8(%esp), %edx | |
521 | movl 12(%esp), %esi | 533 | movl 12(%esp), %esi | |
522 | movl 16(%esp), %ecx | 534 | movl 16(%esp), %ecx | |
523 | rep | 535 | rep | |
524 | outsb | 536 | outsb | |
525 | popl %esi | 537 | popl %esi | |
526 | ret | 538 | ret | |
527 | END(outsb) | 539 | END(outsb) | |
528 | 540 | |||
529 | ENTRY(outw) | 541 | ENTRY(outw) | |
530 | movl 4(%esp), %edx | 542 | movl 4(%esp), %edx | |
531 | movl 8(%esp), %eax | 543 | movl 8(%esp), %eax | |
532 | outw %ax, %dx | 544 | outw %ax, %dx | |
533 | ret | 545 | ret | |
534 | END(outw) | 546 | END(outw) | |
535 | 547 | |||
536 | ENTRY(outsw) | 548 | ENTRY(outsw) | |
537 | pushl %esi | 549 | pushl %esi | |
538 | movl 8(%esp), %edx | 550 | movl 8(%esp), %edx | |
539 | movl 12(%esp), %esi | 551 | movl 12(%esp), %esi | |
540 | movl 16(%esp), %ecx | 552 | movl 16(%esp), %ecx | |
541 | rep | 553 | rep | |
542 | outsw | 554 | outsw | |
543 | popl %esi | 555 | popl %esi | |
544 | ret | 556 | ret | |
545 | END(outsw) | 557 | END(outsw) | |
546 | 558 | |||
547 | ENTRY(outl) | 559 | ENTRY(outl) | |
548 | movl 4(%esp), %edx | 560 | movl 4(%esp), %edx | |
549 | movl 8(%esp), %eax | 561 | movl 8(%esp), %eax | |
550 | outl %eax, %dx | 562 | outl %eax, %dx | |
551 | ret | 563 | ret | |
552 | END(outl) | 564 | END(outl) | |
553 | 565 | |||
554 | ENTRY(outsl) | 566 | ENTRY(outsl) | |
555 | pushl %esi | 567 | pushl %esi | |
556 | movl 8(%esp), %edx | 568 | movl 8(%esp), %edx | |
557 | movl 12(%esp), %esi | 569 | movl 12(%esp), %esi | |
558 | movl 16(%esp), %ecx | 570 | movl 16(%esp), %ecx | |
559 | rep | 571 | rep | |
560 | outsl | 572 | outsl | |
561 | popl %esi | 573 | popl %esi | |
562 | ret | 574 | ret | |
563 | END(outsl) | 575 | END(outsl) |
--- src/sys/arch/i386/i386/i386func.S 2016/11/27 14:49:21 1.18
+++ src/sys/arch/i386/i386/i386func.S 2018/12/22 21:27:22 1.19
@@ -1,257 +1,290 @@ | @@ -1,257 +1,290 @@ | |||
1 | /* $NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $ */ | 1 | /* $NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Charles M. Hannum, and by Andrew Doran. | 8 | * by Charles M. Hannum, and by Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | /* | 32 | /* | |
33 | * Functions to provide access to i386-specific instructions. | 33 | * Functions to provide access to i386-specific instructions. | |
34 | * | 34 | * | |
35 | * These are _not_ shared with NetBSD/xen. | 35 | * These are _not_ shared with NetBSD/xen. | |
36 | */ | 36 | */ | |
37 | 37 | |||
38 | #include <machine/asm.h> | 38 | #include <machine/asm.h> | |
39 | __KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.18 2016/11/27 14:49:21 kamil Exp $"); | 39 | __KERNEL_RCSID(0, "$NetBSD: i386func.S,v 1.19 2018/12/22 21:27:22 cherry Exp $"); | |
40 | 40 | |||
41 | #include <machine/specialreg.h> | 41 | #include <machine/specialreg.h> | |
42 | #include <machine/segments.h> | 42 | #include <machine/segments.h> | |
43 | 43 | |||
44 | #include "assym.h" | 44 | #include "assym.h" | |
45 | 45 | |||
46 | ENTRY(invlpg) | 46 | /* | |
47 | * The functions below should always be accessed via the corresponding wrapper | |||
48 | * function names defined in x86/include/cpufunc.h and exported as WEAK_ALIAS() | |||
49 | * | |||
50 | * We use this rather roundabout method so that a runtime wrapper function may | |||
51 | * be made available for PVHVM, which could override both native and PV aliases | |||
52 | * and decide which to invoke at run time. | |||
53 | */ | |||
54 | ||||
55 | WEAK_ALIAS(invlpg, i386_invlpg) | |||
56 | WEAK_ALIAS(lldt, i386_lldt) | |||
57 | WEAK_ALIAS(ltr, i386_ltr) | |||
58 | WEAK_ALIAS(lcr0, i386_lcr0) | |||
59 | WEAK_ALIAS(rcr0, i386_rcr0) | |||
60 | WEAK_ALIAS(lcr3, i386_lcr3) | |||
61 | WEAK_ALIAS(tlbflush, i386_tlbflush) | |||
62 | WEAK_ALIAS(tlbflushg, i386_tlbflushg) | |||
63 | WEAK_ALIAS(rdr0, i386_rdr0) | |||
64 | WEAK_ALIAS(ldr0, i386_ldr0) | |||
65 | WEAK_ALIAS(rdr1, i386_rdr1) | |||
66 | WEAK_ALIAS(ldr1, i386_ldr1) | |||
67 | WEAK_ALIAS(rdr2, i386_rdr2) | |||
68 | WEAK_ALIAS(ldr2, i386_ldr2) | |||
69 | WEAK_ALIAS(rdr3, i386_rdr3) | |||
70 | WEAK_ALIAS(ldr3, i386_ldr3) | |||
71 | WEAK_ALIAS(rdr6, i386_rdr6) | |||
72 | WEAK_ALIAS(ldr6, i386_ldr6) | |||
73 | WEAK_ALIAS(rdr7, i386_rdr7) | |||
74 | WEAK_ALIAS(ldr7, i386_ldr7) | |||
75 | WEAK_ALIAS(rcr2, i386_rcr2) | |||
76 | WEAK_ALIAS(lcr2, i386_lcr2) | |||
77 | WEAK_ALIAS(wbinvd, i386_wbinvd) | |||
78 | ||||
79 | ENTRY(i386_invlpg) | |||
47 | movl 4(%esp), %eax | 80 | movl 4(%esp), %eax | |
48 | invlpg (%eax) | 81 | invlpg (%eax) | |
49 | ret | 82 | ret | |
50 | END(invlpg) | 83 | END(i386_invlpg) | |
51 | 84 | |||
52 | ENTRY(lldt) | 85 | ENTRY(i386_lldt) | |
53 | movl 4(%esp), %eax | 86 | movl 4(%esp), %eax | |
54 | cmpl %eax, CPUVAR(CURLDT) | 87 | cmpl %eax, CPUVAR(CURLDT) | |
55 | jne 1f | 88 | jne 1f | |
56 | ret | 89 | ret | |
57 | 1: | 90 | 1: | |
58 | movl %eax, CPUVAR(CURLDT) | 91 | movl %eax, CPUVAR(CURLDT) | |
59 | lldt %ax | 92 | lldt %ax | |
60 | ret | 93 | ret | |
61 | END(lldt) | 94 | END(i386_lldt) | |
62 | 95 | |||
63 | ENTRY(ltr) | 96 | ENTRY(i386_ltr) | |
64 | movl 4(%esp), %eax | 97 | movl 4(%esp), %eax | |
65 | ltr %ax | 98 | ltr %ax | |
66 | ret | 99 | ret | |
67 | END(ltr) | 100 | END(i386_ltr) | |
68 | 101 | |||
69 | ENTRY(lcr0) | 102 | ENTRY(i386_lcr0) | |
70 | movl 4(%esp), %eax | 103 | movl 4(%esp), %eax | |
71 | movl %eax, %cr0 | 104 | movl %eax, %cr0 | |
72 | ret | 105 | ret | |
73 | END(lcr0) | 106 | END(i386_lcr0) | |
74 | 107 | |||
75 | ENTRY(rcr0) | 108 | ENTRY(i386_rcr0) | |
76 | movl %cr0, %eax | 109 | movl %cr0, %eax | |
77 | ret | 110 | ret | |
78 | END(rcr0) | 111 | END(i386_rcr0) | |
79 | 112 | |||
80 | ENTRY(lcr3) | 113 | ENTRY(i386_lcr3) | |
81 | movl 4(%esp), %eax | 114 | movl 4(%esp), %eax | |
82 | movl %eax, %cr3 | 115 | movl %eax, %cr3 | |
83 | ret | 116 | ret | |
84 | END(lcr3) | 117 | END(i386_lcr3) | |
85 | 118 | |||
86 | /* | 119 | /* | |
87 | * Big hammer: flush all TLB entries, including ones from PTE's | 120 | * Big hammer: flush all TLB entries, including ones from PTE's | |
88 | * with the G bit set. This should only be necessary if TLB | 121 | * with the G bit set. This should only be necessary if TLB | |
89 | * shootdown falls far behind. | 122 | * shootdown falls far behind. | |
90 | * | 123 | * | |
91 | * Intel Architecture Software Developer's Manual, Volume 3, | 124 | * Intel Architecture Software Developer's Manual, Volume 3, | |
92 | * System Programming, section 9.10, "Invalidating the | 125 | * System Programming, section 9.10, "Invalidating the | |
93 | * Translation Lookaside Buffers (TLBS)": | 126 | * Translation Lookaside Buffers (TLBS)": | |
94 | * "The following operations invalidate all TLB entries, irrespective | 127 | * "The following operations invalidate all TLB entries, irrespective | |
95 | * of the setting of the G flag: | 128 | * of the setting of the G flag: | |
96 | * ... | 129 | * ... | |
97 | * "(P6 family processors only): Writing to control register CR4 to | 130 | * "(P6 family processors only): Writing to control register CR4 to | |
98 | * modify the PSE, PGE, or PAE flag." | 131 | * modify the PSE, PGE, or PAE flag." | |
99 | * | 132 | * | |
100 | * (the alternatives not quoted above are not an option here.) | 133 | * (the alternatives not quoted above are not an option here.) | |
101 | * | 134 | * | |
102 | * If PGE is not in use, we reload CR3. Check for the PGE feature | 135 | * If PGE is not in use, we reload CR3. Check for the PGE feature | |
103 | * first since i486 does not have CR4. Note: the feature flag may | 136 | * first since i486 does not have CR4. Note: the feature flag may | |
104 | * be present while the actual PGE functionality is not yet enabled. | 137 | * be present while the actual PGE functionality is not yet enabled. | |
105 | */ | 138 | */ | |
106 | ENTRY(tlbflushg) | 139 | ENTRY(i386_tlbflushg) | |
107 | testl $CPUID_PGE, _C_LABEL(cpu_feature) | 140 | testl $CPUID_PGE, _C_LABEL(cpu_feature) | |
108 | jz 1f | 141 | jz 1f | |
109 | movl %cr4, %eax | 142 | movl %cr4, %eax | |
110 | testl $CR4_PGE, %eax | 143 | testl $CR4_PGE, %eax | |
111 | jz 1f | 144 | jz 1f | |
112 | movl %eax, %edx | 145 | movl %eax, %edx | |
113 | andl $~CR4_PGE, %edx | 146 | andl $~CR4_PGE, %edx | |
114 | movl %edx, %cr4 | 147 | movl %edx, %cr4 | |
115 | movl %eax, %cr4 | 148 | movl %eax, %cr4 | |
116 | ret | 149 | ret | |
117 | END(tlbflushg) | 150 | END(i386_tlbflushg) | |
118 | 151 | |||
119 | ENTRY(tlbflush) | 152 | ENTRY(i386_tlbflush) | |
120 | 1: | 153 | 1: | |
121 | movl %cr3, %eax | 154 | movl %cr3, %eax | |
122 | movl %eax, %cr3 | 155 | movl %eax, %cr3 | |
123 | ret | 156 | ret | |
124 | END(tlbflush) | 157 | END(i386_tlbflush) | |
125 | 158 | |||
126 | ENTRY(ldr0) | 159 | ENTRY(i386_ldr0) | |
127 | movl 4(%esp), %eax | 160 | movl 4(%esp), %eax | |
128 | movl %eax, %dr0 | 161 | movl %eax, %dr0 | |
129 | ret | 162 | ret | |
130 | END(ldr0) | 163 | END(i386_ldr0) | |
131 | 164 | |||
132 | ENTRY(rdr0) | 165 | ENTRY(i386_rdr0) | |
133 | movl %dr0, %eax | 166 | movl %dr0, %eax | |
134 | ret | 167 | ret | |
135 | END(rdr0) | 168 | END(i386_rdr0) | |
136 | 169 | |||
137 | ENTRY(ldr1) | 170 | ENTRY(i386_ldr1) | |
138 | movl 4(%esp), %eax | 171 | movl 4(%esp), %eax | |
139 | movl %eax, %dr1 | 172 | movl %eax, %dr1 | |
140 | ret | 173 | ret | |
141 | END(ldr1) | 174 | END(i386_ldr1) | |
142 | 175 | |||
143 | ENTRY(rdr1) | 176 | ENTRY(i386_rdr1) | |
144 | movl %dr1, %eax | 177 | movl %dr1, %eax | |
145 | ret | 178 | ret | |
146 | END(rdr1) | 179 | END(i386_rdr1) | |
147 | 180 | |||
148 | ENTRY(ldr2) | 181 | ENTRY(i386_ldr2) | |
149 | movl 4(%esp), %eax | 182 | movl 4(%esp), %eax | |
150 | movl %eax, %dr2 | 183 | movl %eax, %dr2 | |
151 | ret | 184 | ret | |
152 | END(ldr2) | 185 | END(i386_ldr2) | |
153 | 186 | |||
154 | ENTRY(rdr2) | 187 | ENTRY(i386_rdr2) | |
155 | movl %dr2, %eax | 188 | movl %dr2, %eax | |
156 | ret | 189 | ret | |
157 | END(rdr2) | 190 | END(i386_rdr2) | |
158 | 191 | |||
159 | ENTRY(ldr3) | 192 | ENTRY(i386_ldr3) | |
160 | movl 4(%esp), %eax | 193 | movl 4(%esp), %eax | |
161 | movl %eax, %dr3 | 194 | movl %eax, %dr3 | |
162 | ret | 195 | ret | |
163 | END(ldr3) | 196 | END(i386_ldr3) | |
164 | 197 | |||
165 | ENTRY(rdr3) | 198 | ENTRY(i386_rdr3) | |
166 | movl %dr3, %eax | 199 | movl %dr3, %eax | |
167 | ret | 200 | ret | |
168 | END(rdr3) | 201 | END(i386_rdr3) | |
169 | 202 | |||
170 | ENTRY(ldr6) | 203 | ENTRY(i386_ldr6) | |
171 | movl 4(%esp), %eax | 204 | movl 4(%esp), %eax | |
172 | movl %eax, %dr6 | 205 | movl %eax, %dr6 | |
173 | ret | 206 | ret | |
174 | END(ldr6) | 207 | END(i386_ldr6) | |
175 | 208 | |||
176 | ENTRY(rdr6) | 209 | ENTRY(i386_rdr6) | |
177 | movl %dr6, %eax | 210 | movl %dr6, %eax | |
178 | ret | 211 | ret | |
179 | END(rdr6) | 212 | END(i386_rdr6) | |
180 | 213 | |||
181 | ENTRY(ldr7) | 214 | ENTRY(i386_ldr7) | |
182 | movl 4(%esp), %eax | 215 | movl 4(%esp), %eax | |
183 | movl %eax, %dr7 | 216 | movl %eax, %dr7 | |
184 | ret | 217 | ret | |
185 | END(ldr7) | 218 | END(i386_ldr7) | |
186 | 219 | |||
187 | ENTRY(rdr7) | 220 | ENTRY(i386_rdr7) | |
188 | movl %dr7, %eax | 221 | movl %dr7, %eax | |
189 | ret | 222 | ret | |
190 | END(rdr7) | 223 | END(i386_rdr7) | |
191 | 224 | |||
192 | ENTRY(rcr2) | 225 | ENTRY(i386_rcr2) | |
193 | movl %cr2, %eax | 226 | movl %cr2, %eax | |
194 | ret | 227 | ret | |
195 | END(rcr2) | 228 | END(i386_rcr2) | |
196 | 229 | |||
197 | ENTRY(lcr2) | 230 | ENTRY(i386_lcr2) | |
198 | movl 4(%esp), %eax | 231 | movl 4(%esp), %eax | |
199 | movl %eax, %cr2 | 232 | movl %eax, %cr2 | |
200 | ret | 233 | ret | |
201 | END(lcr2) | 234 | END(i386_lcr2) | |
202 | 235 | |||
203 | ENTRY(wbinvd) | 236 | ENTRY(i386_wbinvd) | |
204 | wbinvd | 237 | wbinvd | |
205 | ret | 238 | ret | |
206 | END(wbinvd) | 239 | END(i386_wbinvd) | |
207 | 240 | |||
208 | ENTRY(x86_disable_intr) | 241 | ENTRY(x86_disable_intr) | |
209 | cli | 242 | cli | |
210 | ret | 243 | ret | |
211 | END(x86_disable_intr) | 244 | END(x86_disable_intr) | |
212 | 245 | |||
213 | ENTRY(x86_enable_intr) | 246 | ENTRY(x86_enable_intr) | |
214 | sti | 247 | sti | |
215 | ret | 248 | ret | |
216 | END(x86_enable_intr) | 249 | END(x86_enable_intr) | |
217 | 250 | |||
218 | /* | 251 | /* | |
219 | * void lgdt(struct region_descriptor *rdp); | 252 | * void lgdt(struct region_descriptor *rdp); | |
220 | * | 253 | * | |
221 | * Load a new GDT pointer (and do any necessary cleanup). | 254 | * Load a new GDT pointer (and do any necessary cleanup). | |
222 | * XXX It's somewhat questionable whether reloading all the segment registers | 255 | * XXX It's somewhat questionable whether reloading all the segment registers | |
223 | * is necessary, since the actual descriptor data is not changed except by | 256 | * is necessary, since the actual descriptor data is not changed except by | |
224 | * process creation and exit, both of which clean up via task switches. OTOH, | 257 | * process creation and exit, both of which clean up via task switches. OTOH, | |
225 | * this only happens at run time when the GDT is resized. | 258 | * this only happens at run time when the GDT is resized. | |
226 | */ | 259 | */ | |
227 | ENTRY(lgdt) | 260 | ENTRY(lgdt) | |
228 | /* Reload the descriptor table. */ | 261 | /* Reload the descriptor table. */ | |
229 | movl 4(%esp), %eax | 262 | movl 4(%esp), %eax | |
230 | lgdt (%eax) | 263 | lgdt (%eax) | |
231 | /* Flush the prefetch queue. */ | 264 | /* Flush the prefetch queue. */ | |
232 | jmp 1f | 265 | jmp 1f | |
233 | nop | 266 | nop | |
234 | 1: /* Reload "stale" selectors. */ | 267 | 1: /* Reload "stale" selectors. */ | |
235 | movl $GSEL(GDATA_SEL, SEL_KPL), %eax | 268 | movl $GSEL(GDATA_SEL, SEL_KPL), %eax | |
236 | movl %eax, %ds | 269 | movl %eax, %ds | |
237 | movl %eax, %es | 270 | movl %eax, %es | |
238 | movl %eax, %gs | 271 | movl %eax, %gs | |
239 | movl %eax, %ss | 272 | movl %eax, %ss | |
240 | movl $GSEL(GCPU_SEL, SEL_KPL), %eax | 273 | movl $GSEL(GCPU_SEL, SEL_KPL), %eax | |
241 | movl %eax, %fs | 274 | movl %eax, %fs | |
242 | jmp _C_LABEL(x86_flush) | 275 | jmp _C_LABEL(x86_flush) | |
243 | END(lgdt) | 276 | END(lgdt) | |
244 | 277 | |||
245 | ENTRY(tsc_get_timecount) | 278 | ENTRY(tsc_get_timecount) | |
246 | movl CPUVAR(CURLWP), %ecx | 279 | movl CPUVAR(CURLWP), %ecx | |
247 | 1: | 280 | 1: | |
248 | pushl L_NCSW(%ecx) | 281 | pushl L_NCSW(%ecx) | |
249 | rdtsc | 282 | rdtsc | |
250 | addl CPUVAR(CC_SKEW), %eax | 283 | addl CPUVAR(CC_SKEW), %eax | |
251 | popl %edx | 284 | popl %edx | |
252 | cmpl %edx, L_NCSW(%ecx) | 285 | cmpl %edx, L_NCSW(%ecx) | |
253 | jne 2f | 286 | jne 2f | |
254 | ret | 287 | ret | |
255 | 2: | 288 | 2: | |
256 | jmp 1b | 289 | jmp 1b | |
257 | END(tsc_get_timecount) | 290 | END(tsc_get_timecount) |
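The comment block added at the top of i386func.S spells out the intent: each native primitive now has an i386_*() name, and the generic name is exported only as a WEAK_ALIAS(), so that a later PVHVM change can link the native and Xen backends together and supply a strong override that picks one at run time. A minimal C sketch of what such an override could look like follows; it is not part of this change, and the xen_pvhvm_active flag is only a stand-in for whatever runtime detection PVHVM eventually uses.

/*
 * Hypothetical strong override -- NOT part of this change.  Because
 * invlpg is only a weak alias in both backends, a strong definition
 * like this one wins at link time and can dispatch at run time.
 */
#include <sys/types.h>

extern int	xen_pvhvm_active;	/* assumed runtime switch, illustrative only */

void	i386_invlpg(vaddr_t);		/* native backend (weak alias: invlpg) */
void	xen_invlpg(vaddr_t);		/* Xen PV backend (weak alias: invlpg) */

void
invlpg(vaddr_t va)
{

	if (xen_pvhvm_active)
		xen_invlpg(va);
	else
		i386_invlpg(va);
}

As long as only one backend object is linked in and no strong invlpg() exists, the single weak alias is the definition the linker uses, so a dispatcher of this shape only becomes necessary once both backends coexist in the same binary.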
--- src/sys/arch/xen/x86/xenfunc.c 2018/10/18 04:17:18 1.22
+++ src/sys/arch/xen/x86/xenfunc.c 2018/12/22 21:27:22 1.23
@@ -1,265 +1,331 @@ | @@ -1,265 +1,331 @@ | |||
1 | /* $NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $ */ | 1 | /* $NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $ */ | |
2 | 2 | |||
3 | /* | 3 | /* | |
4 | * Copyright (c) 2004 Christian Limpach. | 4 | * Copyright (c) 2004 Christian Limpach. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | 7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | 8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | 9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | 10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer. | 11 | * notice, this list of conditions and the following disclaimer. | |
12 | * 2. Redistributions in binary form must reproduce the above copyright | 12 | * 2. Redistributions in binary form must reproduce the above copyright | |
13 | * notice, this list of conditions and the following disclaimer in the | 13 | * notice, this list of conditions and the following disclaimer in the | |
14 | * documentation and/or other materials provided with the distribution. | 14 | * documentation and/or other materials provided with the distribution. | |
15 | * | 15 | * | |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 | */ | 26 | */ | |
27 | 27 | |||
28 | #include <sys/cdefs.h> | 28 | #include <sys/cdefs.h> | |
29 | __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.22 2018/10/18 04:17:18 cherry Exp $"); | 29 | __KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.23 2018/12/22 21:27:22 cherry Exp $"); | |
30 | 30 | |||
31 | #include <sys/param.h> | 31 | #include <sys/param.h> | |
32 | 32 | |||
33 | #include <uvm/uvm_extern.h> | 33 | #include <uvm/uvm_extern.h> | |
34 | 34 | |||
35 | #include <machine/intr.h> | 35 | #include <machine/intr.h> | |
36 | #include <machine/vmparam.h> | 36 | #include <machine/vmparam.h> | |
37 | #include <machine/pmap.h> | 37 | #include <machine/pmap.h> | |
38 | #include <xen/xen.h> | 38 | #include <xen/xen.h> | |
39 | #include <xen/hypervisor.h> | 39 | #include <xen/hypervisor.h> | |
40 | //#include <xen/evtchn.h> | 40 | //#include <xen/evtchn.h> | |
41 | #include <xen/xenpmap.h> | 41 | #include <xen/xenpmap.h> | |
42 | #include <machine/pte.h> | 42 | #include <machine/pte.h> | |
43 | 43 | |||
44 | #define MAX_XEN_IDT 128 | 44 | #define MAX_XEN_IDT 128 | |
45 | 45 | |||
46 | void xen_set_ldt(vaddr_t, uint32_t); | 46 | void xen_set_ldt(vaddr_t, uint32_t); | |
47 | 47 | |||
48 | /* | |||
49 | * We don't need to export these declarations, since they are used via | |||
50 | * linker aliasing. They should always be accessed via the | |||
51 | * corresponding wrapper function names defined in | |||
52 | * x86/include/cpufunc.h and exported as __weak_alias() | |||
53 | * | |||
54 | * We use this rather roundabout method so that a runtime wrapper | |||
55 | * function may be made available for PVHVM, which could override both | |||
56 | * native and PV aliases and decide which to invoke at run time. | |||
57 | */ | |||
58 | ||||
59 | void xen_invlpg(vaddr_t); | |||
60 | void xen_lidt(struct region_descriptor *); | |||
61 | void xen_lldt(u_short); | |||
62 | void xen_ltr(u_short); | |||
63 | void xen_lcr0(u_long); | |||
64 | u_long xen_rcr0(void); | |||
65 | void xen_tlbflush(void); | |||
66 | void xen_tlbflushg(void); | |||
67 | register_t xen_rdr0(void); | |||
68 | void xen_ldr0(register_t); | |||
69 | register_t xen_rdr1(void); | |||
70 | void xen_ldr1(register_t); | |||
71 | register_t xen_rdr2(void); | |||
72 | void xen_ldr2(register_t); | |||
73 | register_t xen_rdr3(void); | |||
74 | void xen_ldr3(register_t); | |||
75 | register_t xen_rdr6(void); | |||
76 | void xen_ldr6(register_t); | |||
77 | register_t xen_rdr7(void); | |||
78 | void xen_ldr7(register_t); | |||
79 | void xen_wbinvd(void); | |||
80 | vaddr_t xen_rcr2(void); | |||
81 | ||||
82 | __weak_alias(invlpg, xen_invlpg); | |||
83 | __weak_alias(lidt, xen_lidt); | |||
84 | __weak_alias(lldt, xen_lldt); | |||
85 | __weak_alias(ltr, xen_ltr); | |||
86 | __weak_alias(lcr0, xen_lcr0); | |||
87 | __weak_alias(rcr0, xen_rcr0); | |||
88 | __weak_alias(tlbflush, xen_tlbflush); | |||
89 | __weak_alias(tlbflushg, xen_tlbflushg); | |||
90 | __weak_alias(rdr0, xen_rdr0); | |||
91 | __weak_alias(ldr0, xen_ldr0); | |||
92 | __weak_alias(rdr1, xen_rdr1); | |||
93 | __weak_alias(ldr1, xen_ldr1); | |||
94 | __weak_alias(rdr2, xen_rdr2); | |||
95 | __weak_alias(ldr2, xen_ldr2); | |||
96 | __weak_alias(rdr3, xen_rdr3); | |||
97 | __weak_alias(ldr3, xen_ldr3); | |||
98 | __weak_alias(rdr6, xen_rdr6); | |||
99 | __weak_alias(ldr6, xen_ldr6); | |||
100 | __weak_alias(rdr7, xen_rdr7); | |||
101 | __weak_alias(ldr7, xen_ldr7); | |||
102 | __weak_alias(wbinvd, xen_wbinvd); | |||
103 | __weak_alias(rcr2, xen_rcr2); | |||
104 | ||||
105 | #ifdef __x86_64__ | |||
106 | void xen_setusergs(int); | |||
107 | __weak_alias(setusergs, xen_setusergs); | |||
108 | #else | |||
109 | void xen_lcr3(vaddr_t); | |||
110 | __weak_alias(lcr3, xen_lcr3); | |||
111 | ||||
112 | #endif | |||
113 | ||||
48 | void | 114 | void | |
49 | invlpg(vaddr_t addr) | 115 | xen_invlpg(vaddr_t addr) | |
50 | { | 116 | { | |
51 | int s = splvm(); /* XXXSMP */ | 117 | int s = splvm(); /* XXXSMP */ | |
52 | xpq_queue_invlpg(addr); | 118 | xpq_queue_invlpg(addr); | |
53 | splx(s); | 119 | splx(s); | |
54 | } | 120 | } | |
55 | 121 | |||
56 | void | 122 | void | |
57 | lidt(struct region_descriptor *rd) | 123 | xen_lidt(struct region_descriptor *rd) | |
58 | { | 124 | { | |
59 | /* | 125 | /* | |
60 | * We need to do this because we can't assume kmem_alloc(9) | 126 | * We need to do this because we can't assume kmem_alloc(9) | |
61 | * will be available at the boot stage when this is called. | 127 | * will be available at the boot stage when this is called. | |
62 | */ | 128 | */ | |
63 | static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); | 129 | static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE))); | |
64 | memset(xen_idt_page, 0, PAGE_SIZE); | 130 | memset(xen_idt_page, 0, PAGE_SIZE); | |
65 | 131 | |||
66 | struct trap_info *xen_idt = (void * )xen_idt_page; | 132 | struct trap_info *xen_idt = (void * )xen_idt_page; | |
67 | int xen_idt_idx = 0; | 133 | int xen_idt_idx = 0; | |
68 | 134 | |||
69 | struct trap_info * idd = (void *) rd->rd_base; | 135 | struct trap_info * idd = (void *) rd->rd_base; | |
70 | const int nidt = rd->rd_limit / (sizeof *idd); | 136 | const int nidt = rd->rd_limit / (sizeof *idd); | |
71 | 137 | |||
72 | int i; | 138 | int i; | |
73 | 139 | |||
74 | /* | 140 | /* | |
75 | * Sweep in all initialised entries, consolidate them back to | 141 | * Sweep in all initialised entries, consolidate them back to | |
76 | * back in the requestor array. | 142 | * back in the requestor array. | |
77 | */ | 143 | */ | |
78 | for (i = 0; i < nidt; i++) { | 144 | for (i = 0; i < nidt; i++) { | |
79 | if (idd[i].address == 0) /* Skip gap */ | 145 | if (idd[i].address == 0) /* Skip gap */ | |
80 | continue; | 146 | continue; | |
81 | KASSERT(xen_idt_idx < MAX_XEN_IDT); | 147 | KASSERT(xen_idt_idx < MAX_XEN_IDT); | |
82 | /* Copy over entry */ | 148 | /* Copy over entry */ | |
83 | xen_idt[xen_idt_idx++] = idd[i]; | 149 | xen_idt[xen_idt_idx++] = idd[i]; | |
84 | } | 150 | } | |
85 | 151 | |||
86 | #if defined(__x86_64__) | 152 | #if defined(__x86_64__) | |
87 | /* page needs to be r/o */ | 153 | /* page needs to be r/o */ | |
88 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ); | 154 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ); | |
89 | #endif /* __x86_64 */ | 155 | #endif /* __x86_64 */ | |
90 | 156 | |||
91 | /* Hook it up in the hypervisor */ | 157 | /* Hook it up in the hypervisor */ | |
92 | if (HYPERVISOR_set_trap_table(xen_idt)) | 158 | if (HYPERVISOR_set_trap_table(xen_idt)) | |
93 | panic("HYPERVISOR_set_trap_table() failed"); | 159 | panic("HYPERVISOR_set_trap_table() failed"); | |
94 | 160 | |||
95 | #if defined(__x86_64__) | 161 | #if defined(__x86_64__) | |
96 | /* reset */ | 162 | /* reset */ | |
97 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); | 163 | pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE); | |
98 | #endif /* __x86_64 */ | 164 | #endif /* __x86_64 */ | |
99 | } | 165 | } | |
100 | 166 | |||
101 | void | 167 | void | |
102 | lldt(u_short sel) | 168 | xen_lldt(u_short sel) | |
103 | { | 169 | { | |
104 | #ifndef __x86_64__ | 170 | #ifndef __x86_64__ | |
105 | struct cpu_info *ci; | 171 | struct cpu_info *ci; | |
106 | 172 | |||
107 | ci = curcpu(); | 173 | ci = curcpu(); | |
108 | 174 | |||
109 | if (ci->ci_curldt == sel) | 175 | if (ci->ci_curldt == sel) | |
110 | return; | 176 | return; | |
111 | if (sel == GSEL(GLDT_SEL, SEL_KPL)) | 177 | if (sel == GSEL(GLDT_SEL, SEL_KPL)) | |
112 | xen_set_ldt((vaddr_t)ldtstore, NLDT); | 178 | xen_set_ldt((vaddr_t)ldtstore, NLDT); | |
113 | else | 179 | else | |
114 | xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, | 180 | xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base, | |
115 | ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); | 181 | ci->ci_gdt[IDXSELN(sel)].ld.ld_entries); | |
116 | ci->ci_curldt = sel; | 182 | ci->ci_curldt = sel; | |
117 | #endif | 183 | #endif | |
118 | } | 184 | } | |
119 | 185 | |||
120 | void | 186 | void | |
121 | ltr(u_short sel) | 187 | xen_ltr(u_short sel) | |
122 | { | 188 | { | |
123 | panic("XXX ltr not supported\n"); | 189 | panic("XXX ltr not supported\n"); | |
124 | } | 190 | } | |
125 | 191 | |||
126 | void | 192 | void | |
127 | lcr0(u_long val) | 193 | xen_lcr0(u_long val) | |
128 | { | 194 | { | |
129 | panic("XXX lcr0 not supported\n"); | 195 | panic("XXX lcr0 not supported\n"); | |
130 | } | 196 | } | |
131 | 197 | |||
132 | u_long | 198 | u_long | |
133 | rcr0(void) | 199 | xen_rcr0(void) | |
134 | { | 200 | { | |
135 | /* XXX: handle X86_CR0_TS ? */ | 201 | /* XXX: handle X86_CR0_TS ? */ | |
136 | return 0; | 202 | return 0; | |
137 | } | 203 | } | |
138 | 204 | |||
139 | #ifndef __x86_64__ | 205 | #ifndef __x86_64__ | |
140 | void | 206 | void | |
141 | lcr3(vaddr_t val) | 207 | xen_lcr3(vaddr_t val) | |
142 | { | 208 | { | |
143 | int s = splvm(); /* XXXSMP */ | 209 | int s = splvm(); /* XXXSMP */ | |
144 | xpq_queue_pt_switch(xpmap_ptom_masked(val)); | 210 | xpq_queue_pt_switch(xpmap_ptom_masked(val)); | |
145 | splx(s); | 211 | splx(s); | |
146 | } | 212 | } | |
147 | #endif | 213 | #endif | |
148 | 214 | |||
149 | void | 215 | void | |
150 | tlbflush(void) | 216 | xen_tlbflush(void) | |
151 | { | 217 | { | |
152 | int s = splvm(); /* XXXSMP */ | 218 | int s = splvm(); /* XXXSMP */ | |
153 | xpq_queue_tlb_flush(); | 219 | xpq_queue_tlb_flush(); | |
154 | splx(s); | 220 | splx(s); | |
155 | } | 221 | } | |
156 | 222 | |||
157 | void | 223 | void | |
158 | tlbflushg(void) | 224 | xen_tlbflushg(void) | |
159 | { | 225 | { | |
160 | tlbflush(); | 226 | tlbflush(); | |
161 | } | 227 | } | |
162 | 228 | |||
163 | register_t | 229 | register_t | |
164 | rdr0(void) | 230 | xen_rdr0(void) | |
165 | { | 231 | { | |
166 | 232 | |||
167 | return HYPERVISOR_get_debugreg(0); | 233 | return HYPERVISOR_get_debugreg(0); | |
168 | } | 234 | } | |
169 | 235 | |||
170 | void | 236 | void | |
171 | ldr0(register_t val) | 237 | xen_ldr0(register_t val) | |
172 | { | 238 | { | |
173 | 239 | |||
174 | HYPERVISOR_set_debugreg(0, val); | 240 | HYPERVISOR_set_debugreg(0, val); | |
175 | } | 241 | } | |
176 | 242 | |||
177 | register_t | 243 | register_t | |
178 | rdr1(void) | 244 | xen_rdr1(void) | |
179 | { | 245 | { | |
180 | 246 | |||
181 | return HYPERVISOR_get_debugreg(1); | 247 | return HYPERVISOR_get_debugreg(1); | |
182 | } | 248 | } | |
183 | 249 | |||
184 | void | 250 | void | |
185 | ldr1(register_t val) | 251 | xen_ldr1(register_t val) | |
186 | { | 252 | { | |
187 | 253 | |||
188 | HYPERVISOR_set_debugreg(1, val); | 254 | HYPERVISOR_set_debugreg(1, val); | |
189 | } | 255 | } | |
190 | 256 | |||
191 | register_t | 257 | register_t | |
192 | rdr2(void) | 258 | xen_rdr2(void) | |
193 | { | 259 | { | |
194 | 260 | |||
195 | return HYPERVISOR_get_debugreg(2); | 261 | return HYPERVISOR_get_debugreg(2); | |
196 | } | 262 | } | |
197 | 263 | |||
198 | void | 264 | void | |
199 | ldr2(register_t val) | 265 | xen_ldr2(register_t val) | |
200 | { | 266 | { | |
201 | 267 | |||
202 | HYPERVISOR_set_debugreg(2, val); | 268 | HYPERVISOR_set_debugreg(2, val); | |
203 | } | 269 | } | |
204 | 270 | |||
205 | register_t | 271 | register_t | |
206 | rdr3(void) | 272 | xen_rdr3(void) | |
207 | { | 273 | { | |
208 | 274 | |||
209 | return HYPERVISOR_get_debugreg(3); | 275 | return HYPERVISOR_get_debugreg(3); | |
210 | } | 276 | } | |
211 | 277 | |||
212 | void | 278 | void | |
213 | ldr3(register_t val) | 279 | xen_ldr3(register_t val) | |
214 | { | 280 | { | |
215 | 281 | |||
216 | HYPERVISOR_set_debugreg(3, val); | 282 | HYPERVISOR_set_debugreg(3, val); | |
217 | } | 283 | } | |
218 | register_t | 284 | register_t | |
219 | rdr6(void) | 285 | xen_rdr6(void) | |
220 | { | 286 | { | |
221 | 287 | |||
222 | return HYPERVISOR_get_debugreg(6); | 288 | return HYPERVISOR_get_debugreg(6); | |
223 | } | 289 | } | |
224 | 290 | |||
225 | void | 291 | void | |
226 | ldr6(register_t val) | 292 | xen_ldr6(register_t val) | |
227 | { | 293 | { | |
228 | 294 | |||
229 | HYPERVISOR_set_debugreg(6, val); | 295 | HYPERVISOR_set_debugreg(6, val); | |
230 | } | 296 | } | |
231 | 297 | |||
232 | register_t | 298 | register_t | |
233 | rdr7(void) | 299 | xen_rdr7(void) | |
234 | { | 300 | { | |
235 | 301 | |||
236 | return HYPERVISOR_get_debugreg(7); | 302 | return HYPERVISOR_get_debugreg(7); | |
237 | } | 303 | } | |
238 | 304 | |||
239 | void | 305 | void | |
240 | ldr7(register_t val) | 306 | xen_ldr7(register_t val) | |
241 | { | 307 | { | |
242 | 308 | |||
243 | HYPERVISOR_set_debugreg(7, val); | 309 | HYPERVISOR_set_debugreg(7, val); | |
244 | } | 310 | } | |
245 | 311 | |||
246 | void | 312 | void | |
247 | wbinvd(void) | 313 | xen_wbinvd(void) | |
248 | { | 314 | { | |
249 | 315 | |||
250 | xpq_flush_cache(); | 316 | xpq_flush_cache(); | |
251 | } | 317 | } | |
252 | 318 | |||
253 | vaddr_t | 319 | vaddr_t | |
254 | rcr2(void) | 320 | xen_rcr2(void) | |
255 | { | 321 | { | |
256 | return curcpu()->ci_vcpu->arch.cr2; | 322 | return curcpu()->ci_vcpu->arch.cr2; | |
257 | } | 323 | } | |
258 | 324 | |||
259 | #ifdef __x86_64__ | 325 | #ifdef __x86_64__ | |
260 | void | 326 | void | |
261 | setusergs(int gssel) | 327 | xen_setusergs(int gssel) | |
262 | { | 328 | { | |
263 | HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); | 329 | HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel); | |
264 | } | 330 | } | |
265 | #endif | 331 | #endif |
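On the C side the same arrangement uses __weak_alias() from <sys/cdefs.h>: on ELF targets it emits, roughly, a weak symbol for the generic name bound to the xen_*() implementation, and any strong definition of that name elsewhere in the link takes precedence. Below is a small userland-flavoured sketch of that resolution behaviour, with made-up names (backend_flush() standing in for xen_tlbflush()).

/*
 * Weak-alias resolution sketch -- illustrative names, not kernel code.
 * "flush" is only a weak alias for backend_flush(), so a strong
 * flush() linked in elsewhere would override it, which is the hook
 * the PVHVM wrapper functions are expected to use later.
 */
#include <sys/cdefs.h>
#include <stdio.h>

void
backend_flush(void)
{

	printf("backend flush\n");
}
__weak_alias(flush, backend_flush);

int
main(void)
{
	void flush(void);	/* resolves to backend_flush unless a strong flush exists */

	flush();
	return 0;
}

The difference from the assembly side is purely mechanical: WEAK_ALIAS() does the same job for symbols defined in .S files, __weak_alias() for those defined in C.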