@@ -1,350 +1,350 @@
-/* $NetBSD: xen.h,v 1.30.8.6 2011/08/27 15:44:09 jym Exp $ */
+/* $NetBSD: xen.h,v 1.30.8.7 2011/08/27 15:48:35 jym Exp $ */
 
 /*
  *
  * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
  * All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
  * deal in the Software without restriction, including without limitation the
  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
  * sell copies of the Software, and to permit persons to whom the Software is
  * furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
 
 
 #ifndef _XEN_H
 #define _XEN_H
 
 #ifdef _KERNEL_OPT
 #include "opt_xen.h"
 #endif
 
 
 #ifndef _LOCORE
 
 #include <machine/cpufunc.h>
 
 struct xen_netinfo {
 	uint32_t xi_ifno;
 	char *xi_root;
 	uint32_t xi_ip[5];
 };
 
 union xen_cmdline_parseinfo {
 	char xcp_bootdev[16];	/* sizeof(dv_xname) */
 	struct xen_netinfo xcp_netinfo;
 	char xcp_console[16];
 	char xcp_pcidevs[64];
 };
 
 #define XEN_PARSE_BOOTDEV	0
 #define XEN_PARSE_NETINFO	1
 #define XEN_PARSE_CONSOLE	2
 #define XEN_PARSE_BOOTFLAGS	3
 #define XEN_PARSE_PCIBACK	4
 
 void xen_parse_cmdline(int, union xen_cmdline_parseinfo *);
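/*
 * Illustrative sketch (editorial, not part of the diff): how a boot-time
 * caller might query one piece of command-line information. The union arm
 * that gets filled in is selected by the XEN_PARSE_* constant; the calling
 * context shown here is hypothetical.
 */
static void
example_get_console(void)
{
	union xen_cmdline_parseinfo xcp;

	memset(&xcp, 0, sizeof(xcp));	/* memset is available via libkern */
	xen_parse_cmdline(XEN_PARSE_CONSOLE, &xcp);
	/* xcp.xcp_console now holds the chosen console name, if any. */
}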
 
 void xenconscn_attach(void);
 
 void xenprivcmd_init(void);
 
 void xbdback_init(void);
 void xennetback_init(void);
 void xen_shm_init(void);
 
 void xenevt_event(int);
 void xenevt_setipending(int, int);
 void xenevt_notify(void);
 
 void idle_block(void);
 
 /* xen_machdep.c */
 void sysctl_xen_sleepstate_setup(void);
 
 #if defined(XENDEBUG) || 1 /* XXX */
 #include <sys/stdarg.h>
 
 void printk(const char *, ...);
-void vprintk(const char *, _BSD_VA_LIST_);
+void vprintk(const char *, va_list);
 #endif
 
 #endif
 
 #endif /* _XEN_H */
 
 /******************************************************************************
  * os.h
  *
  * random collection of macros and definitions
  */
 
 #ifndef _OS_H_
 #define _OS_H_
 
 /*
  * These are the segment descriptors provided for us by the hypervisor.
  * For now, these are hardwired -- guest OSes cannot update the GDT
  * or LDT.
  *
  * It shouldn't be hard to support descriptor-table frobbing -- let me
  * know if the BSD or XP ports require flexibility here.
  */
 
 
 /*
  * these are also defined in xen-public/xen.h but can't be pulled in as
  * they are used in start of day assembly. Need to clean up the .h files
  * a bit more...
  */
 
 #ifndef FLAT_RING1_CS
 #define FLAT_RING1_CS	0xe019	/* GDT index 259 */
 #define FLAT_RING1_DS	0xe021	/* GDT index 260 */
 #define FLAT_RING1_SS	0xe021	/* GDT index 260 */
 #define FLAT_RING3_CS	0xe02b	/* GDT index 261 */
 #define FLAT_RING3_DS	0xe033	/* GDT index 262 */
 #define FLAT_RING3_SS	0xe033	/* GDT index 262 */
 #endif
 
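/*
 * Editorial aside (not part of the diff): an x86 segment selector encodes
 * the requested privilege level in its low two bits and the table
 * indicator in bit 2, with the descriptor index in the remaining bits.
 * That is why the ring-1 and ring-3 selectors above differ only in their
 * low bits. The helper macros here are illustrative, not part of the header.
 */
#define SEL_RPL(sel)	((sel) & 3)		/* requested privilege level */
#define SEL_TI(sel)	(((sel) >> 2) & 1)	/* 0 = GDT, 1 = LDT */

/* SEL_RPL(FLAT_RING1_CS) == 1, SEL_RPL(FLAT_RING3_CS) == 3, both in the GDT. */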
 #define __KERNEL_CS	FLAT_RING1_CS
 #define __KERNEL_DS	FLAT_RING1_DS
 
 /* Everything below this point is not included by assembler (.S) files. */
 #ifndef _LOCORE
 
 /* some function prototypes */
 void trap_init(void);
 void xpq_flush_cache(void);
 
 #define xendomain_is_dom0()		(xen_start_info.flags & SIF_INITDOMAIN)
 #define xendomain_is_privileged()	(xen_start_info.flags & SIF_PRIVILEGED)
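/*
 * Illustrative use (hypothetical caller, not part of the diff): backend
 * drivers only make sense in a privileged domain, so their setup is
 * typically gated on these predicates.
 */
static void
example_backend_attach(void)
{
	if (!xendomain_is_dom0())
		return;		/* nothing to do in an unprivileged guest */
	xbdback_init();
	xennetback_init();
}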
 
 /*
  * STI/CLI equivalents. These basically set and clear the virtual
  * event_enable flag in the shared_info structure. Note that when
  * the enable bit is set, there may be pending events to be handled.
  * We may therefore call into do_hypervisor_callback() directly.
  */
 
 #define __save_flags(x) \
 do { \
 	(x) = curcpu()->ci_vcpu->evtchn_upcall_mask; \
 } while (0)
 
 #define __restore_flags(x) \
 do { \
 	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
 	__insn_barrier(); \
 	if ((_vci->evtchn_upcall_mask = (x)) == 0) { \
 		x86_lfence(); \
 		if (__predict_false(_vci->evtchn_upcall_pending)) \
 			hypervisor_force_callback(); \
 	} \
 } while (0)
 
 #define __cli() \
 do { \
 	curcpu()->ci_vcpu->evtchn_upcall_mask = 1; \
 	x86_lfence(); \
 } while (0)
 
 #define __sti() \
 do { \
 	volatile struct vcpu_info *_vci = curcpu()->ci_vcpu; \
 	__insn_barrier(); \
 	_vci->evtchn_upcall_mask = 0; \
 	x86_lfence(); /* unmask then check (avoid races) */ \
 	if (__predict_false(_vci->evtchn_upcall_pending)) \
 		hypervisor_force_callback(); \
 } while (0)
 
 #define cli()			__cli()
 #define sti()			__sti()
 #define save_flags(x)		__save_flags(x)
 #define restore_flags(x)	__restore_flags(x)
 #define save_and_cli(x)	do { \
 	__save_flags(x); \
 	__cli(); \
 } while (/* CONSTCOND */ 0)
 #define save_and_sti(x)	__save_and_sti(x)
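/*
 * Illustrative sketch (not part of the diff): the classic pattern these
 * macros support. Events are masked across a critical section and the
 * previous mask is restored afterwards; __restore_flags() also delivers
 * any event that became pending while masked, via
 * hypervisor_force_callback(). The variable and the protected state are
 * hypothetical.
 */
static void
example_critical_section(void)
{
	u_long flags;

	save_and_cli(flags);
	/* ... touch per-CPU state that an event handler also touches ... */
	restore_flags(flags);
}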
 
 /*
  * Always assume we're on a multiprocessor. We don't know how many CPUs
  * the underlying hardware has.
  */
 #define __LOCK_PREFIX	"lock; "
 
 #define XATOMIC_T u_long
 #ifdef __x86_64__
 #define LONG_SHIFT 6
 #define LONG_MASK 63
 #else /* __x86_64__ */
 #define LONG_SHIFT 5
 #define LONG_MASK 31
 #endif /* __x86_64__ */
 
 #define xen_ffs __builtin_ffsl
 
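/*
 * Worked example (editorial, not part of the diff): the bit operations
 * below treat their target as an array of longs. A flat bit number splits
 * into a word index and a bit-within-word via LONG_SHIFT and LONG_MASK.
 * On i386 (32-bit longs), bit 70 lives in word 70 >> 5 == 2, at position
 * 70 & 31 == 6; on amd64 the shift is 6 and the mask 63.
 */
static __inline u_long *
example_bit_word(u_long *base, unsigned long bitno)
{
	return &base[bitno >> LONG_SHIFT];	/* word holding the bit */
}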
 static __inline XATOMIC_T
 xen_atomic_xchg(volatile XATOMIC_T *ptr, unsigned long val)
 {
 	unsigned long result;
 
 	__asm volatile(__LOCK_PREFIX
 #ifdef __x86_64__
 	    "xchgq %0,%1"
 #else
 	    "xchgl %0,%1"
 #endif
 	    :"=r" (result)
 	    :"m" (*ptr), "0" (val)
 	    :"memory");
 
 	return result;
 }
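/*
 * Illustrative use (hypothetical, not part of the diff): atomically
 * snapshot-and-clear a word of pending bits, in the style of an
 * event-channel dispatcher, so no concurrent setter is lost between the
 * read and the clear.
 */
static __inline u_long
example_grab_pending(volatile u_long *pending)
{
	return xen_atomic_xchg(pending, 0);
}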
 
 static inline uint16_t
 xen_atomic_cmpxchg16(volatile uint16_t *ptr, uint16_t val, uint16_t newval)
 {
 	unsigned long result;
 
 	__asm volatile(__LOCK_PREFIX
 	    "cmpxchgw %w1,%2"
 	    :"=a" (result)
 	    :"q"(newval), "m" (*ptr), "0" (val)
 	    :"memory");
 
 	return result;
 }
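/*
 * Illustrative use (hypothetical, not part of the diff): the usual
 * compare-and-swap retry loop. cmpxchg returns the value that was in
 * memory; the swap succeeded iff that equals the value we expected.
 */
static __inline void
example_atomic_inc16(volatile uint16_t *p)
{
	uint16_t old;

	do {
		old = *p;
	} while (xen_atomic_cmpxchg16(p, old, old + 1) != old);
}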
 
 static __inline void
 xen_atomic_setbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
 {
 #ifdef __x86_64__
 	__asm volatile("lock ; orq %1,%0" : "=m" (*ptr) : "ir" (bits));
 #else
 	__asm volatile("lock ; orl %1,%0" : "=m" (*ptr) : "ir" (bits));
 #endif
 }
 
 static __inline void
 xen_atomic_clearbits_l(volatile XATOMIC_T *ptr, unsigned long bits)
 {
 #ifdef __x86_64__
 	__asm volatile("lock ; andq %1,%0" : "=m" (*ptr) : "ir" (~bits));
 #else
 	__asm volatile("lock ; andl %1,%0" : "=m" (*ptr) : "ir" (~bits));
 #endif
 }
 
 static __inline XATOMIC_T
 xen_atomic_test_and_clear_bit(volatile void *ptr, unsigned long bitno)
 {
 	int result;
 
 	__asm volatile(__LOCK_PREFIX
 #ifdef __x86_64__
 	    "btrq %2,%1 ;"
 	    "sbbq %0,%0"
 #else
 	    "btrl %2,%1 ;"
 	    "sbbl %0,%0"
 #endif
 	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
 	    :"Ir" (bitno) : "memory");
 	return result;
 }
 
 static __inline XATOMIC_T
 xen_atomic_test_and_set_bit(volatile void *ptr, unsigned long bitno)
 {
 	long result;
 
 	__asm volatile(__LOCK_PREFIX
 #ifdef __x86_64__
 	    "btsq %2,%1 ;"
 	    "sbbq %0,%0"
 #else
 	    "btsl %2,%1 ;"
 	    "sbbl %0,%0"
 #endif
 	    :"=r" (result), "=m" (*(volatile XATOMIC_T *)(ptr))
 	    :"Ir" (bitno) : "memory");
 	return result;
 }
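/*
 * Illustrative use (hypothetical, not part of the diff): scan a pending
 * word with xen_ffs() and consume bits one at a time; test-and-clear makes
 * each consumption race-free against concurrent setters. The handler
 * callback is a placeholder.
 */
static __inline void
example_drain_word(volatile u_long *pending, void (*handle)(int))
{
	int bit;

	while ((bit = xen_ffs(*pending)) != 0) {
		if (xen_atomic_test_and_clear_bit(pending, bit - 1))
			handle(bit - 1);
	}
}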
 
 static __inline int
 xen_constant_test_bit(const volatile void *ptr, unsigned long bitno)
 {
 	return ((1UL << (bitno & LONG_MASK)) &
 	    (((const volatile XATOMIC_T *) ptr)[bitno >> LONG_SHIFT])) != 0;
 }
 
 static __inline XATOMIC_T
 xen_variable_test_bit(const volatile void *ptr, unsigned long bitno)
 {
 	long result;
 
 	__asm volatile(
 #ifdef __x86_64__
 	    "btq %2,%1 ;"
 	    "sbbq %0,%0"
 #else
 	    "btl %2,%1 ;"
 	    "sbbl %0,%0"
 #endif
 	    :"=r" (result)
 	    :"m" (*(const volatile XATOMIC_T *)(ptr)), "Ir" (bitno));
 	return result;
 }
 
 #define xen_atomic_test_bit(ptr, bitno) \
 	(__builtin_constant_p(bitno) ? \
 	    xen_constant_test_bit((ptr),(bitno)) : \
 	    xen_variable_test_bit((ptr),(bitno)))
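/*
 * Editorial note (not part of the diff): __builtin_constant_p(bitno) is a
 * compile-time dispatch. A constant bit number takes the pure-C
 * shift-and-mask path, which the compiler can fold; a variable bit number
 * takes the BT-instruction path. Both return nonzero iff the bit is set:
 */
static __inline int
example_test_bits(const volatile u_long *p, unsigned long i)
{
	/* constant argument: resolves to xen_constant_test_bit() */
	int fixed = xen_atomic_test_bit(p, 0);
	/* variable argument: resolves to xen_variable_test_bit() */
	int vari = (xen_atomic_test_bit(p, i) != 0);

	return fixed && vari;
}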
 
 static __inline void
 xen_atomic_set_bit(volatile void *ptr, unsigned long bitno)
 {
 	__asm volatile(__LOCK_PREFIX
 #ifdef __x86_64__
 	    "btsq %1,%0"
 #else
 	    "btsl %1,%0"
 #endif
 	    :"=m" (*(volatile XATOMIC_T *)(ptr))
 	    :"Ir" (bitno));
 }
 
 static __inline void
 xen_atomic_clear_bit(volatile void *ptr, unsigned long bitno)
 {
 	__asm volatile(__LOCK_PREFIX
 #ifdef __x86_64__
 	    "btrq %1,%0"
 #else
 	    "btrl %1,%0"
 #endif
 	    :"=m" (*(volatile XATOMIC_T *)(ptr))
 	    :"Ir" (bitno));
 }
 
 #undef XATOMIC_T
 
 void wbinvd(void);
 
 #endif /* !_LOCORE */
 
 #endif /* _OS_H_ */