| @@ -1,221 +1,221 @@ | | | @@ -1,221 +1,221 @@ |
1 | /* $NetBSD: lock.h,v 1.38 2021/04/27 06:03:09 skrll Exp $ */ | | 1 | /* $NetBSD: lock.h,v 1.39 2021/05/30 02:28:59 joerg Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe. | | 8 | * by Jason R. Thorpe. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Machine-dependent spin lock operations. | | 33 | * Machine-dependent spin lock operations. |
34 | * | | 34 | * |
35 | * NOTE: The SWP insn used here is available only on ARM architecture | | 35 | * NOTE: The SWP insn used here is available only on ARM architecture |
36 | * version 3 and later (as well as 2a). What we are going to do is | | 36 | * version 3 and later (as well as 2a). What we are going to do is |
37 | * expect that the kernel will trap and emulate the insn. That will | | 37 | * expect that the kernel will trap and emulate the insn. That will |
38 | * be slow, but give us the atomicity that we need. | | 38 | * be slow, but give us the atomicity that we need. |
39 | */ | | 39 | */ |
40 | | | 40 | |
41 | #ifndef _ARM_LOCK_H_ | | 41 | #ifndef _ARM_LOCK_H_ |
42 | #define _ARM_LOCK_H_ | | 42 | #define _ARM_LOCK_H_ |
43 | | | 43 | |
44 | static __inline int | | 44 | static __inline int |
45 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) | | 45 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) |
46 | { | | 46 | { |
47 | return *__ptr == __SIMPLELOCK_LOCKED; | | 47 | return *__ptr == __SIMPLELOCK_LOCKED; |
48 | } | | 48 | } |
49 | | | 49 | |
50 | static __inline int | | 50 | static __inline int |
51 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) | | 51 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) |
52 | { | | 52 | { |
53 | return *__ptr == __SIMPLELOCK_UNLOCKED; | | 53 | return *__ptr == __SIMPLELOCK_UNLOCKED; |
54 | } | | 54 | } |
55 | | | 55 | |
56 | static __inline void | | 56 | static __inline void |
57 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) | | 57 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) |
58 | { | | 58 | { |
59 | *__ptr = __SIMPLELOCK_UNLOCKED; | | 59 | *__ptr = __SIMPLELOCK_UNLOCKED; |
60 | } | | 60 | } |
61 | | | 61 | |
62 | static __inline void | | 62 | static __inline void |
63 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) | | 63 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) |
64 | { | | 64 | { |
65 | *__ptr = __SIMPLELOCK_LOCKED; | | 65 | *__ptr = __SIMPLELOCK_LOCKED; |
66 | } | | 66 | } |
67 | | | 67 | |
68 | #if defined(_ARM_ARCH_6) | | 68 | #if defined(_ARM_ARCH_6) |
69 | static __inline unsigned int | | 69 | static __inline unsigned int |
70 | __arm_load_exclusive(__cpu_simple_lock_t *__alp) | | 70 | __arm_load_exclusive(__cpu_simple_lock_t *__alp) |
71 | { | | 71 | { |
72 | unsigned int __rv; | | 72 | unsigned int __rv; |
73 | if (/*CONSTCOND*/sizeof(*__alp) == 1) { | | 73 | if (/*CONSTCOND*/sizeof(*__alp) == 1) { |
74 | __asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp)); | | 74 | __asm __volatile("ldrexb\t%0,[%1]" : "=r"(__rv) : "r"(__alp)); |
75 | } else { | | 75 | } else { |
76 | __asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp)); | | 76 | __asm __volatile("ldrex\t%0,[%1]" : "=r"(__rv) : "r"(__alp)); |
77 | } | | 77 | } |
78 | return __rv; | | 78 | return __rv; |
79 | } | | 79 | } |
80 | | | 80 | |
81 | /* returns 0 on success and 1 on failure */ | | 81 | /* returns 0 on success and 1 on failure */ |
82 | static __inline unsigned int | | 82 | static __inline unsigned int |
83 | __arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val) | | 83 | __arm_store_exclusive(__cpu_simple_lock_t *__alp, unsigned int __val) |
84 | { | | 84 | { |
85 | unsigned int __rv; | | 85 | unsigned int __rv; |
86 | if (/*CONSTCOND*/sizeof(*__alp) == 1) { | | 86 | if (/*CONSTCOND*/sizeof(*__alp) == 1) { |
87 | __asm __volatile("strexb\t%0,%1,[%2]" | | 87 | __asm __volatile("strexb\t%0,%1,[%2]" |
88 | : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory"); | | 88 | : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory"); |
89 | } else { | | 89 | } else { |
90 | __asm __volatile("strex\t%0,%1,[%2]" | | 90 | __asm __volatile("strex\t%0,%1,[%2]" |
91 | : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory"); | | 91 | : "=&r"(__rv) : "r"(__val), "r"(__alp) : "cc", "memory"); |
92 | } | | 92 | } |
93 | return __rv; | | 93 | return __rv; |
94 | } | | 94 | } |
95 | #elif defined(_KERNEL) | | 95 | #elif defined(_KERNEL) |
96 | static __inline unsigned char | | 96 | static __inline unsigned char |
97 | __swp(unsigned char __val, __cpu_simple_lock_t *__ptr) | | 97 | __swp(unsigned char __val, __cpu_simple_lock_t *__ptr) |
98 | { | | 98 | { |
99 | uint32_t __val32; | | 99 | uint32_t __val32; |
100 | __asm volatile("swpb %0, %1, [%2]" | | 100 | __asm volatile("swpb %0, %1, [%2]" |
101 | : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory"); | | 101 | : "=&r" (__val32) : "r" (__val), "r" (__ptr) : "memory"); |
102 | return __val32; | | 102 | return __val32; |
103 | } | | 103 | } |
104 | #else | | 104 | #else |
105 | /* | | 105 | /* |
106 | * On MP Cortex, SWP no longer guarantees atomic results. Thus we pad | | 106 | * On MP Cortex, SWP no longer guarantees atomic results. Thus we pad |
107 | * out SWP so that when the cpu generates an undefined exception we can replace | | 107 | * out SWP so that when the cpu generates an undefined exception we can replace |
108 | * the SWP/MOV instructions with the right LDREX/STREX instructions. | | 108 | * the SWP/MOV instructions with the right LDREX/STREX instructions. |
109 | * | | 109 | * |
110 | * This is why we force the SWP into the template needed for LDREX/STREX | | 110 | * This is why we force the SWP into the template needed for LDREX/STREX |
111 | * including the extra instructions and extra register for testing the result. | | 111 | * including the extra instructions and extra register for testing the result. |
112 | */ | | 112 | */ |
113 | static __inline int | | 113 | static __inline int |
114 | __swp(int __val, __cpu_simple_lock_t *__ptr) | | 114 | __swp(int __val, __cpu_simple_lock_t *__ptr) |
115 | { | | 115 | { |
116 | int __tmp, __rv; | | 116 | int __tmp, __rv; |
117 | __asm volatile( | | 117 | __asm volatile( |
118 | #if 1 | | 118 | #if 1 |
119 | "1:\t" "swp %[__rv], %[__val], [%[__ptr]]" | | 119 | "1:\t" "swp %[__rv], %[__val], [%[__ptr]]" |
120 | "\n\t" "b 2f" | | 120 | "\n\t" "b 2f" |
121 | #else | | 121 | #else |
122 | "1:\t" "ldrex %[__rv],[%[__ptr]]" | | 122 | "1:\t" "ldrex %[__rv],[%[__ptr]]" |
123 | "\n\t" "strex %[__tmp],%[__val],[%[__ptr]]" | | 123 | "\n\t" "strex %[__tmp],%[__val],[%[__ptr]]" |
124 | #endif | | 124 | #endif |
125 | "\n\t" "cmp %[__tmp],#0" | | 125 | "\n\t" "cmp %[__tmp],#0" |
126 | "\n\t" "bne 1b" | | 126 | "\n\t" "bne 1b" |
127 | "\n" "2:" | | 127 | "\n" "2:" |
128 | : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp) | | 128 | : [__rv] "=&r" (__rv), [__tmp] "=&r" (__tmp) |
129 | : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory"); | | 129 | : [__val] "r" (__val), [__ptr] "r" (__ptr) : "cc", "memory"); |
130 | return __rv; | | 130 | return __rv; |
131 | } | | 131 | } |
132 | #endif /* !_ARM_ARCH_6 */ | | 132 | #endif /* !_ARM_ARCH_6 */ |
133 | | | 133 | |
134 | /* load/dmb implies load-acquire */ | | 134 | /* load/dmb implies load-acquire */ |
135 | static __inline void | | 135 | static __inline void |
136 | __arm_load_dmb(void) | | 136 | __arm_load_dmb(void) |
137 | { | | 137 | { |
138 | #if defined(_ARM_ARCH_7) | | 138 | #if defined(_ARM_ARCH_7) |
139 | __asm __volatile("dmb ish" ::: "memory"); | | 139 | __asm __volatile("dmb ish" ::: "memory"); |
140 | #elif defined(_ARM_ARCH_6) | | 140 | #elif defined(_ARM_ARCH_6) |
141 | __asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory"); | | 141 | __asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory"); |
142 | #endif | | 142 | #endif |
143 | } | | 143 | } |
144 | | | 144 | |
145 | /* dmb/store implies store-release */ | | 145 | /* dmb/store implies store-release */ |
146 | static __inline void | | 146 | static __inline void |
147 | __arm_dmb_store(void) | | 147 | __arm_dmb_store(void) |
148 | { | | 148 | { |
149 | #if defined(_ARM_ARCH_7) | | 149 | #if defined(_ARM_ARCH_7) |
150 | __asm __volatile("dmb ish" ::: "memory"); | | 150 | __asm __volatile("dmb ish" ::: "memory"); |
151 | #elif defined(_ARM_ARCH_6) | | 151 | #elif defined(_ARM_ARCH_6) |
152 | __asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory"); | | 152 | __asm __volatile("mcr\tp15,0,%0,c7,c10,5" :: "r"(0) : "memory"); |
153 | #endif | | 153 | #endif |
154 | } | | 154 | } |
155 | | | 155 | |
156 | | | 156 | |
157 | static __inline void __unused | | 157 | static __inline void __unused |
158 | __cpu_simple_lock_init(__cpu_simple_lock_t *__alp) | | 158 | __cpu_simple_lock_init(__cpu_simple_lock_t *__alp) |
159 | { | | 159 | { |
160 | | | 160 | |
161 | *__alp = __SIMPLELOCK_UNLOCKED; | | 161 | *__alp = __SIMPLELOCK_UNLOCKED; |
162 | } | | 162 | } |
163 | | | 163 | |
164 | #if !defined(__thumb__) || defined(_ARM_ARCH_T2) | | 164 | #if !defined(__thumb__) || defined(_ARM_ARCH_T2) |
165 | static __inline void __unused | | 165 | static __inline void __unused |
166 | __cpu_simple_lock(__cpu_simple_lock_t *__alp) | | 166 | __cpu_simple_lock(__cpu_simple_lock_t *__alp) |
167 | { | | 167 | { |
168 | #if defined(_ARM_ARCH_6) | | 168 | #if defined(_ARM_ARCH_6) |
169 | do { | | 169 | do { |
170 | /* spin */ | | 170 | /* spin */ |
171 | } while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED | | 171 | } while (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED |
172 | || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED)); | | 172 | || __arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED)); |
173 | __arm_load_dmb(); | | 173 | __arm_load_dmb(); |
174 | #else | | 174 | #else |
175 | while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED) | | 175 | while (__swp(__SIMPLELOCK_LOCKED, __alp) != __SIMPLELOCK_UNLOCKED) |
176 | continue; | | 176 | continue; |
177 | #endif | | 177 | #endif |
178 | } | | 178 | } |
179 | #else | | 179 | #else |
180 | void __cpu_simple_lock(__cpu_simple_lock_t *); | | 180 | void __cpu_simple_lock(__cpu_simple_lock_t *); |
181 | #endif | | 181 | #endif |
182 | | | 182 | |
183 | #if !defined(__thumb__) || defined(_ARM_ARCH_T2) | | 183 | #if !defined(__thumb__) || defined(_ARM_ARCH_T2) |
184 | static __inline int __unused | | 184 | static __inline int __unused |
185 | __cpu_simple_lock_try(__cpu_simple_lock_t *__alp) | | 185 | __cpu_simple_lock_try(__cpu_simple_lock_t *__alp) |
186 | { | | 186 | { |
187 | #if defined(_ARM_ARCH_6) | | 187 | #if defined(_ARM_ARCH_6) |
188 | do { | | 188 | do { |
189 | if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) { | | 189 | if (__arm_load_exclusive(__alp) != __SIMPLELOCK_UNLOCKED) { |
190 | return 0; | | 190 | return 0; |
191 | } | | 191 | } |
192 | } while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED)); | | 192 | } while (__arm_store_exclusive(__alp, __SIMPLELOCK_LOCKED)); |
193 | __arm_load_dmb(); | | 193 | __arm_load_dmb(); |
194 | return 1; | | 194 | return 1; |
195 | #else | | 195 | #else |
196 | return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED); | | 196 | return (__swp(__SIMPLELOCK_LOCKED, __alp) == __SIMPLELOCK_UNLOCKED); |
197 | #endif | | 197 | #endif |
198 | } | | 198 | } |
199 | #else | | 199 | #else |
200 | int __cpu_simple_lock_try(__cpu_simple_lock_t *); | | 200 | int __cpu_simple_lock_try(__cpu_simple_lock_t *); |
201 | #endif | | 201 | #endif |
202 | | | 202 | |
203 | static __inline void __unused | | 203 | static __inline void __unused |
204 | __cpu_simple_unlock(__cpu_simple_lock_t *__alp) | | 204 | __cpu_simple_unlock(__cpu_simple_lock_t *__alp) |
205 | { | | 205 | { |
206 | | | 206 | |
207 | #if defined(_ARM_ARCH_8) | | 207 | #if defined(_ARM_ARCH_8) && defined(__LP64__) |
208 | if (sizeof(*__alp) == 1) { | | 208 | if (sizeof(*__alp) == 1) { |
209 | __asm __volatile("stlrb\t%w0, [%1]" | | 209 | __asm __volatile("stlrb\t%w0, [%1]" |
210 | :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); | | 210 | :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); |
211 | } else { | | 211 | } else { |
212 | __asm __volatile("stlr\t%0, [%1]" | | 212 | __asm __volatile("stlr\t%0, [%1]" |
213 | :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); | | 213 | :: "r"(__SIMPLELOCK_UNLOCKED), "r"(__alp) : "memory"); |
214 | } | | 214 | } |
215 | #else | | 215 | #else |
216 | __arm_dmb_store(); | | 216 | __arm_dmb_store(); |
217 | *__alp = __SIMPLELOCK_UNLOCKED; | | 217 | *__alp = __SIMPLELOCK_UNLOCKED; |
218 | #endif | | 218 | #endif |
219 | } | | 219 | } |
220 | | | 220 | |
221 | #endif /* _ARM_LOCK_H_ */ | | 221 | #endif /* _ARM_LOCK_H_ */ |
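
What changes in revision 1.39: the only functional difference is in __cpu_simple_unlock(). The STLR/STLRB store-release fast path is now taken only when both _ARM_ARCH_8 and __LP64__ are defined, i.e. only on 64-bit (AArch64) builds. The "stlrb %w0, [%1]" and "stlr %0, [%1]" forms as written use A64 assembler syntax (the %w operand modifier in particular), so the path presumably could not be assembled for 32-bit ARMv8 targets; such builds now fall back to __arm_dmb_store() followed by a plain store, which the header already documents as providing store-release ordering.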
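
For readers less familiar with the ARM memory model, the ARMv6+ paths above amount to an acquire-ordered test-and-set plus a release-ordered clear. Below is a rough, portable sketch of the same discipline using the GCC/Clang __atomic builtins; it is an illustration only, not part of lock.h, and all example_* identifiers are invented.

/*
 * Illustration only -- not part of lock.h.  The LDREX/STREX retry loop
 * followed by __arm_load_dmb() corresponds to an acquire-ordered
 * test-and-set; __arm_dmb_store() (or STLR on AArch64) before the
 * unlocking store corresponds to a release-ordered clear.
 */
#include <stdbool.h>

typedef unsigned char example_spinlock_t;	/* hypothetical type */

static inline void
example_spin_lock(example_spinlock_t *lp)
{
	/* __atomic_test_and_set returns true if the lock was already held. */
	while (__atomic_test_and_set(lp, __ATOMIC_ACQUIRE))
		continue;			/* spin, as in the loop above */
}

static inline bool
example_spin_trylock(example_spinlock_t *lp)
{
	return !__atomic_test_and_set(lp, __ATOMIC_ACQUIRE);
}

static inline void
example_spin_unlock(example_spinlock_t *lp)
{
	/* Release ordering: critical-section accesses stay before this. */
	__atomic_clear(lp, __ATOMIC_RELEASE);
}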
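
And a hypothetical caller showing how the primitives defined in this header are typically composed. Only the __cpu_simple_lock_* calls come from lock.h; the structure, field, and function names are made up, and the include assumes a NetBSD kernel build in which <sys/lock.h> pulls in this machine-dependent header.

/* Hypothetical usage sketch -- not part of lock.h. */
#include <sys/lock.h>

struct example_stats {
	__cpu_simple_lock_t	es_lock;
	unsigned long		es_count;
};

static void
example_stats_init(struct example_stats *es)
{
	__cpu_simple_lock_init(&es->es_lock);
	es->es_count = 0;
}

static void
example_stats_bump(struct example_stats *es)
{
	__cpu_simple_lock(&es->es_lock);	/* spins until acquired */
	es->es_count++;
	__cpu_simple_unlock(&es->es_lock);
}

static int
example_stats_try_bump(struct example_stats *es)
{
	if (!__cpu_simple_lock_try(&es->es_lock))
		return 0;			/* lock was held; do not block */
	es->es_count++;
	__cpu_simple_unlock(&es->es_lock);
	return 1;
}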