| @@ -1,970 +1,970 @@ | | | @@ -1,970 +1,970 @@ |
1 | /* $NetBSD: kern_mutex.c,v 1.79 2019/05/09 05:00:31 ozaki-r Exp $ */ | | 1 | /* $NetBSD: kern_mutex.c,v 1.79.2.1 2020/03/08 11:21:29 martin Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Jason R. Thorpe and Andrew Doran. | | 8 | * by Jason R. Thorpe and Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Kernel mutex implementation, modeled after those found in Solaris, | | 33 | * Kernel mutex implementation, modeled after those found in Solaris, |
34 | * a description of which can be found in: | | 34 | * a description of which can be found in: |
35 | * | | 35 | * |
36 | * Solaris Internals: Core Kernel Architecture, Jim Mauro and | | 36 | * Solaris Internals: Core Kernel Architecture, Jim Mauro and |
37 | * Richard McDougall. | | 37 | * Richard McDougall. |
38 | */ | | 38 | */ |
39 | | | 39 | |
40 | #define __MUTEX_PRIVATE | | 40 | #define __MUTEX_PRIVATE |
41 | | | 41 | |
42 | #include <sys/cdefs.h> | | 42 | #include <sys/cdefs.h> |
43 | __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.79 2019/05/09 05:00:31 ozaki-r Exp $"); | | 43 | __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.79.2.1 2020/03/08 11:21:29 martin Exp $"); |
44 | | | 44 | |
45 | #include <sys/param.h> | | 45 | #include <sys/param.h> |
46 | #include <sys/atomic.h> | | 46 | #include <sys/atomic.h> |
47 | #include <sys/proc.h> | | 47 | #include <sys/proc.h> |
48 | #include <sys/mutex.h> | | 48 | #include <sys/mutex.h> |
49 | #include <sys/sched.h> | | 49 | #include <sys/sched.h> |
50 | #include <sys/sleepq.h> | | 50 | #include <sys/sleepq.h> |
51 | #include <sys/systm.h> | | 51 | #include <sys/systm.h> |
52 | #include <sys/lockdebug.h> | | 52 | #include <sys/lockdebug.h> |
53 | #include <sys/kernel.h> | | 53 | #include <sys/kernel.h> |
54 | #include <sys/intr.h> | | 54 | #include <sys/intr.h> |
55 | #include <sys/lock.h> | | 55 | #include <sys/lock.h> |
56 | #include <sys/types.h> | | 56 | #include <sys/types.h> |
57 | #include <sys/cpu.h> | | 57 | #include <sys/cpu.h> |
58 | #include <sys/pserialize.h> | | 58 | #include <sys/pserialize.h> |
59 | | | 59 | |
60 | #include <dev/lockstat.h> | | 60 | #include <dev/lockstat.h> |
61 | | | 61 | |
62 | #include <machine/lock.h> | | 62 | #include <machine/lock.h> |
63 | | | 63 | |
64 | #define MUTEX_PANIC_SKIP_SPIN 1 | | 64 | #define MUTEX_PANIC_SKIP_SPIN 1 |
65 | #define MUTEX_PANIC_SKIP_ADAPTIVE 1 | | 65 | #define MUTEX_PANIC_SKIP_ADAPTIVE 1 |
66 | | | 66 | |
67 | /* | | 67 | /* |
68 | * When not running a debug kernel, spin mutexes are not much | | 68 | * When not running a debug kernel, spin mutexes are not much |
69 | * more than an splraiseipl() and splx() pair. | | 69 | * more than an splraiseipl() and splx() pair. |
70 | */ | | 70 | */ |
71 | | | 71 | |
72 | #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG) | | 72 | #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG) |
73 | #define FULL | | 73 | #define FULL |
74 | #endif | | 74 | #endif |
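
As the comment above notes, without DIAGNOSTIC/MULTIPROCESSOR/LOCKDEBUG (that is, without FULL) a spin mutex costs little more than the SPL raise/restore pair. A minimal caller-side sketch, assuming a spin mutex initialized elsewhere at a hardware IPL; the names here are hypothetical:

    static kmutex_t example_intr_lock;    /* mutex_init()ed at IPL_VM */
    static int example_pending;

    static void
    example_post(void)
    {
        mutex_spin_enter(&example_intr_lock); /* raises SPL; takes lock bit under FULL */
        example_pending++;                    /* safe against the interrupt handler */
        mutex_spin_exit(&example_intr_lock);  /* drops lock bit; restores SPL */
    }
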
75 | | | 75 | |
76 | /* | | 76 | /* |
77 | * Debugging support. | | 77 | * Debugging support. |
78 | */ | | 78 | */ |
79 | | | 79 | |
80 | #define MUTEX_WANTLOCK(mtx) \ | | 80 | #define MUTEX_WANTLOCK(mtx) \ |
81 | LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \ | | 81 | LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \ |
82 | (uintptr_t)__builtin_return_address(0), 0) | | 82 | (uintptr_t)__builtin_return_address(0), 0) |
83 | #define MUTEX_TESTLOCK(mtx) \ | | 83 | #define MUTEX_TESTLOCK(mtx) \ |
84 | LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \ | | 84 | LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \ |
85 | (uintptr_t)__builtin_return_address(0), -1) | | 85 | (uintptr_t)__builtin_return_address(0), -1) |
86 | #define MUTEX_LOCKED(mtx) \ | | 86 | #define MUTEX_LOCKED(mtx) \ |
87 | LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL, \ | | 87 | LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL, \ |
88 | (uintptr_t)__builtin_return_address(0), 0) | | 88 | (uintptr_t)__builtin_return_address(0), 0) |
89 | #define MUTEX_UNLOCKED(mtx) \ | | 89 | #define MUTEX_UNLOCKED(mtx) \ |
90 | LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), \ | | 90 | LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), \ |
91 | (uintptr_t)__builtin_return_address(0), 0) | | 91 | (uintptr_t)__builtin_return_address(0), 0) |
92 | #define MUTEX_ABORT(mtx, msg) \ | | 92 | #define MUTEX_ABORT(mtx, msg) \ |
93 | mutex_abort(__func__, __LINE__, mtx, msg) | | 93 | mutex_abort(__func__, __LINE__, mtx, msg) |
94 | | | 94 | |
95 | #if defined(LOCKDEBUG) | | 95 | #if defined(LOCKDEBUG) |
96 | | | 96 | |
97 | #define MUTEX_DASSERT(mtx, cond) \ | | 97 | #define MUTEX_DASSERT(mtx, cond) \ |
98 | do { \ | | 98 | do { \ |
99 | if (__predict_false(!(cond))) \ | | 99 | if (__predict_false(!(cond))) \ |
100 | MUTEX_ABORT(mtx, "assertion failed: " #cond); \ | | 100 | MUTEX_ABORT(mtx, "assertion failed: " #cond); \ |
101 | } while (/* CONSTCOND */ 0) | | 101 | } while (/* CONSTCOND */ 0) |
102 | | | 102 | |
103 | #else /* LOCKDEBUG */ | | 103 | #else /* LOCKDEBUG */ |
104 | | | 104 | |
105 | #define MUTEX_DASSERT(mtx, cond) /* nothing */ | | 105 | #define MUTEX_DASSERT(mtx, cond) /* nothing */ |
106 | | | 106 | |
107 | #endif /* LOCKDEBUG */ | | 107 | #endif /* LOCKDEBUG */ |
108 | | | 108 | |
109 | #if defined(DIAGNOSTIC) | | 109 | #if defined(DIAGNOSTIC) |
110 | | | 110 | |
111 | #define MUTEX_ASSERT(mtx, cond) \ | | 111 | #define MUTEX_ASSERT(mtx, cond) \ |
112 | do { \ | | 112 | do { \ |
113 | if (__predict_false(!(cond))) \ | | 113 | if (__predict_false(!(cond))) \ |
114 | MUTEX_ABORT(mtx, "assertion failed: " #cond); \ | | 114 | MUTEX_ABORT(mtx, "assertion failed: " #cond); \ |
115 | } while (/* CONSTCOND */ 0) | | 115 | } while (/* CONSTCOND */ 0) |
116 | | | 116 | |
117 | #else /* DIAGNOSTIC */ | | 117 | #else /* DIAGNOSTIC */ |
118 | | | 118 | |
119 | #define MUTEX_ASSERT(mtx, cond) /* nothing */ | | 119 | #define MUTEX_ASSERT(mtx, cond) /* nothing */ |
120 | | | 120 | |
121 | #endif /* DIAGNOSTIC */ | | 121 | #endif /* DIAGNOSTIC */ |
122 | | | 122 | |
123 | /* | | 123 | /* |
124 | * Some architectures can't use __cpu_simple_lock as-is, so allow a way | | 124 | * Some architectures can't use __cpu_simple_lock as-is, so allow a way |
125 | * for them to use an alternate definition. | | 125 | * for them to use an alternate definition. |
126 | */ | | 126 | */ |
127 | #ifndef MUTEX_SPINBIT_LOCK_INIT | | 127 | #ifndef MUTEX_SPINBIT_LOCK_INIT |
128 | #define MUTEX_SPINBIT_LOCK_INIT(mtx) __cpu_simple_lock_init(&(mtx)->mtx_lock) | | 128 | #define MUTEX_SPINBIT_LOCK_INIT(mtx) __cpu_simple_lock_init(&(mtx)->mtx_lock) |
129 | #endif | | 129 | #endif |
130 | #ifndef MUTEX_SPINBIT_LOCKED_P | | 130 | #ifndef MUTEX_SPINBIT_LOCKED_P |
131 | #define MUTEX_SPINBIT_LOCKED_P(mtx) __SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock) | | 131 | #define MUTEX_SPINBIT_LOCKED_P(mtx) __SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock) |
132 | #endif | | 132 | #endif |
133 | #ifndef MUTEX_SPINBIT_LOCK_TRY | | 133 | #ifndef MUTEX_SPINBIT_LOCK_TRY |
134 | #define MUTEX_SPINBIT_LOCK_TRY(mtx) __cpu_simple_lock_try(&(mtx)->mtx_lock) | | 134 | #define MUTEX_SPINBIT_LOCK_TRY(mtx) __cpu_simple_lock_try(&(mtx)->mtx_lock) |
135 | #endif | | 135 | #endif |
136 | #ifndef MUTEX_SPINBIT_LOCK_UNLOCK | | 136 | #ifndef MUTEX_SPINBIT_LOCK_UNLOCK |
137 | #define MUTEX_SPINBIT_LOCK_UNLOCK(mtx) __cpu_simple_unlock(&(mtx)->mtx_lock) | | 137 | #define MUTEX_SPINBIT_LOCK_UNLOCK(mtx) __cpu_simple_unlock(&(mtx)->mtx_lock) |
138 | #endif | | 138 | #endif |
139 | | | 139 | |
140 | #ifndef MUTEX_INITIALIZE_SPIN_IPL | | 140 | #ifndef MUTEX_INITIALIZE_SPIN_IPL |
141 | #define MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl) \ | | 141 | #define MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl) \ |
142 | ((mtx)->mtx_ipl = makeiplcookie((ipl))) | | 142 | ((mtx)->mtx_ipl = makeiplcookie((ipl))) |
143 | #endif | | 143 | #endif |
144 | | | 144 | |
145 | /* | | 145 | /* |
146 | * Spin mutex SPL save / restore. | | 146 | * Spin mutex SPL save / restore. |
147 | */ | | 147 | */ |
148 | | | 148 | |
149 | #define MUTEX_SPIN_SPLRAISE(mtx) \ | | 149 | #define MUTEX_SPIN_SPLRAISE(mtx) \ |
150 | do { \ | | 150 | do { \ |
151 | struct cpu_info *x__ci; \ | | 151 | struct cpu_info *x__ci; \ |
152 | int x__cnt, s; \ | | 152 | int x__cnt, s; \ |
153 | s = splraiseipl(MUTEX_SPIN_IPL(mtx)); \ | | 153 | s = splraiseipl(MUTEX_SPIN_IPL(mtx)); \ |
154 | x__ci = curcpu(); \ | | 154 | x__ci = curcpu(); \ |
155 | x__cnt = x__ci->ci_mtx_count--; \ | | 155 | x__cnt = x__ci->ci_mtx_count--; \ |
156 | __insn_barrier(); \ | | 156 | __insn_barrier(); \ |
157 | if (x__cnt == 0) \ | | 157 | if (x__cnt == 0) \ |
158 | x__ci->ci_mtx_oldspl = (s); \ | | 158 | x__ci->ci_mtx_oldspl = (s); \ |
159 | } while (/* CONSTCOND */ 0) | | 159 | } while (/* CONSTCOND */ 0) |
160 | | | 160 | |
161 | #define MUTEX_SPIN_SPLRESTORE(mtx) \ | | 161 | #define MUTEX_SPIN_SPLRESTORE(mtx) \ |
162 | do { \ | | 162 | do { \ |
163 | struct cpu_info *x__ci = curcpu(); \ | | 163 | struct cpu_info *x__ci = curcpu(); \ |
164 | int s = x__ci->ci_mtx_oldspl; \ | | 164 | int s = x__ci->ci_mtx_oldspl; \ |
165 | __insn_barrier(); \ | | 165 | __insn_barrier(); \ |
166 | if (++(x__ci->ci_mtx_count) == 0) \ | | 166 | if (++(x__ci->ci_mtx_count) == 0) \ |
167 | splx(s); \ | | 167 | splx(s); \ |
168 | } while (/* CONSTCOND */ 0) | | 168 | } while (/* CONSTCOND */ 0) |
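
Note the bookkeeping above: ci_mtx_count runs negative while spin mutexes are held, ci_mtx_oldspl is written only when the count leaves zero, and splx() fires only when it returns to zero. A worked sketch of the nesting (lock names hypothetical):

    /* Assume ci_mtx_count == 0 and the CPU is at IPL_NONE. */
    mutex_spin_enter(&lock_a);  /* count 0 -> -1: IPL_NONE saved in ci_mtx_oldspl */
    mutex_spin_enter(&lock_b);  /* count -1 -> -2: saved SPL left untouched */
    mutex_spin_exit(&lock_b);   /* count -2 -> -1: SPL stays raised */
    mutex_spin_exit(&lock_a);   /* count -1 -> 0: splx() back to IPL_NONE */
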
169 | | | 169 | |
170 | /* | | 170 | /* |
171 | * For architectures that provide 'simple' mutexes: they provide a | | 171 | * For architectures that provide 'simple' mutexes: they provide a |
172 | * CAS function that is either MP-safe, or does not need to be MP | | 172 | * CAS function that is either MP-safe, or does not need to be MP |
173 | * safe. Adaptive mutexes on these architectures do not require an | | 173 | * safe. Adaptive mutexes on these architectures do not require an |
174 | * additional interlock. | | 174 | * additional interlock. |
175 | */ | | 175 | */ |
176 | | | 176 | |
177 | #ifdef __HAVE_SIMPLE_MUTEXES | | 177 | #ifdef __HAVE_SIMPLE_MUTEXES |
178 | | | 178 | |
179 | #define MUTEX_OWNER(owner) \ | | 179 | #define MUTEX_OWNER(owner) \ |
180 | (owner & MUTEX_THREAD) | | 180 | (owner & MUTEX_THREAD) |
181 | #define MUTEX_HAS_WAITERS(mtx) \ | | 181 | #define MUTEX_HAS_WAITERS(mtx) \ |
182 | (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0) | | 182 | (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0) |
183 | | | 183 | |
184 | #define MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug) \ | | 184 | #define MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug) \ |
185 | do { \ | | 185 | do { \ |
186 | if (!dodebug) \ | | 186 | if (!dodebug) \ |
187 | (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG; \ | | 187 | (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG; \ |
188 | } while (/* CONSTCOND */ 0) | | 188 | } while (/* CONSTCOND */ 0) |
189 | | | 189 | |
190 | #define MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl) \ | | 190 | #define MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl) \ |
191 | do { \ | | 191 | do { \ |
192 | (mtx)->mtx_owner = MUTEX_BIT_SPIN; \ | | 192 | (mtx)->mtx_owner = MUTEX_BIT_SPIN; \ |
193 | if (!dodebug) \ | | 193 | if (!dodebug) \ |
194 | (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG; \ | | 194 | (mtx)->mtx_owner |= MUTEX_BIT_NODEBUG; \ |
195 | MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl)); \ | | 195 | MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl)); \ |
196 | MUTEX_SPINBIT_LOCK_INIT((mtx)); \ | | 196 | MUTEX_SPINBIT_LOCK_INIT((mtx)); \ |
197 | } while (/* CONSTCOND */ 0) | | 197 | } while (/* CONSTCOND */ 0) |
198 | | | 198 | |
199 | #define MUTEX_DESTROY(mtx) \ | | 199 | #define MUTEX_DESTROY(mtx) \ |
200 | do { \ | | 200 | do { \ |
201 | (mtx)->mtx_owner = MUTEX_THREAD; \ | | 201 | (mtx)->mtx_owner = MUTEX_THREAD; \ |
202 | } while (/* CONSTCOND */ 0) | | 202 | } while (/* CONSTCOND */ 0) |
203 | | | 203 | |
204 | #define MUTEX_SPIN_P(mtx) \ | | 204 | #define MUTEX_SPIN_P(mtx) \ |
205 | (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0) | | 205 | (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0) |
206 | #define MUTEX_ADAPTIVE_P(mtx) \ | | 206 | #define MUTEX_ADAPTIVE_P(mtx) \ |
207 | (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0) | | 207 | (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0) |
208 | | | 208 | |
209 | #define MUTEX_DEBUG_P(mtx) (((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0) | | 209 | #define MUTEX_DEBUG_P(mtx) (((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0) |
210 | #if defined(LOCKDEBUG) | | 210 | #if defined(LOCKDEBUG) |
211 | #define MUTEX_OWNED(owner) (((owner) & ~MUTEX_BIT_NODEBUG) != 0) | | 211 | #define MUTEX_OWNED(owner) (((owner) & ~MUTEX_BIT_NODEBUG) != 0) |
212 | #define MUTEX_INHERITDEBUG(n, o) (n) |= (o) & MUTEX_BIT_NODEBUG | | 212 | #define MUTEX_INHERITDEBUG(n, o) (n) |= (o) & MUTEX_BIT_NODEBUG |
213 | #else /* defined(LOCKDEBUG) */ | | 213 | #else /* defined(LOCKDEBUG) */ |
214 | #define MUTEX_OWNED(owner) ((owner) != 0) | | 214 | #define MUTEX_OWNED(owner) ((owner) != 0) |
215 | #define MUTEX_INHERITDEBUG(n, o) /* nothing */ | | 215 | #define MUTEX_INHERITDEBUG(n, o) /* nothing */ |
216 | #endif /* defined(LOCKDEBUG) */ | | 216 | #endif /* defined(LOCKDEBUG) */ |
217 | | | 217 | |
218 | static inline int | | 218 | static inline int |
219 | MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread) | | 219 | MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread) |
220 | { | | 220 | { |
221 | int rv; | | 221 | int rv; |
222 | uintptr_t oldown = 0; | | 222 | uintptr_t oldown = 0; |
223 | uintptr_t newown = curthread; | | 223 | uintptr_t newown = curthread; |
224 | | | 224 | |
225 | MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner); | | 225 | MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner); |
226 | MUTEX_INHERITDEBUG(newown, oldown); | | 226 | MUTEX_INHERITDEBUG(newown, oldown); |
227 | rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown); | | 227 | rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown); |
228 | MUTEX_RECEIVE(mtx); | | 228 | MUTEX_RECEIVE(mtx); |
229 | return rv; | | 229 | return rv; |
230 | } | | 230 | } |
231 | | | 231 | |
232 | static inline int | | 232 | static inline int |
233 | MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner) | | 233 | MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner) |
234 | { | | 234 | { |
235 | int rv; | | 235 | int rv; |
236 | rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS); | | 236 | rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS); |
237 | MUTEX_RECEIVE(mtx); | | 237 | MUTEX_RECEIVE(mtx); |
238 | return rv; | | 238 | return rv; |
239 | } | | 239 | } |
240 | | | 240 | |
241 | static inline void | | 241 | static inline void |
242 | MUTEX_RELEASE(kmutex_t *mtx) | | 242 | MUTEX_RELEASE(kmutex_t *mtx) |
243 | { | | 243 | { |
244 | uintptr_t newown; | | 244 | uintptr_t newown; |
245 | | | 245 | |
246 | MUTEX_GIVE(mtx); | | 246 | MUTEX_GIVE(mtx); |
247 | newown = 0; | | 247 | newown = 0; |
248 | MUTEX_INHERITDEBUG(newown, mtx->mtx_owner); | | 248 | MUTEX_INHERITDEBUG(newown, mtx->mtx_owner); |
249 | mtx->mtx_owner = newown; | | 249 | mtx->mtx_owner = newown; |
250 | } | | 250 | } |
251 | #endif /* __HAVE_SIMPLE_MUTEXES */ | | 251 | #endif /* __HAVE_SIMPLE_MUTEXES */ |
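
The three inlines above are the entire ownership protocol for simple mutexes: acquire and waiter-marking are CAS operations on the owner word, while release is a plain store (the long comment in mutex_vector_enter() below justifies that). A reduced sketch of the acquire step, substituting the generic atomic_cas_ulong() for the machine-dependent MUTEX_CAS() and ignoring the LOCKDEBUG bit:

    #include <sys/atomic.h>

    /* Illustrative only: CAS the owner word from "free" (0) to curthread. */
    static inline int
    simple_mutex_acquire(volatile uintptr_t *ownerp, uintptr_t curthread)
    {
        /* atomic_cas_ulong() returns the old value; 0 means we won the race. */
        return atomic_cas_ulong((volatile unsigned long *)ownerp,
            0UL, (unsigned long)curthread) == 0UL;
    }
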
252 | | | 252 | |
253 | /* | | 253 | /* |
254 | * Patch in stubs via strong alias where they are not available. | | 254 | * Patch in stubs via strong alias where they are not available. |
255 | */ | | 255 | */ |
256 | | | 256 | |
257 | #if defined(LOCKDEBUG) | | 257 | #if defined(LOCKDEBUG) |
258 | #undef __HAVE_MUTEX_STUBS | | 258 | #undef __HAVE_MUTEX_STUBS |
259 | #undef __HAVE_SPIN_MUTEX_STUBS | | 259 | #undef __HAVE_SPIN_MUTEX_STUBS |
260 | #endif | | 260 | #endif |
261 | | | 261 | |
262 | #ifndef __HAVE_MUTEX_STUBS | | 262 | #ifndef __HAVE_MUTEX_STUBS |
263 | __strong_alias(mutex_enter,mutex_vector_enter); | | 263 | __strong_alias(mutex_enter,mutex_vector_enter); |
264 | __strong_alias(mutex_exit,mutex_vector_exit); | | 264 | __strong_alias(mutex_exit,mutex_vector_exit); |
265 | #endif | | 265 | #endif |
266 | | | 266 | |
267 | #ifndef __HAVE_SPIN_MUTEX_STUBS | | 267 | #ifndef __HAVE_SPIN_MUTEX_STUBS |
268 | __strong_alias(mutex_spin_enter,mutex_vector_enter); | | 268 | __strong_alias(mutex_spin_enter,mutex_vector_enter); |
269 | __strong_alias(mutex_spin_exit,mutex_vector_exit); | | 269 | __strong_alias(mutex_spin_exit,mutex_vector_exit); |
270 | #endif | | 270 | #endif |
271 | | | 271 | |
272 | static void mutex_abort(const char *, size_t, const kmutex_t *, | | 272 | static void mutex_abort(const char *, size_t, const kmutex_t *, |
273 | const char *); | | 273 | const char *); |
274 | static void mutex_dump(const volatile void *, lockop_printer_t); | | 274 | static void mutex_dump(const volatile void *, lockop_printer_t); |
275 | | | 275 | |
276 | lockops_t mutex_spin_lockops = { | | 276 | lockops_t mutex_spin_lockops = { |
277 | .lo_name = "Mutex", | | 277 | .lo_name = "Mutex", |
278 | .lo_type = LOCKOPS_SPIN, | | 278 | .lo_type = LOCKOPS_SPIN, |
279 | .lo_dump = mutex_dump, | | 279 | .lo_dump = mutex_dump, |
280 | }; | | 280 | }; |
281 | | | 281 | |
282 | lockops_t mutex_adaptive_lockops = { | | 282 | lockops_t mutex_adaptive_lockops = { |
283 | .lo_name = "Mutex", | | 283 | .lo_name = "Mutex", |
284 | .lo_type = LOCKOPS_SLEEP, | | 284 | .lo_type = LOCKOPS_SLEEP, |
285 | .lo_dump = mutex_dump, | | 285 | .lo_dump = mutex_dump, |
286 | }; | | 286 | }; |
287 | | | 287 | |
288 | syncobj_t mutex_syncobj = { | | 288 | syncobj_t mutex_syncobj = { |
289 | .sobj_flag = SOBJ_SLEEPQ_SORTED, | | 289 | .sobj_flag = SOBJ_SLEEPQ_SORTED, |
290 | .sobj_unsleep = turnstile_unsleep, | | 290 | .sobj_unsleep = turnstile_unsleep, |
291 | .sobj_changepri = turnstile_changepri, | | 291 | .sobj_changepri = turnstile_changepri, |
292 | .sobj_lendpri = sleepq_lendpri, | | 292 | .sobj_lendpri = sleepq_lendpri, |
293 | .sobj_owner = (void *)mutex_owner, | | 293 | .sobj_owner = (void *)mutex_owner, |
294 | }; | | 294 | }; |
295 | | | 295 | |
296 | /* | | 296 | /* |
297 | * mutex_dump: | | 297 | * mutex_dump: |
298 | * | | 298 | * |
299 | * Dump the contents of a mutex structure. | | 299 | * Dump the contents of a mutex structure. |
300 | */ | | 300 | */ |
301 | static void | | 301 | static void |
302 | mutex_dump(const volatile void *cookie, lockop_printer_t pr) | | 302 | mutex_dump(const volatile void *cookie, lockop_printer_t pr) |
303 | { | | 303 | { |
304 | const volatile kmutex_t *mtx = cookie; | | 304 | const volatile kmutex_t *mtx = cookie; |
305 | | | 305 | |
306 | pr("owner field : %#018lx wait/spin: %16d/%d\n", | | 306 | pr("owner field : %#018lx wait/spin: %16d/%d\n", |
307 | (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx), | | 307 | (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx), |
308 | MUTEX_SPIN_P(mtx)); | | 308 | MUTEX_SPIN_P(mtx)); |
309 | } | | 309 | } |
310 | | | 310 | |
311 | /* | | 311 | /* |
312 | * mutex_abort: | | 312 | * mutex_abort: |
313 | * | | 313 | * |
314 | * Dump information about an error and panic the system. This | | 314 | * Dump information about an error and panic the system. This |
315 | * generates a lot of machine code in the DIAGNOSTIC case, so | | 315 | * generates a lot of machine code in the DIAGNOSTIC case, so |
316 | * we ask the compiler to not inline it. | | 316 | * we ask the compiler to not inline it. |
317 | */ | | 317 | */ |
318 | static void __noinline | | 318 | static void __noinline |
319 | mutex_abort(const char *func, size_t line, const kmutex_t *mtx, const char *msg) | | 319 | mutex_abort(const char *func, size_t line, const kmutex_t *mtx, const char *msg) |
320 | { | | 320 | { |
321 | | | 321 | |
322 | LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ? | | 322 | LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ? |
323 | &mutex_spin_lockops : &mutex_adaptive_lockops), msg); | | 323 | &mutex_spin_lockops : &mutex_adaptive_lockops), msg); |
324 | } | | 324 | } |
325 | | | 325 | |
326 | /* | | 326 | /* |
327 | * mutex_init: | | 327 | * mutex_init: |
328 | * | | 328 | * |
329 | * Initialize a mutex for use. Note that adaptive mutexes are in | | 329 | * Initialize a mutex for use. Note that adaptive mutexes are in |
330 | * essence spin mutexes that can sleep to avoid deadlock and wasting | | 330 | * essence spin mutexes that can sleep to avoid deadlock and wasting |
331 | * CPU time. We can't easily provide a type of mutex that always | | 331 | * CPU time. We can't easily provide a type of mutex that always |
332 | * sleeps - see comments in mutex_vector_enter() about releasing | | 332 | * sleeps - see comments in mutex_vector_enter() about releasing |
333 | * mutexes unlocked. | | 333 | * mutexes unlocked. |
334 | */ | | 334 | */ |
335 | void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t); | | 335 | void _mutex_init(kmutex_t *, kmutex_type_t, int, uintptr_t); |
336 | void | | 336 | void |
337 | _mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl, | | 337 | _mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl, |
338 | uintptr_t return_address) | | 338 | uintptr_t return_address) |
339 | { | | 339 | { |
340 | bool dodebug; | | 340 | bool dodebug; |
341 | | | 341 | |
342 | memset(mtx, 0, sizeof(*mtx)); | | 342 | memset(mtx, 0, sizeof(*mtx)); |
343 | | | 343 | |
344 | switch (type) { | | 344 | switch (type) { |
345 | case MUTEX_ADAPTIVE: | | 345 | case MUTEX_ADAPTIVE: |
346 | KASSERT(ipl == IPL_NONE); | | 346 | KASSERT(ipl == IPL_NONE); |
347 | break; | | 347 | break; |
348 | case MUTEX_DEFAULT: | | 348 | case MUTEX_DEFAULT: |
349 | case MUTEX_DRIVER: | | 349 | case MUTEX_DRIVER: |
350 | if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK || | | 350 | if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK || |
351 | ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET || | | 351 | ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET || |
352 | ipl == IPL_SOFTSERIAL) { | | 352 | ipl == IPL_SOFTSERIAL) { |
353 | type = MUTEX_ADAPTIVE; | | 353 | type = MUTEX_ADAPTIVE; |
354 | } else { | | 354 | } else { |
355 | type = MUTEX_SPIN; | | 355 | type = MUTEX_SPIN; |
356 | } | | 356 | } |
357 | break; | | 357 | break; |
358 | default: | | 358 | default: |
359 | break; | | 359 | break; |
360 | } | | 360 | } |
361 | | | 361 | |
362 | switch (type) { | | 362 | switch (type) { |
363 | case MUTEX_NODEBUG: | | 363 | case MUTEX_NODEBUG: |
364 | dodebug = LOCKDEBUG_ALLOC(mtx, NULL, return_address); | | 364 | dodebug = LOCKDEBUG_ALLOC(mtx, NULL, return_address); |
365 | MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl); | | 365 | MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl); |
366 | break; | | 366 | break; |
367 | case MUTEX_ADAPTIVE: | | 367 | case MUTEX_ADAPTIVE: |
368 | dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops, | | 368 | dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops, |
369 | return_address); | | 369 | return_address); |
370 | MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug); | | 370 | MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug); |
371 | break; | | 371 | break; |
372 | case MUTEX_SPIN: | | 372 | case MUTEX_SPIN: |
373 | dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops, | | 373 | dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops, |
374 | return_address); | | 374 | return_address); |
375 | MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl); | | 375 | MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl); |
376 | break; | | 376 | break; |
377 | default: | | 377 | default: |
378 | panic("mutex_init: impossible type"); | | 378 | panic("mutex_init: impossible type"); |
379 | break; | | 379 | break; |
380 | } | | 380 | } |
381 | } | | 381 | } |
382 | | | 382 | |
383 | void | | 383 | void |
384 | mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl) | | 384 | mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl) |
385 | { | | 385 | { |
386 | | | 386 | |
387 | _mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0)); | | 387 | _mutex_init(mtx, type, ipl, (uintptr_t)__builtin_return_address(0)); |
388 | } | | 388 | } |
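
The type/IPL matrix in _mutex_init() reads most easily from the caller's side: MUTEX_DEFAULT at IPL_NONE or a software interrupt level yields an adaptive mutex, while a hardware interrupt level forces a spin mutex. A usage sketch (the lock names and the IPL_VM choice are illustrative):

    kmutex_t sc_lock, sc_intr_lock;

    /* IPL_NONE: becomes MUTEX_ADAPTIVE; may sleep when contended. */
    mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);

    /* IPL_VM, a hardware interrupt level: promoted to MUTEX_SPIN. */
    mutex_init(&sc_intr_lock, MUTEX_DEFAULT, IPL_VM);

    /* ... */

    mutex_destroy(&sc_intr_lock);
    mutex_destroy(&sc_lock);
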
389 | | | 389 | |
390 | /* | | 390 | /* |
391 | * mutex_destroy: | | 391 | * mutex_destroy: |
392 | * | | 392 | * |
393 | * Tear down a mutex. | | 393 | * Tear down a mutex. |
394 | */ | | 394 | */ |
395 | void | | 395 | void |
396 | mutex_destroy(kmutex_t *mtx) | | 396 | mutex_destroy(kmutex_t *mtx) |
397 | { | | 397 | { |
398 | | | 398 | |
399 | if (MUTEX_ADAPTIVE_P(mtx)) { | | 399 | if (MUTEX_ADAPTIVE_P(mtx)) { |
400 | MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) && | | 400 | MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner)); |
401 | !MUTEX_HAS_WAITERS(mtx)); | | 401 | MUTEX_ASSERT(mtx, !MUTEX_HAS_WAITERS(mtx)); |
402 | } else { | | 402 | } else { |
403 | MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx)); | | 403 | MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx)); |
404 | } | | 404 | } |
405 | | | 405 | |
406 | LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx); | | 406 | LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx); |
407 | MUTEX_DESTROY(mtx); | | 407 | MUTEX_DESTROY(mtx); |
408 | } | | 408 | } |
409 | | | 409 | |
410 | #ifdef MULTIPROCESSOR | | 410 | #ifdef MULTIPROCESSOR |
411 | /* | | 411 | /* |
412 | * mutex_oncpu: | | 412 | * mutex_oncpu: |
413 | * | | 413 | * |
414 | * Return true if an adaptive mutex owner is running on a CPU in the | | 414 | * Return true if an adaptive mutex owner is running on a CPU in the |
415 | * system. If the target is waiting on the kernel big lock, then we | | 415 | * system. If the target is waiting on the kernel big lock, then we |
416 | * must release it. This is necessary to avoid deadlock. | | 416 | * must release it. This is necessary to avoid deadlock. |
417 | */ | | 417 | */ |
418 | static bool | | 418 | static bool |
419 | mutex_oncpu(uintptr_t owner) | | 419 | mutex_oncpu(uintptr_t owner) |
420 | { | | 420 | { |
421 | struct cpu_info *ci; | | 421 | struct cpu_info *ci; |
422 | lwp_t *l; | | 422 | lwp_t *l; |
423 | | | 423 | |
424 | KASSERT(kpreempt_disabled()); | | 424 | KASSERT(kpreempt_disabled()); |
425 | | | 425 | |
426 | if (!MUTEX_OWNED(owner)) { | | 426 | if (!MUTEX_OWNED(owner)) { |
427 | return false; | | 427 | return false; |
428 | } | | 428 | } |
429 | | | 429 | |
430 | /* | | 430 | /* |
431 | * See lwp_dtor() why dereference of the LWP pointer is safe. | | 431 | * See lwp_dtor() why dereference of the LWP pointer is safe. |
432 | * We must have kernel preemption disabled for that. | | 432 | * We must have kernel preemption disabled for that. |
433 | */ | | 433 | */ |
434 | l = (lwp_t *)MUTEX_OWNER(owner); | | 434 | l = (lwp_t *)MUTEX_OWNER(owner); |
435 | ci = l->l_cpu; | | 435 | ci = l->l_cpu; |
436 | | | 436 | |
437 | if (ci && ci->ci_curlwp == l) { | | 437 | if (ci && ci->ci_curlwp == l) { |
438 | /* Target is running; do we need to block? */ | | 438 | /* Target is running; do we need to block? */ |
439 | return (ci->ci_biglock_wanted != l); | | 439 | return (ci->ci_biglock_wanted != l); |
440 | } | | 440 | } |
441 | | | 441 | |
442 | /* Not running. It may be safe to block now. */ | | 442 | /* Not running. It may be safe to block now. */ |
443 | return false; | | 443 | return false; |
444 | } | | 444 | } |
445 | #endif /* MULTIPROCESSOR */ | | 445 | #endif /* MULTIPROCESSOR */ |
446 | | | 446 | |
447 | /* | | 447 | /* |
448 | * mutex_vector_enter: | | 448 | * mutex_vector_enter: |
449 | * | | 449 | * |
450 | * Support routine for mutex_enter() that must handle all cases. In | | 450 | * Support routine for mutex_enter() that must handle all cases. In |
451 | * the LOCKDEBUG case, mutex_enter() is always aliased here, even if | | 451 | * the LOCKDEBUG case, mutex_enter() is always aliased here, even if |
452 | * fast-path stubs are available. If a mutex_spin_enter() stub is | | 452 | * fast-path stubs are available. If a mutex_spin_enter() stub is |
453 | * not available, then it is also aliased directly here. | | 453 | * not available, then it is also aliased directly here. |
454 | */ | | 454 | */ |
455 | void | | 455 | void |
456 | mutex_vector_enter(kmutex_t *mtx) | | 456 | mutex_vector_enter(kmutex_t *mtx) |
457 | { | | 457 | { |
458 | uintptr_t owner, curthread; | | 458 | uintptr_t owner, curthread; |
459 | turnstile_t *ts; | | 459 | turnstile_t *ts; |
460 | #ifdef MULTIPROCESSOR | | 460 | #ifdef MULTIPROCESSOR |
461 | u_int count; | | 461 | u_int count; |
462 | #endif | | 462 | #endif |
463 | LOCKSTAT_COUNTER(spincnt); | | 463 | LOCKSTAT_COUNTER(spincnt); |
464 | LOCKSTAT_COUNTER(slpcnt); | | 464 | LOCKSTAT_COUNTER(slpcnt); |
465 | LOCKSTAT_TIMER(spintime); | | 465 | LOCKSTAT_TIMER(spintime); |
466 | LOCKSTAT_TIMER(slptime); | | 466 | LOCKSTAT_TIMER(slptime); |
467 | LOCKSTAT_FLAG(lsflag); | | 467 | LOCKSTAT_FLAG(lsflag); |
468 | | | 468 | |
469 | /* | | 469 | /* |
470 | * Handle spin mutexes. | | 470 | * Handle spin mutexes. |
471 | */ | | 471 | */ |
472 | if (MUTEX_SPIN_P(mtx)) { | | 472 | if (MUTEX_SPIN_P(mtx)) { |
473 | #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR) | | 473 | #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR) |
474 | u_int spins = 0; | | 474 | u_int spins = 0; |
475 | #endif | | 475 | #endif |
476 | MUTEX_SPIN_SPLRAISE(mtx); | | 476 | MUTEX_SPIN_SPLRAISE(mtx); |
477 | MUTEX_WANTLOCK(mtx); | | 477 | MUTEX_WANTLOCK(mtx); |
478 | #ifdef FULL | | 478 | #ifdef FULL |
479 | if (MUTEX_SPINBIT_LOCK_TRY(mtx)) { | | 479 | if (MUTEX_SPINBIT_LOCK_TRY(mtx)) { |
480 | MUTEX_LOCKED(mtx); | | 480 | MUTEX_LOCKED(mtx); |
481 | return; | | 481 | return; |
482 | } | | 482 | } |
483 | #if !defined(MULTIPROCESSOR) | | 483 | #if !defined(MULTIPROCESSOR) |
484 | MUTEX_ABORT(mtx, "locking against myself"); | | 484 | MUTEX_ABORT(mtx, "locking against myself"); |
485 | #else /* !MULTIPROCESSOR */ | | 485 | #else /* !MULTIPROCESSOR */ |
486 | | | 486 | |
487 | LOCKSTAT_ENTER(lsflag); | | 487 | LOCKSTAT_ENTER(lsflag); |
488 | LOCKSTAT_START_TIMER(lsflag, spintime); | | 488 | LOCKSTAT_START_TIMER(lsflag, spintime); |
489 | count = SPINLOCK_BACKOFF_MIN; | | 489 | count = SPINLOCK_BACKOFF_MIN; |
490 | | | 490 | |
491 | /* | | 491 | /* |
492 | * Spin on the lock word and do exponential backoff | | 492 | * Spin on the lock word and do exponential backoff |
493 | * to reduce cache line ping-ponging between CPUs. | | 493 | * to reduce cache line ping-ponging between CPUs. |
494 | */ | | 494 | */ |
495 | do { | | 495 | do { |
496 | #if MUTEX_PANIC_SKIP_SPIN | | 496 | #if MUTEX_PANIC_SKIP_SPIN |
497 | if (panicstr != NULL) | | 497 | if (panicstr != NULL) |
498 | break; | | 498 | break; |
499 | #endif | | 499 | #endif |
500 | while (MUTEX_SPINBIT_LOCKED_P(mtx)) { | | 500 | while (MUTEX_SPINBIT_LOCKED_P(mtx)) { |
501 | SPINLOCK_BACKOFF(count); | | 501 | SPINLOCK_BACKOFF(count); |
502 | #ifdef LOCKDEBUG | | 502 | #ifdef LOCKDEBUG |
503 | if (SPINLOCK_SPINOUT(spins)) | | 503 | if (SPINLOCK_SPINOUT(spins)) |
504 | MUTEX_ABORT(mtx, "spinout"); | | 504 | MUTEX_ABORT(mtx, "spinout"); |
505 | #endif /* LOCKDEBUG */ | | 505 | #endif /* LOCKDEBUG */ |
506 | } | | 506 | } |
507 | } while (!MUTEX_SPINBIT_LOCK_TRY(mtx)); | | 507 | } while (!MUTEX_SPINBIT_LOCK_TRY(mtx)); |
508 | | | 508 | |
509 | if (count != SPINLOCK_BACKOFF_MIN) { | | 509 | if (count != SPINLOCK_BACKOFF_MIN) { |
510 | LOCKSTAT_STOP_TIMER(lsflag, spintime); | | 510 | LOCKSTAT_STOP_TIMER(lsflag, spintime); |
511 | LOCKSTAT_EVENT(lsflag, mtx, | | 511 | LOCKSTAT_EVENT(lsflag, mtx, |
512 | LB_SPIN_MUTEX | LB_SPIN, 1, spintime); | | 512 | LB_SPIN_MUTEX | LB_SPIN, 1, spintime); |
513 | } | | 513 | } |
514 | LOCKSTAT_EXIT(lsflag); | | 514 | LOCKSTAT_EXIT(lsflag); |
515 | #endif /* !MULTIPROCESSOR */ | | 515 | #endif /* !MULTIPROCESSOR */ |
516 | #endif /* FULL */ | | 516 | #endif /* FULL */ |
517 | MUTEX_LOCKED(mtx); | | 517 | MUTEX_LOCKED(mtx); |
518 | return; | | 518 | return; |
519 | } | | 519 | } |
520 | | | 520 | |
521 | curthread = (uintptr_t)curlwp; | | 521 | curthread = (uintptr_t)curlwp; |
522 | | | 522 | |
523 | MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx)); | | 523 | MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx)); |
524 | MUTEX_ASSERT(mtx, curthread != 0); | | 524 | MUTEX_ASSERT(mtx, curthread != 0); |
525 | MUTEX_ASSERT(mtx, !cpu_intr_p()); | | 525 | MUTEX_ASSERT(mtx, !cpu_intr_p()); |
526 | MUTEX_WANTLOCK(mtx); | | 526 | MUTEX_WANTLOCK(mtx); |
527 | | | 527 | |
528 | if (panicstr == NULL) { | | 528 | if (panicstr == NULL) { |
529 | KDASSERT(pserialize_not_in_read_section()); | | 529 | KDASSERT(pserialize_not_in_read_section()); |
530 | LOCKDEBUG_BARRIER(&kernel_lock, 1); | | 530 | LOCKDEBUG_BARRIER(&kernel_lock, 1); |
531 | } | | 531 | } |
532 | | | 532 | |
533 | LOCKSTAT_ENTER(lsflag); | | 533 | LOCKSTAT_ENTER(lsflag); |
534 | | | 534 | |
535 | /* | | 535 | /* |
536 | * Adaptive mutex; spin trying to acquire the mutex. If we | | 536 | * Adaptive mutex; spin trying to acquire the mutex. If we |
537 | * determine that the owner is not running on a processor, | | 537 | * determine that the owner is not running on a processor, |
538 | * then we stop spinning, and sleep instead. | | 538 | * then we stop spinning, and sleep instead. |
539 | */ | | 539 | */ |
540 | KPREEMPT_DISABLE(curlwp); | | 540 | KPREEMPT_DISABLE(curlwp); |
541 | for (owner = mtx->mtx_owner;;) { | | 541 | for (owner = mtx->mtx_owner;;) { |
542 | if (!MUTEX_OWNED(owner)) { | | 542 | if (!MUTEX_OWNED(owner)) { |
543 | /* | | 543 | /* |
544 | * A clear mutex owner field could mean two things: | | 544 | * A clear mutex owner field could mean two things: |
545 | * | | 545 | * |
546 | * * The mutex has been released. | | 546 | * * The mutex has been released. |
547 | * * The owner field hasn't been set yet. | | 547 | * * The owner field hasn't been set yet. |
548 | * | | 548 | * |
549 | * Try to acquire it again. If that fails, | | 549 | * Try to acquire it again. If that fails, |
550 | * we'll just loop again. | | 550 | * we'll just loop again. |
551 | */ | | 551 | */ |
552 | if (MUTEX_ACQUIRE(mtx, curthread)) | | 552 | if (MUTEX_ACQUIRE(mtx, curthread)) |
553 | break; | | 553 | break; |
554 | owner = mtx->mtx_owner; | | 554 | owner = mtx->mtx_owner; |
555 | continue; | | 555 | continue; |
556 | } | | 556 | } |
557 | #if MUTEX_PANIC_SKIP_ADAPTIVE | | 557 | #if MUTEX_PANIC_SKIP_ADAPTIVE |
558 | if (__predict_false(panicstr != NULL)) { | | 558 | if (__predict_false(panicstr != NULL)) { |
559 | KPREEMPT_ENABLE(curlwp); | | 559 | KPREEMPT_ENABLE(curlwp); |
560 | return; | | 560 | return; |
561 | } | | 561 | } |
562 | #endif | | 562 | #endif |
563 | if (__predict_false(MUTEX_OWNER(owner) == curthread)) { | | 563 | if (__predict_false(MUTEX_OWNER(owner) == curthread)) { |
564 | MUTEX_ABORT(mtx, "locking against myself"); | | 564 | MUTEX_ABORT(mtx, "locking against myself"); |
565 | } | | 565 | } |
566 | #ifdef MULTIPROCESSOR | | 566 | #ifdef MULTIPROCESSOR |
567 | /* | | 567 | /* |
568 | * Check to see if the owner is running on a processor. | | 568 | * Check to see if the owner is running on a processor. |
569 | * If so, then we should just spin, as the owner will | | 569 | * If so, then we should just spin, as the owner will |
570 | * likely release the lock very soon. | | 570 | * likely release the lock very soon. |
571 | */ | | 571 | */ |
572 | if (mutex_oncpu(owner)) { | | 572 | if (mutex_oncpu(owner)) { |
573 | LOCKSTAT_START_TIMER(lsflag, spintime); | | 573 | LOCKSTAT_START_TIMER(lsflag, spintime); |
574 | count = SPINLOCK_BACKOFF_MIN; | | 574 | count = SPINLOCK_BACKOFF_MIN; |
575 | do { | | 575 | do { |
576 | KPREEMPT_ENABLE(curlwp); | | 576 | KPREEMPT_ENABLE(curlwp); |
577 | SPINLOCK_BACKOFF(count); | | 577 | SPINLOCK_BACKOFF(count); |
578 | KPREEMPT_DISABLE(curlwp); | | 578 | KPREEMPT_DISABLE(curlwp); |
579 | owner = mtx->mtx_owner; | | 579 | owner = mtx->mtx_owner; |
580 | } while (mutex_oncpu(owner)); | | 580 | } while (mutex_oncpu(owner)); |
581 | LOCKSTAT_STOP_TIMER(lsflag, spintime); | | 581 | LOCKSTAT_STOP_TIMER(lsflag, spintime); |
582 | LOCKSTAT_COUNT(spincnt, 1); | | 582 | LOCKSTAT_COUNT(spincnt, 1); |
583 | if (!MUTEX_OWNED(owner)) | | 583 | if (!MUTEX_OWNED(owner)) |
584 | continue; | | 584 | continue; |
585 | } | | 585 | } |
586 | #endif | | 586 | #endif |
587 | | | 587 | |
588 | ts = turnstile_lookup(mtx); | | 588 | ts = turnstile_lookup(mtx); |
589 | | | 589 | |
590 | /* | | 590 | /* |
591 | * Once we have the turnstile chain interlock, mark the | | 591 | * Once we have the turnstile chain interlock, mark the |
592 | * mutex as having waiters. If that fails, spin again: | | 592 | * mutex as having waiters. If that fails, spin again: |
593 | * chances are that the mutex has been released. | | 593 | * chances are that the mutex has been released. |
594 | */ | | 594 | */ |
595 | if (!MUTEX_SET_WAITERS(mtx, owner)) { | | 595 | if (!MUTEX_SET_WAITERS(mtx, owner)) { |
596 | turnstile_exit(mtx); | | 596 | turnstile_exit(mtx); |
597 | owner = mtx->mtx_owner; | | 597 | owner = mtx->mtx_owner; |
598 | continue; | | 598 | continue; |
599 | } | | 599 | } |
600 | | | 600 | |
601 | #ifdef MULTIPROCESSOR | | 601 | #ifdef MULTIPROCESSOR |
602 | /* | | 602 | /* |
603 | * mutex_exit() is permitted to release the mutex without | | 603 | * mutex_exit() is permitted to release the mutex without |
604 | * any interlocking instructions, and the following can | | 604 | * any interlocking instructions, and the following can |
605 | * occur as a result: | | 605 | * occur as a result: |
606 | * | | 606 | * |
607 | * CPU 1: MUTEX_SET_WAITERS() CPU2: mutex_exit() | | 607 | * CPU 1: MUTEX_SET_WAITERS() CPU2: mutex_exit() |
608 | * ---------------------------- ---------------------------- | | 608 | * ---------------------------- ---------------------------- |
609 | * .. acquire cache line | | 609 | * .. acquire cache line |
610 | * .. test for waiters | | 610 | * .. test for waiters |
611 | * acquire cache line <- lose cache line | | 611 | * acquire cache line <- lose cache line |
612 | * lock cache line .. | | 612 | * lock cache line .. |
613 | * verify mutex is held .. | | 613 | * verify mutex is held .. |
614 | * set waiters .. | | 614 | * set waiters .. |
615 | * unlock cache line .. | | 615 | * unlock cache line .. |
616 | * lose cache line -> acquire cache line | | 616 | * lose cache line -> acquire cache line |
617 | * .. clear lock word, waiters | | 617 | * .. clear lock word, waiters |
618 | * return success | | 618 | * return success |
619 | * | | 619 | * |
620 | * There is another race that can occur: a third CPU could | | 620 | * There is another race that can occur: a third CPU could |
621 | * acquire the mutex as soon as it is released. Since | | 621 | * acquire the mutex as soon as it is released. Since |
622 | * adaptive mutexes are primarily spin mutexes, this is not | | 622 | * adaptive mutexes are primarily spin mutexes, this is not |
623 | * something that we need to worry about too much. What we | | 623 | * something that we need to worry about too much. What we |
624 | * do need to ensure is that the waiters bit gets set. | | 624 | * do need to ensure is that the waiters bit gets set. |
625 | * | | 625 | * |
626 | * To allow the unlocked release, we need to make some | | 626 | * To allow the unlocked release, we need to make some |
627 | * assumptions here: | | 627 | * assumptions here: |
628 | * | | 628 | * |
629 | * o Release is the only non-atomic/unlocked operation | | 629 | * o Release is the only non-atomic/unlocked operation |
630 | * that can be performed on the mutex. (It must still | | 630 | * that can be performed on the mutex. (It must still |
631 | * be atomic on the local CPU, e.g. in case interrupted | | 631 | * be atomic on the local CPU, e.g. in case interrupted |
632 | * or preempted). | | 632 | * or preempted). |
633 | * | | 633 | * |
634 | * o At any given time, MUTEX_SET_WAITERS() can only ever | | 634 | * o At any given time, MUTEX_SET_WAITERS() can only ever |
635 | * be in progress on one CPU in the system - guaranteed | | 635 | * be in progress on one CPU in the system - guaranteed |
636 | * by the turnstile chain lock. | | 636 | * by the turnstile chain lock. |
637 | * | | 637 | * |
638 | * o No other operations other than MUTEX_SET_WAITERS() | | 638 | * o No other operations other than MUTEX_SET_WAITERS() |
639 | * and release can modify a mutex with a non-zero | | 639 | * and release can modify a mutex with a non-zero |
640 | * owner field. | | 640 | * owner field. |
641 | * | | 641 | * |
642 | * o The result of a successful MUTEX_SET_WAITERS() call | | 642 | * o The result of a successful MUTEX_SET_WAITERS() call |
643 | * is an unbuffered write that is immediately visible | | 643 | * is an unbuffered write that is immediately visible |
644 | * to all other processors in the system. | | 644 | * to all other processors in the system. |
645 | * | | 645 | * |
646 | * o If the holding LWP switches away, it posts a store | | 646 | * o If the holding LWP switches away, it posts a store |
647 | * fence before changing curlwp, ensuring that any | | 647 | * fence before changing curlwp, ensuring that any |
648 | * overwrite of the mutex waiters flag by mutex_exit() | | 648 | * overwrite of the mutex waiters flag by mutex_exit() |
649 | * completes before the modification of curlwp becomes | | 649 | * completes before the modification of curlwp becomes |
650 | * visible to this CPU. | | 650 | * visible to this CPU. |
651 | * | | 651 | * |
652 | * o mi_switch() posts a store fence before setting curlwp | | 652 | * o mi_switch() posts a store fence before setting curlwp |
653 | * and before resuming execution of an LWP. | | 653 | * and before resuming execution of an LWP. |
654 | * | | 654 | * |
655 | * o _kernel_lock() posts a store fence before setting | | 655 | * o _kernel_lock() posts a store fence before setting |
656 | * curcpu()->ci_biglock_wanted, and after clearing it. | | 656 | * curcpu()->ci_biglock_wanted, and after clearing it. |
657 | * This ensures that any overwrite of the mutex waiters | | 657 | * This ensures that any overwrite of the mutex waiters |
658 | * flag by mutex_exit() completes before the modification | | 658 | * flag by mutex_exit() completes before the modification |
659 | * of ci_biglock_wanted becomes visible. | | 659 | * of ci_biglock_wanted becomes visible. |
660 | * | | 660 | * |
661 | * We now post a read memory barrier (after setting the | | 661 | * We now post a read memory barrier (after setting the |
662 | * waiters field) and check the lock holder's status again. | | 662 | * waiters field) and check the lock holder's status again. |
663 | * Some of the possible outcomes (not an exhaustive list): | | 663 | * Some of the possible outcomes (not an exhaustive list): |
664 | * | | 664 | * |
665 | * 1. The on-CPU check returns true: the holding LWP is | | 665 | * 1. The on-CPU check returns true: the holding LWP is |
666 | * running again. The lock may be released soon and | | 666 | * running again. The lock may be released soon and |
667 | * we should spin. Importantly, we can't trust the | | 667 | * we should spin. Importantly, we can't trust the |
668 | * value of the waiters flag. | | 668 | * value of the waiters flag. |
669 | * | | 669 | * |
670 | * 2. The on-CPU check returns false: the holding LWP is | | 670 | * 2. The on-CPU check returns false: the holding LWP is |
671 | * not running. We now have the opportunity to check | | 671 | * not running. We now have the opportunity to check |
672 | * if mutex_exit() has blatted the modifications made | | 672 | * if mutex_exit() has blatted the modifications made |
673 | * by MUTEX_SET_WAITERS(). | | 673 | * by MUTEX_SET_WAITERS(). |
674 | * | | 674 | * |
675 | * 3. The on-CPU check returns false: the holding LWP may | | 675 | * 3. The on-CPU check returns false: the holding LWP may |
676 | * or may not be running. It has context switched at | | 676 | * or may not be running. It has context switched at |
677 | * some point during our check. Again, we have the | | 677 | * some point during our check. Again, we have the |
678 | * chance to see if the waiters bit is still set or | | 678 | * chance to see if the waiters bit is still set or |
679 | * has been overwritten. | | 679 | * has been overwritten. |
680 | * | | 680 | * |
681 | * 4. The on-CPU check returns false: the holding LWP is | | 681 | * 4. The on-CPU check returns false: the holding LWP is |
682 | * running on a CPU, but wants the big lock. It's OK | | 682 | * running on a CPU, but wants the big lock. It's OK |
683 | * to check the waiters field in this case. | | 683 | * to check the waiters field in this case. |
684 | * | | 684 | * |
685 | * 5. The has-waiters check fails: the mutex has been | | 685 | * 5. The has-waiters check fails: the mutex has been |
686 | * released, the waiters flag cleared and another LWP | | 686 | * released, the waiters flag cleared and another LWP |
687 | * now owns the mutex. | | 687 | * now owns the mutex. |
688 | * | | 688 | * |
689 | * 6. The has-waiters check fails: the mutex has been | | 689 | * 6. The has-waiters check fails: the mutex has been |
690 | * released. | | 690 | * released. |
691 | * | | 691 | * |
692 | * If the waiters bit is not set, it's unsafe to go to sleep, | | 692 | * If the waiters bit is not set, it's unsafe to go to sleep, |
693 | * as we might never be awoken. | | 693 | * as we might never be awoken. |
694 | */ | | 694 | */ |
695 | if ((membar_consumer(), mutex_oncpu(owner)) || | | 695 | if ((membar_consumer(), mutex_oncpu(owner)) || |
696 | (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) { | | 696 | (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) { |
697 | turnstile_exit(mtx); | | 697 | turnstile_exit(mtx); |
698 | owner = mtx->mtx_owner; | | 698 | owner = mtx->mtx_owner; |
699 | continue; | | 699 | continue; |
700 | } | | 700 | } |
701 | #endif /* MULTIPROCESSOR */ | | 701 | #endif /* MULTIPROCESSOR */ |
702 | | | 702 | |
703 | LOCKSTAT_START_TIMER(lsflag, slptime); | | 703 | LOCKSTAT_START_TIMER(lsflag, slptime); |
704 | | | 704 | |
705 | turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj); | | 705 | turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj); |
706 | | | 706 | |
707 | LOCKSTAT_STOP_TIMER(lsflag, slptime); | | 707 | LOCKSTAT_STOP_TIMER(lsflag, slptime); |
708 | LOCKSTAT_COUNT(slpcnt, 1); | | 708 | LOCKSTAT_COUNT(slpcnt, 1); |
709 | | | 709 | |
710 | owner = mtx->mtx_owner; | | 710 | owner = mtx->mtx_owner; |
711 | } | | 711 | } |
712 | KPREEMPT_ENABLE(curlwp); | | 712 | KPREEMPT_ENABLE(curlwp); |
713 | | | 713 | |
714 | LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1, | | 714 | LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1, |
715 | slpcnt, slptime); | | 715 | slpcnt, slptime); |
716 | LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN, | | 716 | LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN, |
717 | spincnt, spintime); | | 717 | spincnt, spintime); |
718 | LOCKSTAT_EXIT(lsflag); | | 718 | LOCKSTAT_EXIT(lsflag); |
719 | | | 719 | |
720 | MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); | | 720 | MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); |
721 | MUTEX_LOCKED(mtx); | | 721 | MUTEX_LOCKED(mtx); |
722 | } | | 722 | } |
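
None of the above is visible to callers: mutex_enter() either succeeds in a fast-path stub or lands in mutex_vector_enter(), but the contract is identical. A minimal sketch with a hypothetical softc; note that the assertions above forbid calling this from interrupt context, since the adaptive path may sleep:

    struct example_softc {
        kmutex_t    sc_lock;    /* adaptive: MUTEX_DEFAULT, IPL_NONE */
        int         sc_refs;
    };

    static void
    example_ref(struct example_softc *sc)
    {
        mutex_enter(&sc->sc_lock);  /* may spin briefly or sleep */
        sc->sc_refs++;
        mutex_exit(&sc->sc_lock);
    }
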
723 | | | 723 | |
724 | /* | | 724 | /* |
725 | * mutex_vector_exit: | | 725 | * mutex_vector_exit: |
726 | * | | 726 | * |
727 | * Support routine for mutex_exit() that handles all cases. | | 727 | * Support routine for mutex_exit() that handles all cases. |
728 | */ | | 728 | */ |
729 | void | | 729 | void |
730 | mutex_vector_exit(kmutex_t *mtx) | | 730 | mutex_vector_exit(kmutex_t *mtx) |
731 | { | | 731 | { |
732 | turnstile_t *ts; | | 732 | turnstile_t *ts; |
733 | uintptr_t curthread; | | 733 | uintptr_t curthread; |
734 | | | 734 | |
735 | if (MUTEX_SPIN_P(mtx)) { | | 735 | if (MUTEX_SPIN_P(mtx)) { |
736 | #ifdef FULL | | 736 | #ifdef FULL |
737 | if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) { | | 737 | if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) { |
738 | #if MUTEX_PANIC_SKIP_SPIN | | 738 | #if MUTEX_PANIC_SKIP_SPIN |
739 | if (panicstr != NULL) | | 739 | if (panicstr != NULL) |
740 | return; | | 740 | return; |
741 | #endif | | 741 | #endif |
742 | MUTEX_ABORT(mtx, "exiting unheld spin mutex"); | | 742 | MUTEX_ABORT(mtx, "exiting unheld spin mutex"); |
743 | } | | 743 | } |
744 | MUTEX_UNLOCKED(mtx); | | 744 | MUTEX_UNLOCKED(mtx); |
745 | MUTEX_SPINBIT_LOCK_UNLOCK(mtx); | | 745 | MUTEX_SPINBIT_LOCK_UNLOCK(mtx); |
746 | #endif | | 746 | #endif |
747 | MUTEX_SPIN_SPLRESTORE(mtx); | | 747 | MUTEX_SPIN_SPLRESTORE(mtx); |
748 | return; | | 748 | return; |
749 | } | | 749 | } |
750 | | | 750 | |
751 | #if MUTEX_PANIC_SKIP_ADAPTIVE | | 751 | #if MUTEX_PANIC_SKIP_ADAPTIVE |
752 | if (__predict_false((uintptr_t)panicstr | cold)) { | | 752 | if (__predict_false((uintptr_t)panicstr | cold)) { |
753 | MUTEX_UNLOCKED(mtx); | | 753 | MUTEX_UNLOCKED(mtx); |
754 | MUTEX_RELEASE(mtx); | | 754 | MUTEX_RELEASE(mtx); |
755 | return; | | 755 | return; |
756 | } | | 756 | } |
757 | #endif | | 757 | #endif |
758 | | | 758 | |
759 | curthread = (uintptr_t)curlwp; | | 759 | curthread = (uintptr_t)curlwp; |
760 | MUTEX_DASSERT(mtx, curthread != 0); | | 760 | MUTEX_DASSERT(mtx, curthread != 0); |
761 | MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); | | 761 | MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); |
762 | MUTEX_UNLOCKED(mtx); | | 762 | MUTEX_UNLOCKED(mtx); |
763 | #if !defined(LOCKDEBUG) | | 763 | #if !defined(LOCKDEBUG) |
764 | __USE(curthread); | | 764 | __USE(curthread); |
765 | #endif | | 765 | #endif |
766 | | | 766 | |
767 | #ifdef LOCKDEBUG | | 767 | #ifdef LOCKDEBUG |
768 | /* | | 768 | /* |
769 | * Avoid having to take the turnstile chain lock every time | | 769 | * Avoid having to take the turnstile chain lock every time |
770 | * around. Raise the priority level to splhigh() in order | | 770 | * around. Raise the priority level to splhigh() in order |
771 | * to disable preemption and so make the following atomic. | | 771 | * to disable preemption and so make the following atomic. |
772 | */ | | 772 | */ |
773 | { | | 773 | { |
774 | int s = splhigh(); | | 774 | int s = splhigh(); |
775 | if (!MUTEX_HAS_WAITERS(mtx)) { | | 775 | if (!MUTEX_HAS_WAITERS(mtx)) { |
776 | MUTEX_RELEASE(mtx); | | 776 | MUTEX_RELEASE(mtx); |
777 | splx(s); | | 777 | splx(s); |
778 | return; | | 778 | return; |
779 | } | | 779 | } |
780 | splx(s); | | 780 | splx(s); |
781 | } | | 781 | } |
782 | #endif | | 782 | #endif |
783 | | | 783 | |
784 | /* | | 784 | /* |
785 | * Get this lock's turnstile. This gets the interlock on | | 785 | * Get this lock's turnstile. This gets the interlock on |
786 | * the sleep queue. Once we have that, we can clear the | | 786 | * the sleep queue. Once we have that, we can clear the |
787 | * lock. If there was no turnstile for the lock, there | | 787 | * lock. If there was no turnstile for the lock, there |
788 | * were no waiters remaining. | | 788 | * were no waiters remaining. |
789 | */ | | 789 | */ |
790 | ts = turnstile_lookup(mtx); | | 790 | ts = turnstile_lookup(mtx); |
791 | | | 791 | |
792 | if (ts == NULL) { | | 792 | if (ts == NULL) { |
793 | MUTEX_RELEASE(mtx); | | 793 | MUTEX_RELEASE(mtx); |
794 | turnstile_exit(mtx); | | 794 | turnstile_exit(mtx); |
795 | } else { | | 795 | } else { |
796 | MUTEX_RELEASE(mtx); | | 796 | MUTEX_RELEASE(mtx); |
797 | turnstile_wakeup(ts, TS_WRITER_Q, | | 797 | turnstile_wakeup(ts, TS_WRITER_Q, |
798 | TS_WAITERS(ts, TS_WRITER_Q), NULL); | | 798 | TS_WAITERS(ts, TS_WRITER_Q), NULL); |
799 | } | | 799 | } |
800 | } | | 800 | } |
801 | | | 801 | |
802 | #ifndef __HAVE_SIMPLE_MUTEXES | | 802 | #ifndef __HAVE_SIMPLE_MUTEXES |
803 | /* | | 803 | /* |
804 | * mutex_wakeup: | | 804 | * mutex_wakeup: |
805 | * | | 805 | * |
806 | * Support routine for mutex_exit() that wakes up all waiters. | | 806 | * Support routine for mutex_exit() that wakes up all waiters. |
807 | * We assume that the mutex has been released, but it need not | | 807 | * We assume that the mutex has been released, but it need not |
808 | * be. | | 808 | * be. |
809 | */ | | 809 | */ |
810 | void | | 810 | void |
811 | mutex_wakeup(kmutex_t *mtx) | | 811 | mutex_wakeup(kmutex_t *mtx) |
812 | { | | 812 | { |
813 | turnstile_t *ts; | | 813 | turnstile_t *ts; |
814 | | | 814 | |
815 | ts = turnstile_lookup(mtx); | | 815 | ts = turnstile_lookup(mtx); |
816 | if (ts == NULL) { | | 816 | if (ts == NULL) { |
817 | turnstile_exit(mtx); | | 817 | turnstile_exit(mtx); |
818 | return; | | 818 | return; |
819 | } | | 819 | } |
820 | MUTEX_CLEAR_WAITERS(mtx); | | 820 | MUTEX_CLEAR_WAITERS(mtx); |
821 | turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL); | | 821 | turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL); |
822 | } | | 822 | } |
823 | #endif /* !__HAVE_SIMPLE_MUTEXES */ | | 823 | #endif /* !__HAVE_SIMPLE_MUTEXES */ |
824 | | | 824 | |
825 | /* | | 825 | /* |
826 | * mutex_owned: | | 826 | * mutex_owned: |
827 | * | | 827 | * |
828 | * Return true if the current LWP (adaptive) or CPU (spin) | | 828 | * Return true if the current LWP (adaptive) or CPU (spin) |
829 | * holds the mutex. | | 829 | * holds the mutex. |
830 | */ | | 830 | */ |
831 | int | | 831 | int |
832 | mutex_owned(const kmutex_t *mtx) | | 832 | mutex_owned(const kmutex_t *mtx) |
833 | { | | 833 | { |
834 | | | 834 | |
835 | if (mtx == NULL) | | 835 | if (mtx == NULL) |
836 | return 0; | | 836 | return 0; |
837 | if (MUTEX_ADAPTIVE_P(mtx)) | | 837 | if (MUTEX_ADAPTIVE_P(mtx)) |
838 | return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp; | | 838 | return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp; |
839 | #ifdef FULL | | 839 | #ifdef FULL |
840 | return MUTEX_SPINBIT_LOCKED_P(mtx); | | 840 | return MUTEX_SPINBIT_LOCKED_P(mtx); |
841 | #else | | 841 | #else |
842 | return 1; | | 842 | return 1; |
843 | #endif | | 843 | #endif |
844 | } | | 844 | } |
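
mutex_owned() is meant for assertions like the sketch below, not for making locking decisions at run time; for spin mutexes it reports whether this CPU holds the lock, and in a non-FULL build it always returns 1. Reusing the hypothetical softc from the earlier sketch:

    static void
    example_kick(struct example_softc *sc)
    {
        KASSERT(mutex_owned(&sc->sc_lock)); /* caller must hold sc_lock */
        /* ... touch state guarded by sc_lock ... */
    }
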
845 | | | 845 | |
846 | /* | | 846 | /* |
847 | * mutex_owner: | | 847 | * mutex_owner: |
848 | * | | 848 | * |
849 | * Return the current owner of an adaptive mutex. Used for | | 849 | * Return the current owner of an adaptive mutex. Used for |
850 | * priority inheritance. | | 850 | * priority inheritance. |
851 | */ | | 851 | */ |
852 | lwp_t * | | 852 | lwp_t * |
853 | mutex_owner(const kmutex_t *mtx) | | 853 | mutex_owner(const kmutex_t *mtx) |
854 | { | | 854 | { |
855 | | | 855 | |
856 | MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx)); | | 856 | MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx)); |
857 | return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner); | | 857 | return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner); |
858 | } | | 858 | } |
859 | | | 859 | |
860 | /* | | 860 | /* |
861 | * mutex_ownable: | | 861 | * mutex_ownable: |
862 | * | | 862 | * |
863 | * When compiled with DEBUG and LOCKDEBUG defined, ensure that | | 863 | * When compiled with DEBUG and LOCKDEBUG defined, ensure that |
864 | * the mutex is available. We cannot use !mutex_owned() since | | 864 | * the mutex is available. We cannot use !mutex_owned() since |
865 | * that won't work correctly for spin mutexes. | | 865 | * that won't work correctly for spin mutexes. |
866 | */ | | 866 | */ |
867 | int | | 867 | int |
868 | mutex_ownable(const kmutex_t *mtx) | | 868 | mutex_ownable(const kmutex_t *mtx) |
869 | { | | 869 | { |
870 | | | 870 | |
871 | #ifdef LOCKDEBUG | | 871 | #ifdef LOCKDEBUG |
872 | MUTEX_TESTLOCK(mtx); | | 872 | MUTEX_TESTLOCK(mtx); |
873 | #endif | | 873 | #endif |
874 | return 1; | | 874 | return 1; |
875 | } | | 875 | } |
876 | | | 876 | |
877 | /* | | 877 | /* |
878 | * mutex_tryenter: | | 878 | * mutex_tryenter: |
879 | * | | 879 | * |
880 | * Try to acquire the mutex; return non-zero if we did. | | 880 | * Try to acquire the mutex; return non-zero if we did. |
881 | */ | | 881 | */ |
882 | int | | 882 | int |
883 | mutex_tryenter(kmutex_t *mtx) | | 883 | mutex_tryenter(kmutex_t *mtx) |
884 | { | | 884 | { |
885 | uintptr_t curthread; | | 885 | uintptr_t curthread; |
886 | | | 886 | |
887 | /* | | 887 | /* |
888 | * Handle spin mutexes. | | 888 | * Handle spin mutexes. |
889 | */ | | 889 | */ |
890 | if (MUTEX_SPIN_P(mtx)) { | | 890 | if (MUTEX_SPIN_P(mtx)) { |
891 | MUTEX_SPIN_SPLRAISE(mtx); | | 891 | MUTEX_SPIN_SPLRAISE(mtx); |
892 | #ifdef FULL | | 892 | #ifdef FULL |
893 | if (MUTEX_SPINBIT_LOCK_TRY(mtx)) { | | 893 | if (MUTEX_SPINBIT_LOCK_TRY(mtx)) { |
894 | MUTEX_WANTLOCK(mtx); | | 894 | MUTEX_WANTLOCK(mtx); |
895 | MUTEX_LOCKED(mtx); | | 895 | MUTEX_LOCKED(mtx); |
896 | return 1; | | 896 | return 1; |
897 | } | | 897 | } |
898 | MUTEX_SPIN_SPLRESTORE(mtx); | | 898 | MUTEX_SPIN_SPLRESTORE(mtx); |
899 | #else | | 899 | #else |
900 | MUTEX_WANTLOCK(mtx); | | 900 | MUTEX_WANTLOCK(mtx); |
901 | MUTEX_LOCKED(mtx); | | 901 | MUTEX_LOCKED(mtx); |
902 | return 1; | | 902 | return 1; |
903 | #endif | | 903 | #endif |
904 | } else { | | 904 | } else { |
905 | curthread = (uintptr_t)curlwp; | | 905 | curthread = (uintptr_t)curlwp; |
906 | MUTEX_ASSERT(mtx, curthread != 0); | | 906 | MUTEX_ASSERT(mtx, curthread != 0); |
907 | if (MUTEX_ACQUIRE(mtx, curthread)) { | | 907 | if (MUTEX_ACQUIRE(mtx, curthread)) { |
908 | MUTEX_WANTLOCK(mtx); | | 908 | MUTEX_WANTLOCK(mtx); |
909 | MUTEX_LOCKED(mtx); | | 909 | MUTEX_LOCKED(mtx); |
910 | MUTEX_DASSERT(mtx, | | 910 | MUTEX_DASSERT(mtx, |
911 | MUTEX_OWNER(mtx->mtx_owner) == curthread); | | 911 | MUTEX_OWNER(mtx->mtx_owner) == curthread); |
912 | return 1; | | 912 | return 1; |
913 | } | | 913 | } |
914 | } | | 914 | } |
915 | | | 915 | |
916 | return 0; | | 916 | return 0; |
917 | } | | 917 | } |
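
A common use of mutex_tryenter() is taking a lock against the documented order without deadlocking: try it, and on failure back everything out and reacquire in order. A hedged sketch, assuming the documented order is lock_a before lock_b (names hypothetical):

    /* We hold lock_b but now need lock_a, which orders first. */
    if (!mutex_tryenter(&lock_a)) {
        mutex_exit(&lock_b);    /* back out to respect the lock order */
        mutex_enter(&lock_a);
        mutex_enter(&lock_b);
        /* State guarded by lock_b may have changed; revalidate. */
    }
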
918 | | | 918 | |
919 | #if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) | | 919 | #if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) |
920 | /* | | 920 | /* |
921 | * mutex_spin_retry: | | 921 | * mutex_spin_retry: |
922 | * | | 922 | * |
923 | * Support routine for mutex_spin_enter(). Assumes that the caller | | 923 | * Support routine for mutex_spin_enter(). Assumes that the caller |
924 | * has already raised the SPL, and adjusted counters. | | 924 | * has already raised the SPL, and adjusted counters. |
925 | */ | | 925 | */ |
926 | void | | 926 | void |
927 | mutex_spin_retry(kmutex_t *mtx) | | 927 | mutex_spin_retry(kmutex_t *mtx) |
928 | { | | 928 | { |
929 | #ifdef MULTIPROCESSOR | | 929 | #ifdef MULTIPROCESSOR |
930 | u_int count; | | 930 | u_int count; |
931 | LOCKSTAT_TIMER(spintime); | | 931 | LOCKSTAT_TIMER(spintime); |
932 | LOCKSTAT_FLAG(lsflag); | | 932 | LOCKSTAT_FLAG(lsflag); |
933 | #ifdef LOCKDEBUG | | 933 | #ifdef LOCKDEBUG |
934 | u_int spins = 0; | | 934 | u_int spins = 0; |
935 | #endif /* LOCKDEBUG */ | | 935 | #endif /* LOCKDEBUG */ |
936 | | | 936 | |
937 | MUTEX_WANTLOCK(mtx); | | 937 | MUTEX_WANTLOCK(mtx); |
938 | | | 938 | |
939 | LOCKSTAT_ENTER(lsflag); | | 939 | LOCKSTAT_ENTER(lsflag); |
940 | LOCKSTAT_START_TIMER(lsflag, spintime); | | 940 | LOCKSTAT_START_TIMER(lsflag, spintime); |
941 | count = SPINLOCK_BACKOFF_MIN; | | 941 | count = SPINLOCK_BACKOFF_MIN; |
942 | | | 942 | |
943 | /* | | 943 | /* |
944 | * Spin on the lock word and do exponential backoff | | 944 | * Spin on the lock word and do exponential backoff |
945 | * to reduce cache line ping-ponging between CPUs. | | 945 | * to reduce cache line ping-ponging between CPUs. |
946 | */ | | 946 | */ |
947 | do { | | 947 | do { |
948 | #if MUTEX_PANIC_SKIP_SPIN | | 948 | #if MUTEX_PANIC_SKIP_SPIN |
949 | if (panicstr != NULL) | | 949 | if (panicstr != NULL) |
950 | break; | | 950 | break; |
951 | #endif | | 951 | #endif |
952 | while (MUTEX_SPINBIT_LOCKED_P(mtx)) { | | 952 | while (MUTEX_SPINBIT_LOCKED_P(mtx)) { |
953 | SPINLOCK_BACKOFF(count); | | 953 | SPINLOCK_BACKOFF(count); |
954 | #ifdef LOCKDEBUG | | 954 | #ifdef LOCKDEBUG |
955 | if (SPINLOCK_SPINOUT(spins)) | | 955 | if (SPINLOCK_SPINOUT(spins)) |
956 | MUTEX_ABORT(mtx, "spinout"); | | 956 | MUTEX_ABORT(mtx, "spinout"); |
957 | #endif /* LOCKDEBUG */ | | 957 | #endif /* LOCKDEBUG */ |
958 | } | | 958 | } |
959 | } while (!MUTEX_SPINBIT_LOCK_TRY(mtx)); | | 959 | } while (!MUTEX_SPINBIT_LOCK_TRY(mtx)); |
960 | | | 960 | |
961 | LOCKSTAT_STOP_TIMER(lsflag, spintime); | | 961 | LOCKSTAT_STOP_TIMER(lsflag, spintime); |
962 | LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime); | | 962 | LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime); |
963 | LOCKSTAT_EXIT(lsflag); | | 963 | LOCKSTAT_EXIT(lsflag); |
964 | | | 964 | |
965 | MUTEX_LOCKED(mtx); | | 965 | MUTEX_LOCKED(mtx); |
966 | #else /* MULTIPROCESSOR */ | | 966 | #else /* MULTIPROCESSOR */ |
967 | MUTEX_ABORT(mtx, "locking against myself"); | | 967 | MUTEX_ABORT(mtx, "locking against myself"); |
968 | #endif /* MULTIPROCESSOR */ | | 968 | #endif /* MULTIPROCESSOR */ |
969 | } | | 969 | } |
970 | #endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */ | | 970 | #endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */ |