@@ -1,332 +1,363 @@
-/* $NetBSD: kern_lock.c,v 1.167 2020/01/24 20:05:15 ad Exp $ */
+/* $NetBSD: kern_lock.c,v 1.168 2020/01/27 21:05:43 ad Exp $ */
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.167 2020/01/24 20:05:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.168 2020/01/27 21:05:43 ad Exp $");
+
+#ifdef _KERNEL_OPT
+#include "opt_lockdebug.h"
+#endif
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/lock.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
 #include <sys/syslog.h>
 #include <sys/atomic.h>
 #include <sys/lwp.h>
 #include <sys/pserialize.h>
 
+#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
+#include <sys/ksyms.h>
+#endif
+
 #include <machine/lock.h>
 
 #include <dev/lockstat.h>
 
 #define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0)
 
 bool kernel_lock_dodebug;
 
 __cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
     __cacheline_aligned;
 
 void
 assert_sleepable(void)
 {
         const char *reason;
         uint64_t pctr;
         bool idle;
 
         if (panicstr != NULL) {
                 return;
         }
 
         LOCKDEBUG_BARRIER(kernel_lock, 1);
 
         /*
          * Avoid disabling/re-enabling preemption here since this
          * routine may be called in delicate situations.
          */
         do {
                 pctr = lwp_pctr();
                 __insn_barrier();
                 idle = CURCPU_IDLE_P();
                 __insn_barrier();
         } while (pctr != lwp_pctr());
 
         reason = NULL;
         if (idle && !cold &&
             kcpuset_isset(kcpuset_running, cpu_index(curcpu()))) {
                 reason = "idle";
         }
         if (cpu_intr_p()) {
                 reason = "interrupt";
         }
         if (cpu_softintr_p()) {
                 reason = "softint";
         }
         if (!pserialize_not_in_read_section()) {
                 reason = "pserialize";
         }
 
         if (reason) {
                 panic("%s: %s caller=%p", __func__, reason,
                     (void *)RETURN_ADDRESS);
         }
 }
 
 /*
  * Functions for manipulating the kernel_lock.  We put them here
  * so that they show up in profiles.
  */
 
 #define _KERNEL_LOCK_ABORT(msg) \
     LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)
 
 #ifdef LOCKDEBUG
 #define _KERNEL_LOCK_ASSERT(cond) \
 do { \
         if (!(cond)) \
                 _KERNEL_LOCK_ABORT("assertion failed: " #cond); \
 } while (/* CONSTCOND */ 0)
 #else
 #define _KERNEL_LOCK_ASSERT(cond) /* nothing */
 #endif
 
 static void _kernel_lock_dump(const volatile void *, lockop_printer_t);
 
 lockops_t _kernel_lock_ops = {
         .lo_name = "Kernel lock",
         .lo_type = LOCKOPS_SPIN,
         .lo_dump = _kernel_lock_dump,
 };
 
 /*
  * Initialize the kernel lock.
  */
 void
 kernel_lock_init(void)
 {
 
         __cpu_simple_lock_init(kernel_lock);
         kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
             RETURN_ADDRESS);
 }
 CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));
 
 /*
  * Print debugging information about the kernel lock.
  */
 static void
 _kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
 {
         struct cpu_info *ci = curcpu();
 
         (void)junk;
 
         pr("curcpu holds : %18d wanted by: %#018lx\n",
             ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
 }
 
 /*
  * Acquire 'nlocks' holds on the kernel lock.
  *
  * Although it may not look it, this is one of the most central, intricate
  * routines in the kernel, and tons of code elsewhere depends on its exact
  * behaviour.  If you change something in here, expect it to bite you in the
  * rear.
  */
 void
 _kernel_lock(int nlocks)
 {
         struct cpu_info *ci;
         LOCKSTAT_TIMER(spintime);
         LOCKSTAT_FLAG(lsflag);
         struct lwp *owant;
 #ifdef LOCKDEBUG
         u_int spins = 0;
 #endif
         int s;
         struct lwp *l = curlwp;
 
         _KERNEL_LOCK_ASSERT(nlocks > 0);
 
         s = splvm();
         ci = curcpu();
         if (ci->ci_biglock_count != 0) {
                 _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
                 ci->ci_biglock_count += nlocks;
                 l->l_blcnt += nlocks;
                 splx(s);
                 return;
         }
 
         _KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
         LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
             0);
 
         if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
                 ci->ci_biglock_count = nlocks;
                 l->l_blcnt = nlocks;
                 LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
                     RETURN_ADDRESS, 0);
                 splx(s);
                 return;
         }
 
         /*
          * To remove the ordering constraint between adaptive mutexes
          * and kernel_lock we must make it appear as if this thread is
          * blocking.  For non-interlocked mutex release, a store fence
          * is required to ensure that the result of any mutex_exit()
          * by the current LWP becomes visible on the bus before the set
          * of ci->ci_biglock_wanted becomes visible.
          *
          * However, we won't set ci_biglock_wanted until we've spun for
          * a bit, as we don't want to make any lock waiters in rw_oncpu()
          * or mutex_oncpu() block prematurely.
          */
         membar_producer();
         owant = ci->ci_biglock_wanted;
         ci->ci_biglock_wanted = l;
+#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
+        l->l_ld_wanted = __builtin_return_address(0);
+#endif
 
         /*
          * Spin until we acquire the lock.  Once we have it, record the
          * time spent with lockstat.
          */
         LOCKSTAT_ENTER(lsflag);
         LOCKSTAT_START_TIMER(lsflag, spintime);
 
         do {
                 splx(s);
                 while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
 #ifdef LOCKDEBUG
                         if (SPINLOCK_SPINOUT(spins)) {
                                 extern int start_init_exec;
                                 if (!start_init_exec)
                                         _KERNEL_LOCK_ABORT("spinout");
                         }
 #endif
                 }
                 s = splvm();
         } while (!__cpu_simple_lock_try(kernel_lock));
 
         ci->ci_biglock_count = nlocks;
         l->l_blcnt = nlocks;
         LOCKSTAT_STOP_TIMER(lsflag, spintime);
         LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
             RETURN_ADDRESS, 0);
         if (owant == NULL) {
                 LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
                     LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
         }
         LOCKSTAT_EXIT(lsflag);
         splx(s);
 
         /*
          * Now that we have kernel_lock, reset ci_biglock_wanted.  This
          * store must be unbuffered (immediately visible on the bus) in
          * order for non-interlocked mutex release to work correctly.
          * It must be visible before a mutex_exit() can execute on this
          * processor.
          *
          * Note: only where CAS is available in hardware will this be
          * an unbuffered write, but non-interlocked release cannot be
          * done on CPUs without CAS in hardware.
          */
         (void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);
 
         /*
          * Issue a memory barrier as we have acquired a lock.  This also
          * prevents stores from a following mutex_exit() being reordered
          * to occur before our store to ci_biglock_wanted above.
          */
 #ifndef __HAVE_ATOMIC_AS_MEMBAR
         membar_enter();
 #endif
 }
 
 /*
  * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
  * all holds.
  */
 void
 _kernel_unlock(int nlocks, int *countp)
 {
         struct cpu_info *ci;
         u_int olocks;
         int s;
         struct lwp *l = curlwp;
 
         _KERNEL_LOCK_ASSERT(nlocks < 2);
 
         olocks = l->l_blcnt;
 
         if (olocks == 0) {
                 _KERNEL_LOCK_ASSERT(nlocks <= 0);
                 if (countp != NULL)
                         *countp = 0;
                 return;
         }
 
         _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
 
         if (nlocks == 0)
                 nlocks = olocks;
         else if (nlocks == -1) {
                 nlocks = 1;
                 _KERNEL_LOCK_ASSERT(olocks == 1);
         }
         s = splvm();
         ci = curcpu();
         _KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
         if (ci->ci_biglock_count == nlocks) {
                 LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
                     RETURN_ADDRESS, 0);
                 ci->ci_biglock_count = 0;
                 __cpu_simple_unlock(kernel_lock);
                 l->l_blcnt -= nlocks;
                 splx(s);
                 if (l->l_dopreempt)
                         kpreempt(0);
         } else {
                 ci->ci_biglock_count -= nlocks;
                 l->l_blcnt -= nlocks;
                 splx(s);
         }
 
         if (countp != NULL)
                 *countp = olocks;
 }
 
 bool
 _kernel_locked_p(void)
 {
         return __SIMPLELOCK_LOCKED_P(kernel_lock);
 }
+
+void
+kernel_lock_plug_leak(void)
+{
+#ifndef LOCKDEBUG
+# ifdef DIAGNOSTIC
+        int biglocks = 0;
+        KERNEL_UNLOCK_ALL(curlwp, &biglocks);
+        if (biglocks != 0) {
+                const char *sym = "(unknown)";
+                ksyms_getname(NULL, &sym, (vaddr_t)curlwp->l_ld_wanted,
+                    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
+                printf("kernel_lock leak detected. last acquired: %s / %p\n",
+                    sym, curlwp->l_ld_wanted);
+        }
+# else
+        KERNEL_UNLOCK_ALL(curlwp, NULL);
+# endif
+#endif
+}
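
assert_sleepable() in the unchanged context above is the standard guard placed
at the head of any code path that may block.  A minimal usage sketch, assuming
the usual <sys/kmem.h> allocator API; example_alloc_wrapper() is hypothetical
and not part of this file:

    #include <sys/kmem.h>

    static void *
    example_alloc_wrapper(size_t len)
    {

            /*
             * Panics immediately with the caller's address if invoked
             * from interrupt, softint or idle context, or from inside a
             * pserialize read section, rather than hanging later when
             * the allocation actually sleeps.
             */
            assert_sleepable();
            return kmem_alloc(len, KM_SLEEP);
    }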
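The 'nlocks' conventions shared by _kernel_lock() and _kernel_unlock() (a
positive count adds holds; 0 releases all holds; -1 releases what must be the
only hold) are normally exercised through the KERNEL_LOCK()/KERNEL_UNLOCK_*()
macros from <sys/systm.h>.  A minimal sketch under that assumption;
example_biglock_user() is a hypothetical function:

    static void
    example_biglock_user(void)
    {
            int held;

            KERNEL_LOCK(1, curlwp);           /* one hold: l_blcnt == 1 */
            KERNEL_LOCK(1, curlwp);           /* recursive: l_blcnt == 2 */
            KERNEL_UNLOCK_ALL(curlwp, &held); /* nlocks == 0: drop all; held == 2 */
            KERNEL_LOCK(held, curlwp);        /* reacquire the saved count */
            KERNEL_UNLOCK_ONE(curlwp);        /* nlocks == 1: drop one hold */
            KERNEL_UNLOCK_LAST(curlwp);       /* nlocks == -1: asserts exactly one */
    }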
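The new kernel_lock_plug_leak() is the consumer of the l_ld_wanted bookkeeping
added to _kernel_lock(): on a DIAGNOSTIC (non-LOCKDEBUG) kernel it releases any
holds the current LWP still owns and asks ksyms_getname() for the symbol
nearest the recorded acquisition address.  This diff does not show its call
sites; the sketch below is a hypothetical caller only:

    static void
    example_lwp_teardown(void)
    {

            /*
             * If an earlier path returned with a kernel_lock hold still
             * outstanding, this drops the leaked hold(s) and, under
             * DIAGNOSTIC, prints e.g.
             *
             *   kernel_lock leak detected. last acquired: some_func / 0x...
             *
             * The attribution relies on l_ld_wanted, which _kernel_lock()
             * records only on the contended (spin) path, so a leak from an
             * uncontended acquisition may report a stale symbol.
             */
            kernel_lock_plug_leak();
    }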