| @@ -1,1054 +1,1054 @@ | | | @@ -1,1054 +1,1054 @@ |
1 | /* $NetBSD: subr_lockdebug.c,v 1.77 2020/05/15 13:09:02 maxv Exp $ */ | | 1 | /* $NetBSD: subr_lockdebug.c,v 1.78 2021/01/01 14:04:17 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Andrew Doran. | | 8 | * by Andrew Doran. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | | | 31 | |
32 | /* | | 32 | /* |
33 | * Basic lock debugging code shared among lock primitives. | | 33 | * Basic lock debugging code shared among lock primitives. |
34 | */ | | 34 | */ |
35 | | | 35 | |
36 | #include <sys/cdefs.h> | | 36 | #include <sys/cdefs.h> |
37 | __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.77 2020/05/15 13:09:02 maxv Exp $"); | | 37 | __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.78 2021/01/01 14:04:17 riastradh Exp $"); |
38 | | | 38 | |
39 | #ifdef _KERNEL_OPT | | 39 | #ifdef _KERNEL_OPT |
40 | #include "opt_ddb.h" | | 40 | #include "opt_ddb.h" |
41 | #endif | | 41 | #endif |
42 | | | 42 | |
43 | #include <sys/param.h> | | 43 | #include <sys/param.h> |
44 | #include <sys/proc.h> | | 44 | #include <sys/proc.h> |
45 | #include <sys/systm.h> | | 45 | #include <sys/systm.h> |
46 | #include <sys/kernel.h> | | 46 | #include <sys/kernel.h> |
47 | #include <sys/kmem.h> | | 47 | #include <sys/kmem.h> |
48 | #include <sys/lockdebug.h> | | 48 | #include <sys/lockdebug.h> |
49 | #include <sys/sleepq.h> | | 49 | #include <sys/sleepq.h> |
50 | #include <sys/cpu.h> | | 50 | #include <sys/cpu.h> |
51 | #include <sys/atomic.h> | | 51 | #include <sys/atomic.h> |
52 | #include <sys/lock.h> | | 52 | #include <sys/lock.h> |
53 | #include <sys/rbtree.h> | | 53 | #include <sys/rbtree.h> |
54 | #include <sys/ksyms.h> | | 54 | #include <sys/ksyms.h> |
55 | #include <sys/kcov.h> | | 55 | #include <sys/kcov.h> |
56 | | | 56 | |
57 | #include <machine/lock.h> | | 57 | #include <machine/lock.h> |
58 | | | 58 | |
59 | unsigned int ld_panic; | | 59 | unsigned int ld_panic; |
60 | | | 60 | |
61 | #ifdef LOCKDEBUG | | 61 | #ifdef LOCKDEBUG |
62 | | | 62 | |
63 | #ifdef __ia64__ | | 63 | #ifdef __ia64__ |
64 | #define LD_BATCH_SHIFT 16 | | 64 | #define LD_BATCH_SHIFT 16 |
65 | #else | | 65 | #else |
66 | #define LD_BATCH_SHIFT 9 | | 66 | #define LD_BATCH_SHIFT 9 |
67 | #endif | | 67 | #endif |
68 | #define LD_BATCH (1 << LD_BATCH_SHIFT) | | 68 | #define LD_BATCH (1 << LD_BATCH_SHIFT) |
69 | #define LD_BATCH_MASK (LD_BATCH - 1) | | 69 | #define LD_BATCH_MASK (LD_BATCH - 1) |
70 | #define LD_MAX_LOCKS 1048576 | | 70 | #define LD_MAX_LOCKS 1048576 |
71 | #define LD_SLOP 16 | | 71 | #define LD_SLOP 16 |
72 | | | 72 | |
73 | #define LD_LOCKED 0x01 | | 73 | #define LD_LOCKED 0x01 |
74 | #define LD_SLEEPER 0x02 | | 74 | #define LD_SLEEPER 0x02 |
75 | | | 75 | |
76 | #define LD_WRITE_LOCK 0x80000000 | | 76 | #define LD_WRITE_LOCK 0x80000000 |
77 | | | 77 | |
78 | typedef struct lockdebug { | | 78 | typedef struct lockdebug { |
79 | struct rb_node ld_rb_node; | | 79 | struct rb_node ld_rb_node; |
80 | __cpu_simple_lock_t ld_spinlock; | | 80 | __cpu_simple_lock_t ld_spinlock; |
81 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain; | | 81 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain; |
82 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain; | | 82 | _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain; |
83 | volatile void *ld_lock; | | 83 | volatile void *ld_lock; |
84 | lockops_t *ld_lockops; | | 84 | lockops_t *ld_lockops; |
85 | struct lwp *ld_lwp; | | 85 | struct lwp *ld_lwp; |
86 | uintptr_t ld_locked; | | 86 | uintptr_t ld_locked; |
87 | uintptr_t ld_unlocked; | | 87 | uintptr_t ld_unlocked; |
88 | uintptr_t ld_initaddr; | | 88 | uintptr_t ld_initaddr; |
89 | uint16_t ld_shares; | | 89 | uint16_t ld_shares; |
90 | uint16_t ld_cpu; | | 90 | uint16_t ld_cpu; |
91 | uint8_t ld_flags; | | 91 | uint8_t ld_flags; |
92 | uint8_t ld_shwant; /* advisory */ | | 92 | uint8_t ld_shwant; /* advisory */ |
93 | uint8_t ld_exwant; /* advisory */ | | 93 | uint8_t ld_exwant; /* advisory */ |
94 | uint8_t ld_unused; | | 94 | uint8_t ld_unused; |
95 | } volatile lockdebug_t; | | 95 | } volatile lockdebug_t; |
96 | | | 96 | |
97 | typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t; | | 97 | typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t; |
98 | | | 98 | |
99 | __cpu_simple_lock_t ld_mod_lk; | | 99 | __cpu_simple_lock_t ld_mod_lk; |
100 | lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free); | | 100 | lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free); |
101 | #ifdef _KERNEL | | 101 | #ifdef _KERNEL |
102 | lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all); | | 102 | lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all); |
103 | #else | | 103 | #else |
104 | extern lockdebuglist_t ld_all; | | 104 | extern lockdebuglist_t ld_all; |
105 | #define cpu_name(a) "?" | | 105 | #define cpu_name(a) "?" |
106 | #define cpu_index(a) -1 | | 106 | #define cpu_index(a) -1 |
107 | #define curlwp NULL | | 107 | #define curlwp NULL |
108 | #endif /* _KERNEL */ | | 108 | #endif /* _KERNEL */ |
109 | int ld_nfree; | | 109 | int ld_nfree; |
110 | int ld_freeptr; | | 110 | int ld_freeptr; |
111 | int ld_recurse; | | 111 | int ld_recurse; |
112 | bool ld_nomore; | | 112 | bool ld_nomore; |
113 | lockdebug_t ld_prime[LD_BATCH]; | | 113 | lockdebug_t ld_prime[LD_BATCH]; |
114 | | | 114 | |
115 | #ifdef _KERNEL | | 115 | #ifdef _KERNEL |
116 | static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int, | | 116 | static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int, |
117 | const char *, bool); | | 117 | const char *, bool); |
118 | static int lockdebug_more(int); | | 118 | static int lockdebug_more(int); |
119 | static void lockdebug_init(void); | | 119 | static void lockdebug_init(void); |
120 | static void lockdebug_dump(lwp_t *, lockdebug_t *, | | 120 | static void lockdebug_dump(lwp_t *, lockdebug_t *, |
121 | void (*)(const char *, ...) | | 121 | void (*)(const char *, ...) |
122 | __printflike(1, 2)); | | 122 | __printflike(1, 2)); |
123 | | | 123 | |
124 | static signed int | | 124 | static signed int |
125 | ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2) | | 125 | ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2) |
126 | { | | 126 | { |
127 | const lockdebug_t *ld1 = n1; | | 127 | const lockdebug_t *ld1 = n1; |
128 | const lockdebug_t *ld2 = n2; | | 128 | const lockdebug_t *ld2 = n2; |
129 | const uintptr_t a = (uintptr_t)ld1->ld_lock; | | 129 | const uintptr_t a = (uintptr_t)ld1->ld_lock; |
130 | const uintptr_t b = (uintptr_t)ld2->ld_lock; | | 130 | const uintptr_t b = (uintptr_t)ld2->ld_lock; |
131 | | | 131 | |
132 | if (a < b) | | 132 | if (a < b) |
133 | return -1; | | 133 | return -1; |
134 | if (a > b) | | 134 | if (a > b) |
135 | return 1; | | 135 | return 1; |
136 | return 0; | | 136 | return 0; |
137 | } | | 137 | } |
138 | | | 138 | |
139 | static signed int | | 139 | static signed int |
140 | ld_rbto_compare_key(void *ctx, const void *n, const void *key) | | 140 | ld_rbto_compare_key(void *ctx, const void *n, const void *key) |
141 | { | | 141 | { |
142 | const lockdebug_t *ld = n; | | 142 | const lockdebug_t *ld = n; |
143 | const uintptr_t a = (uintptr_t)ld->ld_lock; | | 143 | const uintptr_t a = (uintptr_t)ld->ld_lock; |
144 | const uintptr_t b = (uintptr_t)key; | | 144 | const uintptr_t b = (uintptr_t)key; |
145 | | | 145 | |
146 | if (a < b) | | 146 | if (a < b) |
147 | return -1; | | 147 | return -1; |
148 | if (a > b) | | 148 | if (a > b) |
149 | return 1; | | 149 | return 1; |
150 | return 0; | | 150 | return 0; |
151 | } | | 151 | } |
152 | | | 152 | |
153 | static rb_tree_t ld_rb_tree; | | 153 | static rb_tree_t ld_rb_tree; |
154 | | | 154 | |
155 | static const rb_tree_ops_t ld_rb_tree_ops = { | | 155 | static const rb_tree_ops_t ld_rb_tree_ops = { |
156 | .rbto_compare_nodes = ld_rbto_compare_nodes, | | 156 | .rbto_compare_nodes = ld_rbto_compare_nodes, |
157 | .rbto_compare_key = ld_rbto_compare_key, | | 157 | .rbto_compare_key = ld_rbto_compare_key, |
158 | .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node), | | 158 | .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node), |
159 | .rbto_context = NULL | | 159 | .rbto_context = NULL |
160 | }; | | 160 | }; |
161 | | | 161 | |
/*
 * lockdebug_lookup1:
 *
 *	Find the lockdebug record tracking "lock" in the global rb-tree
 *	and return it with its ld_spinlock held, or NULL if the lock is
 *	not registered.  The current CPU's cpu_ld_lock is taken around
 *	the tree search; writers (see lockdebug_lock_cpus()) take every
 *	CPU's cpu_ld_lock, so holding our own is sufficient to read.
 *	NOTE(review): callers appear to run at splhigh() — confirm before
 *	adding new call sites.
 */
static inline lockdebug_t *
lockdebug_lookup1(const volatile void *lock)
{
	lockdebug_t *ld;
	struct cpu_info *ci;

	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (ld == NULL) {
		return NULL;
	}
	/* Pin the record before returning it to the caller. */
	__cpu_simple_lock(&ld->ld_spinlock);

	return ld;
}
179 | | | 179 | |
180 | static void | | 180 | static void |
181 | lockdebug_lock_cpus(void) | | 181 | lockdebug_lock_cpus(void) |
182 | { | | 182 | { |
183 | CPU_INFO_ITERATOR cii; | | 183 | CPU_INFO_ITERATOR cii; |
184 | struct cpu_info *ci; | | 184 | struct cpu_info *ci; |
185 | | | 185 | |
186 | for (CPU_INFO_FOREACH(cii, ci)) { | | 186 | for (CPU_INFO_FOREACH(cii, ci)) { |
187 | __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); | | 187 | __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); |
188 | } | | 188 | } |
189 | } | | 189 | } |
190 | | | 190 | |
191 | static void | | 191 | static void |
192 | lockdebug_unlock_cpus(void) | | 192 | lockdebug_unlock_cpus(void) |
193 | { | | 193 | { |
194 | CPU_INFO_ITERATOR cii; | | 194 | CPU_INFO_ITERATOR cii; |
195 | struct cpu_info *ci; | | 195 | struct cpu_info *ci; |
196 | | | 196 | |
197 | for (CPU_INFO_FOREACH(cii, ci)) { | | 197 | for (CPU_INFO_FOREACH(cii, ci)) { |
198 | __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); | | 198 | __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); |
199 | } | | 199 | } |
200 | } | | 200 | } |
201 | | | 201 | |
/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by a pointer to a lock and return it
 *	locked (ld_spinlock held).  Panics if the lock was never
 *	registered with lockdebug_alloc(); "func", "line" and "where"
 *	identify the offending call site in the panic message.
 */
static inline lockdebug_t *
lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
    uintptr_t where)
{
	lockdebug_t *ld;

	/* Keep the tree walk out of kcov coverage traces. */
	kcov_silence_enter();
	ld = lockdebug_lookup1(lock);
	kcov_silence_leave();

	if (__predict_false(ld == NULL)) {
		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
		    PRIxPTR ")", func, line, lock, where);
	}
	return ld;
}
223 | | | 223 | |
/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
	TAILQ_INIT(&curlwp->l_ld_locks);
	__cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
	__cpu_simple_lock_init(&ld_mod_lk);

	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);

	ld = ld_prime;
	/*
	 * NOTE(review): the loop starts at 1, so ld_prime[0] is never put
	 * on the free list — presumably reserved, consistent with
	 * ld_freeptr being primed to 1 below and lockdebug_alloc() using
	 * ld_freeptr == 0 as the "not yet initialized" sentinel.  Confirm
	 * before changing.
	 */
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		__cpu_simple_lock_init(&ld->ld_spinlock);
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;
}
252 | | | 252 | |
/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.  Returns true if a structure was attached to the
 *	lock, false if debugging for this lock had to be skipped
 *	(panic in progress, no structures free, or no lockops given).
 */
bool
lockdebug_alloc(const char *func, size_t line, volatile void *lock,
    lockops_t *lo, uintptr_t initaddr)
{
	struct cpu_info *ci;
	lockdebug_t *ld;
	int s;

	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
		return false;
	/* First call ever: prime the static pool (sets ld_freeptr to 1). */
	if (__predict_false(ld_freeptr == 0))
		lockdebug_init();

	s = splhigh();
	__cpu_simple_lock(&ld_mod_lk);
	/* Double initialization of the same lock is a caller bug. */
	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		lockdebug_abort1(func, line, ld, s, "already initialized",
		    true);
		return false;
	}

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	ci = curcpu();
	ci->ci_lkdebug_recurse++;
	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			return false;
		}
		/* May temporarily drop ld_mod_lk and the SPL; see below. */
		s = lockdebug_more(s);
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
		s = lockdebug_more(s);
	}
	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		return false;
	}
	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;
	ci->ci_lkdebug_recurse--;

	/* A structure on the free list must not still track a lock. */
	if (__predict_false(ld->ld_lock != NULL)) {
		panic("%s,%zu: corrupt table ld %p", func, line, ld);
	}

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;
	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
	/* Every CPU's tree lock must be held to modify the rb-tree. */
	lockdebug_lock_cpus();
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	lockdebug_unlock_cpus();
	__cpu_simple_unlock(&ld_mod_lk);

	splx(s);
	return true;
}
331 | | | 331 | |
332 | /* | | 332 | /* |
333 | * lockdebug_free: | | 333 | * lockdebug_free: |
334 | * | | 334 | * |
335 | * A lock is being destroyed, so release debugging resources. | | 335 | * A lock is being destroyed, so release debugging resources. |
336 | */ | | 336 | */ |
337 | void | | 337 | void |
338 | lockdebug_free(const char *func, size_t line, volatile void *lock) | | 338 | lockdebug_free(const char *func, size_t line, volatile void *lock) |
339 | { | | 339 | { |
340 | lockdebug_t *ld; | | 340 | lockdebug_t *ld; |
341 | int s; | | 341 | int s; |
342 | | | 342 | |
343 | if (__predict_false(panicstr != NULL || ld_panic)) | | 343 | if (__predict_false(panicstr != NULL || ld_panic)) |
344 | return; | | 344 | return; |
345 | | | 345 | |
346 | s = splhigh(); | | 346 | s = splhigh(); |
347 | __cpu_simple_lock(&ld_mod_lk); | | 347 | __cpu_simple_lock(&ld_mod_lk); |
348 | ld = lockdebug_lookup(func, line, lock, | | 348 | ld = lockdebug_lookup(func, line, lock, |
349 | (uintptr_t) __builtin_return_address(0)); | | 349 | (uintptr_t) __builtin_return_address(0)); |
350 | if (__predict_false(ld == NULL)) { | | 350 | if (__predict_false(ld == NULL)) { |
351 | __cpu_simple_unlock(&ld_mod_lk); | | 351 | __cpu_simple_unlock(&ld_mod_lk); |
352 | panic("%s,%zu: destroying uninitialized object %p" | | 352 | panic("%s,%zu: destroying uninitialized object %p" |
353 | "(ld_lock=%p)", func, line, lock, ld->ld_lock); | | 353 | "(ld_lock=%p)", func, line, lock, ld->ld_lock); |
354 | return; | | 354 | return; |
355 | } | | 355 | } |
356 | if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 || | | 356 | if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 || |
357 | ld->ld_shares != 0)) { | | 357 | ld->ld_shares != 0)) { |
358 | __cpu_simple_unlock(&ld_mod_lk); | | 358 | __cpu_simple_unlock(&ld_mod_lk); |
359 | lockdebug_abort1(func, line, ld, s, "is locked or in use", | | 359 | lockdebug_abort1(func, line, ld, s, "is locked or in use", |
360 | true); | | 360 | true); |
361 | return; | | 361 | return; |
362 | } | | 362 | } |
363 | lockdebug_lock_cpus(); | | 363 | lockdebug_lock_cpus(); |
364 | rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld)); | | 364 | rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld)); |
365 | lockdebug_unlock_cpus(); | | 365 | lockdebug_unlock_cpus(); |
366 | ld->ld_lock = NULL; | | 366 | ld->ld_lock = NULL; |
367 | TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); | | 367 | TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); |
368 | ld_nfree++; | | 368 | ld_nfree++; |
369 | __cpu_simple_unlock(&ld->ld_spinlock); | | 369 | __cpu_simple_unlock(&ld->ld_spinlock); |
370 | __cpu_simple_unlock(&ld_mod_lk); | | 370 | __cpu_simple_unlock(&ld_mod_lk); |
371 | splx(s); | | 371 | splx(s); |
372 | } | | 372 | } |
373 | | | 373 | |
/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_mod_lk held; ld_mod_lk and the SPL are
 *	temporarily dropped around kmem_zalloc()/kmem_free(), so the
 *	caller must re-check any state it sampled earlier.  Returns the
 *	(possibly refreshed) saved SPL value.
 */
static int
lockdebug_more(int s)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	/*
	 * Can't call kmem_alloc() if in interrupt context.  XXX We could
	 * deadlock, because we don't know which locks the caller holds.
	 */
	if (cpu_intr_p() || cpu_softintr_p()) {
		return s;
	}

	while (ld_nfree < LD_SLOP) {
		/* Drop the lock and SPL: kmem_zalloc() may sleep. */
		__cpu_simple_unlock(&ld_mod_lk);
		splx(s);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		s = splhigh();
		__cpu_simple_lock(&ld_mod_lk);

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			__cpu_simple_unlock(&ld_mod_lk);
			splx(s);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			s = splhigh();
			__cpu_simple_lock(&ld_mod_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		/* Cap the total number of structures at LD_MAX_LOCKS. */
		m = uimin(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			__cpu_simple_lock_init(&ld->ld_spinlock);
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		/*
		 * NOTE(review): membar_producer() orders the list writes
		 * above before they become visible to other CPUs —
		 * presumably paired with a consumer-side barrier elsewhere;
		 * confirm before relying on it.
		 */
		membar_producer();
	}

	return s;
}
432 | | | 432 | |
/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.  The "shared"
 *	parameter controls which ld_{ex,sh}want counter is
 *	updated; a negative value of shared updates neither.
 */
void
lockdebug_wantlock(const char *func, size_t line,
    const volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	bool recurse;
	int s;

	(void)shared;
	recurse = false;

	/* Don't pile new diagnostics on top of an existing panic. */
	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	/*
	 * Recursion check: a sleep lock recurses when the same LWP
	 * already owns it; a spin lock when the same CPU already holds it.
	 */
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			recurse = true;
	}
	/* Taking a sleep lock from interrupt context is always a bug. */
	if (cpu_intr_p()) {
		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
			/*
			 * NOTE(review): lockdebug_abort1() is handed both
			 * ld and s — it appears to consume the spinlock
			 * and restore the SPL itself; confirm.
			 */
			lockdebug_abort1(func, line, ld, s,
			    "acquiring sleep lock from interrupt context",
			    true);
			return;
		}
	}
	/* Advisory want counters: shared > 0 reader, == 0 writer, < 0 none. */
	if (shared > 0)
		ld->ld_shwant++;
	else if (shared == 0)
		ld->ld_exwant++;
	if (__predict_false(recurse)) {
		lockdebug_abort1(func, line, ld, s, "locking against myself",
		    true);
		return;
	}
	/* Record the first lock this LWP is waiting on, for debugging. */
	if (l->l_ld_wanted == NULL) {
		l->l_ld_wanted = ld;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
490 | | | 490 | |
491 | /* | | 491 | /* |
492 | * lockdebug_locked: | | 492 | * lockdebug_locked: |
493 | * | | 493 | * |
494 | * Process a lock acquire operation. | | 494 | * Process a lock acquire operation. |
495 | */ | | 495 | */ |
void
lockdebug_locked(const char *func, size_t line,
    volatile void *lock, void *cvlock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	/* cvlock is not used by this function. */

	/* Stay out of the way once the system is going down. */
	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	/*
	 * lockdebug_lookup() returns with ld->ld_spinlock held; every
	 * path below must release it and restore the saved IPL (the
	 * abort path does both inside lockdebug_abort1()).
	 */
	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (shared) {
		/* Shared (read) acquire: one more hold, one less waiter. */
		l->l_shlocks++;
		ld->ld_locked = where;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		/* Exclusive acquire: the lock must not already be held. */
		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
			lockdebug_abort1(func, line, ld, s, "already locked",
			    true);
			return;
		}
		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_exwant--;
		/*
		 * Chain the record onto its holder: sleep locks hang off
		 * the owning LWP, spin locks off the current CPU.
		 */
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
		} else {
			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
			    ld, ld_chain);
		}
	}
	/* Record the most recent holder for later diagnostics. */
	ld->ld_cpu = (uint16_t)cpu_index(curcpu());
	ld->ld_lwp = l;
	__cpu_simple_unlock(&ld->ld_spinlock);
	/* This acquire satisfies the LWP's recorded "wanted" lock. */
	if (l->l_ld_wanted == ld) {
		l->l_ld_wanted = NULL;
	}
	splx(s);
}
541 | | | 541 | |
542 | /* | | 542 | /* |
543 | * lockdebug_unlocked: | | 543 | * lockdebug_unlocked: |
544 | * | | 544 | * |
545 | * Process a lock release operation. | | 545 | * Process a lock release operation. |
546 | */ | | 546 | */ |
void
lockdebug_unlocked(const char *func, size_t line,
    volatile void *lock, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	/* Stay out of the way once the system is going down. */
	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	/*
	 * lockdebug_lookup() returns with ld->ld_spinlock held; the
	 * abort paths release it (and restore the IPL) inside
	 * lockdebug_abort1().
	 */
	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
		splx(s);
		return;
	}
	if (shared) {
		/* Shared release: both per-LWP and per-lock counts
		 * must be nonzero, otherwise the caller is broken. */
		if (__predict_false(l->l_shlocks == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared locks held by LWP", true);
			return;
		}
		if (__predict_false(ld->ld_shares == 0)) {
			lockdebug_abort1(func, line, ld, s,
			    "no shared holds on this lock", true);
			return;
		}
		l->l_shlocks--;
		ld->ld_shares--;
		/* Clear holder info only if it still refers to us. */
		if (ld->ld_lwp == l) {
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
		}
		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
			ld->ld_cpu = (uint16_t)-1;
	} else {
		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
			lockdebug_abort1(func, line, ld, s, "not locked", true);
			return;
		}

		/*
		 * Exclusive release: sleep locks are owned by an LWP,
		 * spin locks by a CPU.  Verify the right owner is
		 * releasing, then unchain the record from its holder.
		 */
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (__predict_false(ld->ld_lwp != curlwp)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current LWP", true);
				return;
			}
			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
		} else {
			uint16_t idx = (uint16_t)cpu_index(curcpu());
			if (__predict_false(ld->ld_cpu != idx)) {
				lockdebug_abort1(func, line, ld, s,
				    "not held by current CPU", true);
				return;
			}
			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
			    ld_chain);
		}
		ld->ld_flags &= ~LD_LOCKED;
		ld->ld_unlocked = where;
		ld->ld_lwp = NULL;
	}
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
}
612 | | | 612 | |
613 | /* | | 613 | /* |
614 | * lockdebug_barrier: | | 614 | * lockdebug_barrier: |
615 | * | | 615 | * |
616 | * Panic if we hold more than one specified lock, and optionally, if we | | 616 | * Panic if we hold more than one specified lock, and optionally, if we |
617 | * hold any sleep locks. | | 617 | * hold any sleep locks. |
618 | */ | | 618 | */ |
void
lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
    int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	int s;

	/* Stay out of the way once the system is going down. */
	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	s = splhigh();
	/*
	 * Check the current CPU's spin lock list; any held spin lock
	 * other than the permitted 'onelock' is fatal.  Interrupt LWPs
	 * (LP_INTR) are exempt — presumably because the interrupted
	 * code may legitimately hold spin locks while they run.
	 */
	if ((l->l_pflag & LP_INTR) == 0) {
		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			/* lockdebug_abort1() drops spinlock and IPL. */
			__cpu_simple_lock(&ld->ld_spinlock);
			lockdebug_abort1(func, line, ld, s,
			    "spin lock held", true);
			return;
		}
	}
	/* Caller says sleep locks are OK: nothing more to check. */
	if (slplocks) {
		splx(s);
		return;
	}
	/* No sleep locks may be held except, possibly, 'onelock'. */
	ld = TAILQ_FIRST(&l->l_ld_locks);
	if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
		return;
	}
	splx(s);
	/*
	 * Finally, no shared (reader) holds may remain.  Dump every
	 * record attributed to this LWP before panicking.
	 */
	if (l->l_shlocks != 0) {
		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
			if (ld->ld_lock == onelock) {
				continue;
			}
			if (ld->ld_lwp == l)
				lockdebug_dump(l, ld, printf);
		}
		panic("%s,%zu: holding %d shared locks", func, line,
		    l->l_shlocks);
	}
}
665 | | | 665 | |
666 | /* | | 666 | /* |
667 | * lockdebug_mem_check: | | 667 | * lockdebug_mem_check: |
668 | * | | 668 | * |
669 | * Check for in-use locks within a memory region that is | | 669 | * Check for in-use locks within a memory region that is |
670 | * being freed. | | 670 | * being freed. |
671 | */ | | 671 | */ |
void
lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
{
	lockdebug_t *ld;
	struct cpu_info *ci;
	int s;

	/* Stay out of the way once the system is going down. */
	if (__predict_false(panicstr != NULL || ld_panic))
		return;

	/* Suppress kcov tracing for the duration of the check. */
	kcov_silence_enter();

	s = splhigh();
	ci = curcpu();
	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
	/*
	 * Find the first tracked lock at or above 'base'.  If it lies
	 * inside [base, base + sz) the region being freed still
	 * contains an initialized lock.
	 */
	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
	if (ld != NULL) {
		const uintptr_t lock = (uintptr_t)ld->ld_lock;

		/* A "geq" result below base means the tree is corrupt. */
		if (__predict_false((uintptr_t)base > lock))
			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
			    func, line, ld, base, sz);
		/* Next lock lies beyond the region: no overlap. */
		if (lock >= (uintptr_t)base + sz)
			ld = NULL;
	}
	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
	if (__predict_false(ld != NULL)) {
		__cpu_simple_lock(&ld->ld_spinlock);
		/* lockdebug_abort1() releases the spinlock and does splx();
		 * only panic once the system is warm (!cold). */
		lockdebug_abort1(func, line, ld, s,
		    "allocation contains active lock", !cold);
		kcov_silence_leave();
		return;
	}
	splx(s);

	kcov_silence_leave();
}
709 | #endif /* _KERNEL */ | | 709 | #endif /* _KERNEL */ |
710 | | | 710 | |
711 | #ifdef DDB | | 711 | #ifdef DDB |
712 | #include <machine/db_machdep.h> | | 712 | #include <machine/db_machdep.h> |
713 | #include <ddb/db_interface.h> | | 713 | #include <ddb/db_interface.h> |
714 | #include <ddb/db_access.h> | | 714 | #include <ddb/db_access.h> |
715 | #endif | | 715 | #endif |
716 | | | 716 | |
717 | /* | | 717 | /* |
718 | * lockdebug_dump: | | 718 | * lockdebug_dump: |
719 | * | | 719 | * |
720 | * Dump information about a lock on panic, or for DDB. | | 720 | * Dump information about a lock on panic, or for DDB. |
721 | */ | | 721 | */ |
static void
lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
    __printflike(1, 2))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);
	lockops_t *lo = ld->ld_lockops;

	/* First chunk: fields read directly from the record. */
	(*pr)(
	    "lock address : %#018lx type : %18s\n"
	    "initialized : %#018lx",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (long)ld->ld_initaddr);

#ifndef _KERNEL
	/*
	 * Not compiled into the kernel (ddb/crash-style tool): the
	 * lockops structure lives in the target's address space, so
	 * fetch a local copy before it can be dereferenced.
	 */
	lockops_t los;
	lo = &los;
	db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
#endif
	(*pr)("\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "relevant cpu : %18u last held: %18u\n"
	    "relevant lwp : %#018lx last held: %#018lx\n"
	    "last locked%c : %#018lx unlocked%c: %#018lx\n",
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
	    (long)l, (long)ld->ld_lwp,
	    /* '*' marks whichever of locked/unlocked is current. */
	    ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
	    (long)ld->ld_locked,
	    ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
	    (long)ld->ld_unlocked);

#ifdef _KERNEL
	/* Lock-type specific dump hook, if the lock provides one. */
	if (lo->lo_dump != NULL)
		(*lo->lo_dump)(ld->ld_lock, pr);

	/* For sleep locks, also show the turnstile (waiters). */
	if (sleeper) {
		turnstile_print(ld->ld_lock, pr);
	}
#endif
}
764 | | | 764 | |
765 | #ifdef _KERNEL | | 765 | #ifdef _KERNEL |
766 | /* | | 766 | /* |
767 | * lockdebug_abort1: | | 767 | * lockdebug_abort1: |
768 | * | | 768 | * |
769 | * An error has been trapped - dump lock info and panic. | | 769 | * An error has been trapped - dump lock info and panic. |
770 | */ | | 770 | */ |
static void
lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
    const char *msg, bool dopanic)
{

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames. Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) != 1) {
		/* Another CPU/LWP aborted first; just back out. */
		__cpu_simple_unlock(&ld->ld_spinlock);
		splx(s);
		return;
	}

	/*
	 * Dump while ld->ld_spinlock (taken by the caller) is still
	 * held so the record cannot change under us; then release the
	 * spinlock and restore the caller's saved IPL before panicking.
	 */
	printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
	    func, line, msg);
	lockdebug_dump(curlwp, ld, printf_nolog);
	__cpu_simple_unlock(&ld->ld_spinlock);
	splx(s);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG: %s error: %s,%zu: %s",
		    ld->ld_lockops->lo_name, func, line, msg);
}
797 | | | 797 | |
798 | #endif /* _KERNEL */ | | 798 | #endif /* _KERNEL */ |
799 | #endif /* LOCKDEBUG */ | | 799 | #endif /* LOCKDEBUG */ |
800 | | | 800 | |
801 | /* | | 801 | /* |
802 | * lockdebug_lock_print: | | 802 | * lockdebug_lock_print: |
803 | * | | 803 | * |
804 | * Handle the DDB 'show lock' command. | | 804 | * Handle the DDB 'show lock' command. |
805 | */ | | 805 | */ |
806 | #ifdef DDB | | 806 | #ifdef DDB |
807 | void | | 807 | void |
808 | lockdebug_lock_print(void *addr, | | 808 | lockdebug_lock_print(void *addr, |
809 | void (*pr)(const char *, ...) __printflike(1, 2)) | | 809 | void (*pr)(const char *, ...) __printflike(1, 2)) |
810 | { | | 810 | { |
811 | #ifdef LOCKDEBUG | | 811 | #ifdef LOCKDEBUG |
812 | lockdebug_t *ld, lds; | | 812 | lockdebug_t *ld, lds; |
813 | | | 813 | |
814 | TAILQ_FOREACH(ld, &ld_all, ld_achain) { | | 814 | TAILQ_FOREACH(ld, &ld_all, ld_achain) { |
815 | db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds)); | | 815 | db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds)); |
816 | ld = &lds; | | 816 | ld = &lds; |
817 | if (ld->ld_lock == NULL) | | 817 | if (ld->ld_lock == NULL) |
818 | continue; | | 818 | continue; |
819 | if (addr == NULL || ld->ld_lock == addr) { | | 819 | if (addr == NULL || ld->ld_lock == addr) { |
820 | lockdebug_dump(curlwp, ld, pr); | | 820 | lockdebug_dump(curlwp, ld, pr); |
821 | if (addr != NULL) | | 821 | if (addr != NULL) |
822 | return; | | 822 | return; |
823 | } | | 823 | } |
824 | } | | 824 | } |
825 | if (addr != NULL) { | | 825 | if (addr != NULL) { |
826 | (*pr)("Sorry, no record of a lock with address %p found.\n", | | 826 | (*pr)("Sorry, no record of a lock with address %p found.\n", |
827 | addr); | | 827 | addr); |
828 | } | | 828 | } |
829 | #else | | 829 | #else |
830 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); | | 830 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); |
831 | #endif /* LOCKDEBUG */ | | 831 | #endif /* LOCKDEBUG */ |
832 | } | | 832 | } |
833 | | | 833 | |
834 | #ifdef _KERNEL | | 834 | #ifdef _KERNEL |
835 | #ifdef LOCKDEBUG | | 835 | #ifdef LOCKDEBUG |
836 | static void | | 836 | static void |
837 | lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i, | | 837 | lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i, |
838 | void (*pr)(const char *, ...) __printflike(1, 2)) | | 838 | void (*pr)(const char *, ...) __printflike(1, 2)) |
839 | { | | 839 | { |
840 | const char *sym; | | 840 | const char *sym; |
841 | | | 841 | |
842 | #ifdef _KERNEL | | 842 | #ifdef _KERNEL |
843 | ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr, | | 843 | ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr, |
844 | KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY); | | 844 | KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY); |
845 | #endif | | 845 | #endif |
846 | (*pr)("* Lock %d (initialized at %s)\n", i++, sym); | | 846 | (*pr)("* Lock %d (initialized at %s)\n", i++, sym); |
847 | lockdebug_dump(l, ld, pr); | | 847 | lockdebug_dump(l, ld, pr); |
848 | } | | 848 | } |
849 | | | 849 | |
850 | static void | | 850 | static void |
851 | lockdebug_show_trace(const void *ptr, | | 851 | lockdebug_show_trace(const void *ptr, |
852 | void (*pr)(const char *, ...) __printflike(1, 2)) | | 852 | void (*pr)(const char *, ...) __printflike(1, 2)) |
853 | { | | 853 | { |
854 | db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr); | | 854 | db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr); |
855 | } | | 855 | } |
856 | | | 856 | |
857 | static void | | 857 | static void |
858 | lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2), | | 858 | lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2), |
859 | bool show_trace) | | 859 | bool show_trace) |
860 | { | | 860 | { |
861 | struct proc *p; | | 861 | struct proc *p; |
862 | | | 862 | |
863 | LIST_FOREACH(p, &allproc, p_list) { | | 863 | LIST_FOREACH(p, &allproc, p_list) { |
864 | struct lwp *l; | | 864 | struct lwp *l; |
865 | LIST_FOREACH(l, &p->p_lwps, l_sibling) { | | 865 | LIST_FOREACH(l, &p->p_lwps, l_sibling) { |
866 | lockdebug_t *ld; | | 866 | lockdebug_t *ld; |
867 | int i = 0; | | 867 | int i = 0; |
868 | if (TAILQ_EMPTY(&l->l_ld_locks) && | | 868 | if (TAILQ_EMPTY(&l->l_ld_locks) && |
869 | l->l_ld_wanted == NULL) { | | 869 | l->l_ld_wanted == NULL) { |
870 | continue; | | 870 | continue; |
871 | } | | 871 | } |
872 | (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n", | | 872 | (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n", |
873 | p->p_pid, l->l_lid, | | 873 | p->p_pid, l->l_lid, |
874 | l->l_name ? l->l_name : p->p_comm, l, l->l_stat); | | 874 | l->l_name ? l->l_name : p->p_comm, l, l->l_stat); |
875 | if (!TAILQ_EMPTY(&l->l_ld_locks)) { | | 875 | if (!TAILQ_EMPTY(&l->l_ld_locks)) { |
876 | (*pr)("\n*** Locks held: \n"); | | 876 | (*pr)("\n*** Locks held: \n"); |
877 | TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) { | | 877 | TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) { |
878 | (*pr)("\n"); | | 878 | (*pr)("\n"); |
879 | lockdebug_show_one(l, ld, i++, pr); | | 879 | lockdebug_show_one(l, ld, i++, pr); |
880 | } | | 880 | } |
881 | } else { | | 881 | } else { |
882 | (*pr)("\n*** Locks held: none\n"); | | 882 | (*pr)("\n*** Locks held: none\n"); |
883 | } | | 883 | } |
884 | | | 884 | |
885 | if (l->l_ld_wanted != NULL) { | | 885 | if (l->l_ld_wanted != NULL) { |
886 | (*pr)("\n*** Locks wanted: \n\n"); | | 886 | (*pr)("\n*** Locks wanted: \n\n"); |
887 | lockdebug_show_one(l, l->l_ld_wanted, 0, pr); | | 887 | lockdebug_show_one(l, l->l_ld_wanted, 0, pr); |
888 | } else { | | 888 | } else { |
889 | (*pr)("\n*** Locks wanted: none\n"); | | 889 | (*pr)("\n*** Locks wanted: none\n"); |
890 | } | | 890 | } |
891 | if (show_trace) { | | 891 | if (show_trace) { |
892 | (*pr)("\n*** Traceback: \n\n"); | | 892 | (*pr)("\n*** Traceback: \n\n"); |
893 | lockdebug_show_trace(l, pr); | | 893 | lockdebug_show_trace(l, pr); |
894 | (*pr)("\n"); | | 894 | (*pr)("\n"); |
895 | } | | 895 | } |
896 | } | | 896 | } |
897 | } | | 897 | } |
898 | } | | 898 | } |
899 | | | 899 | |
900 | static void | | 900 | static void |
901 | lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2), | | 901 | lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2), |
902 | bool show_trace) | | 902 | bool show_trace) |
903 | { | | 903 | { |
904 | lockdebug_t *ld; | | 904 | lockdebug_t *ld; |
905 | CPU_INFO_ITERATOR cii; | | 905 | CPU_INFO_ITERATOR cii; |
906 | struct cpu_info *ci; | | 906 | struct cpu_info *ci; |
907 | | | 907 | |
908 | for (CPU_INFO_FOREACH(cii, ci)) { | | 908 | for (CPU_INFO_FOREACH(cii, ci)) { |
909 | int i = 0; | | 909 | int i = 0; |
910 | if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks)) | | 910 | if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks)) |
911 | continue; | | 911 | continue; |
912 | (*pr)("\n******* Locks held on %s:\n", cpu_name(ci)); | | 912 | (*pr)("\n******* Locks held on %s:\n", cpu_name(ci)); |
913 | TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) { | | 913 | TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) { |
914 | (*pr)("\n"); | | 914 | (*pr)("\n"); |
915 | #ifdef MULTIPROCESSOR | | 915 | #ifdef MULTIPROCESSOR |
916 | lockdebug_show_one(ci->ci_curlwp, ld, i++, pr); | | 916 | lockdebug_show_one(ci->ci_curlwp, ld, i++, pr); |
917 | if (show_trace) | | 917 | if (show_trace) |
918 | lockdebug_show_trace(ci->ci_curlwp, pr); | | 918 | lockdebug_show_trace(ci->ci_curlwp, pr); |
919 | #else | | 919 | #else |
920 | lockdebug_show_one(curlwp, ld, i++, pr); | | 920 | lockdebug_show_one(curlwp, ld, i++, pr); |
921 | if (show_trace) | | 921 | if (show_trace) |
922 | lockdebug_show_trace(curlwp, pr); | | 922 | lockdebug_show_trace(curlwp, pr); |
923 | #endif | | 923 | #endif |
924 | } | | 924 | } |
925 | } | | 925 | } |
926 | } | | 926 | } |
927 | #endif /* _KERNEL */ | | 927 | #endif /* _KERNEL */ |
928 | #endif /* LOCKDEBUG */ | | 928 | #endif /* LOCKDEBUG */ |
929 | | | 929 | |
930 | #ifdef _KERNEL | | 930 | #ifdef _KERNEL |
931 | void | | 931 | void |
932 | lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2), | | 932 | lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2), |
933 | const char *modif) | | 933 | const char *modif) |
934 | { | | 934 | { |
935 | #ifdef LOCKDEBUG | | 935 | #ifdef LOCKDEBUG |
936 | bool show_trace = false; | | 936 | bool show_trace = false; |
937 | if (modif[0] == 't') | | 937 | if (modif[0] == 't') |
938 | show_trace = true; | | 938 | show_trace = true; |
939 | | | 939 | |
940 | (*pr)("[Locks tracked through LWPs]\n"); | | 940 | (*pr)("[Locks tracked through LWPs]\n"); |
941 | lockdebug_show_all_locks_lwp(pr, show_trace); | | 941 | lockdebug_show_all_locks_lwp(pr, show_trace); |
942 | (*pr)("\n"); | | 942 | (*pr)("\n"); |
943 | | | 943 | |
944 | (*pr)("[Locks tracked through CPUs]\n"); | | 944 | (*pr)("[Locks tracked through CPUs]\n"); |
945 | lockdebug_show_all_locks_cpu(pr, show_trace); | | 945 | lockdebug_show_all_locks_cpu(pr, show_trace); |
946 | (*pr)("\n"); | | 946 | (*pr)("\n"); |
947 | #else | | 947 | #else |
948 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); | | 948 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); |
949 | #endif /* LOCKDEBUG */ | | 949 | #endif /* LOCKDEBUG */ |
950 | } | | 950 | } |
951 | | | 951 | |
952 | void | | 952 | void |
953 | lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2)) | | 953 | lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2)) |
954 | { | | 954 | { |
955 | #ifdef LOCKDEBUG | | 955 | #ifdef LOCKDEBUG |
956 | lockdebug_t *ld; | | 956 | lockdebug_t *ld; |
957 | void *_ld; | | 957 | void *_ld; |
958 | uint32_t n_null = 0; | | 958 | uint32_t n_null = 0; |
959 | uint32_t n_spin_mutex = 0; | | 959 | uint32_t n_spin_mutex = 0; |
960 | uint32_t n_adaptive_mutex = 0; | | 960 | uint32_t n_adaptive_mutex = 0; |
961 | uint32_t n_rwlock = 0; | | 961 | uint32_t n_rwlock = 0; |
962 | uint32_t n_others = 0; | | 962 | uint32_t n_others = 0; |
963 | | | 963 | |
964 | RB_TREE_FOREACH(_ld, &ld_rb_tree) { | | 964 | RB_TREE_FOREACH(_ld, &ld_rb_tree) { |
965 | ld = _ld; | | 965 | ld = _ld; |
966 | if (ld->ld_lock == NULL) { | | 966 | if (ld->ld_lock == NULL) { |
967 | n_null++; | | 967 | n_null++; |
968 | continue; | | 968 | continue; |
969 | } | | 969 | } |
970 | if (ld->ld_lockops->lo_name[0] == 'M') { | | 970 | if (ld->ld_lockops->lo_name[0] == 'M') { |
971 | if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP) | | 971 | if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP) |
972 | n_adaptive_mutex++; | | 972 | n_adaptive_mutex++; |
973 | else | | 973 | else |
974 | n_spin_mutex++; | | 974 | n_spin_mutex++; |
975 | continue; | | 975 | continue; |
976 | } | | 976 | } |
977 | if (ld->ld_lockops->lo_name[0] == 'R') { | | 977 | if (ld->ld_lockops->lo_name[0] == 'R') { |
978 | n_rwlock++; | | 978 | n_rwlock++; |
979 | continue; | | 979 | continue; |
980 | } | | 980 | } |
981 | n_others++; | | 981 | n_others++; |
982 | } | | 982 | } |
983 | (*pr)( | | 983 | (*pr)( |
984 | "spin mutex: %u\n" | | 984 | "spin mutex: %u\n" |
985 | "adaptive mutex: %u\n" | | 985 | "adaptive mutex: %u\n" |
986 | "rwlock: %u\n" | | 986 | "rwlock: %u\n" |
987 | "null locks: %u\n" | | 987 | "null locks: %u\n" |
988 | "others: %u\n", | | 988 | "others: %u\n", |
989 | n_spin_mutex, n_adaptive_mutex, n_rwlock, | | 989 | n_spin_mutex, n_adaptive_mutex, n_rwlock, |
990 | n_null, n_others); | | 990 | n_null, n_others); |
991 | #else | | 991 | #else |
992 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); | | 992 | (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); |
993 | #endif /* LOCKDEBUG */ | | 993 | #endif /* LOCKDEBUG */ |
994 | } | | 994 | } |
995 | #endif /* _KERNEL */ | | 995 | #endif /* _KERNEL */ |
996 | #endif /* DDB */ | | 996 | #endif /* DDB */ |
997 | | | 997 | |
998 | #ifdef _KERNEL | | 998 | #ifdef _KERNEL |
999 | /* | | 999 | /* |
1000 | * lockdebug_dismiss: | | 1000 | * lockdebug_dismiss: |
1001 | * | | 1001 | * |
1002 | * The system is rebooting, and potentially from an unsafe | | 1002 | * The system is rebooting, and potentially from an unsafe |
1003 | * place so avoid any future aborts. | | 1003 | * place so avoid any future aborts. |
1004 | */ | | 1004 | */ |
1005 | void | | 1005 | void |
1006 | lockdebug_dismiss(void) | | 1006 | lockdebug_dismiss(void) |
1007 | { | | 1007 | { |
1008 | | | 1008 | |
1009 | atomic_inc_uint_nv(&ld_panic); | | 1009 | atomic_inc_uint_nv(&ld_panic); |
1010 | } | | 1010 | } |
1011 | | | 1011 | |
/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 *
 *	func/line identify the call site that detected the error, lock
 *	is the offending lock object, ops supplies its name and dump
 *	routine, and msg describes the failure.  Does not return when
 *	the panic path is taken; may return if a panic is already in
 *	progress (see the ld_panic check below).
 */
void
lockdebug_abort(const char *func, size_t line, const volatile void *lock,
    lockops_t *ops, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	int s;

	/*
	 * Block interrupts while consulting the lockdebug table.  On a
	 * successful lookup, the raised spl level 's' is handed off to
	 * lockdebug_abort1(), which presumably restores it or panics —
	 * it is defined elsewhere in this file (NOTE(review): confirm).
	 */
	s = splhigh();
	if ((ld = lockdebug_lookup(func, line, lock,
	    (uintptr_t) __builtin_return_address(0))) != NULL) {
		lockdebug_abort1(func, line, ld, s, msg, true);
		return;
	}
	/* No lockdebug record for this lock; restore spl and fall back. */
	splx(s);
#endif	/* LOCKDEBUG */

	/*
	 * Don't make the situation worse if the system is already going
	 * down in flames.  Once a panic is triggered, lockdebug state
	 * becomes stale and cannot be trusted.
	 */
	if (atomic_inc_uint_nv(&ld_panic) > 1)
		return;

	/*
	 * First to panic: dump a summary of the lock and context via the
	 * non-logging printf (the log may not be usable at this point),
	 * then invoke the lock's own dump routine before panicking.
	 */
	printf_nolog("%s error: %s,%zu: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, line, msg, (long)lock,
	    (int)cpu_index(curcpu()), (long)curlwp);
	(*ops->lo_dump)(lock, printf_nolog);
	printf_nolog("\n");

	panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
	    ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
}
1054 | #endif /* _KERNEL */ | | 1054 | #endif /* _KERNEL */ |