Tue Feb 20 03:34:52 2018 UTC
Sprinkle __predict_false into LOCKDEBUG functions

Panics and lockdebug failures are not expected to occur during normal operation.


(ozaki-r)
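For readers unfamiliar with the macro: __predict_false() is NetBSD's branch-prediction
hint from <sys/cdefs.h>, a thin wrapper around the compiler's __builtin_expect() that
marks a condition as expected-false so the error path can be laid out as the cold,
out-of-line branch. The small userland sketch below is illustrative only and not part
of this commit (the fallback #define and the check_lock() helper are assumptions made
for the example); it shows the pattern the diff applies to the LOCKDEBUG checks:

	#include <stdio.h>
	#include <stdlib.h>

	/* Fallback for systems whose headers do not provide the macro. */
	#ifndef __predict_false
	#define	__predict_false(exp)	__builtin_expect((exp) != 0, 0)
	#endif

	/*
	 * Hypothetical helper, loosely mirroring how lockdebug_lookup()
	 * panics on an uninitialized lock.  The hint marks the failure
	 * branch as unlikely; the common case falls straight through.
	 */
	static void
	check_lock(const void *lock)
	{

		if (__predict_false(lock == NULL)) {
			fprintf(stderr, "uninitialized lock\n");
			abort();
		}
	}

	int
	main(void)
	{
		int dummy;

		check_lock(&dummy);	/* common case: branch not taken */
		printf("ok\n");
		return 0;
	}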
diff -r1.59 -r1.60 src/sys/kern/subr_lockdebug.c

cvs diff -r1.59 -r1.60 src/sys/kern/subr_lockdebug.c

--- src/sys/kern/subr_lockdebug.c 2018/02/14 03:56:26 1.59
+++ src/sys/kern/subr_lockdebug.c 2018/02/20 03:34:52 1.60
@@ -1,14 +1,14 @@
-/*	$NetBSD: subr_lockdebug.c,v 1.59 2018/02/14 03:56:26 ozaki-r Exp $	*/
+/*	$NetBSD: subr_lockdebug.c,v 1.60 2018/02/20 03:34:52 ozaki-r Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -24,27 +24,27 @@
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Basic lock debugging code shared among lock primitives.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.59 2018/02/14 03:56:26 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.60 2018/02/20 03:34:52 ozaki-r Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_ddb.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
 #include <sys/lockdebug.h>
 #include <sys/sleepq.h>
 #include <sys/cpu.h>
@@ -186,27 +186,27 @@ lockdebug_unlock_cpus(void)
 
 /*
  * lockdebug_lookup:
  *
  *	Find a lockdebug structure by a pointer to a lock and return it locked.
  */
 static inline lockdebug_t *
 lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
     uintptr_t where)
 {
 	lockdebug_t *ld;
 
 	ld = lockdebug_lookup1(lock);
-	if (ld == NULL) {
+	if (__predict_false(ld == NULL)) {
 		panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
 		    PRIxPTR ")", func, line, lock, where);
 	}
 	return ld;
 }
 
 /*
  * lockdebug_init:
  *
  *	Initialize the lockdebug system.  Allocate an initial pool of
  *	lockdebug structures before the VM system is up and running.
  */
 static void
@@ -236,34 +236,34 @@ lockdebug_init(void)
  * lockdebug_alloc:
  *
  *	A lock is being initialized, so allocate an associated debug
  *	structure.
  */
 bool
 lockdebug_alloc(const char *func, size_t line, volatile void *lock,
     lockops_t *lo, uintptr_t initaddr)
 {
 	struct cpu_info *ci;
 	lockdebug_t *ld;
 	int s;
 
-	if (lo == NULL || panicstr != NULL || ld_panic)
+	if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
 		return false;
-	if (ld_freeptr == 0)
+	if (__predict_false(ld_freeptr == 0))
 		lockdebug_init();
 
 	s = splhigh();
 	__cpu_simple_lock(&ld_mod_lk);
-	if ((ld = lockdebug_lookup1(lock)) != NULL) {
+	if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
 		__cpu_simple_unlock(&ld_mod_lk);
 		lockdebug_abort1(func, line, ld, s, "already initialized",
 		    true);
 		return false;
 	}
 
 	/*
 	 * Pinch a new debug structure.  We may recurse because we call
 	 * kmem_alloc(), which may need to initialize new locks somewhere
 	 * down the path.  If not recursing, we try to maintain at least
 	 * LD_SLOP structures free, which should hopefully be enough to
 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
 	 * worry: we'll just mark the lock as not having an ID.
@@ -271,36 +271,36 @@ lockdebug_alloc(const char *func, size_t
 	ci = curcpu();
 	ci->ci_lkdebug_recurse++;
 	if (TAILQ_EMPTY(&ld_free)) {
 		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
 			ci->ci_lkdebug_recurse--;
 			__cpu_simple_unlock(&ld_mod_lk);
 			splx(s);
 			return false;
 		}
 		s = lockdebug_more(s);
 	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
 		s = lockdebug_more(s);
 	}
-	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
+	if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
 		__cpu_simple_unlock(&ld_mod_lk);
 		splx(s);
 		return false;
 	}
 	TAILQ_REMOVE(&ld_free, ld, ld_chain);
 	ld_nfree--;
 	ci->ci_lkdebug_recurse--;
 
-	if (ld->ld_lock != NULL) {
+	if (__predict_false(ld->ld_lock != NULL)) {
 		panic("%s,%zu: corrupt table ld %p", func, line, ld);
 	}
 
 	/* Initialise the structure. */
 	ld->ld_lock = lock;
 	ld->ld_lockops = lo;
 	ld->ld_locked = 0;
 	ld->ld_unlocked = 0;
 	ld->ld_lwp = NULL;
 	ld->ld_initaddr = initaddr;
 	ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
 	lockdebug_lock_cpus();
 	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
@@ -312,40 +312,41 @@ lockdebug_alloc(const char *func, size_t
 }
 
 /*
  * lockdebug_free:
  *
  *	A lock is being destroyed, so release debugging resources.
  */
 void
 lockdebug_free(const char *func, size_t line, volatile void *lock)
 {
 	lockdebug_t *ld;
 	int s;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	__cpu_simple_lock(&ld_mod_lk);
 	ld = lockdebug_lookup(func, line, lock,
 	    (uintptr_t) __builtin_return_address(0));
-	if (ld == NULL) {
+	if (__predict_false(ld == NULL)) {
 		__cpu_simple_unlock(&ld_mod_lk);
 		panic("%s,%zu: destroying uninitialized object %p"
 		    "(ld_lock=%p)", func, line, lock, ld->ld_lock);
 		return;
 	}
-	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
+	if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
+	    ld->ld_shares != 0)) {
 		__cpu_simple_unlock(&ld_mod_lk);
 		lockdebug_abort1(func, line, ld, s, "is locked or in use",
 		    true);
 		return;
 	}
 	lockdebug_lock_cpus();
 	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
 	lockdebug_unlock_cpus();
 	ld->ld_lock = NULL;
 	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
 	ld_nfree++;
 	__cpu_simple_unlock(&ld->ld_spinlock);
 	__cpu_simple_unlock(&ld_mod_lk);
@@ -420,102 +421,102 @@ lockdebug_more(int s)
  */
 void
 lockdebug_wantlock(const char *func, size_t line,
     const volatile void *lock, uintptr_t where, int shared)
 {
 	struct lwp *l = curlwp;
 	lockdebug_t *ld;
 	bool recurse;
 	int s;
 
 	(void)shared;
 	recurse = false;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
 		splx(s);
 		return;
 	}
 	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
 			if (ld->ld_lwp == l)
 				recurse = true;
 		} else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
 			recurse = true;
 	}
 	if (cpu_intr_p()) {
-		if ((ld->ld_flags & LD_SLEEPER) != 0) {
+		if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
 			lockdebug_abort1(func, line, ld, s,
 			    "acquiring sleep lock from interrupt context",
 			    true);
 			return;
 		}
 	}
 	if (shared > 0)
 		ld->ld_shwant++;
 	else if (shared == 0)
 		ld->ld_exwant++;
-	if (recurse) {
+	if (__predict_false(recurse)) {
 		lockdebug_abort1(func, line, ld, s, "locking against myself",
 		    true);
 		return;
 	}
 	__cpu_simple_unlock(&ld->ld_spinlock);
 	splx(s);
 }
 
 /*
  * lockdebug_locked:
  *
  *	Process a lock acquire operation.
  */
 void
 lockdebug_locked(const char *func, size_t line,
     volatile void *lock, void *cvlock, uintptr_t where, int shared)
 {
 	struct lwp *l = curlwp;
 	lockdebug_t *ld;
 	int s;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
 		splx(s);
 		return;
 	}
 	if (cvlock) {
 		KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
 		if (lock == (void *)&lbolt) {
 			/* nothing */
 		} else if (ld->ld_shares++ == 0) {
 			ld->ld_locked = (uintptr_t)cvlock;
-		} else if (cvlock != (void *)ld->ld_locked) {
+		} else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
 			lockdebug_abort1(func, line, ld, s,
 			    "multiple locks used with condition variable",
 			    true);
 			return;
 		}
 	} else if (shared) {
 		l->l_shlocks++;
 		ld->ld_locked = where;
 		ld->ld_shares++;
 		ld->ld_shwant--;
 	} else {
-		if ((ld->ld_flags & LD_LOCKED) != 0) {
+		if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
 			lockdebug_abort1(func, line, ld, s, "already locked",
 			    true);
 			return;
 		}
 		ld->ld_flags |= LD_LOCKED;
 		ld->ld_locked = where;
 		ld->ld_exwant--;
 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
 			TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
 		} else {
 			TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
 			    ld, ld_chain);
 		}
@@ -529,74 +530,75 @@ lockdebug_locked(const char *func, size_
 /*
  * lockdebug_unlocked:
  *
  *	Process a lock release operation.
  */
 void
 lockdebug_unlocked(const char *func, size_t line,
     volatile void *lock, uintptr_t where, int shared)
 {
 	struct lwp *l = curlwp;
 	lockdebug_t *ld;
 	int s;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
 		splx(s);
 		return;
 	}
 	if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
 		if (lock == (void *)&lbolt) {
 			/* nothing */
 		} else {
 			ld->ld_shares--;
 		}
 	} else if (shared) {
-		if (l->l_shlocks == 0) {
+		if (__predict_false(l->l_shlocks == 0)) {
 			lockdebug_abort1(func, line, ld, s,
 			    "no shared locks held by LWP", true);
 			return;
 		}
-		if (ld->ld_shares == 0) {
+		if (__predict_false(ld->ld_shares == 0)) {
 			lockdebug_abort1(func, line, ld, s,
 			    "no shared holds on this lock", true);
 			return;
 		}
 		l->l_shlocks--;
 		ld->ld_shares--;
 		if (ld->ld_lwp == l) {
 			ld->ld_unlocked = where;
 			ld->ld_lwp = NULL;
 		}
 		if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
 			ld->ld_cpu = (uint16_t)-1;
 	} else {
-		if ((ld->ld_flags & LD_LOCKED) == 0) {
+		if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
 			lockdebug_abort1(func, line, ld, s, "not locked", true);
 			return;
 		}
 
 		if ((ld->ld_flags & LD_SLEEPER) != 0) {
-			if (ld->ld_lwp != curlwp) {
+			if (__predict_false(ld->ld_lwp != curlwp)) {
 				lockdebug_abort1(func, line, ld, s,
 				    "not held by current LWP", true);
 				return;
 			}
 			TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
 		} else {
-			if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
+			uint16_t idx = (uint16_t)cpu_index(curcpu());
+			if (__predict_false(ld->ld_cpu != idx)) {
 				lockdebug_abort1(func, line, ld, s,
 				    "not held by current CPU", true);
 				return;
 			}
 			TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
 			    ld_chain);
 		}
 		ld->ld_flags &= ~LD_LOCKED;
 		ld->ld_unlocked = where;
 		ld->ld_lwp = NULL;
 	}
 	__cpu_simple_unlock(&ld->ld_spinlock);
 	splx(s);
@@ -604,82 +606,84 @@ lockdebug_unlocked(const char *func, siz
 
 /*
  * lockdebug_wakeup:
  *
  *	Process a wakeup on a condition variable.
  */
 void
 lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
     uintptr_t where)
 {
 	lockdebug_t *ld;
 	int s;
 
-	if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
+	if (__predict_false(panicstr != NULL || ld_panic || lock == (void *)&lbolt))
 		return;
 
 	s = splhigh();
 	/* Find the CV... */
 	if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
 		splx(s);
 		return;
 	}
 	/*
 	 * If it has any waiters, ensure that they are using the
 	 * same interlock.
 	 */
-	if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
+	if (__predict_false(ld->ld_shares != 0 &&
+	    !mutex_owned((kmutex_t *)ld->ld_locked))) {
 		lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
 		    "held during wakeup", true);
 		return;
 	}
 	__cpu_simple_unlock(&ld->ld_spinlock);
 	splx(s);
 }
 
 /*
  * lockdebug_barrier:
  *
  *	Panic if we hold more than one specified spin lock, and optionally,
  *	if we hold sleep locks.
  */
 void
 lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
     int slplocks)
 {
 	struct lwp *l = curlwp;
 	lockdebug_t *ld;
 	int s;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	if ((l->l_pflag & LP_INTR) == 0) {
 		TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
 			if (ld->ld_lock == spinlock) {
 				continue;
 			}
 			__cpu_simple_lock(&ld->ld_spinlock);
 			lockdebug_abort1(func, line, ld, s,
 			    "spin lock held", true);
 			return;
 		}
 	}
 	if (slplocks) {
 		splx(s);
 		return;
 	}
-	if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
+	ld = TAILQ_FIRST(&l->l_ld_locks);
+	if (__predict_false(ld != NULL)) {
 		__cpu_simple_lock(&ld->ld_spinlock);
 		lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
 		return;
 	}
 	splx(s);
 	if (l->l_shlocks != 0) {
 		TAILQ_FOREACH(ld, &ld_all, ld_achain) {
 			if (ld->ld_lockops->lo_type == LOCKOPS_CV)
 				continue;
 			if (ld->ld_lwp == l)
 				lockdebug_dump(ld, printf);
 		}
 		panic("%s,%zu: holding %d shared locks", func, line,
@@ -690,44 +694,44 @@ lockdebug_barrier(const char *func, size
 /*
  * lockdebug_mem_check:
  *
  *	Check for in-use locks within a memory region that is
  *	being freed.
  */
 void
 lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
 {
 	lockdebug_t *ld;
 	struct cpu_info *ci;
 	int s;
 
-	if (panicstr != NULL || ld_panic)
+	if (__predict_false(panicstr != NULL || ld_panic))
 		return;
 
 	s = splhigh();
 	ci = curcpu();
 	__cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
 	ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
 	if (ld != NULL) {
 		const uintptr_t lock = (uintptr_t)ld->ld_lock;
 
-		if ((uintptr_t)base > lock)
+		if (__predict_false((uintptr_t)base > lock))
 			panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
 			    func, line, ld, base, sz);
 		if (lock >= (uintptr_t)base + sz)
 			ld = NULL;
 	}
 	__cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
-	if (ld != NULL) {
+	if (__predict_false(ld != NULL)) {
 		__cpu_simple_lock(&ld->ld_spinlock);
 		lockdebug_abort1(func, line, ld, s,
 		    "allocation contains active lock", !cold);
 		return;
 	}
 	splx(s);
 }
 
 /*
  * lockdebug_dump:
  *
  *	Dump information about a lock on panic, or for DDB.
  */