Thu Jan 26 04:11:56 2017 UTC
For LOCKDEBUG:
Always provide the location of the caller of the lock operation as __func__, __LINE__.


(christos)
diff -r1.157 -r1.158 src/sys/kern/kern_lock.c
diff -r1.63 -r1.64 src/sys/kern/kern_mutex.c
diff -r1.45 -r1.46 src/sys/kern/kern_rwlock.c
diff -r1.54 -r1.55 src/sys/kern/subr_lockdebug.c
diff -r1.14 -r1.15 src/sys/sys/lockdebug.h
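
The change is mechanical across all five files: every LOCKDEBUG wrapper macro now injects __func__ and __LINE__ at its expansion site and hands them down to the reporting functions (lockdebug_abort(), lockdebug_abort1(), mutex_abort(), rw_abort(), ...), so a panic names the calling function and line rather than only the lock. A minimal, self-contained sketch of the pattern follows — it is not the kernel code itself; report_abort() and LOCK_ABORT() are illustrative stand-ins:

	#include <stdio.h>
	#include <stdlib.h>

	/* Reporting function: now takes the caller's function name and line. */
	static void
	report_abort(const char *func, size_t line, const char *msg)
	{
		fprintf(stderr, "lock error: %s,%zu: %s\n", func, line, msg);
		abort();
	}

	/*
	 * Wrapper macro: __func__ and __LINE__ are evaluated where the macro
	 * expands, so callers keep the old single-argument form while the
	 * report identifies the call site.
	 */
	#define LOCK_ABORT(msg)	report_abort(__func__, (size_t)__LINE__, (msg))

	static void
	take_lock(void)
	{
		int locked = 0;		/* pretend the acquire failed */

		if (!locked)
			LOCK_ABORT("assertion failed: locked");
	}

	int
	main(void)
	{
		take_lock();
		return 0;
	}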

cvs diff -r1.157 -r1.158 src/sys/kern/kern_lock.c

--- src/sys/kern/kern_lock.c 2015/04/11 15:24:25 1.157
+++ src/sys/kern/kern_lock.c 2017/01/26 04:11:56 1.158
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $ */ 1/* $NetBSD: kern_lock.c,v 1.158 2017/01/26 04:11:56 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran. 9 * NASA Ames Research Center, and by Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -21,27 +21,27 @@ @@ -21,27 +21,27 @@
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.157 2015/04/11 15:24:25 skrll Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.158 2017/01/26 04:11:56 christos Exp $");
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/proc.h> 37#include <sys/proc.h>
38#include <sys/lock.h> 38#include <sys/lock.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/kernel.h> 40#include <sys/kernel.h>
41#include <sys/lockdebug.h> 41#include <sys/lockdebug.h>
42#include <sys/cpu.h> 42#include <sys/cpu.h>
43#include <sys/syslog.h> 43#include <sys/syslog.h>
44#include <sys/atomic.h> 44#include <sys/atomic.h>
45#include <sys/lwp.h> 45#include <sys/lwp.h>
46 46
47#include <machine/lock.h> 47#include <machine/lock.h>
@@ -91,27 +91,27 @@ assert_sleepable(void) @@ -91,27 +91,27 @@ assert_sleepable(void)
91 91
92 if (reason) { 92 if (reason) {
93 panic("%s: %s caller=%p", __func__, reason, 93 panic("%s: %s caller=%p", __func__, reason,
94 (void *)RETURN_ADDRESS); 94 (void *)RETURN_ADDRESS);
95 } 95 }
96} 96}
97 97
98/* 98/*
99 * Functions for manipulating the kernel_lock. We put them here 99 * Functions for manipulating the kernel_lock. We put them here
100 * so that they show up in profiles. 100 * so that they show up in profiles.
101 */ 101 */
102 102
103#define _KERNEL_LOCK_ABORT(msg) \ 103#define _KERNEL_LOCK_ABORT(msg) \
104 LOCKDEBUG_ABORT(kernel_lock, &_kernel_lock_ops, __func__, msg) 104 LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)
105 105
106#ifdef LOCKDEBUG 106#ifdef LOCKDEBUG
107#define _KERNEL_LOCK_ASSERT(cond) \ 107#define _KERNEL_LOCK_ASSERT(cond) \
108do { \ 108do { \
109 if (!(cond)) \ 109 if (!(cond)) \
110 _KERNEL_LOCK_ABORT("assertion failed: " #cond); \ 110 _KERNEL_LOCK_ABORT("assertion failed: " #cond); \
111} while (/* CONSTCOND */ 0) 111} while (/* CONSTCOND */ 0)
112#else 112#else
113#define _KERNEL_LOCK_ASSERT(cond) /* nothing */ 113#define _KERNEL_LOCK_ASSERT(cond) /* nothing */
114#endif 114#endif
115 115
116void _kernel_lock_dump(volatile void *); 116void _kernel_lock_dump(volatile void *);
117 117

cvs diff -r1.63 -r1.64 src/sys/kern/kern_mutex.c

--- src/sys/kern/kern_mutex.c 2016/07/07 06:55:43 1.63
+++ src/sys/kern/kern_mutex.c 2017/01/26 04:11:56 1.64
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_mutex.c,v 1.63 2016/07/07 06:55:43 msaitoh Exp $ */ 1/* $NetBSD: kern_mutex.c,v 1.64 2017/01/26 04:11:56 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Andrew Doran. 8 * by Jason R. Thorpe and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -30,27 +30,27 @@ @@ -30,27 +30,27 @@
30 */ 30 */
31 31
32/* 32/*
33 * Kernel mutex implementation, modeled after those found in Solaris, 33 * Kernel mutex implementation, modeled after those found in Solaris,
34 * a description of which can be found in: 34 * a description of which can be found in:
35 * 35 *
36 * Solaris Internals: Core Kernel Architecture, Jim Mauro and 36 * Solaris Internals: Core Kernel Architecture, Jim Mauro and
37 * Richard McDougall. 37 * Richard McDougall.
38 */ 38 */
39 39
40#define __MUTEX_PRIVATE 40#define __MUTEX_PRIVATE
41 41
42#include <sys/cdefs.h> 42#include <sys/cdefs.h>
43__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.63 2016/07/07 06:55:43 msaitoh Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.64 2017/01/26 04:11:56 christos Exp $");
44 44
45#include <sys/param.h> 45#include <sys/param.h>
46#include <sys/atomic.h> 46#include <sys/atomic.h>
47#include <sys/proc.h> 47#include <sys/proc.h>
48#include <sys/mutex.h> 48#include <sys/mutex.h>
49#include <sys/sched.h> 49#include <sys/sched.h>
50#include <sys/sleepq.h> 50#include <sys/sleepq.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52#include <sys/lockdebug.h> 52#include <sys/lockdebug.h>
53#include <sys/kernel.h> 53#include <sys/kernel.h>
54#include <sys/intr.h> 54#include <sys/intr.h>
55#include <sys/lock.h> 55#include <sys/lock.h>
56#include <sys/types.h> 56#include <sys/types.h>
@@ -72,27 +72,27 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c @@ -72,27 +72,27 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
72 * Debugging support. 72 * Debugging support.
73 */ 73 */
74 74
75#define MUTEX_WANTLOCK(mtx) \ 75#define MUTEX_WANTLOCK(mtx) \
76 LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \ 76 LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \
77 (uintptr_t)__builtin_return_address(0), 0) 77 (uintptr_t)__builtin_return_address(0), 0)
78#define MUTEX_LOCKED(mtx) \ 78#define MUTEX_LOCKED(mtx) \
79 LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL, \ 79 LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL, \
80 (uintptr_t)__builtin_return_address(0), 0) 80 (uintptr_t)__builtin_return_address(0), 0)
81#define MUTEX_UNLOCKED(mtx) \ 81#define MUTEX_UNLOCKED(mtx) \
82 LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), \ 82 LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), \
83 (uintptr_t)__builtin_return_address(0), 0) 83 (uintptr_t)__builtin_return_address(0), 0)
84#define MUTEX_ABORT(mtx, msg) \ 84#define MUTEX_ABORT(mtx, msg) \
85 mutex_abort(mtx, __func__, msg) 85 mutex_abort(__func__, __LINE__, mtx, msg)
86 86
87#if defined(LOCKDEBUG) 87#if defined(LOCKDEBUG)
88 88
89#define MUTEX_DASSERT(mtx, cond) \ 89#define MUTEX_DASSERT(mtx, cond) \
90do { \ 90do { \
91 if (!(cond)) \ 91 if (!(cond)) \
92 MUTEX_ABORT(mtx, "assertion failed: " #cond); \ 92 MUTEX_ABORT(mtx, "assertion failed: " #cond); \
93} while (/* CONSTCOND */ 0); 93} while (/* CONSTCOND */ 0);
94 94
95#else /* LOCKDEBUG */ 95#else /* LOCKDEBUG */
96 96
97#define MUTEX_DASSERT(mtx, cond) /* nothing */ 97#define MUTEX_DASSERT(mtx, cond) /* nothing */
98 98
@@ -251,28 +251,28 @@ MUTEX_RELEASE(kmutex_t *mtx) @@ -251,28 +251,28 @@ MUTEX_RELEASE(kmutex_t *mtx)
251#undef __HAVE_SPIN_MUTEX_STUBS 251#undef __HAVE_SPIN_MUTEX_STUBS
252#endif 252#endif
253 253
254#ifndef __HAVE_MUTEX_STUBS 254#ifndef __HAVE_MUTEX_STUBS
255__strong_alias(mutex_enter,mutex_vector_enter); 255__strong_alias(mutex_enter,mutex_vector_enter);
256__strong_alias(mutex_exit,mutex_vector_exit); 256__strong_alias(mutex_exit,mutex_vector_exit);
257#endif 257#endif
258 258
259#ifndef __HAVE_SPIN_MUTEX_STUBS 259#ifndef __HAVE_SPIN_MUTEX_STUBS
260__strong_alias(mutex_spin_enter,mutex_vector_enter); 260__strong_alias(mutex_spin_enter,mutex_vector_enter);
261__strong_alias(mutex_spin_exit,mutex_vector_exit); 261__strong_alias(mutex_spin_exit,mutex_vector_exit);
262#endif 262#endif
263 263
264static void mutex_abort(kmutex_t *, const char *, const char *); 264static void mutex_abort(const char *, size_t, kmutex_t *, const char *);
265static void mutex_dump(volatile void *); 265static void mutex_dump(volatile void *);
266 266
267lockops_t mutex_spin_lockops = { 267lockops_t mutex_spin_lockops = {
268 "Mutex", 268 "Mutex",
269 LOCKOPS_SPIN, 269 LOCKOPS_SPIN,
270 mutex_dump 270 mutex_dump
271}; 271};
272 272
273lockops_t mutex_adaptive_lockops = { 273lockops_t mutex_adaptive_lockops = {
274 "Mutex", 274 "Mutex",
275 LOCKOPS_SLEEP, 275 LOCKOPS_SLEEP,
276 mutex_dump 276 mutex_dump
277}; 277};
278 278
@@ -297,31 +297,31 @@ mutex_dump(volatile void *cookie) @@ -297,31 +297,31 @@ mutex_dump(volatile void *cookie)
297 printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n", 297 printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
298 (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx), 298 (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
299 MUTEX_SPIN_P(mtx)); 299 MUTEX_SPIN_P(mtx));
300} 300}
301 301
302/* 302/*
303 * mutex_abort: 303 * mutex_abort:
304 * 304 *
305 * Dump information about an error and panic the system. This 305 * Dump information about an error and panic the system. This
306 * generates a lot of machine code in the DIAGNOSTIC case, so 306 * generates a lot of machine code in the DIAGNOSTIC case, so
307 * we ask the compiler to not inline it. 307 * we ask the compiler to not inline it.
308 */ 308 */
309void __noinline 309void __noinline
310mutex_abort(kmutex_t *mtx, const char *func, const char *msg) 310mutex_abort(const char *func, size_t line, kmutex_t *mtx, const char *msg)
311{ 311{
312 312
313 LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ? 313 LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
314 &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg); 314 &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
315} 315}
316 316
317/* 317/*
318 * mutex_init: 318 * mutex_init:
319 * 319 *
320 * Initialize a mutex for use. Note that adaptive mutexes are in 320 * Initialize a mutex for use. Note that adaptive mutexes are in
321 * essence spin mutexes that can sleep to avoid deadlock and wasting 321 * essence spin mutexes that can sleep to avoid deadlock and wasting
322 * CPU time. We can't easily provide a type of mutex that always 322 * CPU time. We can't easily provide a type of mutex that always
323 * sleeps - see comments in mutex_vector_enter() about releasing 323 * sleeps - see comments in mutex_vector_enter() about releasing
324 * mutexes unlocked. 324 * mutexes unlocked.
325 */ 325 */
326void 326void
327mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl) 327mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)

cvs diff -r1.45 -r1.46 src/sys/kern/kern_rwlock.c

--- src/sys/kern/kern_rwlock.c 2014/11/28 08:28:17 1.45
+++ src/sys/kern/kern_rwlock.c 2017/01/26 04:11:56 1.46
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_rwlock.c,v 1.45 2014/11/28 08:28:17 uebayasi Exp $ */ 1/* $NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe and Andrew Doran. 8 * by Jason R. Thorpe and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -28,27 +28,27 @@ @@ -28,27 +28,27 @@
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Kernel reader/writer lock implementation, modeled after those 33 * Kernel reader/writer lock implementation, modeled after those
34 * found in Solaris, a description of which can be found in: 34 * found in Solaris, a description of which can be found in:
35 * 35 *
36 * Solaris Internals: Core Kernel Architecture, Jim Mauro and 36 * Solaris Internals: Core Kernel Architecture, Jim Mauro and
37 * Richard McDougall. 37 * Richard McDougall.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.45 2014/11/28 08:28:17 uebayasi Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: kern_rwlock.c,v 1.46 2017/01/26 04:11:56 christos Exp $");
42 42
43#define __RWLOCK_PRIVATE 43#define __RWLOCK_PRIVATE
44 44
45#include <sys/param.h> 45#include <sys/param.h>
46#include <sys/proc.h> 46#include <sys/proc.h>
47#include <sys/rwlock.h> 47#include <sys/rwlock.h>
48#include <sys/sched.h> 48#include <sys/sched.h>
49#include <sys/sleepq.h> 49#include <sys/sleepq.h>
50#include <sys/systm.h> 50#include <sys/systm.h>
51#include <sys/lockdebug.h> 51#include <sys/lockdebug.h>
52#include <sys/cpu.h> 52#include <sys/cpu.h>
53#include <sys/atomic.h> 53#include <sys/atomic.h>
54#include <sys/lock.h> 54#include <sys/lock.h>
@@ -63,65 +63,65 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock. @@ -63,65 +63,65 @@ __KERNEL_RCSID(0, "$NetBSD: kern_rwlock.
63 63
64#define RW_WANTLOCK(rw, op) \ 64#define RW_WANTLOCK(rw, op) \
65 LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw), \ 65 LOCKDEBUG_WANTLOCK(RW_DEBUG_P(rw), (rw), \
66 (uintptr_t)__builtin_return_address(0), op == RW_READER); 66 (uintptr_t)__builtin_return_address(0), op == RW_READER);
67#define RW_LOCKED(rw, op) \ 67#define RW_LOCKED(rw, op) \
68 LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL, \ 68 LOCKDEBUG_LOCKED(RW_DEBUG_P(rw), (rw), NULL, \
69 (uintptr_t)__builtin_return_address(0), op == RW_READER); 69 (uintptr_t)__builtin_return_address(0), op == RW_READER);
70#define RW_UNLOCKED(rw, op) \ 70#define RW_UNLOCKED(rw, op) \
71 LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw), \ 71 LOCKDEBUG_UNLOCKED(RW_DEBUG_P(rw), (rw), \
72 (uintptr_t)__builtin_return_address(0), op == RW_READER); 72 (uintptr_t)__builtin_return_address(0), op == RW_READER);
73#define RW_DASSERT(rw, cond) \ 73#define RW_DASSERT(rw, cond) \
74do { \ 74do { \
75 if (!(cond)) \ 75 if (!(cond)) \
76 rw_abort(rw, __func__, "assertion failed: " #cond); \ 76 rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
77} while (/* CONSTCOND */ 0); 77} while (/* CONSTCOND */ 0);
78 78
79#else /* LOCKDEBUG */ 79#else /* LOCKDEBUG */
80 80
81#define RW_WANTLOCK(rw, op) /* nothing */ 81#define RW_WANTLOCK(rw, op) /* nothing */
82#define RW_LOCKED(rw, op) /* nothing */ 82#define RW_LOCKED(rw, op) /* nothing */
83#define RW_UNLOCKED(rw, op) /* nothing */ 83#define RW_UNLOCKED(rw, op) /* nothing */
84#define RW_DASSERT(rw, cond) /* nothing */ 84#define RW_DASSERT(rw, cond) /* nothing */
85 85
86#endif /* LOCKDEBUG */ 86#endif /* LOCKDEBUG */
87 87
88/* 88/*
89 * DIAGNOSTIC 89 * DIAGNOSTIC
90 */ 90 */
91 91
92#if defined(DIAGNOSTIC) 92#if defined(DIAGNOSTIC)
93 93
94#define RW_ASSERT(rw, cond) \ 94#define RW_ASSERT(rw, cond) \
95do { \ 95do { \
96 if (!(cond)) \ 96 if (!(cond)) \
97 rw_abort(rw, __func__, "assertion failed: " #cond); \ 97 rw_abort(__func__, __LINE__, rw, "assertion failed: " #cond);\
98} while (/* CONSTCOND */ 0) 98} while (/* CONSTCOND */ 0)
99 99
100#else 100#else
101 101
102#define RW_ASSERT(rw, cond) /* nothing */ 102#define RW_ASSERT(rw, cond) /* nothing */
103 103
104#endif /* DIAGNOSTIC */ 104#endif /* DIAGNOSTIC */
105 105
106#define RW_SETDEBUG(rw, on) ((rw)->rw_owner |= (on) ? 0 : RW_NODEBUG) 106#define RW_SETDEBUG(rw, on) ((rw)->rw_owner |= (on) ? 0 : RW_NODEBUG)
107#define RW_DEBUG_P(rw) (((rw)->rw_owner & RW_NODEBUG) == 0) 107#define RW_DEBUG_P(rw) (((rw)->rw_owner & RW_NODEBUG) == 0)
108#if defined(LOCKDEBUG) 108#if defined(LOCKDEBUG)
109#define RW_INHERITDEBUG(n, o) (n) |= (o) & RW_NODEBUG 109#define RW_INHERITDEBUG(n, o) (n) |= (o) & RW_NODEBUG
110#else /* defined(LOCKDEBUG) */ 110#else /* defined(LOCKDEBUG) */
111#define RW_INHERITDEBUG(n, o) /* nothing */ 111#define RW_INHERITDEBUG(n, o) /* nothing */
112#endif /* defined(LOCKDEBUG) */ 112#endif /* defined(LOCKDEBUG) */
113 113
114static void rw_abort(krwlock_t *, const char *, const char *); 114static void rw_abort(const char *, size_t, krwlock_t *, const char *);
115static void rw_dump(volatile void *); 115static void rw_dump(volatile void *);
116static lwp_t *rw_owner(wchan_t); 116static lwp_t *rw_owner(wchan_t);
117 117
118static inline uintptr_t 118static inline uintptr_t
119rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n) 119rw_cas(krwlock_t *rw, uintptr_t o, uintptr_t n)
120{ 120{
121 121
122 RW_INHERITDEBUG(n, o); 122 RW_INHERITDEBUG(n, o);
123 return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner, 123 return (uintptr_t)atomic_cas_ptr((volatile void *)&rw->rw_owner,
124 (void *)o, (void *)n); 124 (void *)o, (void *)n);
125} 125}
126 126
127static inline void 127static inline void
@@ -173,33 +173,33 @@ rw_dump(volatile void *cookie) @@ -173,33 +173,33 @@ rw_dump(volatile void *cookie)
173 173
174 printf_nolog("owner/count : %#018lx flags : %#018x\n", 174 printf_nolog("owner/count : %#018lx flags : %#018x\n",
175 (long)RW_OWNER(rw), (int)RW_FLAGS(rw)); 175 (long)RW_OWNER(rw), (int)RW_FLAGS(rw));
176} 176}
177 177
178/* 178/*
179 * rw_abort: 179 * rw_abort:
180 * 180 *
181 * Dump information about an error and panic the system. This 181 * Dump information about an error and panic the system. This
182 * generates a lot of machine code in the DIAGNOSTIC case, so 182 * generates a lot of machine code in the DIAGNOSTIC case, so
183 * we ask the compiler to not inline it. 183 * we ask the compiler to not inline it.
184 */ 184 */
185static void __noinline 185static void __noinline
186rw_abort(krwlock_t *rw, const char *func, const char *msg) 186rw_abort(const char *func, size_t line, krwlock_t *rw, const char *msg)
187{ 187{
188 188
189 if (panicstr != NULL) 189 if (panicstr != NULL)
190 return; 190 return;
191 191
192 LOCKDEBUG_ABORT(rw, &rwlock_lockops, func, msg); 192 LOCKDEBUG_ABORT(func, line, rw, &rwlock_lockops, msg);
193} 193}
194 194
195/* 195/*
196 * rw_init: 196 * rw_init:
197 * 197 *
198 * Initialize a rwlock for use. 198 * Initialize a rwlock for use.
199 */ 199 */
200void 200void
201rw_init(krwlock_t *rw) 201rw_init(krwlock_t *rw)
202{ 202{
203 bool dodebug; 203 bool dodebug;
204 204
205 memset(rw, 0, sizeof(*rw)); 205 memset(rw, 0, sizeof(*rw));
@@ -328,27 +328,28 @@ rw_vector_enter(krwlock_t *rw, const krw @@ -328,27 +328,28 @@ rw_vector_enter(krwlock_t *rw, const krw
328 328
329 /* 329 /*
330 * Didn't get it -- spin around again (we'll 330 * Didn't get it -- spin around again (we'll
331 * probably sleep on the next iteration). 331 * probably sleep on the next iteration).
332 */ 332 */
333 owner = next; 333 owner = next;
334 continue; 334 continue;
335 } 335 }
336 if (__predict_false(panicstr != NULL)) { 336 if (__predict_false(panicstr != NULL)) {
337 KPREEMPT_ENABLE(curlwp); 337 KPREEMPT_ENABLE(curlwp);
338 return; 338 return;
339 } 339 }
340 if (__predict_false(RW_OWNER(rw) == curthread)) { 340 if (__predict_false(RW_OWNER(rw) == curthread)) {
341 rw_abort(rw, __func__, "locking against myself"); 341 rw_abort(__func__, __LINE__, rw,
 342 "locking against myself");
342 } 343 }
343 /* 344 /*
344 * If the lock owner is running on another CPU, and 345 * If the lock owner is running on another CPU, and
345 * there are no existing waiters, then spin. 346 * there are no existing waiters, then spin.
346 */ 347 */
347 if (rw_oncpu(owner)) { 348 if (rw_oncpu(owner)) {
348 LOCKSTAT_START_TIMER(lsflag, spintime); 349 LOCKSTAT_START_TIMER(lsflag, spintime);
349 u_int count = SPINLOCK_BACKOFF_MIN; 350 u_int count = SPINLOCK_BACKOFF_MIN;
350 do { 351 do {
351 KPREEMPT_ENABLE(curlwp); 352 KPREEMPT_ENABLE(curlwp);
352 SPINLOCK_BACKOFF(count); 353 SPINLOCK_BACKOFF(count);
353 KPREEMPT_DISABLE(curlwp); 354 KPREEMPT_DISABLE(curlwp);
354 owner = rw->rw_owner; 355 owner = rw->rw_owner;

cvs diff -r1.54 -r1.55 src/sys/kern/subr_lockdebug.c

--- src/sys/kern/subr_lockdebug.c 2015/09/29 01:44:57 1.54
+++ src/sys/kern/subr_lockdebug.c 2017/01/26 04:11:56 1.55
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $ */ 1/* $NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -24,27 +24,27 @@ @@ -24,27 +24,27 @@
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Basic lock debugging code shared among lock primitives. 33 * Basic lock debugging code shared among lock primitives.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.54 2015/09/29 01:44:57 ozaki-r Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.55 2017/01/26 04:11:56 christos Exp $");
38 38
39#ifdef _KERNEL_OPT 39#ifdef _KERNEL_OPT
40#include "opt_ddb.h" 40#include "opt_ddb.h"
41#endif 41#endif
42 42
43#include <sys/param.h> 43#include <sys/param.h>
44#include <sys/proc.h> 44#include <sys/proc.h>
45#include <sys/systm.h> 45#include <sys/systm.h>
46#include <sys/kernel.h> 46#include <sys/kernel.h>
47#include <sys/kmem.h> 47#include <sys/kmem.h>
48#include <sys/lockdebug.h> 48#include <sys/lockdebug.h>
49#include <sys/sleepq.h> 49#include <sys/sleepq.h>
50#include <sys/cpu.h> 50#include <sys/cpu.h>
@@ -89,28 +89,28 @@ typedef struct lockdebug { @@ -89,28 +89,28 @@ typedef struct lockdebug {
89} volatile lockdebug_t; 89} volatile lockdebug_t;
90 90
91typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t; 91typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
92 92
93__cpu_simple_lock_t ld_mod_lk; 93__cpu_simple_lock_t ld_mod_lk;
94lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free); 94lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
95lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all); 95lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
96int ld_nfree; 96int ld_nfree;
97int ld_freeptr; 97int ld_freeptr;
98int ld_recurse; 98int ld_recurse;
99bool ld_nomore; 99bool ld_nomore;
100lockdebug_t ld_prime[LD_BATCH]; 100lockdebug_t ld_prime[LD_BATCH];
101 101
102static void lockdebug_abort1(lockdebug_t *, int, const char *, 102static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
103 const char *, bool); 103 const char *, bool);
104static int lockdebug_more(int); 104static int lockdebug_more(int);
105static void lockdebug_init(void); 105static void lockdebug_init(void);
106static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...) 106static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
107 __printflike(1, 2)); 107 __printflike(1, 2));
108 108
109static signed int 109static signed int
110ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2) 110ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
111{ 111{
112 const lockdebug_t *ld1 = n1; 112 const lockdebug_t *ld1 = n1;
113 const lockdebug_t *ld2 = n2; 113 const lockdebug_t *ld2 = n2;
114 const uintptr_t a = (uintptr_t)ld1->ld_lock; 114 const uintptr_t a = (uintptr_t)ld1->ld_lock;
115 const uintptr_t b = (uintptr_t)ld2->ld_lock; 115 const uintptr_t b = (uintptr_t)ld2->ld_lock;
116 116
@@ -180,34 +180,35 @@ lockdebug_unlock_cpus(void) @@ -180,34 +180,35 @@ lockdebug_unlock_cpus(void)
180 struct cpu_info *ci; 180 struct cpu_info *ci;
181 181
182 for (CPU_INFO_FOREACH(cii, ci)) { 182 for (CPU_INFO_FOREACH(cii, ci)) {
183 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); 183 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
184 } 184 }
185} 185}
186 186
187/* 187/*
188 * lockdebug_lookup: 188 * lockdebug_lookup:
189 * 189 *
190 * Find a lockdebug structure by a pointer to a lock and return it locked. 190 * Find a lockdebug structure by a pointer to a lock and return it locked.
191 */ 191 */
192static inline lockdebug_t * 192static inline lockdebug_t *
193lockdebug_lookup(volatile void *lock, uintptr_t where) 193lockdebug_lookup(const char *func, size_t line, volatile void *lock,
 194 uintptr_t where)
194{ 195{
195 lockdebug_t *ld; 196 lockdebug_t *ld;
196 197
197 ld = lockdebug_lookup1(lock); 198 ld = lockdebug_lookup1(lock);
198 if (ld == NULL) { 199 if (ld == NULL) {
199 panic("lockdebug_lookup: uninitialized lock " 200 panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
200 "(lock=%p, from=%08"PRIxPTR")", lock, where); 201 PRIxPTR ")", func, line, lock, where);
201 } 202 }
202 return ld; 203 return ld;
203} 204}
204 205
205/* 206/*
206 * lockdebug_init: 207 * lockdebug_init:
207 * 208 *
208 * Initialize the lockdebug system. Allocate an initial pool of 209 * Initialize the lockdebug system. Allocate an initial pool of
209 * lockdebug structures before the VM system is up and running. 210 * lockdebug structures before the VM system is up and running.
210 */ 211 */
211static void 212static void
212lockdebug_init(void) 213lockdebug_init(void)
213{ 214{
@@ -228,42 +229,44 @@ lockdebug_init(void) @@ -228,42 +229,44 @@ lockdebug_init(void)
228 TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain); 229 TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
229 } 230 }
230 ld_freeptr = 1; 231 ld_freeptr = 1;
231 ld_nfree = LD_BATCH - 1; 232 ld_nfree = LD_BATCH - 1;
232} 233}
233 234
234/* 235/*
235 * lockdebug_alloc: 236 * lockdebug_alloc:
236 * 237 *
237 * A lock is being initialized, so allocate an associated debug 238 * A lock is being initialized, so allocate an associated debug
238 * structure. 239 * structure.
239 */ 240 */
240bool 241bool
241lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr) 242lockdebug_alloc(const char *func, size_t line, volatile void *lock,
 243 lockops_t *lo, uintptr_t initaddr)
242{ 244{
243 struct cpu_info *ci; 245 struct cpu_info *ci;
244 lockdebug_t *ld; 246 lockdebug_t *ld;
245 int s; 247 int s;
246 248
247 if (lo == NULL || panicstr != NULL || ld_panic) 249 if (lo == NULL || panicstr != NULL || ld_panic)
248 return false; 250 return false;
249 if (ld_freeptr == 0) 251 if (ld_freeptr == 0)
250 lockdebug_init(); 252 lockdebug_init();
251 253
252 s = splhigh(); 254 s = splhigh();
253 __cpu_simple_lock(&ld_mod_lk); 255 __cpu_simple_lock(&ld_mod_lk);
254 if ((ld = lockdebug_lookup1(lock)) != NULL) { 256 if ((ld = lockdebug_lookup1(lock)) != NULL) {
255 __cpu_simple_unlock(&ld_mod_lk); 257 __cpu_simple_unlock(&ld_mod_lk);
256 lockdebug_abort1(ld, s, __func__, "already initialized", true); 258 lockdebug_abort1(func, line, ld, s, "already initialized",
 259 true);
257 return false; 260 return false;
258 } 261 }
259 262
260 /* 263 /*
261 * Pinch a new debug structure. We may recurse because we call 264 * Pinch a new debug structure. We may recurse because we call
262 * kmem_alloc(), which may need to initialize new locks somewhere 265 * kmem_alloc(), which may need to initialize new locks somewhere
263 * down the path. If not recursing, we try to maintain at least 266 * down the path. If not recursing, we try to maintain at least
264 * LD_SLOP structures free, which should hopefully be enough to 267 * LD_SLOP structures free, which should hopefully be enough to
265 * satisfy kmem_alloc(). If we can't provide a structure, not to 268 * satisfy kmem_alloc(). If we can't provide a structure, not to
266 * worry: we'll just mark the lock as not having an ID. 269 * worry: we'll just mark the lock as not having an ID.
267 */ 270 */
268 ci = curcpu(); 271 ci = curcpu();
269 ci->ci_lkdebug_recurse++; 272 ci->ci_lkdebug_recurse++;
@@ -278,72 +281,74 @@ lockdebug_alloc(volatile void *lock, loc @@ -278,72 +281,74 @@ lockdebug_alloc(volatile void *lock, loc
278 } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) { 281 } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
279 s = lockdebug_more(s); 282 s = lockdebug_more(s);
280 } 283 }
281 if ((ld = TAILQ_FIRST(&ld_free)) == NULL) { 284 if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
282 __cpu_simple_unlock(&ld_mod_lk); 285 __cpu_simple_unlock(&ld_mod_lk);
283 splx(s); 286 splx(s);
284 return false; 287 return false;
285 } 288 }
286 TAILQ_REMOVE(&ld_free, ld, ld_chain); 289 TAILQ_REMOVE(&ld_free, ld, ld_chain);
287 ld_nfree--; 290 ld_nfree--;
288 ci->ci_lkdebug_recurse--; 291 ci->ci_lkdebug_recurse--;
289 292
290 if (ld->ld_lock != NULL) { 293 if (ld->ld_lock != NULL) {
291 panic("lockdebug_alloc: corrupt table ld %p", ld); 294 panic("%s,%zu: corrupt table ld %p", func, line, ld);
292 } 295 }
293 296
294 /* Initialise the structure. */ 297 /* Initialise the structure. */
295 ld->ld_lock = lock; 298 ld->ld_lock = lock;
296 ld->ld_lockops = lo; 299 ld->ld_lockops = lo;
297 ld->ld_locked = 0; 300 ld->ld_locked = 0;
298 ld->ld_unlocked = 0; 301 ld->ld_unlocked = 0;
299 ld->ld_lwp = NULL; 302 ld->ld_lwp = NULL;
300 ld->ld_initaddr = initaddr; 303 ld->ld_initaddr = initaddr;
301 ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0); 304 ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
302 lockdebug_lock_cpus(); 305 lockdebug_lock_cpus();
303 (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld)); 306 (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
304 lockdebug_unlock_cpus(); 307 lockdebug_unlock_cpus();
305 __cpu_simple_unlock(&ld_mod_lk); 308 __cpu_simple_unlock(&ld_mod_lk);
306 309
307 splx(s); 310 splx(s);
308 return true; 311 return true;
309} 312}
310 313
311/* 314/*
312 * lockdebug_free: 315 * lockdebug_free:
313 * 316 *
314 * A lock is being destroyed, so release debugging resources. 317 * A lock is being destroyed, so release debugging resources.
315 */ 318 */
316void 319void
317lockdebug_free(volatile void *lock) 320lockdebug_free(const char *func, size_t line, volatile void *lock)
318{ 321{
319 lockdebug_t *ld; 322 lockdebug_t *ld;
320 int s; 323 int s;
321 324
322 if (panicstr != NULL || ld_panic) 325 if (panicstr != NULL || ld_panic)
323 return; 326 return;
324 327
325 s = splhigh(); 328 s = splhigh();
326 __cpu_simple_lock(&ld_mod_lk); 329 __cpu_simple_lock(&ld_mod_lk);
327 ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0)); 330 ld = lockdebug_lookup(func, line, lock,
 331 (uintptr_t) __builtin_return_address(0));
328 if (ld == NULL) { 332 if (ld == NULL) {
329 __cpu_simple_unlock(&ld_mod_lk); 333 __cpu_simple_unlock(&ld_mod_lk);
330 panic("lockdebug_free: destroying uninitialized object %p" 334 panic("%s,%zu: destroying uninitialized object %p"
331 "(ld_lock=%p)", lock, ld->ld_lock); 335 "(ld_lock=%p)", func, line, lock, ld->ld_lock);
332 return; 336 return;
333 } 337 }
334 if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) { 338 if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
335 __cpu_simple_unlock(&ld_mod_lk); 339 __cpu_simple_unlock(&ld_mod_lk);
336 lockdebug_abort1(ld, s, __func__, "is locked or in use", true); 340 lockdebug_abort1(func, line, ld, s, "is locked or in use",
 341 true);
337 return; 342 return;
338 } 343 }
339 lockdebug_lock_cpus(); 344 lockdebug_lock_cpus();
340 rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld)); 345 rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
341 lockdebug_unlock_cpus(); 346 lockdebug_unlock_cpus();
342 ld->ld_lock = NULL; 347 ld->ld_lock = NULL;
343 TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain); 348 TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
344 ld_nfree++; 349 ld_nfree++;
345 __cpu_simple_unlock(&ld->ld_spinlock); 350 __cpu_simple_unlock(&ld->ld_spinlock);
346 __cpu_simple_unlock(&ld_mod_lk); 351 __cpu_simple_unlock(&ld_mod_lk);
347 splx(s); 352 splx(s);
348} 353}
349 354
@@ -405,321 +410,327 @@ lockdebug_more(int s) @@ -405,321 +410,327 @@ lockdebug_more(int s)
405 410
406 membar_producer(); 411 membar_producer();
407 } 412 }
408 413
409 return s; 414 return s;
410} 415}
411 416
412/* 417/*
413 * lockdebug_wantlock: 418 * lockdebug_wantlock:
414 * 419 *
415 * Process the preamble to a lock acquire. 420 * Process the preamble to a lock acquire.
416 */ 421 */
417void 422void
418lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared) 423lockdebug_wantlock(const char *func, size_t line,
 424 volatile void *lock, uintptr_t where, int shared)
419{ 425{
420 struct lwp *l = curlwp; 426 struct lwp *l = curlwp;
421 lockdebug_t *ld; 427 lockdebug_t *ld;
422 bool recurse; 428 bool recurse;
423 int s; 429 int s;
424 430
425 (void)shared; 431 (void)shared;
426 recurse = false; 432 recurse = false;
427 433
428 if (panicstr != NULL || ld_panic) 434 if (panicstr != NULL || ld_panic)
429 return; 435 return;
430 436
431 s = splhigh(); 437 s = splhigh();
432 if ((ld = lockdebug_lookup(lock, where)) == NULL) { 438 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
433 splx(s); 439 splx(s);
434 return; 440 return;
435 } 441 }
436 if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) { 442 if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
437 if ((ld->ld_flags & LD_SLEEPER) != 0) { 443 if ((ld->ld_flags & LD_SLEEPER) != 0) {
438 if (ld->ld_lwp == l) 444 if (ld->ld_lwp == l)
439 recurse = true; 445 recurse = true;
440 } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) 446 } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
441 recurse = true; 447 recurse = true;
442 } 448 }
443 if (cpu_intr_p()) { 449 if (cpu_intr_p()) {
444 if ((ld->ld_flags & LD_SLEEPER) != 0) { 450 if ((ld->ld_flags & LD_SLEEPER) != 0) {
445 lockdebug_abort1(ld, s, __func__, 451 lockdebug_abort1(func, line, ld, s,
446 "acquiring sleep lock from interrupt context", 452 "acquiring sleep lock from interrupt context",
447 true); 453 true);
448 return; 454 return;
449 } 455 }
450 } 456 }
451 if (shared) 457 if (shared)
452 ld->ld_shwant++; 458 ld->ld_shwant++;
453 else 459 else
454 ld->ld_exwant++; 460 ld->ld_exwant++;
455 if (recurse) { 461 if (recurse) {
456 lockdebug_abort1(ld, s, __func__, "locking against myself", 462 lockdebug_abort1(func, line, ld, s, "locking against myself",
457 true); 463 true);
458 return; 464 return;
459 } 465 }
460 __cpu_simple_unlock(&ld->ld_spinlock); 466 __cpu_simple_unlock(&ld->ld_spinlock);
461 splx(s); 467 splx(s);
462} 468}
463 469
464/* 470/*
465 * lockdebug_locked: 471 * lockdebug_locked:
466 * 472 *
467 * Process a lock acquire operation. 473 * Process a lock acquire operation.
468 */ 474 */
469void 475void
470lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where, 476lockdebug_locked(const char *func, size_t line,
471 int shared) 477 volatile void *lock, void *cvlock, uintptr_t where, int shared)
472{ 478{
473 struct lwp *l = curlwp; 479 struct lwp *l = curlwp;
474 lockdebug_t *ld; 480 lockdebug_t *ld;
475 int s; 481 int s;
476 482
477 if (panicstr != NULL || ld_panic) 483 if (panicstr != NULL || ld_panic)
478 return; 484 return;
479 485
480 s = splhigh(); 486 s = splhigh();
481 if ((ld = lockdebug_lookup(lock, where)) == NULL) { 487 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
482 splx(s); 488 splx(s);
483 return; 489 return;
484 } 490 }
485 if (cvlock) { 491 if (cvlock) {
486 KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV); 492 KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
487 if (lock == (void *)&lbolt) { 493 if (lock == (void *)&lbolt) {
488 /* nothing */ 494 /* nothing */
489 } else if (ld->ld_shares++ == 0) { 495 } else if (ld->ld_shares++ == 0) {
490 ld->ld_locked = (uintptr_t)cvlock; 496 ld->ld_locked = (uintptr_t)cvlock;
491 } else if (cvlock != (void *)ld->ld_locked) { 497 } else if (cvlock != (void *)ld->ld_locked) {
492 lockdebug_abort1(ld, s, __func__, "multiple locks used" 498 lockdebug_abort1(func, line, ld, s,
493 " with condition variable", true); 499 "multiple locks used with condition variable",
 500 true);
494 return; 501 return;
495 } 502 }
496 } else if (shared) { 503 } else if (shared) {
497 l->l_shlocks++; 504 l->l_shlocks++;
498 ld->ld_locked = where; 505 ld->ld_locked = where;
499 ld->ld_shares++; 506 ld->ld_shares++;
500 ld->ld_shwant--; 507 ld->ld_shwant--;
501 } else { 508 } else {
502 if ((ld->ld_flags & LD_LOCKED) != 0) { 509 if ((ld->ld_flags & LD_LOCKED) != 0) {
503 lockdebug_abort1(ld, s, __func__, "already locked", 510 lockdebug_abort1(func, line, ld, s, "already locked",
504 true); 511 true);
505 return; 512 return;
506 } 513 }
507 ld->ld_flags |= LD_LOCKED; 514 ld->ld_flags |= LD_LOCKED;
508 ld->ld_locked = where; 515 ld->ld_locked = where;
509 ld->ld_exwant--; 516 ld->ld_exwant--;
510 if ((ld->ld_flags & LD_SLEEPER) != 0) { 517 if ((ld->ld_flags & LD_SLEEPER) != 0) {
511 TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain); 518 TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
512 } else { 519 } else {
513 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks, 520 TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
514 ld, ld_chain); 521 ld, ld_chain);
515 } 522 }
516 } 523 }
517 ld->ld_cpu = (uint16_t)cpu_index(curcpu()); 524 ld->ld_cpu = (uint16_t)cpu_index(curcpu());
518 ld->ld_lwp = l; 525 ld->ld_lwp = l;
519 __cpu_simple_unlock(&ld->ld_spinlock); 526 __cpu_simple_unlock(&ld->ld_spinlock);
520 splx(s); 527 splx(s);
521} 528}
522 529
523/* 530/*
524 * lockdebug_unlocked: 531 * lockdebug_unlocked:
525 * 532 *
526 * Process a lock release operation. 533 * Process a lock release operation.
527 */ 534 */
528void 535void
529lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared) 536lockdebug_unlocked(const char *func, size_t line,
 537 volatile void *lock, uintptr_t where, int shared)
530{ 538{
531 struct lwp *l = curlwp; 539 struct lwp *l = curlwp;
532 lockdebug_t *ld; 540 lockdebug_t *ld;
533 int s; 541 int s;
534 542
535 if (panicstr != NULL || ld_panic) 543 if (panicstr != NULL || ld_panic)
536 return; 544 return;
537 545
538 s = splhigh(); 546 s = splhigh();
539 if ((ld = lockdebug_lookup(lock, where)) == NULL) { 547 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
540 splx(s); 548 splx(s);
541 return; 549 return;
542 } 550 }
543 if (ld->ld_lockops->lo_type == LOCKOPS_CV) { 551 if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
544 if (lock == (void *)&lbolt) { 552 if (lock == (void *)&lbolt) {
545 /* nothing */ 553 /* nothing */
546 } else { 554 } else {
547 ld->ld_shares--; 555 ld->ld_shares--;
548 } 556 }
549 } else if (shared) { 557 } else if (shared) {
550 if (l->l_shlocks == 0) { 558 if (l->l_shlocks == 0) {
551 lockdebug_abort1(ld, s, __func__, 559 lockdebug_abort1(func, line, ld, s,
552 "no shared locks held by LWP", true); 560 "no shared locks held by LWP", true);
553 return; 561 return;
554 } 562 }
555 if (ld->ld_shares == 0) { 563 if (ld->ld_shares == 0) {
556 lockdebug_abort1(ld, s, __func__, 564 lockdebug_abort1(func, line, ld, s,
557 "no shared holds on this lock", true); 565 "no shared holds on this lock", true);
558 return; 566 return;
559 } 567 }
560 l->l_shlocks--; 568 l->l_shlocks--;
561 ld->ld_shares--; 569 ld->ld_shares--;
562 if (ld->ld_lwp == l) { 570 if (ld->ld_lwp == l) {
563 ld->ld_unlocked = where; 571 ld->ld_unlocked = where;
564 ld->ld_lwp = NULL; 572 ld->ld_lwp = NULL;
565 } 573 }
566 if (ld->ld_cpu == (uint16_t)cpu_index(curcpu())) 574 if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
567 ld->ld_cpu = (uint16_t)-1; 575 ld->ld_cpu = (uint16_t)-1;
568 } else { 576 } else {
569 if ((ld->ld_flags & LD_LOCKED) == 0) { 577 if ((ld->ld_flags & LD_LOCKED) == 0) {
570 lockdebug_abort1(ld, s, __func__, "not locked", true); 578 lockdebug_abort1(func, line, ld, s, "not locked", true);
571 return; 579 return;
572 } 580 }
573 581
574 if ((ld->ld_flags & LD_SLEEPER) != 0) { 582 if ((ld->ld_flags & LD_SLEEPER) != 0) {
575 if (ld->ld_lwp != curlwp) { 583 if (ld->ld_lwp != curlwp) {
576 lockdebug_abort1(ld, s, __func__, 584 lockdebug_abort1(func, line, ld, s,
577 "not held by current LWP", true); 585 "not held by current LWP", true);
578 return; 586 return;
579 } 587 }
580 TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain); 588 TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
581 } else { 589 } else {
582 if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) { 590 if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
583 lockdebug_abort1(ld, s, __func__, 591 lockdebug_abort1(func, line, ld, s,
584 "not held by current CPU", true); 592 "not held by current CPU", true);
585 return; 593 return;
586 } 594 }
587 TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld, 595 TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
588 ld_chain); 596 ld_chain);
589 } 597 }
590 ld->ld_flags &= ~LD_LOCKED; 598 ld->ld_flags &= ~LD_LOCKED;
591 ld->ld_unlocked = where;  599 ld->ld_unlocked = where;
592 ld->ld_lwp = NULL; 600 ld->ld_lwp = NULL;
593 } 601 }
594 __cpu_simple_unlock(&ld->ld_spinlock); 602 __cpu_simple_unlock(&ld->ld_spinlock);
595 splx(s); 603 splx(s);
596} 604}
597 605
598/* 606/*
599 * lockdebug_wakeup: 607 * lockdebug_wakeup:
600 * 608 *
601 * Process a wakeup on a condition variable. 609 * Process a wakeup on a condition variable.
602 */ 610 */
603void 611void
604lockdebug_wakeup(volatile void *lock, uintptr_t where) 612lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
 613 uintptr_t where)
605{ 614{
606 lockdebug_t *ld; 615 lockdebug_t *ld;
607 int s; 616 int s;
608 617
609 if (panicstr != NULL || ld_panic || lock == (void *)&lbolt) 618 if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
610 return; 619 return;
611 620
612 s = splhigh(); 621 s = splhigh();
613 /* Find the CV... */ 622 /* Find the CV... */
614 if ((ld = lockdebug_lookup(lock, where)) == NULL) { 623 if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
615 splx(s); 624 splx(s);
616 return; 625 return;
617 } 626 }
618 /* 627 /*
619 * If it has any waiters, ensure that they are using the 628 * If it has any waiters, ensure that they are using the
620 * same interlock. 629 * same interlock.
621 */ 630 */
622 if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) { 631 if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
623 lockdebug_abort1(ld, s, __func__, "interlocking mutex not " 632 lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
624 "held during wakeup", true); 633 "held during wakeup", true);
625 return; 634 return;
626 } 635 }
627 __cpu_simple_unlock(&ld->ld_spinlock); 636 __cpu_simple_unlock(&ld->ld_spinlock);
628 splx(s); 637 splx(s);
629} 638}
630 639
631/* 640/*
632 * lockdebug_barrier: 641 * lockdebug_barrier:
633 *  642 *
634 * Panic if we hold more than one specified spin lock, and optionally, 643 * Panic if we hold more than one specified spin lock, and optionally,
635 * if we hold sleep locks. 644 * if we hold sleep locks.
636 */ 645 */
637void 646void
638lockdebug_barrier(volatile void *spinlock, int slplocks) 647lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
 648 int slplocks)
639{ 649{
640 struct lwp *l = curlwp; 650 struct lwp *l = curlwp;
641 lockdebug_t *ld; 651 lockdebug_t *ld;
642 int s; 652 int s;
643 653
644 if (panicstr != NULL || ld_panic) 654 if (panicstr != NULL || ld_panic)
645 return; 655 return;
646 656
647 s = splhigh(); 657 s = splhigh();
648 if ((l->l_pflag & LP_INTR) == 0) { 658 if ((l->l_pflag & LP_INTR) == 0) {
649 TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) { 659 TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
650 if (ld->ld_lock == spinlock) { 660 if (ld->ld_lock == spinlock) {
651 continue; 661 continue;
652 } 662 }
653 __cpu_simple_lock(&ld->ld_spinlock); 663 __cpu_simple_lock(&ld->ld_spinlock);
654 lockdebug_abort1(ld, s, __func__, 664 lockdebug_abort1(func, line, ld, s,
655 "spin lock held", true); 665 "spin lock held", true);
656 return; 666 return;
657 } 667 }
658 } 668 }
659 if (slplocks) { 669 if (slplocks) {
660 splx(s); 670 splx(s);
661 return; 671 return;
662 } 672 }
663 if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) { 673 if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
664 __cpu_simple_lock(&ld->ld_spinlock); 674 __cpu_simple_lock(&ld->ld_spinlock);
665 lockdebug_abort1(ld, s, __func__, "sleep lock held", true); 675 lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
666 return; 676 return;
667 } 677 }
668 splx(s); 678 splx(s);
669 if (l->l_shlocks != 0) { 679 if (l->l_shlocks != 0) {
670 TAILQ_FOREACH(ld, &ld_all, ld_achain) { 680 TAILQ_FOREACH(ld, &ld_all, ld_achain) {
671 if (ld->ld_lockops->lo_type == LOCKOPS_CV) 681 if (ld->ld_lockops->lo_type == LOCKOPS_CV)
672 continue; 682 continue;
673 if (ld->ld_lwp == l) 683 if (ld->ld_lwp == l)
674 lockdebug_dump(ld, printf); 684 lockdebug_dump(ld, printf);
675 } 685 }
676 panic("%s: holding %d shared locks", __func__, l->l_shlocks); 686 panic("%s,%zu: holding %d shared locks", func, line,
 687 l->l_shlocks);
677 } 688 }
678} 689}
679 690
680/* 691/*
681 * lockdebug_mem_check: 692 * lockdebug_mem_check:
682 * 693 *
683 * Check for in-use locks within a memory region that is 694 * Check for in-use locks within a memory region that is
684 * being freed. 695 * being freed.
685 */ 696 */
686void 697void
687lockdebug_mem_check(const char *func, void *base, size_t sz) 698lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
688{ 699{
689 lockdebug_t *ld; 700 lockdebug_t *ld;
690 struct cpu_info *ci; 701 struct cpu_info *ci;
691 int s; 702 int s;
692 703
693 if (panicstr != NULL || ld_panic) 704 if (panicstr != NULL || ld_panic)
694 return; 705 return;
695 706
696 s = splhigh(); 707 s = splhigh();
697 ci = curcpu(); 708 ci = curcpu();
698 __cpu_simple_lock(&ci->ci_data.cpu_ld_lock); 709 __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
699 ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base); 710 ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
700 if (ld != NULL) { 711 if (ld != NULL) {
701 const uintptr_t lock = (uintptr_t)ld->ld_lock; 712 const uintptr_t lock = (uintptr_t)ld->ld_lock;
702 713
703 if ((uintptr_t)base > lock) 714 if ((uintptr_t)base > lock)
704 panic("%s: corrupt tree ld=%p, base=%p, sz=%zu", 715 panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
705 __func__, ld, base, sz); 716 func, line, ld, base, sz);
706 if (lock >= (uintptr_t)base + sz) 717 if (lock >= (uintptr_t)base + sz)
707 ld = NULL; 718 ld = NULL;
708 } 719 }
709 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock); 720 __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
710 if (ld != NULL) { 721 if (ld != NULL) {
711 __cpu_simple_lock(&ld->ld_spinlock); 722 __cpu_simple_lock(&ld->ld_spinlock);
712 lockdebug_abort1(ld, s, func, 723 lockdebug_abort1(func, line, ld, s,
713 "allocation contains active lock", !cold); 724 "allocation contains active lock", !cold);
714 return; 725 return;
715 } 726 }
716 splx(s); 727 splx(s);
717} 728}
718 729
719/* 730/*
720 * lockdebug_dump: 731 * lockdebug_dump:
721 * 732 *
722 * Dump information about a lock on panic, or for DDB. 733 * Dump information about a lock on panic, or for DDB.
723 */ 734 */
724static void 735static void
725lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...) 736lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
@@ -757,50 +768,50 @@ lockdebug_dump(lockdebug_t *ld, void (*p @@ -757,50 +768,50 @@ lockdebug_dump(lockdebug_t *ld, void (*p
757 768
758 if (sleeper) { 769 if (sleeper) {
759 (*pr)("\n"); 770 (*pr)("\n");
760 turnstile_print(ld->ld_lock, pr); 771 turnstile_print(ld->ld_lock, pr);
761 } 772 }
762} 773}
763 774
764/* 775/*
765 * lockdebug_abort1: 776 * lockdebug_abort1:
766 * 777 *
767 * An error has been trapped - dump lock info and panic. 778 * An error has been trapped - dump lock info and panic.
768 */ 779 */
769static void 780static void
770lockdebug_abort1(lockdebug_t *ld, int s, const char *func, 781lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
771 const char *msg, bool dopanic) 782 const char *msg, bool dopanic)
772{ 783{
773 784
774 /* 785 /*
775 * Don't make the situation worse if the system is already going 786 * Don't make the situation worse if the system is already going
776 * down in flames. Once a panic is triggered, lockdebug state 787 * down in flames. Once a panic is triggered, lockdebug state
777 * becomes stale and cannot be trusted. 788 * becomes stale and cannot be trusted.
778 */ 789 */
779 if (atomic_inc_uint_nv(&ld_panic) != 1) { 790 if (atomic_inc_uint_nv(&ld_panic) != 1) {
780 __cpu_simple_unlock(&ld->ld_spinlock); 791 __cpu_simple_unlock(&ld->ld_spinlock);
781 splx(s); 792 splx(s);
782 return; 793 return;
783 } 794 }
784 795
785 printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name, 796 printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
786 func, msg); 797 func, line, msg);
787 lockdebug_dump(ld, printf_nolog); 798 lockdebug_dump(ld, printf_nolog);
788 __cpu_simple_unlock(&ld->ld_spinlock); 799 __cpu_simple_unlock(&ld->ld_spinlock);
789 splx(s); 800 splx(s);
790 printf_nolog("\n"); 801 printf_nolog("\n");
791 if (dopanic) 802 if (dopanic)
792 panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name, 803 panic("LOCKDEBUG: %s error: %s,%zu: %s",
793 func, msg); 804 ld->ld_lockops->lo_name, func, line, msg);
794} 805}
795 806
796#endif /* LOCKDEBUG */ 807#endif /* LOCKDEBUG */
797 808
798/* 809/*
799 * lockdebug_lock_print: 810 * lockdebug_lock_print:
800 * 811 *
801 * Handle the DDB 'show lock' command. 812 * Handle the DDB 'show lock' command.
802 */ 813 */
803#ifdef DDB 814#ifdef DDB
804void 815void
805lockdebug_lock_print(void *addr, void (*pr)(const char *, ...)) 816lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
806{ 817{
@@ -822,48 +833,48 @@ lockdebug_lock_print(void *addr, void (* @@ -822,48 +833,48 @@ lockdebug_lock_print(void *addr, void (*
822 } 833 }
823#else 834#else
824 (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n"); 835 (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
825#endif /* LOCKDEBUG */ 836#endif /* LOCKDEBUG */
826} 837}
827#endif /* DDB */ 838#endif /* DDB */
828 839
829/* 840/*
830 * lockdebug_abort: 841 * lockdebug_abort:
831 * 842 *
832 * An error has been trapped - dump lock info and call panic(). 843 * An error has been trapped - dump lock info and call panic().
833 */ 844 */
834void 845void
835lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func, 846lockdebug_abort(const char *func, size_t line, volatile void *lock,
836 const char *msg) 847 lockops_t *ops, const char *msg)
837{ 848{
838#ifdef LOCKDEBUG 849#ifdef LOCKDEBUG
839 lockdebug_t *ld; 850 lockdebug_t *ld;
840 int s; 851 int s;
841 852
842 s = splhigh(); 853 s = splhigh();
843 if ((ld = lockdebug_lookup(lock,  854 if ((ld = lockdebug_lookup(func, line, lock,
844 (uintptr_t) __builtin_return_address(0))) != NULL) { 855 (uintptr_t) __builtin_return_address(0))) != NULL) {
845 lockdebug_abort1(ld, s, func, msg, true); 856 lockdebug_abort1(func, line, ld, s, msg, true);
846 return; 857 return;
847 } 858 }
848 splx(s); 859 splx(s);
849#endif /* LOCKDEBUG */ 860#endif /* LOCKDEBUG */
850 861
851 /* 862 /*
852 * Complain first on the occurrance only. Otherwise proceeed to 863 * Complain first on the occurrance only. Otherwise proceeed to
853 * panic where we will `rendezvous' with other CPUs if the machine 864 * panic where we will `rendezvous' with other CPUs if the machine
854 * is going down in flames. 865 * is going down in flames.
855 */ 866 */
856 if (atomic_inc_uint_nv(&ld_panic) == 1) { 867 if (atomic_inc_uint_nv(&ld_panic) == 1) {
857 printf_nolog("%s error: %s: %s\n\n" 868 printf_nolog("%s error: %s,%zu: %s\n\n"
858 "lock address : %#018lx\n" 869 "lock address : %#018lx\n"
859 "current cpu : %18d\n" 870 "current cpu : %18d\n"
860 "current lwp : %#018lx\n", 871 "current lwp : %#018lx\n",
861 ops->lo_name, func, msg, (long)lock, 872 ops->lo_name, func, line, msg, (long)lock,
862 (int)cpu_index(curcpu()), (long)curlwp); 873 (int)cpu_index(curcpu()), (long)curlwp);
863 (*ops->lo_dump)(lock); 874 (*ops->lo_dump)(lock);
864 printf_nolog("\n"); 875 printf_nolog("\n");
865 } 876 }
866 877
867 panic("lock error: %s: %s: %s: lock %p cpu %d lwp %p", 878 panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
868 ops->lo_name, func, msg, lock, cpu_index(curcpu()), curlwp); 879 ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
869} 880}

cvs diff -r1.14 -r1.15 src/sys/sys/lockdebug.h

--- src/sys/sys/lockdebug.h 2013/04/27 08:12:34 1.14
+++ src/sys/sys/lockdebug.h 2017/01/26 04:11:56 1.15
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: lockdebug.h,v 1.14 2013/04/27 08:12:34 mlelstv Exp $ */ 1/* $NetBSD: lockdebug.h,v 1.15 2017/01/26 04:11:56 christos Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -40,59 +40,66 @@ @@ -40,59 +40,66 @@
40#error "Sorry, nothing of interest to user level programs here." 40#error "Sorry, nothing of interest to user level programs here."
41#endif 41#endif
42 42
43#define LOCKOPS_SLEEP 0 43#define LOCKOPS_SLEEP 0
44#define LOCKOPS_SPIN 1 44#define LOCKOPS_SPIN 1
45#define LOCKOPS_CV 2 45#define LOCKOPS_CV 2
46 46
47typedef struct lockops { 47typedef struct lockops {
48 const char *lo_name; 48 const char *lo_name;
49 int lo_type; 49 int lo_type;
50 void (*lo_dump)(volatile void *); 50 void (*lo_dump)(volatile void *);
51} lockops_t; 51} lockops_t;
52 52
53#define LOCKDEBUG_ABORT(l, o, f, m) lockdebug_abort(l, o, f, m) 53#define LOCKDEBUG_ABORT(f, ln, l, o, m) \
 54 lockdebug_abort(f, ln, l, o, m)
54 55
55void lockdebug_abort(volatile void *, lockops_t *, 56void lockdebug_abort(const char *, size_t, volatile void *, lockops_t *,
56 const char *, const char *); 57 const char *);
57 58
58void lockdebug_lock_print(void *, void (*)(const char *, ...) 59void lockdebug_lock_print(void *, void (*)(const char *, ...)
59 __printflike(1, 2)); 60 __printflike(1, 2));
60 61
61#ifdef LOCKDEBUG 62#ifdef LOCKDEBUG
62 63
63bool lockdebug_alloc(volatile void *, lockops_t *, uintptr_t); 64bool lockdebug_alloc(const char *, size_t, volatile void *, lockops_t *,
64void lockdebug_free(volatile void *); 65 uintptr_t);
65void lockdebug_wantlock(volatile void *, uintptr_t, int); 66void lockdebug_free(const char *, size_t, volatile void *);
66void lockdebug_locked(volatile void *, void *, uintptr_t, int); 67void lockdebug_wantlock(const char *, size_t, volatile void *, uintptr_t,
67void lockdebug_unlocked(volatile void *, uintptr_t, int); 68 int);
68void lockdebug_barrier(volatile void *, int); 69void lockdebug_locked(const char *, size_t, volatile void *, void *,
69void lockdebug_mem_check(const char *, void *, size_t); 70 uintptr_t, int);
70void lockdebug_wakeup(volatile void *, uintptr_t); 71void lockdebug_unlocked(const char *, size_t, volatile void *,
 72 uintptr_t, int);
 73void lockdebug_barrier(const char *, size_t, volatile void *, int);
 74void lockdebug_mem_check(const char *, size_t, void *, size_t);
 75void lockdebug_wakeup(const char *, size_t, volatile void *, uintptr_t);
71 76
72#define LOCKDEBUG_ALLOC(lock, ops, addr) lockdebug_alloc(lock, ops, addr) 77#define LOCKDEBUG_ALLOC(lock, ops, addr) \
 78 lockdebug_alloc(__func__, __LINE__, lock, ops, addr)
73#define LOCKDEBUG_FREE(dodebug, lock) \ 79#define LOCKDEBUG_FREE(dodebug, lock) \
74 if (dodebug) lockdebug_free(lock) 80 if (dodebug) lockdebug_free(__func__, __LINE__, lock)
75#define LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) \ 81#define LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) \
76 if (dodebug) lockdebug_wantlock(lock, where, s) 82 if (dodebug) lockdebug_wantlock(__func__, __LINE__, lock, where, s)
77#define LOCKDEBUG_LOCKED(dodebug, lock, al, where, s) \ 83#define LOCKDEBUG_LOCKED(dodebug, lock, al, where, s) \
78 if (dodebug) lockdebug_locked(lock, al, where, s) 84 if (dodebug) lockdebug_locked(__func__, __LINE__, lock, al, where, s)
79#define LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) \ 85#define LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) \
80 if (dodebug) lockdebug_unlocked(lock, where, s) 86 if (dodebug) lockdebug_unlocked(__func__, __LINE__, lock, where, s)
81#define LOCKDEBUG_BARRIER(lock, slp) lockdebug_barrier(lock, slp) 87#define LOCKDEBUG_BARRIER(lock, slp) \
 88 lockdebug_barrier(__func__, __LINE__, lock, slp)
82#define LOCKDEBUG_MEM_CHECK(base, sz) \ 89#define LOCKDEBUG_MEM_CHECK(base, sz) \
83 lockdebug_mem_check(__func__, base, sz) 90 lockdebug_mem_check(__func__, __LINE__, base, sz)
84#define LOCKDEBUG_WAKEUP(dodebug, lock, where) \ 91#define LOCKDEBUG_WAKEUP(dodebug, lock, where) \
85 if (dodebug) lockdebug_wakeup(lock, where) 92 if (dodebug) lockdebug_wakeup(__func__, __LINE__, lock, where)
86 93
87#else /* LOCKDEBUG */ 94#else /* LOCKDEBUG */
88 95
89#define LOCKDEBUG_ALLOC(lock, ops, addr) false 96#define LOCKDEBUG_ALLOC(lock, ops, addr) false
90#define LOCKDEBUG_FREE(dodebug, lock) /* nothing */ 97#define LOCKDEBUG_FREE(dodebug, lock) /* nothing */
91#define LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) /* nothing */ 98#define LOCKDEBUG_WANTLOCK(dodebug, lock, where, s) /* nothing */
92#define LOCKDEBUG_LOCKED(dodebug, lock, al, where, s) /* nothing */ 99#define LOCKDEBUG_LOCKED(dodebug, lock, al, where, s) /* nothing */
93#define LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) /* nothing */ 100#define LOCKDEBUG_UNLOCKED(dodebug, lock, where, s) /* nothing */
94#define LOCKDEBUG_BARRIER(lock, slp) /* nothing */ 101#define LOCKDEBUG_BARRIER(lock, slp) /* nothing */
95#define LOCKDEBUG_MEM_CHECK(base, sz) /* nothing */ 102#define LOCKDEBUG_MEM_CHECK(base, sz) /* nothing */
96#define LOCKDEBUG_WAKEUP(dodebug, lock, where) /* nothing */ 103#define LOCKDEBUG_WAKEUP(dodebug, lock, where) /* nothing */
97 104
98#endif /* LOCKDEBUG */ 105#endif /* LOCKDEBUG */
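
The lockdebug.h change keeps the callers' view unchanged: the LOCKDEBUG_* macros still take the same arguments, only their bodies grew the __func__/__LINE__ pair, and the non-LOCKDEBUG variants stay empty so a non-debug kernel pays nothing. A simplified, compileable sketch of that shape, assuming a hypothetical lock_debug_free() stand-in — the real prototypes are the ones in the diff above:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the debug hook; not the kernel API. */
	static void
	lock_debug_free(const char *func, size_t line, volatile void *lock)
	{
		printf("%s,%zu: lock %p destroyed\n", func, line, (void *)lock);
	}

	#ifdef LOCKDEBUG
	/* Debug kernels record which function and line destroyed the lock. */
	#define	LOCK_DEBUG_FREE(dodebug, lock) \
		do { if (dodebug) lock_debug_free(__func__, __LINE__, lock); } while (0)
	#else
	/* Non-debug kernels compile the probe away entirely. */
	#define	LOCK_DEBUG_FREE(dodebug, lock)	/* nothing */
	#endif

	int
	main(void)
	{
		int mylock = 0;

		LOCK_DEBUG_FREE(1, &mylock);	/* reports "main,NN: ..." under LOCKDEBUG */
		return 0;
	}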