Mon Jan 27 21:05:43 2020 UTC
Add a kernel_lock_plug_leak() that drops any holds and tries to identify
the baddy.


(ad)
diff -r1.167 -r1.168 src/sys/kern/kern_lock.c
diff -r1.87 -r1.88 src/sys/sys/lock.h
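
For context, kernel_lock_plug_leak() is intended to be called at a point where
the current LWP must no longer hold the big lock: it releases any leaked holds
and, on a DIAGNOSTIC kernel without LOCKDEBUG, reports where the lock was last
wanted.  The call site below is only an illustrative sketch; this commit adds
the function itself, and the caller shown here is hypothetical, not part of
the change.

#include <sys/lock.h>

/*
 * Hypothetical caller, for illustration only.  A teardown path that
 * must not finish with kernel_lock still held could plug (and report)
 * any leaked holds like this.
 */
static void
example_teardown(void)
{

	/* ... work that may have taken and leaked kernel_lock ... */

	/*
	 * Release any leaked holds.  With DIAGNOSTIC and without
	 * LOCKDEBUG this also prints the symbol closest to the return
	 * address recorded when the lock was last wanted.
	 */
	kernel_lock_plug_leak();
}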

cvs diff -r1.167 -r1.168 src/sys/kern/kern_lock.c

--- src/sys/kern/kern_lock.c 2020/01/24 20:05:15 1.167
+++ src/sys/kern/kern_lock.c 2020/01/27 21:05:43 1.168
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_lock.c,v 1.167 2020/01/24 20:05:15 ad Exp $	*/
+/*	$NetBSD: kern_lock.c,v 1.168 2020/01/27 21:05:43 ad Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -21,40 +21,48 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.167 2020/01/24 20:05:15 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.168 2020/01/27 21:05:43 ad Exp $");
+
+#ifdef _KERNEL_OPT
+#include "opt_lockdebug.h"
+#endif
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/lock.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
 #include <sys/syslog.h>
 #include <sys/atomic.h>
 #include <sys/lwp.h>
 #include <sys/pserialize.h>
 
+#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
+#include <sys/ksyms.h>
+#endif
+
 #include <machine/lock.h>
 
 #include <dev/lockstat.h>
 
 #define	RETURN_ADDRESS	(uintptr_t)__builtin_return_address(0)
 
 bool	kernel_lock_dodebug;
 
 __cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
     __cacheline_aligned;
 
 void
 assert_sleepable(void)
@@ -205,26 +213,29 @@ _kernel_lock(int nlocks)
 	 * and kernel_lock we must make it appear as if this thread is
 	 * blocking.  For non-interlocked mutex release, a store fence
 	 * is required to ensure that the result of any mutex_exit()
 	 * by the current LWP becomes visible on the bus before the set
 	 * of ci->ci_biglock_wanted becomes visible.
 	 *
 	 * However, we won't set ci_biglock_wanted until we've spun for
 	 * a bit, as we don't want to make any lock waiters in rw_oncpu()
 	 * or mutex_oncpu() block prematurely.
 	 */
 	membar_producer();
 	owant = ci->ci_biglock_wanted;
 	ci->ci_biglock_wanted = l;
+#if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
+	l->l_ld_wanted = __builtin_return_address(0);
+#endif
 
 	/*
 	 * Spin until we acquire the lock.  Once we have it, record the
 	 * time spent with lockstat.
 	 */
 	LOCKSTAT_ENTER(lsflag);
 	LOCKSTAT_START_TIMER(lsflag, spintime);
 
 	do {
 		splx(s);
 		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
 #ifdef LOCKDEBUG
 			if (SPINLOCK_SPINOUT(spins)) {
@@ -320,13 +331,33 @@ _kernel_unlock(int nlocks, int *countp)
 		l->l_blcnt -= nlocks;
 		splx(s);
 	}
 
 	if (countp != NULL)
 		*countp = olocks;
 }
 
 bool
 _kernel_locked_p(void)
 {
 	return __SIMPLELOCK_LOCKED_P(kernel_lock);
 }
+
+void
+kernel_lock_plug_leak(void)
+{
+#ifndef LOCKDEBUG
+# ifdef DIAGNOSTIC
+	int biglocks = 0;
+	KERNEL_UNLOCK_ALL(curlwp, &biglocks);
+	if (biglocks != 0) {
+		const char *sym = "(unknown)";
+		ksyms_getname(NULL, &sym, (vaddr_t)curlwp->l_ld_wanted,
+		    KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
+		printf("kernel_lock leak detected. last acquired: %s / %p\n",
+		    sym, curlwp->l_ld_wanted);
+	}
+# else
+	KERNEL_UNLOCK_ALL(curlwp, NULL);
+# endif
+#endif
+}
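
A side note on the KERNEL_UNLOCK_ALL() call used above: its second argument
normally returns the number of holds released so that a caller can re-take
the same count later; kernel_lock_plug_leak() instead treats a non-zero count
as evidence of a leak.  A minimal sketch of the conventional save-and-restore
use, assuming an ordinary code path that has to block without the big lock:

#include <sys/systm.h>
#include <sys/lwp.h>

static void
example_sleep_without_biglock(void)
{
	int nlocks;

	/* Drop every kernel_lock hold and remember how many there were. */
	KERNEL_UNLOCK_ALL(curlwp, &nlocks);

	/* ... block, or call code that must not run with the big lock ... */

	/* Re-acquire exactly the holds that were dropped. */
	KERNEL_LOCK(nlocks, curlwp);
}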

cvs diff -r1.87 -r1.88 src/sys/sys/lock.h

--- src/sys/sys/lock.h 2018/07/10 19:55:05 1.87
+++ src/sys/sys/lock.h 2020/01/27 21:05:43 1.88
@@ -1,14 +1,14 @@
-/*	$NetBSD: lock.h,v 1.87 2018/07/10 19:55:05 maya Exp $	*/
+/*	$NetBSD: lock.h,v 1.88 2020/01/27 21:05:43 ad Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Ross Harvey, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -99,16 +99,18 @@ do { \
 	}							\
 	if ((count) < SPINLOCK_BACKOFF_MAX)			\
 		(count) += (count);				\
 } while (/* CONSTCOND */ 0);
 
 #ifdef LOCKDEBUG
 #define	SPINLOCK_SPINOUT(spins)		((spins)++ > 0x0fffffff)
 #else
 #define	SPINLOCK_SPINOUT(spins)		((void)(spins), 0)
 #endif
 
 extern __cpu_simple_lock_t kernel_lock[];
 
+void	kernel_lock_plug_leak(void);
+
 #endif /* _KERNEL */
 
 #endif	/* _SYS_LOCK_H_ */