Wed Jul 14 02:18:10 2021 UTC
Disable the mutex_spin_{enter,exit}() fast-path stubs while I investigate
an odd IPL-related panic that seems to be related to them (one that I can
fairly reliably reproduce on an LCA45).


(thorpej)
diff -r1.8 -r1.9 src/sys/arch/alpha/alpha/lock_stubs.s
diff -r1.9 -r1.10 src/sys/arch/alpha/include/mutex.h

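For context: the stubs being disabled are the assembly fast path for the IPL
bookkeeping that the MI spin-mutex code performs on entry. The comment inside
mutex_spin_enter (visible in the diff below) paraphrases that step, and a rough
C sketch of it follows. This is a sketch only: the helper name is invented here,
and the splraiseipl()/curcpu() spellings are the usual MI ones, not text copied
from the MUTEX_SPIN_SPLRAISE() macro in sys/kern/kern_mutex.c.

/*
 * Sketch of the "STEP 1" bookkeeping described in lock_stubs.s
 * (kernel context; headers omitted).  See MUTEX_SPIN_SPLRAISE()
 * in sys/kern/kern_mutex.c for the real thing.
 */
static inline void
mutex_spin_splraise_sketch(kmutex_t *mtx)	/* hypothetical helper */
{
	struct cpu_info *ci;
	int s;

	s = splraiseipl(mtx->mtx_ipl);	/* "s = splraise(mtx->mtx_ipl)" */
	ci = curcpu();
	if (ci->ci_mtx_count-- == 0)	/* outermost spin mutex on this CPU? */
		ci->ci_mtx_oldspl = s;	/* remember the IPL to restore on exit */
}
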
cvs diff -r1.8 -r1.9 src/sys/arch/alpha/alpha/lock_stubs.s

--- src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/13 13:58:30 1.8
+++ src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/14 02:18:10 1.9
@@ -1,14 +1,14 @@
-/* $NetBSD: lock_stubs.s,v 1.8 2021/07/13 13:58:30 thorpej Exp $ */
+/* $NetBSD: lock_stubs.s,v 1.9 2021/07/14 02:18:10 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2007, 2021 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran, and by Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -24,27 +24,27 @@
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 
 #include <machine/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.8 2021/07/13 13:58:30 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.9 2021/07/14 02:18:10 thorpej Exp $");
 
 #include "assym.h"
 
 #if defined(MULTIPROCESSOR)
 /*
  * These 'unop' insns will be patched with 'mb' insns at run-time if
  * the system has more than one processor.
  */
 #define MB(label)	label: unop
 #else
 #define MB(label)	/* nothing */
 #endif
 
@@ -104,26 +104,27 @@ LEAF(mutex_exit, 1)
 	ldq_l	t2, 0(a0)
 	cmpeq	v0, t2, t2
 	beq	t2, 2f
 	stq_c	t3, 0(a0)
 	beq	t3, 3f
 	RET
 2:
 	lda	t12, mutex_vector_exit
 	jmp	(t12)
 3:
 	br	1b
 	END(mutex_exit)
 
+#if 0	/* XXX disabled for now XXX */
 /*
  * void mutex_spin_enter(kmutex_t *mtx);
  */
 LEAF(mutex_spin_enter, 1);
 	LDGP(pv)
 
 	/*
 	 * STEP 1: Perform the MUTEX_SPIN_SPLRAISE() function.
 	 * (see sys/kern/kern_mutex.c)
 	 *
 	 * s = splraise(mtx->mtx_ipl);
 	 * if (curcpu->ci_mtx_count-- == 0)
 	 *	curcpu->ci_mtx_oldspl = s;
@@ -217,26 +218,27 @@ LEAF(mutex_spin_exit, 1)
 	ldl	a0, CPU_INFO_MTX_OLDSPL(a3)
 	addl	t0, 1, t2		/* mtx_count++ */
 	stl	t2, CPU_INFO_MTX_COUNT(a3)
 	/*
 	 * The forward-branch over the SWPIPL call is correctly predicted
 	 * not-taken by the CPU because it's rare for a code path to acquire
 	 * 2 spin mutexes.
 	 */
 	bne	t2, 1f			/* t2 != 0? Skip... */
 	call_pal PAL_OSF1_swpipl	/* clobbers v0, t0, t8..t11 */
 1:
 	RET
 	END(mutex_spin_exit)
+#endif	/* XXX disabled for now XXX */
 
 /*
  * void rw_enter(krwlock_t *rwl, krw_t op);
  *
  * Acquire one hold on a RW lock.
  */
 LEAF(rw_enter, 2)
 	LDGP(pv)
 
 	/*
 	 * RW_READER == 0 (we have a compile-time assert in machdep.c
 	 * to ensure this).
 	 *
@@ -389,23 +391,25 @@ LEAF(rw_exit, 1)
 #if defined(MULTIPROCESSOR)
 /*
  * Table of locations to patch with MB instructions on multiprocessor
  * systems.
  */
 	.section ".rodata"
 	.globl	lock_stub_patch_table
 lock_stub_patch_table:
 	.quad	.L__lock_cas_mb_1
 	.quad	.L__lock_cas_mb_2
 #if !defined(LOCKDEBUG)
 	.quad	.L_mutex_enter_mb_1
 	.quad	.L_mutex_exit_mb_1
+#if 0	/* XXX disabled for now XXX */
 	.quad	.L_mutex_spin_enter_mb_1
 	.quad	.L_mutex_spin_exit_mb_1
+#endif	/* XXX disabled for now XXX */
 	.quad	.L_rw_enter_mb_1
 	.quad	.L_rw_enter_mb_2
 	.quad	.L_rw_tryenter_mb_1
 	.quad	.L_rw_tryenter_mb_2
 	.quad	.L_rw_exit_mb_1
 #endif /* ! LOCKDEBUG */
 	.quad	0	/* NULL terminator */
 #endif /* MULTIPROCESSOR */

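The MB() placeholder and the lock_stub_patch_table in the diff above support
run-time patching: on a multiprocessor system the 'unop' instructions at the
recorded addresses are rewritten as 'mb' (memory barrier) instructions during
boot. A minimal sketch of how such a NULL-terminated table could be walked is
below; the function name, the MB opcode encoding, the writability of the text
segment at patch time, and the use of alpha_pal_imb() are all assumptions made
for illustration, not the alpha port's actual patching code.

#include <sys/types.h>

extern uint32_t *lock_stub_patch_table[];	/* NULL-terminated; from lock_stubs.s */

static void
patch_lock_stubs_for_mp(void)			/* hypothetical name */
{
	/* MB is opcode 0x18, function 0x4000; verify against the Alpha
	 * Architecture Reference Manual before trusting this encoding. */
	const uint32_t mb_insn = (0x18U << 26) | 0x4000;
	size_t i;

	for (i = 0; lock_stub_patch_table[i] != NULL; i++)
		*lock_stub_patch_table[i] = mb_insn;	/* replace the 'unop' */

	alpha_pal_imb();	/* flush the I-stream so the patched insns are fetched */
}
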
cvs diff -r1.9 -r1.10 src/sys/arch/alpha/include/mutex.h

--- src/sys/arch/alpha/include/mutex.h 2021/07/12 15:21:51 1.9
+++ src/sys/arch/alpha/include/mutex.h 2021/07/14 02:18:10 1.10
@@ -1,14 +1,14 @@
-/* $NetBSD: mutex.h,v 1.9 2021/07/12 15:21:51 thorpej Exp $ */
+/* $NetBSD: mutex.h,v 1.10 2021/07/14 02:18:10 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -49,24 +49,24 @@ struct kmutex {
 			volatile uint16_t	mtxs_unused;
 			__cpu_simple_lock_t	mtxs_lock;
 		} s;
 	} u;
 };
 
 #define mtx_owner	u.mtxa_owner
 #define mtx_flags	u.s.mtxs_flags
 #define mtx_ipl		u.s.mtxs_ipl
 #define mtx_lock	u.s.mtxs_lock
 
 #define __HAVE_SIMPLE_MUTEXES	1
 #define __HAVE_MUTEX_STUBS	1
-#define __HAVE_SPIN_MUTEX_STUBS	1
+/* XXX #define __HAVE_SPIN_MUTEX_STUBS 1 XXX */
 
 #define MUTEX_CAS(p, o, n)	_lock_cas((p), (o), (n))
 
 int	_lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);
 
 #endif	/* __MUTEX_PRIVATE */
 
 __CTASSERT(sizeof(struct kmutex) == sizeof(uintptr_t));
 
 #endif /* _ALPHA_MUTEX_H_ */
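
With __HAVE_SPIN_MUTEX_STUBS commented out above, the MI mutex code stops
expecting MD assembly stubs for the spin-mutex fast path and supplies plain C
entry points instead, so mutex_spin_enter()/mutex_spin_exit() keep working
(just without the fast path) while the panic is investigated. A hedged sketch
of that fallback, assuming the usual shape of the code in sys/kern/kern_mutex.c
(details there may differ):

#ifndef __HAVE_SPIN_MUTEX_STUBS
/*
 * Sketch: without the MD stubs, the spin-mutex entry points just call
 * the full C "vector" routines, which do the IPL raise/restore, the
 * spin loop, and any LOCKDEBUG bookkeeping.
 */
void
mutex_spin_enter(kmutex_t *mtx)
{
	mutex_vector_enter(mtx);
}

void
mutex_spin_exit(kmutex_t *mtx)
{
	mutex_vector_exit(mtx);
}
#endif	/* !__HAVE_SPIN_MUTEX_STUBS */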