Sun Feb 13 13:42:30 2022 UTC
alpha: __cpu_simple_lock audit.

Add missing "cc" and "memory" asm clobbers to the compiler can't
reorder memory access around these.  The necessary memory barrier
instructions, mb, already appear in all the right places.


(riastradh)
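As background, here is an illustrative sketch only, not part of the commit
(the toy_unlock name and toy_lock_t type are made up): the unlock path ends
up with the shape below.  The mb instruction orders the stores at the
hardware level, while the "memory" clobber is what stops the compiler from
caching the lock word in a register or moving other loads and stores across
the asm statement, and "cc" conservatively notes that condition codes may be
affected.

typedef volatile unsigned int toy_lock_t;	/* hypothetical stand-in for __cpu_simple_lock_t */

static inline void
toy_unlock(toy_lock_t *alp)
{
	__asm volatile(
		"	mb		\n"	/* hardware barrier: order earlier stores */
		"	stl	$31, %0	\n"	/* store zero ($31) to drop the lock */
		: "=m" (*alp)
		: /* no inputs */
		: "cc", "memory");	/* compiler barrier: no caching or reordering across this */
}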
diff -r1.32 -r1.33 src/sys/arch/alpha/include/lock.h

cvs diff -r1.32 -r1.33 src/sys/arch/alpha/include/lock.h

--- src/sys/arch/alpha/include/lock.h 2022/02/12 17:17:53 1.32
+++ src/sys/arch/alpha/include/lock.h 2022/02/13 13:42:30 1.33
@@ -1,173 +1,175 @@
-/* $NetBSD: lock.h,v 1.32 2022/02/12 17:17:53 riastradh Exp $ */
+/* $NetBSD: lock.h,v 1.33 2022/02/13 13:42:30 riastradh Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 /*
  * Machine-dependent spin lock operations.
  */
 
 #ifndef _ALPHA_LOCK_H_
 #define	_ALPHA_LOCK_H_
 
 #ifdef _KERNEL_OPT
 #include "opt_multiprocessor.h"
 #endif
 
 static __inline int
 __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr)
 {
 	return *__ptr == __SIMPLELOCK_LOCKED;
 }
 
 static __inline int
 __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr)
 {
 	return *__ptr == __SIMPLELOCK_UNLOCKED;
 }
 
 static __inline void
 __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
 {
 	*__ptr = __SIMPLELOCK_UNLOCKED;
 }
 
 static __inline void
 __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
 {
 	*__ptr = __SIMPLELOCK_LOCKED;
 }
 
 static __inline void
 __cpu_simple_lock_init(__cpu_simple_lock_t *alp)
 {
 
 	*alp = __SIMPLELOCK_UNLOCKED;
 }
 
 static __inline void
 __cpu_simple_lock(__cpu_simple_lock_t *alp)
 {
 	unsigned long t0;
 
 	/*
 	 * Note, if we detect that the lock is held when
 	 * we do the initial load-locked, we spin using
 	 * a non-locked load to save the coherency logic
 	 * some work.
 	 */
 
 	__asm volatile(
 		"# BEGIN __cpu_simple_lock\n"
 		"1:	ldl_l	%0, %3		\n"
 		"	bne	%0, 2f		\n"
 		"	bis	$31, %2, %0	\n"
 		"	stl_c	%0, %1		\n"
 		"	beq	%0, 3f		\n"
 		"	mb			\n"
 		"	br	4f		\n"
 		"2:	ldl	%0, %3		\n"
 		"	beq	%0, 1b		\n"
 		"	br	2b		\n"
 		"3:	br	1b		\n"
 		"4:				\n"
 		"	# END __cpu_simple_lock\n"
 		: "=&r" (t0), "=m" (*alp)
 		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
-		: "memory");
+		: "cc", "memory");
 }
 
 static __inline int
 __cpu_simple_lock_try(__cpu_simple_lock_t *alp)
 {
 	unsigned long t0, v0;
 
 	__asm volatile(
 		"# BEGIN __cpu_simple_lock_try\n"
 		"1:	ldl_l	%0, %4		\n"
 		"	bne	%0, 2f		\n"
 		"	bis	$31, %3, %0	\n"
 		"	stl_c	%0, %2		\n"
 		"	beq	%0, 3f		\n"
 		"	mb			\n"
 		"	bis	$31, 1, %1	\n"
 		"	br	4f		\n"
 		"2:	bis	$31, $31, %1	\n"
 		"	br	4f		\n"
 		"3:	br	1b		\n"
 		"4:				\n"
 		"	# END __cpu_simple_lock_try"
 		: "=&r" (t0), "=r" (v0), "=m" (*alp)
 		: "i" (__SIMPLELOCK_LOCKED), "m" (*alp)
-		: "memory");
+		: "cc", "memory");
 
 	return (v0 != 0);
 }
 
 static __inline void
 __cpu_simple_unlock(__cpu_simple_lock_t *alp)
 {
 
 	__asm volatile(
 		"# BEGIN __cpu_simple_unlock\n"
 		"	mb			\n"
 		"	stl	$31, %0		\n"
 		"	# END __cpu_simple_unlock"
-		: "=m" (*alp));
+		: "=m" (*alp)
+		: /* no inputs */
+		: "memory");
 }
 
 #if defined(MULTIPROCESSOR)
 /*
  * On the Alpha, interprocessor interrupts come in at device priority
  * level (ALPHA_PSL_IPL_CLOCK).  This can cause some problems while
  * waiting for spin locks from a high'ish priority level (like spin
  * mutexes used by the scheduler): IPIs that come in will not be
  * processed.  This can lead to deadlock.
  *
  * This hook allows IPIs to be processed while spinning.  Note we only
  * do the special thing if IPIs are blocked (current IPL >= IPL_CLOCK).
  * IPIs will be processed in the normal fashion otherwise, and checking
  * this way ensures that preemption is disabled (i.e. curcpu() is stable).
  */
 #define	SPINLOCK_SPIN_HOOK						\
 do {									\
 	unsigned long _ipl_ = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;	\
 									\
 	if (_ipl_ >= ALPHA_PSL_IPL_CLOCK) {				\
 		struct cpu_info *__ci = curcpu();			\
 		if (atomic_load_relaxed(&__ci->ci_ipis) != 0) {		\
 			alpha_ipi_process(__ci, NULL);			\
 		}							\
 	}								\
 } while (/*CONSTCOND*/0)
 #define	SPINLOCK_BACKOFF_HOOK	(void)nullop((void *)0)
 #endif /* MULTIPROCESSOR */
 
 #endif /* _ALPHA_LOCK_H_ */
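For context on the SPINLOCK_SPIN_HOOK comment in the diff above, here is a
rough sketch (hypothetical code, not from the tree; assumes kernel context
and that this header is reachable as <machine/lock.h>) of how a
machine-independent spin loop would invoke the hook so that a CPU spinning
with IPIs masked still gets to service them:

#include <machine/lock.h>	/* assumed include path for this header */

#ifndef SPINLOCK_SPIN_HOOK	/* non-MULTIPROCESSOR builds define no hook */
#define	SPINLOCK_SPIN_HOOK	/* nothing */
#endif
#ifndef SPINLOCK_BACKOFF_HOOK
#define	SPINLOCK_BACKOFF_HOOK	/* nothing */
#endif

struct toy_spinlock {
	__cpu_simple_lock_t sl_lock;	/* hypothetical wrapper */
};

static void
toy_spin_enter(struct toy_spinlock *sp)
{
	while (!__cpu_simple_lock_try(&sp->sl_lock)) {
		/* Spin with ordinary reads until the lock looks free. */
		do {
			SPINLOCK_BACKOFF_HOOK;	/* cheap pause between polls */
			SPINLOCK_SPIN_HOOK;	/* drain pending IPIs if they are blocked */
		} while (__SIMPLELOCK_LOCKED_P(&sp->sl_lock));
	}
}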