Optimized fast-paths for mutex_spin_enter() and mutex_spin_exit().
(thorpej)
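
In C terms, the new stubs inline the following logic (a rough sketch of the
semantics only; the control flow mirrors the MUTEX_SPIN_SPLRAISE() and
MUTEX_SPIN_SPLRESTORE() pseudocode quoted in the stub comments below, and
the *_sketch names are illustrative, not kernel symbols):

	/* What mutex_spin_enter() does on the fast path. */
	void
	mutex_spin_enter_sketch(kmutex_t *mtx)
	{
		int s = splraise(mtx->mtx_ipl);	/* raise IPL first */
		struct cpu_info *ci = curcpu();

		if (ci->ci_mtx_count-- == 0)	/* outermost spin mutex? */
			ci->ci_mtx_oldspl = s;	/* remember IPL to restore */
		if (!__cpu_simple_lock_try(&mtx->mtx_lock))
			mutex_spin_retry(mtx);	/* contended: slow path */
	}

	/* What mutex_spin_exit() does on the fast path. */
	void
	mutex_spin_exit_sketch(kmutex_t *mtx)
	{
		struct cpu_info *ci = curcpu();

		__cpu_simple_unlock(&mtx->mtx_lock);
		if (++ci->ci_mtx_count == 0)	/* last spin mutex gone? */
			splx(ci->ci_mtx_oldspl);
	}

(Memory barriers are elided in the sketch; on MULTIPROCESSOR kernels the
assembly issues an MB after a successful acquire and before the release.)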
diff -r1.29 -r1.30 src/sys/arch/alpha/alpha/genassym.cf
--- src/sys/arch/alpha/alpha/genassym.cf	2021/07/11 01:58:41	1.29
+++ src/sys/arch/alpha/alpha/genassym.cf 2021/07/12 15:21:51 1.30
@@ -1,14 +1,14 @@
-# $NetBSD: genassym.cf,v 1.29 2021/07/11 01:58:41 thorpej Exp $
+# $NetBSD: genassym.cf,v 1.30 2021/07/12 15:21:51 thorpej Exp $
 
 #
 # Copyright (c) 1982, 1990, 1993
 #	The Regents of the University of California.  All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 #    notice, this list of conditions and the following disclaimer.
 # 2. Redistributions in binary form must reproduce the above copyright
 #    notice, this list of conditions and the following disclaimer in the
 #    documentation and/or other materials provided with the distribution.
@@ -57,34 +57,36 @@
 # ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 # SUCH DAMAGE.
 #
 #	from: @(#)genassym.c	8.3 (Berkeley) 1/4/94
 #
 
 quote #define __RWLOCK_PRIVATE
+quote #define __MUTEX_PRIVATE
 
 include <sys/param.h>
 include <sys/buf.h>
 include <sys/proc.h>
 include <sys/sched.h>
 include <sys/mbuf.h>
 include <sys/msgbuf.h>
 include <sys/rwlock.h>
+include <sys/mutex.h>
 include <sys/syscall.h>
 
 include <machine/cpu.h>
 include <machine/reg.h>
 include <machine/frame.h>
 include <machine/rpb.h>
 include <machine/vmparam.h>
 
 include <uvm/uvm_extern.h>
 
 # general constants
 define	VM_MAX_ADDRESS		VM_MAX_ADDRESS
 define	ALPHA_PGBYTES		ALPHA_PGBYTES
@@ -188,20 +190,24 @@ define	ALPHA_KENTRY_UNA	ALPHA_KENTRY_UNA
 # errno values
 define	ENAMETOOLONG		ENAMETOOLONG
 define	EFAULT			EFAULT
 
 # Syscalls called from sigreturn.
 define	SYS_compat_16___sigreturn14	SYS_compat_16___sigreturn14
 define	SYS_exit		SYS_exit
 
 # CPU info
 define	CPU_INFO_CURLWP		offsetof(struct cpu_info, ci_curlwp)
 define	CPU_INFO_IDLE_LWP	offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define	CPU_INFO_SSIR		offsetof(struct cpu_info, ci_ssir)
 define	CPU_INFO_MTX_COUNT	offsetof(struct cpu_info, ci_mtx_count)
+define	CPU_INFO_MTX_OLDSPL	offsetof(struct cpu_info, ci_mtx_oldspl)
 define	CPU_INFO_SIZEOF		sizeof(struct cpu_info)
 
 # Bits in lock fields
 define	RW_WRITE_WANTED		RW_WRITE_WANTED
 define	RW_WRITE_LOCKED		RW_WRITE_LOCKED
 define	RW_READ_INCR		RW_READ_INCR
 define	RW_READ_COUNT_SHIFT	RW_READ_COUNT_SHIFT
+define	MUTEX_IPL		offsetof(struct kmutex, mtx_ipl)
+define	MUTEX_SIMPLELOCK	offsetof(struct kmutex, mtx_lock)
+define	__SIMPLELOCK_LOCKED	__SIMPLELOCK_LOCKED
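
The genassym.cf additions exist so the assembly stubs can name the kmutex
and cpu_info fields symbolically: each define line becomes a constant in the
generated assym.h. The kmutex internals are hidden unless __MUTEX_PRIVATE is
defined before <sys/mutex.h> is included, hence the new quote line. A minimal
C illustration of the same pattern (a hypothetical consumer, not part of the
change):

	#define __MUTEX_PRIVATE		/* expose mtx_ipl/mtx_lock */
	#include <sys/param.h>
	#include <sys/mutex.h>

	/* genassym emits the equivalent of these as assym.h constants. */
	const size_t mutex_ipl_off  = offsetof(struct kmutex, mtx_ipl);
	const size_t mutex_lock_off = offsetof(struct kmutex, mtx_lock);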
diff -r1.5 -r1.6 src/sys/arch/alpha/alpha/lock_stubs.s
--- src/sys/arch/alpha/alpha/lock_stubs.s	2021/07/11 01:58:41	1.5
+++ src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/12 15:21:51 1.6
@@ -1,14 +1,14 @@
-/* $NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $ */
+/* $NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2007, 2021 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran, and by Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -24,27 +24,27 @@
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 
 #include <machine/asm.h>
 
-__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $");
 
 #include "assym.h"
 
 #if defined(MULTIPROCESSOR)
 #define	MB	mb
 #else
 #define	MB	/* nothing */
 #endif
 
 /*
  * int _lock_cas(uintptr_t *ptr, uintptr_t old, uintptr_t new)
  */
 LEAF(_lock_cas, 3)
@@ -101,26 +101,140 @@ LEAF(mutex_exit, 1)
 	cmpeq	v0, t2, t2
 	beq	t2, 2f
 	stq_c	t3, 0(a0)
 	beq	t3, 3f
 	RET
 2:
 	lda	t12, mutex_vector_exit
 	jmp	(t12)
 3:
 	br	1b
 	END(mutex_exit)
 
 /*
+ * void mutex_spin_enter(kmutex_t *mtx);
+ */
+LEAF(mutex_spin_enter, 1);
+	LDGP(pv)
+
+	/*
+	 * STEP 1: Perform the MUTEX_SPIN_SPLRAISE() function.
+	 * (see sys/kern/kern_mutex.c)
+	 *
+	 *	s = splraise(mtx->mtx_ipl);
+	 *	if (curcpu->ci_mtx_count-- == 0)
+	 *		curcpu->ci_mtx_oldspl = s;
+	 */
+
+	call_pal PAL_OSF1_rdps		/* clobbers v0, t0, t8..t11 */
+					/* v0 = cur_ipl */
+#ifdef __BWX__
+	mov	a0, a1			/* a1 = mtx */
+	ldbu	a0, MUTEX_IPL(a0)	/* a0 = new_ipl */
+	mov	v0, a4			/* save cur_ipl in a4 */
+#else
+	mov	a0, a1			/* a1 = mtx */
+	ldq_u	a2, MUTEX_IPL(a0)
+	mov	v0, a4			/* save cur_ipl in a4 */
+	extbl	a2, MUTEX_IPL, a0	/* a0 = new_ipl */
+#endif /* __BWX__ */
+	cmplt	v0, a0, a3		/* a3 = (cur_ipl < new_ipl) */
+	GET_CURLWP			/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
+	mov	v0, a5			/* save curlwp in a5 */
+	/*
+	 * The forward-branch over the SWPIPL call is correctly predicted
+	 * not-taken by the CPU because it's rare for a code path to acquire
+	 * 2 spin mutexes.
+	 */
+	beq	a3, 1f			/* no? -> skip... */
+	call_pal PAL_OSF1_swpipl	/* clobbers v0, t0, t8..t11 */
+	/*
+	 * v0 returns the old_ipl, which will be the same as the
+	 * cur_ipl we squirreled away in a4 earlier.
+	 */
+1:
+	/*
+	 * curlwp->l_cpu is now stable.  Update the counter and
+	 * stash the old_ipl.  Just in case it's not clear what's
+	 * going on, we:
+	 *
+	 *	- Load previous value of mtx_oldspl into t1.
+	 *	- Conditionally move old_ipl into t1 if mtx_count == 0.
+	 *	- Store t1 back to mtx_oldspl; if mtx_count != 0,
+	 *	  the store is redundant, but it's faster than a forward
+	 *	  branch.
+	 */
+	ldq	a3, L_CPU(a5)		/* a3 = curlwp->l_cpu (curcpu) */
+	ldl	t0, CPU_INFO_MTX_COUNT(a3)
+	ldl	t1, CPU_INFO_MTX_OLDSPL(a3)
+	cmoveq	t0, a4, t1		/* mtx_count == 0? -> t1 = old_ipl */
+	subl	t0, 1, t2		/* mtx_count-- */
+	stl	t1, CPU_INFO_MTX_OLDSPL(a3)
+	stl	t2, CPU_INFO_MTX_COUNT(a3)
+
+	/*
+	 * STEP 2: __cpu_simple_lock_try(&mtx->mtx_lock)
+	 */
+	ldl_l	t0, MUTEX_SIMPLELOCK(a1)
+	ldiq	t1, __SIMPLELOCK_LOCKED
+	bne	t0, 2f			/* contended */
+	stl_c	t1, MUTEX_SIMPLELOCK(a1)
+	beq	t1, 2f			/* STL_C failed; consider contended */
+	MB
+	RET
+2:
+	mov	a1, a0			/* restore first argument */
+	lda	pv, mutex_spin_retry
+	jmp	(pv)
+	END(mutex_spin_enter)
+
+/*
+ * void mutex_spin_exit(kmutex_t *mtx);
+ */
+LEAF(mutex_spin_exit, 1)
+	LDGP(pv);
+	MB
+
+	/*
+	 * STEP 1: __cpu_simple_unlock(&mtx->mtx_lock)
+	 */
+	stl	zero, MUTEX_SIMPLELOCK(a0)
+
+	/*
+	 * STEP 2: Perform the MUTEX_SPIN_SPLRESTORE() function.
+	 * (see sys/kern/kern_mutex.c)
+	 *
+	 *	s = curcpu->ci_mtx_oldspl;
+	 *	if (++curcpu->ci_mtx_count == 0)
+	 *		splx(s);
+	 */
+	GET_CURLWP			/* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
+	ldq	a3, L_CPU(v0)		/* a3 = curlwp->l_cpu (curcpu) */
+	ldl	t0, CPU_INFO_MTX_COUNT(a3)
+	ldl	a0, CPU_INFO_MTX_OLDSPL(a3)
+	addl	t0, 1, t2		/* mtx_count++ */
+	stl	t2, CPU_INFO_MTX_COUNT(a3)
+	/*
+	 * The forward-branch over the SWPIPL call is correctly predicted
+	 * not-taken by the CPU because it's rare for a code path to acquire
+	 * 2 spin mutexes.
+	 */
+	bne	t2, 1f			/* t2 != 0?  Skip... */
+	call_pal PAL_OSF1_swpipl	/* clobbers v0, t0, t8..t11 */
+1:
+	RET
+	END(mutex_spin_exit)
+
+/*
  * void rw_enter(krwlock_t *rwl, krw_t op);
  *
  * Acquire one hold on a RW lock.
  */
 LEAF(rw_enter, 2)
 	LDGP(pv)
 
 	/*
 	 * RW_READER == 0 (we have a compile-time assert in machdep.c
 	 * to ensure this).
 	 *
 	 * Acquire for read is the most common case.
 	 */
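
The STEP 2 sequence in mutex_spin_enter is the classic Alpha
load-locked/store-conditional try-lock: LDL_L reads mtx_lock with a lock
reservation, the BNE bails out if the word is already nonzero, and STL_C
stores __SIMPLELOCK_LOCKED only if nothing disturbed the reservation in
between. In portable C11 terms the operation is roughly the following (a
sketch under illustrative names, not the NetBSD __cpu_simple_lock_try()
implementation):

	#include <stdatomic.h>
	#include <stdbool.h>

	typedef atomic_ulong simple_lock_sketch_t;	/* stand-in type */
	#define	SIMPLELOCK_LOCKED_SKETCH	1UL

	static bool
	simple_lock_try_sketch(simple_lock_sketch_t *lp)
	{
		unsigned long unlocked = 0;

		/* Succeeds only if *lp was 0 and our store went through,
		 * just as the LDL_L/BNE/STL_C triplet does. */
		return atomic_compare_exchange_strong(lp, &unlocked,
		    SIMPLELOCK_LOCKED_SKETCH);
	}

A failed STL_C (t1 == 0) cannot distinguish real contention from a merely
broken reservation, so the stub conservatively treats both as contended and
lets mutex_spin_retry() sort it out.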
diff -r1.8 -r1.9 src/sys/arch/alpha/include/mutex.h
--- src/sys/arch/alpha/include/mutex.h	2020/09/23 00:52:49	1.8
+++ src/sys/arch/alpha/include/mutex.h 2021/07/12 15:21:51 1.9
@@ -1,14 +1,14 @@
-/* $NetBSD: mutex.h,v 1.8 2020/09/23 00:52:49 thorpej Exp $ */
+/* $NetBSD: mutex.h,v 1.9 2021/07/12 15:21:51 thorpej Exp $ */
 
 /*-
  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -49,23 +49,24 @@ struct kmutex {
 			volatile uint16_t	mtxs_unused;
 			__cpu_simple_lock_t	mtxs_lock;
 		} s;
 	} u;
 };
 
 #define	mtx_owner	u.mtxa_owner
 #define	mtx_flags	u.s.mtxs_flags
 #define	mtx_ipl		u.s.mtxs_ipl
 #define	mtx_lock	u.s.mtxs_lock
 
 #define	__HAVE_SIMPLE_MUTEXES	1
 #define	__HAVE_MUTEX_STUBS	1
+#define	__HAVE_SPIN_MUTEX_STUBS	1
 
 #define	MUTEX_CAS(p, o, n)	_lock_cas((p), (o), (n))
 
 int	_lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);
 
 #endif	/* __MUTEX_PRIVATE */
 
 __CTASSERT(sizeof(struct kmutex) == sizeof(uintptr_t));
 
 #endif	/* _ALPHA_MUTEX_H_ */
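
Defining __HAVE_SPIN_MUTEX_STUBS advertises to the machine-independent mutex
code that this port now supplies its own mutex_spin_enter()/mutex_spin_exit()
entry points. A paraphrased sketch of how the MI side keys off it (see
sys/kern/kern_mutex.c; from memory, not quoted verbatim):

	/* Without MD spin stubs, the spin entry points fall back to the
	 * generic C vector routines. */
	#ifndef __HAVE_SPIN_MUTEX_STUBS
	__strong_alias(mutex_spin_enter, mutex_vector_enter);
	__strong_alias(mutex_spin_exit, mutex_vector_exit);
	#endif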