Mon Jul 12 15:21:51 2021 UTC
Optimized fast-paths for mutex_spin_enter() and mutex_spin_exit().

(thorpej)
diff -r1.29 -r1.30 src/sys/arch/alpha/alpha/genassym.cf
diff -r1.5 -r1.6 src/sys/arch/alpha/alpha/lock_stubs.s
diff -r1.8 -r1.9 src/sys/arch/alpha/include/mutex.h
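
For context, the new stubs inline the spin-mutex fast path that the MI code in
sys/kern/kern_mutex.c describes; the comments in lock_stubs.s below quote the same
pseudo-code. The C sketch here is illustrative only (splraise() stands in for the
kernel's IPL-raise primitive, and the field and function names follow the comments
in the diff), not the literal kernel source:

    /*
     * mutex_spin_enter(): raise the IPL, remember the previous level when
     * this is the first spin mutex the CPU takes, then try the simple lock
     * inline and fall back to mutex_spin_retry() on contention.
     */
    void
    mutex_spin_enter(kmutex_t *mtx)
    {
            struct cpu_info *ci;
            int s;

            s = splraise(mtx->mtx_ipl);             /* MUTEX_SPIN_SPLRAISE() */
            ci = curcpu();
            if (ci->ci_mtx_count-- == 0)
                    ci->ci_mtx_oldspl = s;          /* IPL to restore later */
            if (!__cpu_simple_lock_try(&mtx->mtx_lock))
                    mutex_spin_retry(mtx);          /* contended: slow path */
    }

    /*
     * mutex_spin_exit(): drop the simple lock, and restore the saved IPL
     * only when the outermost spin mutex is released.
     */
    void
    mutex_spin_exit(kmutex_t *mtx)
    {
            struct cpu_info *ci = curcpu();
            int s = ci->ci_mtx_oldspl;

            __cpu_simple_unlock(&mtx->mtx_lock);
            if (++ci->ci_mtx_count == 0)
                    splx(s);                        /* MUTEX_SPIN_SPLRESTORE() */
    }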

cvs diff -r1.29 -r1.30 src/sys/arch/alpha/alpha/genassym.cf

--- src/sys/arch/alpha/alpha/genassym.cf 2021/07/11 01:58:41 1.29
+++ src/sys/arch/alpha/alpha/genassym.cf 2021/07/12 15:21:51 1.30
@@ -1,207 +1,213 @@
-# $NetBSD: genassym.cf,v 1.29 2021/07/11 01:58:41 thorpej Exp $
+# $NetBSD: genassym.cf,v 1.30 2021/07/12 15:21:51 thorpej Exp $

 #
 # Copyright (c) 1982, 1990, 1993
 # The Regents of the University of California. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 # notice, this list of conditions and the following disclaimer.
 # 2. Redistributions in binary form must reproduce the above copyright
 # notice, this list of conditions and the following disclaimer in the
 # documentation and/or other materials provided with the distribution.
 # 3. Neither the name of the University nor the names of its contributors
 # may be used to endorse or promote products derived from this software
 # without specific prior written permission.
 #
 # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 # SUCH DAMAGE.
 #
 # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94
 #

 #
 # Copyright (c) 1994, 1995 Gordon W. Ross
 # Copyright (c) 1993 Adam Glass
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
 # are met:
 # 1. Redistributions of source code must retain the above copyright
 # notice, this list of conditions and the following disclaimer.
 # 2. Redistributions in binary form must reproduce the above copyright
 # notice, this list of conditions and the following disclaimer in the
 # documentation and/or other materials provided with the distribution.
 # 3. All advertising materials mentioning features or use of this software
 # must display the following acknowledgement:
 # This product includes software developed by the University of
 # California, Berkeley and its contributors.
 # 4. Neither the name of the University nor the names of its contributors
 # may be used to endorse or promote products derived from this software
 # without specific prior written permission.
 #
 # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 # SUCH DAMAGE.
 #
 # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94
 #

 quote #define __RWLOCK_PRIVATE
+quote #define __MUTEX_PRIVATE

 include <sys/param.h>
 include <sys/buf.h>
 include <sys/proc.h>
 include <sys/sched.h>
 include <sys/mbuf.h>
 include <sys/msgbuf.h>
 include <sys/rwlock.h>
+include <sys/mutex.h>
 include <sys/syscall.h>

 include <machine/cpu.h>
 include <machine/reg.h>
 include <machine/frame.h>
 include <machine/rpb.h>
 include <machine/vmparam.h>

 include <uvm/uvm_extern.h>

 # general constants
 define VM_MAX_ADDRESS VM_MAX_ADDRESS
 define ALPHA_PGBYTES ALPHA_PGBYTES

 # Register offsets, for stack frames.
 define FRAME_V0 FRAME_V0
 define FRAME_T0 FRAME_T0
 define FRAME_T1 FRAME_T1
 define FRAME_T2 FRAME_T2
 define FRAME_T3 FRAME_T3
 define FRAME_T4 FRAME_T4
 define FRAME_T5 FRAME_T5
 define FRAME_T6 FRAME_T6
 define FRAME_T7 FRAME_T7
 define FRAME_S0 FRAME_S0
 define FRAME_S1 FRAME_S1
 define FRAME_S2 FRAME_S2
 define FRAME_S3 FRAME_S3
 define FRAME_S4 FRAME_S4
 define FRAME_S5 FRAME_S5
 define FRAME_S6 FRAME_S6
 define FRAME_A3 FRAME_A3
 define FRAME_A4 FRAME_A4
 define FRAME_A5 FRAME_A5
 define FRAME_T8 FRAME_T8
 define FRAME_T9 FRAME_T9
 define FRAME_T10 FRAME_T10
 define FRAME_T11 FRAME_T11
 define FRAME_RA FRAME_RA
 define FRAME_T12 FRAME_T12
 define FRAME_AT FRAME_AT
 define FRAME_SP FRAME_SP

 define FRAME_SW_SIZE FRAME_SW_SIZE

 define FRAME_PS FRAME_PS
 define FRAME_PC FRAME_PC
 define FRAME_GP FRAME_GP
 define FRAME_A0 FRAME_A0
 define FRAME_A1 FRAME_A1
 define FRAME_A2 FRAME_A2

 define FRAME_SIZE FRAME_SIZE

 # bits of the PS register
 define ALPHA_PSL_USERMODE ALPHA_PSL_USERMODE
 define ALPHA_PSL_IPL_MASK ALPHA_PSL_IPL_MASK
 define ALPHA_PSL_IPL_0 ALPHA_PSL_IPL_0
 define ALPHA_PSL_IPL_SOFT_LO ALPHA_PSL_IPL_SOFT_LO
 define ALPHA_PSL_IPL_SOFT_HI ALPHA_PSL_IPL_SOFT_HI
 define ALPHA_PSL_IPL_HIGH ALPHA_PSL_IPL_HIGH

 # soft interrrupt definitions
 define ALPHA_ALL_SOFTINTS ALPHA_ALL_SOFTINTS

 # pte bits
 define ALPHA_PTE_VALID ALPHA_PTE_VALID
 define ALPHA_PTE_ASM ALPHA_PTE_ASM
 define ALPHA_PTE_KR ALPHA_PTE_KR
 define ALPHA_PTE_KW ALPHA_PTE_KW

 # Important offsets into the lwp and proc structs & associated constants
 define L_PCB offsetof(struct lwp, l_addr)
 define L_CPU offsetof(struct lwp, l_cpu)
 define L_PROC offsetof(struct lwp, l_proc)
 define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags)
 define L_MD_PCBPADDR offsetof(struct lwp, l_md.md_pcbpaddr)
 define L_MD_TF offsetof(struct lwp, l_md.md_tf)
 define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending)

 define P_VMSPACE offsetof(struct proc, p_vmspace)
 define P_RASLIST offsetof(struct proc, p_raslist)
 define P_MD_SYSCALL offsetof(struct proc, p_md.md_syscall)

 # offsets needed by cpu_switch() to switch mappings.
 define VM_MAP_PMAP offsetof(struct vmspace, vm_map.pmap)

 # Important offsets into the user struct & associated constants
 define UPAGES UPAGES
 define PCB_HWPCB_KSP offsetof(struct pcb, pcb_hw.apcb_ksp)
 define PCB_CONTEXT offsetof(struct pcb, pcb_context[0])
 define PCB_ONFAULT offsetof(struct pcb, pcb_onfault)

 # Offsets into struct fpstate, for save, restore
 define FPREG_FPR_REGS offsetof(struct fpreg, fpr_regs[0])
 define FPREG_FPR_CR offsetof(struct fpreg, fpr_cr)

 # Important other addresses
 define HWRPB_ADDR HWRPB_ADDR /* Restart parameter block */
 define VPTBASE VPTBASE /* Virtual Page Table base */

 # Offsets into the HWRPB.
 define RPB_PRIMARY_CPU_ID offsetof(struct rpb, rpb_primary_cpu_id)

 # Kernel entries
 define ALPHA_KENTRY_ARITH ALPHA_KENTRY_ARITH
 define ALPHA_KENTRY_MM ALPHA_KENTRY_MM
 define ALPHA_KENTRY_IF ALPHA_KENTRY_IF
 define ALPHA_KENTRY_UNA ALPHA_KENTRY_UNA

 # errno values
 define ENAMETOOLONG ENAMETOOLONG
 define EFAULT EFAULT

 # Syscalls called from sigreturn.
 define SYS_compat_16___sigreturn14 SYS_compat_16___sigreturn14
 define SYS_exit SYS_exit

 # CPU info
 define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp)
 define CPU_INFO_IDLE_LWP offsetof(struct cpu_info, ci_data.cpu_idlelwp)
 define CPU_INFO_SSIR offsetof(struct cpu_info, ci_ssir)
 define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count)
+define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl)
 define CPU_INFO_SIZEOF sizeof(struct cpu_info)

 # Bits in lock fields
 define RW_WRITE_WANTED RW_WRITE_WANTED
 define RW_WRITE_LOCKED RW_WRITE_LOCKED
 define RW_READ_INCR RW_READ_INCR
 define RW_READ_COUNT_SHIFT RW_READ_COUNT_SHIFT
+define MUTEX_IPL offsetof(struct kmutex, mtx_ipl)
+define MUTEX_SIMPLELOCK offsetof(struct kmutex, mtx_lock)
+define __SIMPLELOCK_LOCKED __SIMPLELOCK_LOCKED
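
The new genassym.cf entries above exist so the assembly stubs can address struct
fields symbolically: genassym(1) evaluates each "define" expression at build time
and emits the result as a constant in assym.h, which lock_stubs.s then uses as a
load/store displacement (for example MUTEX_IPL in "ldbu a0, MUTEX_IPL(a0)"). The
standalone sketch below shows the same offsetof() idea on a stand-in structure;
the member types are assumptions for illustration (the real ipl_cookie_t and
__cpu_simple_lock_t live in the alpha headers), so the printed values are not
authoritative:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the private struct kmutex layout shown in mutex.h below. */
    struct kmutex_model {
            union {
                    volatile uintptr_t mtxa_owner;
                    struct {
                            volatile uint8_t  mtxs_flags;
                            uint8_t           mtxs_ipl;     /* assumed 1-byte cookie */
                            volatile uint16_t mtxs_unused;
                            volatile uint32_t mtxs_lock;    /* assumed 4-byte simplelock */
                    } s;
            } u;
    };

    int
    main(void)
    {
            /* These are the kinds of constants genassym emits into assym.h. */
            printf("MUTEX_IPL        = %zu\n",
                offsetof(struct kmutex_model, u.s.mtxs_ipl));
            printf("MUTEX_SIMPLELOCK = %zu\n",
                offsetof(struct kmutex_model, u.s.mtxs_lock));
            return 0;
    }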

cvs diff -r1.5 -r1.6 src/sys/arch/alpha/alpha/lock_stubs.s

--- src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/11 01:58:41 1.5
+++ src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/12 15:21:51 1.6
@@ -1,269 +1,383 @@
-/* $NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $ */
+/* $NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $ */

 /*-
  * Copyright (c) 2007, 2021 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran, and by Jason R. Thorpe.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in the
  * documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"

 #include <machine/asm.h>

-__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $");

 #include "assym.h"

 #if defined(MULTIPROCESSOR)
 #define MB mb
 #else
 #define MB /* nothing */
 #endif

 /*
  * int _lock_cas(uintptr_t *ptr, uintptr_t old, uintptr_t new)
  */
 LEAF(_lock_cas, 3)
 1:
  mov a2, v0
  ldq_l t1, 0(a0)
  cmpeq t1, a1, t1
  beq t1, 2f
  stq_c v0, 0(a0)
  beq v0, 3f
  MB
  RET
 2:
  mov zero, v0
  MB
  RET
 3:
  br 1b
  END(_lock_cas)

 #if !defined(LOCKDEBUG)

 /*
  * void mutex_enter(kmutex_t *mtx);
  */
 LEAF(mutex_enter, 1)
  LDGP(pv)
  GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
 1:
  mov v0, t1
  ldq_l t2, 0(a0)
  bne t2, 2f
  stq_c t1, 0(a0)
  beq t1, 3f
  MB
  RET
 2:
  lda t12, mutex_vector_enter
  jmp (t12)
 3:
  br 1b
  END(mutex_enter)

 /*
  * void mutex_exit(kmutex_t *mtx);
  */
 LEAF(mutex_exit, 1)
  LDGP(pv)
  MB
  GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
  mov zero, t3
 1:
  ldq_l t2, 0(a0)
  cmpeq v0, t2, t2
  beq t2, 2f
  stq_c t3, 0(a0)
  beq t3, 3f
  RET
 2:
  lda t12, mutex_vector_exit
  jmp (t12)
 3:
  br 1b
  END(mutex_exit)

 /*
+ * void mutex_spin_enter(kmutex_t *mtx);
+ */
+LEAF(mutex_spin_enter, 1);
+ LDGP(pv)
+
+ /*
+ * STEP 1: Perform the MUTEX_SPIN_SPLRAISE() function.
+ * (see sys/kern/kern_mutex.c)
+ *
+ * s = splraise(mtx->mtx_ipl);
+ * if (curcpu->ci_mtx_count-- == 0)
+ * curcpu->ci_mtx_oldspl = s;
+ */
+
+ call_pal PAL_OSF1_rdps /* clobbers v0, t0, t8..t11 */
+ /* v0 = cur_ipl */
+#ifdef __BWX__
+ mov a0, a1 /* a1 = mtx */
+ ldbu a0, MUTEX_IPL(a0) /* a0 = new_ipl */
+ mov v0, a4 /* save cur_ipl in a4 */
+#else
+ mov a0, a1 /* a1 = mtx */
+ ldq_u a2, MUTEX_IPL(a0)
+ mov v0, a4 /* save cur_ipl in a4 */
+ extbl a2, MUTEX_IPL, a0 /* a0 = new_ipl */
+#endif /* __BWX__ */
+ cmplt v0, a0, a3 /* a3 = (cur_ipl < new_ipl) */
+ GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
+ mov v0, a5 /* save curlwp in a5 */
+ /*
+ * The forward-branch over the SWPIPL call is correctly predicted
+ * not-taken by the CPU because it's rare for a code path to acquire
+ * 2 spin mutexes.
+ */
+ beq a3, 1f /* no? -> skip... */
+ call_pal PAL_OSF1_swpipl /* clobbers v0, t0, t8..t11 */
+ /*
+ * v0 returns the old_ipl, which will be the same as the
+ * cur_ipl we squirreled away in a4 earlier.
+ */
+1:
+ /*
+ * curlwp->l_cpu is now stable. Update the counter and
+ * stash the old_ipl. Just in case it's not clear what's
+ * going on, we:
+ *
+ * - Load previous value of mtx_oldspl into t1.
+ * - Conditionally move old_ipl into t1 if mtx_count == 0.
+ * - Store t1 back to mtx_oldspl; if mtx_count != 0,
+ * the store is redundant, but it's faster than a forward
+ * branch.
+ */
+ ldq a3, L_CPU(a5) /* a3 = curlwp->l_cpu (curcpu) */
+ ldl t0, CPU_INFO_MTX_COUNT(a3)
+ ldl t1, CPU_INFO_MTX_OLDSPL(a3)
+ cmoveq t0, a4, t1 /* mtx_count == 0? -> t1 = old_ipl */
+ subl t0, 1, t2 /* mtx_count-- */
+ stl t1, CPU_INFO_MTX_OLDSPL(a3)
+ stl t2, CPU_INFO_MTX_COUNT(a3)
+
+ /*
+ * STEP 2: __cpu_simple_lock_try(&mtx->mtx_lock)
+ */
+ ldl_l t0, MUTEX_SIMPLELOCK(a1)
+ ldiq t1, __SIMPLELOCK_LOCKED
+ bne t0, 2f /* contended */
+ stl_c t1, MUTEX_SIMPLELOCK(a1)
+ beq t1, 2f /* STL_C failed; consider contended */
+ MB
+ RET
+2:
+ mov a1, a0 /* restore first argument */
+ lda pv, mutex_spin_retry
+ jmp (pv)
+ END(mutex_spin_enter)
+
+/*
+ * void mutex_spin_exit(kmutex_t *mtx);
+ */
+LEAF(mutex_spin_exit, 1)
+ LDGP(pv);
+ MB
+
+ /*
+ * STEP 1: __cpu_simple_unlock(&mtx->mtx_lock)
+ */
+ stl zero, MUTEX_SIMPLELOCK(a0)
+
+ /*
+ * STEP 2: Perform the MUTEX_SPIN_SPLRESTORE() function.
+ * (see sys/kern/kern_mutex.c)
+ *
+ * s = curcpu->ci_mtx_oldspl;
+ * if (++curcpu->ci_mtx_count == 0)
+ * splx(s);
+ */
+ GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
+ ldq a3, L_CPU(v0) /* a3 = curlwp->l_cpu (curcpu) */
+ ldl t0, CPU_INFO_MTX_COUNT(a3)
+ ldl a0, CPU_INFO_MTX_OLDSPL(a3)
+ addl t0, 1, t2 /* mtx_count++ */
+ stl t2, CPU_INFO_MTX_COUNT(a3)
+ /*
+ * The forward-branch over the SWPIPL call is correctly predicted
+ * not-taken by the CPU because it's rare for a code path to acquire
+ * 2 spin mutexes.
+ */
+ bne t2, 1f /* t2 != 0? Skip... */
+ call_pal PAL_OSF1_swpipl /* clobbers v0, t0, t8..t11 */
+1:
+ RET
+ END(mutex_spin_exit)
+
+/*
  * void rw_enter(krwlock_t *rwl, krw_t op);
  *
  * Acquire one hold on a RW lock.
  */
 LEAF(rw_enter, 2)
  LDGP(pv)

  /*
  * RW_READER == 0 (we have a compile-time assert in machdep.c
  * to ensure this).
  *
  * Acquire for read is the most common case.
  */
  bne a1, 3f

  /* Acquiring for read. */
 1: ldq_l t0, 0(a0)
  and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1
  addq t0, RW_READ_INCR, t2
  bne t1, 4f /* contended */
  stq_c t2, 0(a0)
  beq t2, 2f /* STQ_C failed; retry */
  MB
  RET

 2: br 1b

 3: /* Acquiring for write. */
  GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
  ldq_l t0, 0(a0)
  or v0, RW_WRITE_LOCKED, t2
  bne t0, 4f /* contended */
  stq_c t2, 0(a0)
  beq t2, 4f /* STQ_C failed; consider it contended */
  MB
  RET

 4: lda pv, rw_vector_enter
  jmp (pv)
  END(rw_enter)

 /*
  * int rw_tryenter(krwlock_t *rwl, krw_t op);
  *
  * Try to acquire one hold on a RW lock.
  */
 LEAF(rw_tryenter, 2)
  LDGP(pv)

  /* See above. */
  bne a1, 3f

  /* Acquiring for read. */
 1: ldq_l t0, 0(a0)
  and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1
  addq t0, RW_READ_INCR, v0
  bne t1, 4f /* contended */
  stq_c v0, 0(a0)
  beq v0, 2f /* STQ_C failed; retry */
  MB
  RET /* v0 contains non-zero LOCK_FLAG from STQ_C */

 2: br 1b

  /* Acquiring for write. */
 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
  ldq_l t0, 0(a0)
  or v0, RW_WRITE_LOCKED, v0
  bne t0, 4f /* contended */
  stq_c v0, 0(a0)
  /*
  * v0 now contains the LOCK_FLAG value from STQ_C, which is either
  * 0 for failure, or non-zero for success. In either case, v0's
  * value is correct. Go ahead and perform the memory barrier even
  * in the failure case because we expect it to be rare and it saves
  * a branch-not-taken instruction in the success case.
  */
  MB
  RET

 4: mov zero, v0 /* return 0 (failure) */
  RET
  END(rw_tryenter)

 /*
  * void rw_exit(krwlock_t *rwl);
  *
  * Release one hold on a RW lock.
  */
 LEAF(rw_exit, 1)
  LDGP(pv)
  MB

  /*
  * Check for write-lock release, and get the owner/count field
  * on its own for sanity-checking against expected values.
  */
  ldq a1, 0(a0)
  and a1, RW_WRITE_LOCKED, t1
  srl a1, RW_READ_COUNT_SHIFT, a2
  bne t1, 3f

  /*
  * Releasing a read-lock. Make sure the count is non-zero.
  * If it is zero, take the slow path where the juicy diagnostic
  * checks are located.
  */
  beq a2, 4f

  /*
  * We do the following trick to check to see if we're releasing
  * the last read-count and there are waiters:
  *
  * 1. Set v0 to 1.
  * 2. Shift the new read count into t1.
  * 3. Conditally move t1 to v0 based on low-bit-set of t0
  * (RW_HAS_WAITERS). If RW_HAS_WAITERS is not set, then
  * the move will not take place, and v0 will remain 1.
  * Otherwise, v0 will contain the updated read count.
  * 4. Jump to slow path if v0 == 0.
  */
 1: ldq_l t0, 0(a0)
  ldiq v0, 1
  subq t0, RW_READ_INCR, t2
  srl t2, RW_READ_COUNT_SHIFT, t1
  cmovlbs t0, t1, v0
  beq v0, 4f
  stq_c t2, 0(a0)
  beq t2, 2f /* STQ_C failed; try again */
  RET

 2: br 1b

  /*
  * Releasing a write-lock. Make sure the owner field points
  * to our LWP. If it does not, take the slow path where the
  * juicy diagnostic checks are located. a2 contains the owner
  * field shifted down. Shift it back up to compare to curlwp;
  * this conveniently discards the bits we don't want to compare.
  */
 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */
  sll a2, RW_READ_COUNT_SHIFT, a2
  mov zero, t2 /* fast-path write-unlock stores NULL */
  cmpeq v0, a2, v0 /* v0 = (owner == curlwp) */
  ldq_l t0, 0(a0)
  beq v0, 4f /* owner field mismatch; need slow path */
  blbs t0, 4f /* RW_HAS_WAITERS set; need slow-path */
  stq_c t2, 0(a0)
  beq t2, 4f /* STQ_C failed; need slow-path */
  RET

 4: lda pv, rw_vector_exit
  jmp (pv)
  END(rw_exit)

 #endif /* !LOCKDEBUG */
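
Two details of the new stubs above are worth spelling out in C. First, the
ldl_l/stl_c pair at MUTEX_SIMPLELOCK is the inline form of __cpu_simple_lock_try():
it succeeds only if the lock word was 0, and a failed store-conditional is simply
treated as contention (branch to mutex_spin_retry) rather than retried inline. A
portable model of that acquire/release pairing, using C11 atomics as a stand-in for
the LL/SC sequence and the MB barriers (not the kernel's actual implementation), is:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SIMPLELOCK_UNLOCKED 0u
    #define SIMPLELOCK_LOCKED   1u  /* stand-in for __SIMPLELOCK_LOCKED */

    /* Model of the ldl_l/bne/stl_c/MB sequence in mutex_spin_enter. */
    static bool
    simple_lock_try(atomic_uint *lock)
    {
            unsigned expected = SIMPLELOCK_UNLOCKED;

            return atomic_compare_exchange_strong_explicit(lock, &expected,
                SIMPLELOCK_LOCKED, memory_order_acquire, memory_order_relaxed);
    }

    /* Model of the MB + "stl zero, MUTEX_SIMPLELOCK(a0)" in mutex_spin_exit. */
    static void
    simple_unlock(atomic_uint *lock)
    {
            atomic_store_explicit(lock, SIMPLELOCK_UNLOCKED, memory_order_release);
    }

Second, the cmoveq in mutex_spin_enter makes the old-IPL bookkeeping branch-free:
the stub always stores ci_mtx_oldspl, selecting the new value only when the count
was zero. The equivalent C, with illustrative names, is:

    /* Always store, selecting with a conditional move instead of branching. */
    static inline void
    stash_oldspl(int *mtx_count, int *mtx_oldspl, int old_ipl)
    {
            int count = *mtx_count;

            *mtx_oldspl = (count == 0) ? old_ipl : *mtx_oldspl;  /* cmoveq */
            *mtx_count = count - 1;                              /* subl */
    }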

cvs diff -r1.8 -r1.9 src/sys/arch/alpha/include/mutex.h

--- src/sys/arch/alpha/include/mutex.h 2020/09/23 00:52:49 1.8
+++ src/sys/arch/alpha/include/mutex.h 2021/07/12 15:21:51 1.9
@@ -1,71 +1,72 @@
-/* $NetBSD: mutex.h,v 1.8 2020/09/23 00:52:49 thorpej Exp $ */
+/* $NetBSD: mutex.h,v 1.9 2021/07/12 15:21:51 thorpej Exp $ */

 /*-
  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  * notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  * notice, this list of conditions and the following disclaimer in the
  * documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */

 #ifndef _ALPHA_MUTEX_H_
 #define _ALPHA_MUTEX_H_

 #ifndef __MUTEX_PRIVATE

 struct kmutex {
  uintptr_t mtx_pad1;
 };

 #else /* __MUTEX_PRIVATE */

 struct kmutex {
  union {
  volatile uintptr_t mtxa_owner;
  struct {
  volatile uint8_t mtxs_flags;
  ipl_cookie_t mtxs_ipl;
  volatile uint16_t mtxs_unused;
  __cpu_simple_lock_t mtxs_lock;
  } s;
  } u;
 };

 #define mtx_owner u.mtxa_owner
 #define mtx_flags u.s.mtxs_flags
 #define mtx_ipl u.s.mtxs_ipl
 #define mtx_lock u.s.mtxs_lock

 #define __HAVE_SIMPLE_MUTEXES 1
 #define __HAVE_MUTEX_STUBS 1
+#define __HAVE_SPIN_MUTEX_STUBS 1

 #define MUTEX_CAS(p, o, n) _lock_cas((p), (o), (n))

 int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t);

 #endif /* __MUTEX_PRIVATE */

 __CTASSERT(sizeof(struct kmutex) == sizeof(uintptr_t));

 #endif /* _ALPHA_MUTEX_H_ */
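
The one-line addition to mutex.h advertises the new stubs to the MI mutex code:
with __HAVE_SPIN_MUTEX_STUBS defined, the kernel uses the assembly
mutex_spin_enter()/mutex_spin_exit() above instead of routing those calls through
the generic C vector routines. A sketch of how such a feature macro is typically
consumed (illustrative only; the exact form lives in sys/kern/kern_mutex.c and is
not quoted here) looks like:

    /*
     * If the MD code does not provide fast-path stubs, let the C slow-path
     * routines double as the spin entry points.
     */
    #ifndef __HAVE_SPIN_MUTEX_STUBS
    __strong_alias(mutex_spin_enter, mutex_vector_enter);
    __strong_alias(mutex_spin_exit, mutex_vector_exit);
    #endif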