Optimized fast-paths for mutex_spin_enter() and mutex_spin_exit().diff -r1.29 -r1.30 src/sys/arch/alpha/alpha/genassym.cf
(thorpej)
--- src/sys/arch/alpha/alpha/genassym.cf 2021/07/11 01:58:41 1.29
+++ src/sys/arch/alpha/alpha/genassym.cf 2021/07/12 15:21:51 1.30
@@ -1,207 +1,213 @@ | @@ -1,207 +1,213 @@ | |||
1 | # $NetBSD: genassym.cf,v 1.29 2021/07/11 01:58:41 thorpej Exp $ | 1 | # $NetBSD: genassym.cf,v 1.30 2021/07/12 15:21:51 thorpej Exp $ | |
2 | 2 | |||
3 | # | 3 | # | |
4 | # Copyright (c) 1982, 1990, 1993 | 4 | # Copyright (c) 1982, 1990, 1993 | |
5 | # The Regents of the University of California. All rights reserved. | 5 | # The Regents of the University of California. All rights reserved. | |
6 | # | 6 | # | |
7 | # Redistribution and use in source and binary forms, with or without | 7 | # Redistribution and use in source and binary forms, with or without | |
8 | # modification, are permitted provided that the following conditions | 8 | # modification, are permitted provided that the following conditions | |
9 | # are met: | 9 | # are met: | |
10 | # 1. Redistributions of source code must retain the above copyright | 10 | # 1. Redistributions of source code must retain the above copyright | |
11 | # notice, this list of conditions and the following disclaimer. | 11 | # notice, this list of conditions and the following disclaimer. | |
12 | # 2. Redistributions in binary form must reproduce the above copyright | 12 | # 2. Redistributions in binary form must reproduce the above copyright | |
13 | # notice, this list of conditions and the following disclaimer in the | 13 | # notice, this list of conditions and the following disclaimer in the | |
14 | # documentation and/or other materials provided with the distribution. | 14 | # documentation and/or other materials provided with the distribution. | |
15 | # 3. Neither the name of the University nor the names of its contributors | 15 | # 3. Neither the name of the University nor the names of its contributors | |
16 | # may be used to endorse or promote products derived from this software | 16 | # may be used to endorse or promote products derived from this software | |
17 | # without specific prior written permission. | 17 | # without specific prior written permission. | |
18 | # | 18 | # | |
19 | # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | 19 | # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
20 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 20 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
21 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 21 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
22 | # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 22 | # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
23 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 23 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
24 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 24 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
25 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 25 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
26 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 26 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
27 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 27 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
28 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 28 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
29 | # SUCH DAMAGE. | 29 | # SUCH DAMAGE. | |
30 | # | 30 | # | |
31 | # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94 | 31 | # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94 | |
32 | # | 32 | # | |
33 | 33 | |||
34 | # | 34 | # | |
35 | # Copyright (c) 1994, 1995 Gordon W. Ross | 35 | # Copyright (c) 1994, 1995 Gordon W. Ross | |
36 | # Copyright (c) 1993 Adam Glass | 36 | # Copyright (c) 1993 Adam Glass | |
37 | # | 37 | # | |
38 | # Redistribution and use in source and binary forms, with or without | 38 | # Redistribution and use in source and binary forms, with or without | |
39 | # modification, are permitted provided that the following conditions | 39 | # modification, are permitted provided that the following conditions | |
40 | # are met: | 40 | # are met: | |
41 | # 1. Redistributions of source code must retain the above copyright | 41 | # 1. Redistributions of source code must retain the above copyright | |
42 | # notice, this list of conditions and the following disclaimer. | 42 | # notice, this list of conditions and the following disclaimer. | |
43 | # 2. Redistributions in binary form must reproduce the above copyright | 43 | # 2. Redistributions in binary form must reproduce the above copyright | |
44 | # notice, this list of conditions and the following disclaimer in the | 44 | # notice, this list of conditions and the following disclaimer in the | |
45 | # documentation and/or other materials provided with the distribution. | 45 | # documentation and/or other materials provided with the distribution. | |
46 | # 3. All advertising materials mentioning features or use of this software | 46 | # 3. All advertising materials mentioning features or use of this software | |
47 | # must display the following acknowledgement: | 47 | # must display the following acknowledgement: | |
48 | # This product includes software developed by the University of | 48 | # This product includes software developed by the University of | |
49 | # California, Berkeley and its contributors. | 49 | # California, Berkeley and its contributors. | |
50 | # 4. Neither the name of the University nor the names of its contributors | 50 | # 4. Neither the name of the University nor the names of its contributors | |
51 | # may be used to endorse or promote products derived from this software | 51 | # may be used to endorse or promote products derived from this software | |
52 | # without specific prior written permission. | 52 | # without specific prior written permission. | |
53 | # | 53 | # | |
54 | # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | 54 | # THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | |
55 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | 55 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
56 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | 56 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
57 | # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | 57 | # ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | |
58 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | 58 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |
59 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | 59 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |
60 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | 60 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |
61 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | 61 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |
62 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | 62 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |
63 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | 63 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
64 | # SUCH DAMAGE. | 64 | # SUCH DAMAGE. | |
65 | # | 65 | # | |
66 | # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94 | 66 | # from: @(#)genassym.c 8.3 (Berkeley) 1/4/94 | |
67 | # | 67 | # | |
68 | 68 | |||
69 | quote #define __RWLOCK_PRIVATE | 69 | quote #define __RWLOCK_PRIVATE | |
70 | quote #define __MUTEX_PRIVATE | |||
70 | 71 | |||
71 | include <sys/param.h> | 72 | include <sys/param.h> | |
72 | include <sys/buf.h> | 73 | include <sys/buf.h> | |
73 | include <sys/proc.h> | 74 | include <sys/proc.h> | |
74 | include <sys/sched.h> | 75 | include <sys/sched.h> | |
75 | include <sys/mbuf.h> | 76 | include <sys/mbuf.h> | |
76 | include <sys/msgbuf.h> | 77 | include <sys/msgbuf.h> | |
77 | include <sys/rwlock.h> | 78 | include <sys/rwlock.h> | |
79 | include <sys/mutex.h> | |||
78 | include <sys/syscall.h> | 80 | include <sys/syscall.h> | |
79 | 81 | |||
80 | include <machine/cpu.h> | 82 | include <machine/cpu.h> | |
81 | include <machine/reg.h> | 83 | include <machine/reg.h> | |
82 | include <machine/frame.h> | 84 | include <machine/frame.h> | |
83 | include <machine/rpb.h> | 85 | include <machine/rpb.h> | |
84 | include <machine/vmparam.h> | 86 | include <machine/vmparam.h> | |
85 | 87 | |||
86 | include <uvm/uvm_extern.h> | 88 | include <uvm/uvm_extern.h> | |
87 | 89 | |||
88 | # general constants | 90 | # general constants | |
89 | define VM_MAX_ADDRESS VM_MAX_ADDRESS | 91 | define VM_MAX_ADDRESS VM_MAX_ADDRESS | |
90 | define ALPHA_PGBYTES ALPHA_PGBYTES | 92 | define ALPHA_PGBYTES ALPHA_PGBYTES | |
91 | 93 | |||
92 | # Register offsets, for stack frames. | 94 | # Register offsets, for stack frames. | |
93 | define FRAME_V0 FRAME_V0 | 95 | define FRAME_V0 FRAME_V0 | |
94 | define FRAME_T0 FRAME_T0 | 96 | define FRAME_T0 FRAME_T0 | |
95 | define FRAME_T1 FRAME_T1 | 97 | define FRAME_T1 FRAME_T1 | |
96 | define FRAME_T2 FRAME_T2 | 98 | define FRAME_T2 FRAME_T2 | |
97 | define FRAME_T3 FRAME_T3 | 99 | define FRAME_T3 FRAME_T3 | |
98 | define FRAME_T4 FRAME_T4 | 100 | define FRAME_T4 FRAME_T4 | |
99 | define FRAME_T5 FRAME_T5 | 101 | define FRAME_T5 FRAME_T5 | |
100 | define FRAME_T6 FRAME_T6 | 102 | define FRAME_T6 FRAME_T6 | |
101 | define FRAME_T7 FRAME_T7 | 103 | define FRAME_T7 FRAME_T7 | |
102 | define FRAME_S0 FRAME_S0 | 104 | define FRAME_S0 FRAME_S0 | |
103 | define FRAME_S1 FRAME_S1 | 105 | define FRAME_S1 FRAME_S1 | |
104 | define FRAME_S2 FRAME_S2 | 106 | define FRAME_S2 FRAME_S2 | |
105 | define FRAME_S3 FRAME_S3 | 107 | define FRAME_S3 FRAME_S3 | |
106 | define FRAME_S4 FRAME_S4 | 108 | define FRAME_S4 FRAME_S4 | |
107 | define FRAME_S5 FRAME_S5 | 109 | define FRAME_S5 FRAME_S5 | |
108 | define FRAME_S6 FRAME_S6 | 110 | define FRAME_S6 FRAME_S6 | |
109 | define FRAME_A3 FRAME_A3 | 111 | define FRAME_A3 FRAME_A3 | |
110 | define FRAME_A4 FRAME_A4 | 112 | define FRAME_A4 FRAME_A4 | |
111 | define FRAME_A5 FRAME_A5 | 113 | define FRAME_A5 FRAME_A5 | |
112 | define FRAME_T8 FRAME_T8 | 114 | define FRAME_T8 FRAME_T8 | |
113 | define FRAME_T9 FRAME_T9 | 115 | define FRAME_T9 FRAME_T9 | |
114 | define FRAME_T10 FRAME_T10 | 116 | define FRAME_T10 FRAME_T10 | |
115 | define FRAME_T11 FRAME_T11 | 117 | define FRAME_T11 FRAME_T11 | |
116 | define FRAME_RA FRAME_RA | 118 | define FRAME_RA FRAME_RA | |
117 | define FRAME_T12 FRAME_T12 | 119 | define FRAME_T12 FRAME_T12 | |
118 | define FRAME_AT FRAME_AT | 120 | define FRAME_AT FRAME_AT | |
119 | define FRAME_SP FRAME_SP | 121 | define FRAME_SP FRAME_SP | |
120 | 122 | |||
121 | define FRAME_SW_SIZE FRAME_SW_SIZE | 123 | define FRAME_SW_SIZE FRAME_SW_SIZE | |
122 | 124 | |||
123 | define FRAME_PS FRAME_PS | 125 | define FRAME_PS FRAME_PS | |
124 | define FRAME_PC FRAME_PC | 126 | define FRAME_PC FRAME_PC | |
125 | define FRAME_GP FRAME_GP | 127 | define FRAME_GP FRAME_GP | |
126 | define FRAME_A0 FRAME_A0 | 128 | define FRAME_A0 FRAME_A0 | |
127 | define FRAME_A1 FRAME_A1 | 129 | define FRAME_A1 FRAME_A1 | |
128 | define FRAME_A2 FRAME_A2 | 130 | define FRAME_A2 FRAME_A2 | |
129 | 131 | |||
130 | define FRAME_SIZE FRAME_SIZE | 132 | define FRAME_SIZE FRAME_SIZE | |
131 | 133 | |||
132 | # bits of the PS register | 134 | # bits of the PS register | |
133 | define ALPHA_PSL_USERMODE ALPHA_PSL_USERMODE | 135 | define ALPHA_PSL_USERMODE ALPHA_PSL_USERMODE | |
134 | define ALPHA_PSL_IPL_MASK ALPHA_PSL_IPL_MASK | 136 | define ALPHA_PSL_IPL_MASK ALPHA_PSL_IPL_MASK | |
135 | define ALPHA_PSL_IPL_0 ALPHA_PSL_IPL_0 | 137 | define ALPHA_PSL_IPL_0 ALPHA_PSL_IPL_0 | |
136 | define ALPHA_PSL_IPL_SOFT_LO ALPHA_PSL_IPL_SOFT_LO | 138 | define ALPHA_PSL_IPL_SOFT_LO ALPHA_PSL_IPL_SOFT_LO | |
137 | define ALPHA_PSL_IPL_SOFT_HI ALPHA_PSL_IPL_SOFT_HI | 139 | define ALPHA_PSL_IPL_SOFT_HI ALPHA_PSL_IPL_SOFT_HI | |
138 | define ALPHA_PSL_IPL_HIGH ALPHA_PSL_IPL_HIGH | 140 | define ALPHA_PSL_IPL_HIGH ALPHA_PSL_IPL_HIGH | |
139 | 141 | |||
140 | # soft interrrupt definitions | 142 | # soft interrrupt definitions | |
141 | define ALPHA_ALL_SOFTINTS ALPHA_ALL_SOFTINTS | 143 | define ALPHA_ALL_SOFTINTS ALPHA_ALL_SOFTINTS | |
142 | 144 | |||
143 | # pte bits | 145 | # pte bits | |
144 | define ALPHA_PTE_VALID ALPHA_PTE_VALID | 146 | define ALPHA_PTE_VALID ALPHA_PTE_VALID | |
145 | define ALPHA_PTE_ASM ALPHA_PTE_ASM | 147 | define ALPHA_PTE_ASM ALPHA_PTE_ASM | |
146 | define ALPHA_PTE_KR ALPHA_PTE_KR | 148 | define ALPHA_PTE_KR ALPHA_PTE_KR | |
147 | define ALPHA_PTE_KW ALPHA_PTE_KW | 149 | define ALPHA_PTE_KW ALPHA_PTE_KW | |
148 | 150 | |||
149 | # Important offsets into the lwp and proc structs & associated constants | 151 | # Important offsets into the lwp and proc structs & associated constants | |
150 | define L_PCB offsetof(struct lwp, l_addr) | 152 | define L_PCB offsetof(struct lwp, l_addr) | |
151 | define L_CPU offsetof(struct lwp, l_cpu) | 153 | define L_CPU offsetof(struct lwp, l_cpu) | |
152 | define L_PROC offsetof(struct lwp, l_proc) | 154 | define L_PROC offsetof(struct lwp, l_proc) | |
153 | define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags) | 155 | define L_MD_FLAGS offsetof(struct lwp, l_md.md_flags) | |
154 | define L_MD_PCBPADDR offsetof(struct lwp, l_md.md_pcbpaddr) | 156 | define L_MD_PCBPADDR offsetof(struct lwp, l_md.md_pcbpaddr) | |
155 | define L_MD_TF offsetof(struct lwp, l_md.md_tf) | 157 | define L_MD_TF offsetof(struct lwp, l_md.md_tf) | |
156 | define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending) | 158 | define L_MD_ASTPENDING offsetof(struct lwp, l_md.md_astpending) | |
157 | 159 | |||
158 | define P_VMSPACE offsetof(struct proc, p_vmspace) | 160 | define P_VMSPACE offsetof(struct proc, p_vmspace) | |
159 | define P_RASLIST offsetof(struct proc, p_raslist) | 161 | define P_RASLIST offsetof(struct proc, p_raslist) | |
160 | define P_MD_SYSCALL offsetof(struct proc, p_md.md_syscall) | 162 | define P_MD_SYSCALL offsetof(struct proc, p_md.md_syscall) | |
161 | 163 | |||
162 | # offsets needed by cpu_switch() to switch mappings. | 164 | # offsets needed by cpu_switch() to switch mappings. | |
163 | define VM_MAP_PMAP offsetof(struct vmspace, vm_map.pmap) | 165 | define VM_MAP_PMAP offsetof(struct vmspace, vm_map.pmap) | |
164 | 166 | |||
165 | # Important offsets into the user struct & associated constants | 167 | # Important offsets into the user struct & associated constants | |
166 | define UPAGES UPAGES | 168 | define UPAGES UPAGES | |
167 | define PCB_HWPCB_KSP offsetof(struct pcb, pcb_hw.apcb_ksp) | 169 | define PCB_HWPCB_KSP offsetof(struct pcb, pcb_hw.apcb_ksp) | |
168 | define PCB_CONTEXT offsetof(struct pcb, pcb_context[0]) | 170 | define PCB_CONTEXT offsetof(struct pcb, pcb_context[0]) | |
169 | define PCB_ONFAULT offsetof(struct pcb, pcb_onfault) | 171 | define PCB_ONFAULT offsetof(struct pcb, pcb_onfault) | |
170 | 172 | |||
171 | # Offsets into struct fpstate, for save, restore | 173 | # Offsets into struct fpstate, for save, restore | |
172 | define FPREG_FPR_REGS offsetof(struct fpreg, fpr_regs[0]) | 174 | define FPREG_FPR_REGS offsetof(struct fpreg, fpr_regs[0]) | |
173 | define FPREG_FPR_CR offsetof(struct fpreg, fpr_cr) | 175 | define FPREG_FPR_CR offsetof(struct fpreg, fpr_cr) | |
174 | 176 | |||
175 | # Important other addresses | 177 | # Important other addresses | |
176 | define HWRPB_ADDR HWRPB_ADDR /* Restart parameter block */ | 178 | define HWRPB_ADDR HWRPB_ADDR /* Restart parameter block */ | |
177 | define VPTBASE VPTBASE /* Virtual Page Table base */ | 179 | define VPTBASE VPTBASE /* Virtual Page Table base */ | |
178 | 180 | |||
179 | # Offsets into the HWRPB. | 181 | # Offsets into the HWRPB. | |
180 | define RPB_PRIMARY_CPU_ID offsetof(struct rpb, rpb_primary_cpu_id) | 182 | define RPB_PRIMARY_CPU_ID offsetof(struct rpb, rpb_primary_cpu_id) | |
181 | 183 | |||
182 | # Kernel entries | 184 | # Kernel entries | |
183 | define ALPHA_KENTRY_ARITH ALPHA_KENTRY_ARITH | 185 | define ALPHA_KENTRY_ARITH ALPHA_KENTRY_ARITH | |
184 | define ALPHA_KENTRY_MM ALPHA_KENTRY_MM | 186 | define ALPHA_KENTRY_MM ALPHA_KENTRY_MM | |
185 | define ALPHA_KENTRY_IF ALPHA_KENTRY_IF | 187 | define ALPHA_KENTRY_IF ALPHA_KENTRY_IF | |
186 | define ALPHA_KENTRY_UNA ALPHA_KENTRY_UNA | 188 | define ALPHA_KENTRY_UNA ALPHA_KENTRY_UNA | |
187 | 189 | |||
188 | # errno values | 190 | # errno values | |
189 | define ENAMETOOLONG ENAMETOOLONG | 191 | define ENAMETOOLONG ENAMETOOLONG | |
190 | define EFAULT EFAULT | 192 | define EFAULT EFAULT | |
191 | 193 | |||
192 | # Syscalls called from sigreturn. | 194 | # Syscalls called from sigreturn. | |
193 | define SYS_compat_16___sigreturn14 SYS_compat_16___sigreturn14 | 195 | define SYS_compat_16___sigreturn14 SYS_compat_16___sigreturn14 | |
194 | define SYS_exit SYS_exit | 196 | define SYS_exit SYS_exit | |
195 | 197 | |||
196 | # CPU info | 198 | # CPU info | |
197 | define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp) | 199 | define CPU_INFO_CURLWP offsetof(struct cpu_info, ci_curlwp) | |
198 | define CPU_INFO_IDLE_LWP offsetof(struct cpu_info, ci_data.cpu_idlelwp) | 200 | define CPU_INFO_IDLE_LWP offsetof(struct cpu_info, ci_data.cpu_idlelwp) | |
199 | define CPU_INFO_SSIR offsetof(struct cpu_info, ci_ssir) | 201 | define CPU_INFO_SSIR offsetof(struct cpu_info, ci_ssir) | |
200 | define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) | 202 | define CPU_INFO_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) | |
203 | define CPU_INFO_MTX_OLDSPL offsetof(struct cpu_info, ci_mtx_oldspl) | |||
201 | define CPU_INFO_SIZEOF sizeof(struct cpu_info) | 204 | define CPU_INFO_SIZEOF sizeof(struct cpu_info) | |
202 | 205 | |||
203 | # Bits in lock fields | 206 | # Bits in lock fields | |
204 | define RW_WRITE_WANTED RW_WRITE_WANTED | 207 | define RW_WRITE_WANTED RW_WRITE_WANTED | |
205 | define RW_WRITE_LOCKED RW_WRITE_LOCKED | 208 | define RW_WRITE_LOCKED RW_WRITE_LOCKED | |
206 | define RW_READ_INCR RW_READ_INCR | 209 | define RW_READ_INCR RW_READ_INCR | |
207 | define RW_READ_COUNT_SHIFT RW_READ_COUNT_SHIFT | 210 | define RW_READ_COUNT_SHIFT RW_READ_COUNT_SHIFT | |
211 | define MUTEX_IPL offsetof(struct kmutex, mtx_ipl) | |||
212 | define MUTEX_SIMPLELOCK offsetof(struct kmutex, mtx_lock) | |||
213 | define __SIMPLELOCK_LOCKED __SIMPLELOCK_LOCKED |
--- src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/11 01:58:41 1.5
+++ src/sys/arch/alpha/alpha/lock_stubs.s 2021/07/12 15:21:51 1.6
@@ -1,269 +1,383 @@ | @@ -1,269 +1,383 @@ | |||
1 | /* $NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $ */ | 1 | /* $NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 2007, 2021 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2007, 2021 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Andrew Doran, and by Jason R. Thorpe. | 8 | * by Andrew Doran, and by Jason R. Thorpe. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | #include "opt_lockdebug.h" | 32 | #include "opt_lockdebug.h" | |
33 | #include "opt_multiprocessor.h" | 33 | #include "opt_multiprocessor.h" | |
34 | 34 | |||
35 | #include <machine/asm.h> | 35 | #include <machine/asm.h> | |
36 | 36 | |||
37 | __KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.5 2021/07/11 01:58:41 thorpej Exp $"); | 37 | __KERNEL_RCSID(0, "$NetBSD: lock_stubs.s,v 1.6 2021/07/12 15:21:51 thorpej Exp $"); | |
38 | 38 | |||
39 | #include "assym.h" | 39 | #include "assym.h" | |
40 | 40 | |||
41 | #if defined(MULTIPROCESSOR) | 41 | #if defined(MULTIPROCESSOR) | |
42 | #define MB mb | 42 | #define MB mb | |
43 | #else | 43 | #else | |
44 | #define MB /* nothing */ | 44 | #define MB /* nothing */ | |
45 | #endif | 45 | #endif | |
46 | 46 | |||
47 | /* | 47 | /* | |
48 | * int _lock_cas(uintptr_t *ptr, uintptr_t old, uintptr_t new) | 48 | * int _lock_cas(uintptr_t *ptr, uintptr_t old, uintptr_t new) | |
49 | */ | 49 | */ | |
50 | LEAF(_lock_cas, 3) | 50 | LEAF(_lock_cas, 3) | |
51 | 1: | 51 | 1: | |
52 | mov a2, v0 | 52 | mov a2, v0 | |
53 | ldq_l t1, 0(a0) | 53 | ldq_l t1, 0(a0) | |
54 | cmpeq t1, a1, t1 | 54 | cmpeq t1, a1, t1 | |
55 | beq t1, 2f | 55 | beq t1, 2f | |
56 | stq_c v0, 0(a0) | 56 | stq_c v0, 0(a0) | |
57 | beq v0, 3f | 57 | beq v0, 3f | |
58 | MB | 58 | MB | |
59 | RET | 59 | RET | |
60 | 2: | 60 | 2: | |
61 | mov zero, v0 | 61 | mov zero, v0 | |
62 | MB | 62 | MB | |
63 | RET | 63 | RET | |
64 | 3: | 64 | 3: | |
65 | br 1b | 65 | br 1b | |
66 | END(_lock_cas) | 66 | END(_lock_cas) | |
67 | 67 | |||
68 | #if !defined(LOCKDEBUG) | 68 | #if !defined(LOCKDEBUG) | |
69 | 69 | |||
70 | /* | 70 | /* | |
71 | * void mutex_enter(kmutex_t *mtx); | 71 | * void mutex_enter(kmutex_t *mtx); | |
72 | */ | 72 | */ | |
73 | LEAF(mutex_enter, 1) | 73 | LEAF(mutex_enter, 1) | |
74 | LDGP(pv) | 74 | LDGP(pv) | |
75 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | 75 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |
76 | 1: | 76 | 1: | |
77 | mov v0, t1 | 77 | mov v0, t1 | |
78 | ldq_l t2, 0(a0) | 78 | ldq_l t2, 0(a0) | |
79 | bne t2, 2f | 79 | bne t2, 2f | |
80 | stq_c t1, 0(a0) | 80 | stq_c t1, 0(a0) | |
81 | beq t1, 3f | 81 | beq t1, 3f | |
82 | MB | 82 | MB | |
83 | RET | 83 | RET | |
84 | 2: | 84 | 2: | |
85 | lda t12, mutex_vector_enter | 85 | lda t12, mutex_vector_enter | |
86 | jmp (t12) | 86 | jmp (t12) | |
87 | 3: | 87 | 3: | |
88 | br 1b | 88 | br 1b | |
89 | END(mutex_enter) | 89 | END(mutex_enter) | |
90 | 90 | |||
91 | /* | 91 | /* | |
92 | * void mutex_exit(kmutex_t *mtx); | 92 | * void mutex_exit(kmutex_t *mtx); | |
93 | */ | 93 | */ | |
94 | LEAF(mutex_exit, 1) | 94 | LEAF(mutex_exit, 1) | |
95 | LDGP(pv) | 95 | LDGP(pv) | |
96 | MB | 96 | MB | |
97 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | 97 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |
98 | mov zero, t3 | 98 | mov zero, t3 | |
99 | 1: | 99 | 1: | |
100 | ldq_l t2, 0(a0) | 100 | ldq_l t2, 0(a0) | |
101 | cmpeq v0, t2, t2 | 101 | cmpeq v0, t2, t2 | |
102 | beq t2, 2f | 102 | beq t2, 2f | |
103 | stq_c t3, 0(a0) | 103 | stq_c t3, 0(a0) | |
104 | beq t3, 3f | 104 | beq t3, 3f | |
105 | RET | 105 | RET | |
106 | 2: | 106 | 2: | |
107 | lda t12, mutex_vector_exit | 107 | lda t12, mutex_vector_exit | |
108 | jmp (t12) | 108 | jmp (t12) | |
109 | 3: | 109 | 3: | |
110 | br 1b | 110 | br 1b | |
111 | END(mutex_exit) | 111 | END(mutex_exit) | |
112 | 112 | |||
113 | /* | 113 | /* | |
114 | * void mutex_spin_enter(kmutex_t *mtx); | |||
115 | */ | |||
116 | LEAF(mutex_spin_enter, 1); | |||
117 | LDGP(pv) | |||
118 | ||||
119 | /* | |||
120 | * STEP 1: Perform the MUTEX_SPIN_SPLRAISE() function. | |||
121 | * (see sys/kern/kern_mutex.c) | |||
122 | * | |||
123 | * s = splraise(mtx->mtx_ipl); | |||
124 | * if (curcpu->ci_mtx_count-- == 0) | |||
125 | * curcpu->ci_mtx_oldspl = s; | |||
126 | */ | |||
127 | ||||
128 | call_pal PAL_OSF1_rdps /* clobbers v0, t0, t8..t11 */ | |||
129 | /* v0 = cur_ipl */ | |||
130 | #ifdef __BWX__ | |||
131 | mov a0, a1 /* a1 = mtx */ | |||
132 | ldbu a0, MUTEX_IPL(a0) /* a0 = new_ipl */ | |||
133 | mov v0, a4 /* save cur_ipl in a4 */ | |||
134 | #else | |||
135 | mov a0, a1 /* a1 = mtx */ | |||
136 | ldq_u a2, MUTEX_IPL(a0) | |||
137 | mov v0, a4 /* save cur_ipl in a4 */ | |||
138 | extbl a2, MUTEX_IPL, a0 /* a0 = new_ipl */ | |||
139 | #endif /* __BWX__ */ | |||
140 | cmplt v0, a0, a3 /* a3 = (cur_ipl < new_ipl) */ | |||
141 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |||
142 | mov v0, a5 /* save curlwp in a5 */ | |||
143 | /* | |||
144 | * The forward-branch over the SWPIPL call is correctly predicted | |||
145 | * not-taken by the CPU because it's rare for a code path to acquire | |||
146 | * 2 spin mutexes. | |||
147 | */ | |||
148 | beq a3, 1f /* no? -> skip... */ | |||
149 | call_pal PAL_OSF1_swpipl /* clobbers v0, t0, t8..t11 */ | |||
150 | /* | |||
151 | * v0 returns the old_ipl, which will be the same as the | |||
152 | * cur_ipl we squirreled away in a4 earlier. | |||
153 | */ | |||
154 | 1: | |||
155 | /* | |||
156 | * curlwp->l_cpu is now stable. Update the counter and | |||
157 | * stash the old_ipl. Just in case it's not clear what's | |||
158 | * going on, we: | |||
159 | * | |||
160 | * - Load previous value of mtx_oldspl into t1. | |||
161 | * - Conditionally move old_ipl into t1 if mtx_count == 0. | |||
162 | * - Store t1 back to mtx_oldspl; if mtx_count != 0, | |||
163 | * the store is redundant, but it's faster than a forward | |||
164 | * branch. | |||
165 | */ | |||
166 | ldq a3, L_CPU(a5) /* a3 = curlwp->l_cpu (curcpu) */ | |||
167 | ldl t0, CPU_INFO_MTX_COUNT(a3) | |||
168 | ldl t1, CPU_INFO_MTX_OLDSPL(a3) | |||
169 | cmoveq t0, a4, t1 /* mtx_count == 0? -> t1 = old_ipl */ | |||
170 | subl t0, 1, t2 /* mtx_count-- */ | |||
171 | stl t1, CPU_INFO_MTX_OLDSPL(a3) | |||
172 | stl t2, CPU_INFO_MTX_COUNT(a3) | |||
173 | ||||
174 | /* | |||
175 | * STEP 2: __cpu_simple_lock_try(&mtx->mtx_lock) | |||
176 | */ | |||
177 | ldl_l t0, MUTEX_SIMPLELOCK(a1) | |||
178 | ldiq t1, __SIMPLELOCK_LOCKED | |||
179 | bne t0, 2f /* contended */ | |||
180 | stl_c t1, MUTEX_SIMPLELOCK(a1) | |||
181 | beq t1, 2f /* STL_C failed; consider contended */ | |||
182 | MB | |||
183 | RET | |||
184 | 2: | |||
185 | mov a1, a0 /* restore first argument */ | |||
186 | lda pv, mutex_spin_retry | |||
187 | jmp (pv) | |||
188 | END(mutex_spin_enter) | |||
189 | ||||
190 | /* | |||
191 | * void mutex_spin_exit(kmutex_t *mtx); | |||
192 | */ | |||
193 | LEAF(mutex_spin_exit, 1) | |||
194 | LDGP(pv); | |||
195 | MB | |||
196 | ||||
197 | /* | |||
198 | * STEP 1: __cpu_simple_unlock(&mtx->mtx_lock) | |||
199 | */ | |||
200 | stl zero, MUTEX_SIMPLELOCK(a0) | |||
201 | ||||
202 | /* | |||
203 | * STEP 2: Perform the MUTEX_SPIN_SPLRESTORE() function. | |||
204 | * (see sys/kern/kern_mutex.c) | |||
205 | * | |||
206 | * s = curcpu->ci_mtx_oldspl; | |||
207 | * if (++curcpu->ci_mtx_count == 0) | |||
208 | * splx(s); | |||
209 | */ | |||
210 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |||
211 | ldq a3, L_CPU(v0) /* a3 = curlwp->l_cpu (curcpu) */ | |||
212 | ldl t0, CPU_INFO_MTX_COUNT(a3) | |||
213 | ldl a0, CPU_INFO_MTX_OLDSPL(a3) | |||
214 | addl t0, 1, t2 /* mtx_count++ */ | |||
215 | stl t2, CPU_INFO_MTX_COUNT(a3) | |||
216 | /* | |||
217 | * The forward-branch over the SWPIPL call is correctly predicted | |||
218 | * not-taken by the CPU because it's rare for a code path to acquire | |||
219 | * 2 spin mutexes. | |||
220 | */ | |||
221 | bne t2, 1f /* t2 != 0? Skip... */ | |||
222 | call_pal PAL_OSF1_swpipl /* clobbers v0, t0, t8..t11 */ | |||
223 | 1: | |||
224 | RET | |||
225 | END(mutex_spin_exit) | |||
226 | ||||
227 | /* | |||
114 | * void rw_enter(krwlock_t *rwl, krw_t op); | 228 | * void rw_enter(krwlock_t *rwl, krw_t op); | |
115 | * | 229 | * | |
116 | * Acquire one hold on a RW lock. | 230 | * Acquire one hold on a RW lock. | |
117 | */ | 231 | */ | |
118 | LEAF(rw_enter, 2) | 232 | LEAF(rw_enter, 2) | |
119 | LDGP(pv) | 233 | LDGP(pv) | |
120 | 234 | |||
121 | /* | 235 | /* | |
122 | * RW_READER == 0 (we have a compile-time assert in machdep.c | 236 | * RW_READER == 0 (we have a compile-time assert in machdep.c | |
123 | * to ensure this). | 237 | * to ensure this). | |
124 | * | 238 | * | |
125 | * Acquire for read is the most common case. | 239 | * Acquire for read is the most common case. | |
126 | */ | 240 | */ | |
127 | bne a1, 3f | 241 | bne a1, 3f | |
128 | 242 | |||
129 | /* Acquiring for read. */ | 243 | /* Acquiring for read. */ | |
130 | 1: ldq_l t0, 0(a0) | 244 | 1: ldq_l t0, 0(a0) | |
131 | and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1 | 245 | and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1 | |
132 | addq t0, RW_READ_INCR, t2 | 246 | addq t0, RW_READ_INCR, t2 | |
133 | bne t1, 4f /* contended */ | 247 | bne t1, 4f /* contended */ | |
134 | stq_c t2, 0(a0) | 248 | stq_c t2, 0(a0) | |
135 | beq t2, 2f /* STQ_C failed; retry */ | 249 | beq t2, 2f /* STQ_C failed; retry */ | |
136 | MB | 250 | MB | |
137 | RET | 251 | RET | |
138 | 252 | |||
139 | 2: br 1b | 253 | 2: br 1b | |
140 | 254 | |||
141 | 3: /* Acquiring for write. */ | 255 | 3: /* Acquiring for write. */ | |
142 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | 256 | GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |
143 | ldq_l t0, 0(a0) | 257 | ldq_l t0, 0(a0) | |
144 | or v0, RW_WRITE_LOCKED, t2 | 258 | or v0, RW_WRITE_LOCKED, t2 | |
145 | bne t0, 4f /* contended */ | 259 | bne t0, 4f /* contended */ | |
146 | stq_c t2, 0(a0) | 260 | stq_c t2, 0(a0) | |
147 | beq t2, 4f /* STQ_C failed; consider it contended */ | 261 | beq t2, 4f /* STQ_C failed; consider it contended */ | |
148 | MB | 262 | MB | |
149 | RET | 263 | RET | |
150 | 264 | |||
151 | 4: lda pv, rw_vector_enter | 265 | 4: lda pv, rw_vector_enter | |
152 | jmp (pv) | 266 | jmp (pv) | |
153 | END(rw_enter) | 267 | END(rw_enter) | |
154 | 268 | |||
155 | /* | 269 | /* | |
156 | * int rw_tryenter(krwlock_t *rwl, krw_t op); | 270 | * int rw_tryenter(krwlock_t *rwl, krw_t op); | |
157 | * | 271 | * | |
158 | * Try to acquire one hold on a RW lock. | 272 | * Try to acquire one hold on a RW lock. | |
159 | */ | 273 | */ | |
160 | LEAF(rw_tryenter, 2) | 274 | LEAF(rw_tryenter, 2) | |
161 | LDGP(pv) | 275 | LDGP(pv) | |
162 | 276 | |||
163 | /* See above. */ | 277 | /* See above. */ | |
164 | bne a1, 3f | 278 | bne a1, 3f | |
165 | 279 | |||
166 | /* Acquiring for read. */ | 280 | /* Acquiring for read. */ | |
167 | 1: ldq_l t0, 0(a0) | 281 | 1: ldq_l t0, 0(a0) | |
168 | and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1 | 282 | and t0, (RW_WRITE_LOCKED|RW_WRITE_WANTED), t1 | |
169 | addq t0, RW_READ_INCR, v0 | 283 | addq t0, RW_READ_INCR, v0 | |
170 | bne t1, 4f /* contended */ | 284 | bne t1, 4f /* contended */ | |
171 | stq_c v0, 0(a0) | 285 | stq_c v0, 0(a0) | |
172 | beq v0, 2f /* STQ_C failed; retry */ | 286 | beq v0, 2f /* STQ_C failed; retry */ | |
173 | MB | 287 | MB | |
174 | RET /* v0 contains non-zero LOCK_FLAG from STQ_C */ | 288 | RET /* v0 contains non-zero LOCK_FLAG from STQ_C */ | |
175 | 289 | |||
176 | 2: br 1b | 290 | 2: br 1b | |
177 | 291 | |||
178 | /* Acquiring for write. */ | 292 | /* Acquiring for write. */ | |
179 | 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | 293 | 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |
180 | ldq_l t0, 0(a0) | 294 | ldq_l t0, 0(a0) | |
181 | or v0, RW_WRITE_LOCKED, v0 | 295 | or v0, RW_WRITE_LOCKED, v0 | |
182 | bne t0, 4f /* contended */ | 296 | bne t0, 4f /* contended */ | |
183 | stq_c v0, 0(a0) | 297 | stq_c v0, 0(a0) | |
184 | /* | 298 | /* | |
185 | * v0 now contains the LOCK_FLAG value from STQ_C, which is either | 299 | * v0 now contains the LOCK_FLAG value from STQ_C, which is either | |
186 | * 0 for failure, or non-zero for success. In either case, v0's | 300 | * 0 for failure, or non-zero for success. In either case, v0's | |
187 | * value is correct. Go ahead and perform the memory barrier even | 301 | * value is correct. Go ahead and perform the memory barrier even | |
188 | * in the failure case because we expect it to be rare and it saves | 302 | * in the failure case because we expect it to be rare and it saves | |
189 | * a branch-not-taken instruction in the success case. | 303 | * a branch-not-taken instruction in the success case. | |
190 | */ | 304 | */ | |
191 | MB | 305 | MB | |
192 | RET | 306 | RET | |
193 | 307 | |||
194 | 4: mov zero, v0 /* return 0 (failure) */ | 308 | 4: mov zero, v0 /* return 0 (failure) */ | |
195 | RET | 309 | RET | |
196 | END(rw_tryenter) | 310 | END(rw_tryenter) | |
197 | 311 | |||
198 | /* | 312 | /* | |
199 | * void rw_exit(krwlock_t *rwl); | 313 | * void rw_exit(krwlock_t *rwl); | |
200 | * | 314 | * | |
201 | * Release one hold on a RW lock. | 315 | * Release one hold on a RW lock. | |
202 | */ | 316 | */ | |
203 | LEAF(rw_exit, 1) | 317 | LEAF(rw_exit, 1) | |
204 | LDGP(pv) | 318 | LDGP(pv) | |
205 | MB | 319 | MB | |
206 | 320 | |||
207 | /* | 321 | /* | |
208 | * Check for write-lock release, and get the owner/count field | 322 | * Check for write-lock release, and get the owner/count field | |
209 | * on its own for sanity-checking against expected values. | 323 | * on its own for sanity-checking against expected values. | |
210 | */ | 324 | */ | |
211 | ldq a1, 0(a0) | 325 | ldq a1, 0(a0) | |
212 | and a1, RW_WRITE_LOCKED, t1 | 326 | and a1, RW_WRITE_LOCKED, t1 | |
213 | srl a1, RW_READ_COUNT_SHIFT, a2 | 327 | srl a1, RW_READ_COUNT_SHIFT, a2 | |
214 | bne t1, 3f | 328 | bne t1, 3f | |
215 | 329 | |||
216 | /* | 330 | /* | |
217 | * Releasing a read-lock. Make sure the count is non-zero. | 331 | * Releasing a read-lock. Make sure the count is non-zero. | |
218 | * If it is zero, take the slow path where the juicy diagnostic | 332 | * If it is zero, take the slow path where the juicy diagnostic | |
219 | * checks are located. | 333 | * checks are located. | |
220 | */ | 334 | */ | |
221 | beq a2, 4f | 335 | beq a2, 4f | |
222 | 336 | |||
223 | /* | 337 | /* | |
224 | * We do the following trick to check to see if we're releasing | 338 | * We do the following trick to check to see if we're releasing | |
225 | * the last read-count and there are waiters: | 339 | * the last read-count and there are waiters: | |
226 | * | 340 | * | |
227 | * 1. Set v0 to 1. | 341 | * 1. Set v0 to 1. | |
228 | * 2. Shift the new read count into t1. | 342 | * 2. Shift the new read count into t1. | |
 229 |  *  3. Conditionally move t1 to v0 based on low-bit-set of t0     | 343 |  *  3. Conditionally move t1 to v0 based on low-bit-set of t0 | 
230 | * (RW_HAS_WAITERS). If RW_HAS_WAITERS is not set, then | 344 | * (RW_HAS_WAITERS). If RW_HAS_WAITERS is not set, then | |
231 | * the move will not take place, and v0 will remain 1. | 345 | * the move will not take place, and v0 will remain 1. | |
232 | * Otherwise, v0 will contain the updated read count. | 346 | * Otherwise, v0 will contain the updated read count. | |
233 | * 4. Jump to slow path if v0 == 0. | 347 | * 4. Jump to slow path if v0 == 0. | |
234 | */ | 348 | */ | |
235 | 1: ldq_l t0, 0(a0) | 349 | 1: ldq_l t0, 0(a0) | |
236 | ldiq v0, 1 | 350 | ldiq v0, 1 | |
237 | subq t0, RW_READ_INCR, t2 | 351 | subq t0, RW_READ_INCR, t2 | |
238 | srl t2, RW_READ_COUNT_SHIFT, t1 | 352 | srl t2, RW_READ_COUNT_SHIFT, t1 | |
239 | cmovlbs t0, t1, v0 | 353 | cmovlbs t0, t1, v0 | |
240 | beq v0, 4f | 354 | beq v0, 4f | |
241 | stq_c t2, 0(a0) | 355 | stq_c t2, 0(a0) | |
242 | beq t2, 2f /* STQ_C failed; try again */ | 356 | beq t2, 2f /* STQ_C failed; try again */ | |
243 | RET | 357 | RET | |
244 | 358 | |||
245 | 2: br 1b | 359 | 2: br 1b | |
246 | 360 | |||
247 | /* | 361 | /* | |
248 | * Releasing a write-lock. Make sure the owner field points | 362 | * Releasing a write-lock. Make sure the owner field points | |
249 | * to our LWP. If it does not, take the slow path where the | 363 | * to our LWP. If it does not, take the slow path where the | |
250 | * juicy diagnostic checks are located. a2 contains the owner | 364 | * juicy diagnostic checks are located. a2 contains the owner | |
251 | * field shifted down. Shift it back up to compare to curlwp; | 365 | * field shifted down. Shift it back up to compare to curlwp; | |
252 | * this conveniently discards the bits we don't want to compare. | 366 | * this conveniently discards the bits we don't want to compare. | |
253 | */ | 367 | */ | |
254 | 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | 368 | 3: GET_CURLWP /* Note: GET_CURLWP clobbers v0, t0, t8...t11. */ | |
255 | sll a2, RW_READ_COUNT_SHIFT, a2 | 369 | sll a2, RW_READ_COUNT_SHIFT, a2 | |
256 | mov zero, t2 /* fast-path write-unlock stores NULL */ | 370 | mov zero, t2 /* fast-path write-unlock stores NULL */ | |
257 | cmpeq v0, a2, v0 /* v0 = (owner == curlwp) */ | 371 | cmpeq v0, a2, v0 /* v0 = (owner == curlwp) */ | |
258 | ldq_l t0, 0(a0) | 372 | ldq_l t0, 0(a0) | |
259 | beq v0, 4f /* owner field mismatch; need slow path */ | 373 | beq v0, 4f /* owner field mismatch; need slow path */ | |
260 | blbs t0, 4f /* RW_HAS_WAITERS set; need slow-path */ | 374 | blbs t0, 4f /* RW_HAS_WAITERS set; need slow-path */ | |
261 | stq_c t2, 0(a0) | 375 | stq_c t2, 0(a0) | |
262 | beq t2, 4f /* STQ_C failed; need slow-path */ | 376 | beq t2, 4f /* STQ_C failed; need slow-path */ | |
263 | RET | 377 | RET | |
264 | 378 | |||
265 | 4: lda pv, rw_vector_exit | 379 | 4: lda pv, rw_vector_exit | |
266 | jmp (pv) | 380 | jmp (pv) | |
267 | END(rw_exit) | 381 | END(rw_exit) | |
268 | 382 | |||
269 | #endif /* !LOCKDEBUG */ | 383 | #endif /* !LOCKDEBUG */ |
--- src/sys/arch/alpha/include/mutex.h 2020/09/23 00:52:49 1.8
+++ src/sys/arch/alpha/include/mutex.h 2021/07/12 15:21:51 1.9
@@ -1,71 +1,72 @@ | @@ -1,71 +1,72 @@ | |||
1 | /* $NetBSD: mutex.h,v 1.8 2020/09/23 00:52:49 thorpej Exp $ */ | 1 | /* $NetBSD: mutex.h,v 1.9 2021/07/12 15:21:51 thorpej Exp $ */ | |
2 | 2 | |||
3 | /*- | 3 | /*- | |
4 | * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc. | 4 | * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc. | |
5 | * All rights reserved. | 5 | * All rights reserved. | |
6 | * | 6 | * | |
7 | * This code is derived from software contributed to The NetBSD Foundation | 7 | * This code is derived from software contributed to The NetBSD Foundation | |
8 | * by Jason R. Thorpe and Andrew Doran. | 8 | * by Jason R. Thorpe and Andrew Doran. | |
9 | * | 9 | * | |
10 | * Redistribution and use in source and binary forms, with or without | 10 | * Redistribution and use in source and binary forms, with or without | |
11 | * modification, are permitted provided that the following conditions | 11 | * modification, are permitted provided that the following conditions | |
12 | * are met: | 12 | * are met: | |
13 | * 1. Redistributions of source code must retain the above copyright | 13 | * 1. Redistributions of source code must retain the above copyright | |
14 | * notice, this list of conditions and the following disclaimer. | 14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | 15 | * 2. Redistributions in binary form must reproduce the above copyright | |
16 | * notice, this list of conditions and the following disclaimer in the | 16 | * notice, this list of conditions and the following disclaimer in the | |
17 | * documentation and/or other materials provided with the distribution. | 17 | * documentation and/or other materials provided with the distribution. | |
18 | * | 18 | * | |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
29 | * POSSIBILITY OF SUCH DAMAGE. | 29 | * POSSIBILITY OF SUCH DAMAGE. | |
30 | */ | 30 | */ | |
31 | 31 | |||
32 | #ifndef _ALPHA_MUTEX_H_ | 32 | #ifndef _ALPHA_MUTEX_H_ | |
33 | #define _ALPHA_MUTEX_H_ | 33 | #define _ALPHA_MUTEX_H_ | |
34 | 34 | |||
35 | #ifndef __MUTEX_PRIVATE | 35 | #ifndef __MUTEX_PRIVATE | |
36 | 36 | |||
37 | struct kmutex { | 37 | struct kmutex { | |
38 | uintptr_t mtx_pad1; | 38 | uintptr_t mtx_pad1; | |
39 | }; | 39 | }; | |
40 | 40 | |||
41 | #else /* __MUTEX_PRIVATE */ | 41 | #else /* __MUTEX_PRIVATE */ | |
42 | 42 | |||
43 | struct kmutex { | 43 | struct kmutex { | |
44 | union { | 44 | union { | |
45 | volatile uintptr_t mtxa_owner; | 45 | volatile uintptr_t mtxa_owner; | |
46 | struct { | 46 | struct { | |
47 | volatile uint8_t mtxs_flags; | 47 | volatile uint8_t mtxs_flags; | |
48 | ipl_cookie_t mtxs_ipl; | 48 | ipl_cookie_t mtxs_ipl; | |
49 | volatile uint16_t mtxs_unused; | 49 | volatile uint16_t mtxs_unused; | |
50 | __cpu_simple_lock_t mtxs_lock; | 50 | __cpu_simple_lock_t mtxs_lock; | |
51 | } s; | 51 | } s; | |
52 | } u; | 52 | } u; | |
53 | }; | 53 | }; | |
54 | 54 | |||
55 | #define mtx_owner u.mtxa_owner | 55 | #define mtx_owner u.mtxa_owner | |
56 | #define mtx_flags u.s.mtxs_flags | 56 | #define mtx_flags u.s.mtxs_flags | |
57 | #define mtx_ipl u.s.mtxs_ipl | 57 | #define mtx_ipl u.s.mtxs_ipl | |
58 | #define mtx_lock u.s.mtxs_lock | 58 | #define mtx_lock u.s.mtxs_lock | |
59 | 59 | |||
60 | #define __HAVE_SIMPLE_MUTEXES 1 | 60 | #define __HAVE_SIMPLE_MUTEXES 1 | |
61 | #define __HAVE_MUTEX_STUBS 1 | 61 | #define __HAVE_MUTEX_STUBS 1 | |
62 | #define __HAVE_SPIN_MUTEX_STUBS 1 | |||
62 | 63 | |||
63 | #define MUTEX_CAS(p, o, n) _lock_cas((p), (o), (n)) | 64 | #define MUTEX_CAS(p, o, n) _lock_cas((p), (o), (n)) | |
64 | 65 | |||
65 | int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t); | 66 | int _lock_cas(volatile uintptr_t *, uintptr_t, uintptr_t); | |
66 | 67 | |||
67 | #endif /* __MUTEX_PRIVATE */ | 68 | #endif /* __MUTEX_PRIVATE */ | |
68 | 69 | |||
69 | __CTASSERT(sizeof(struct kmutex) == sizeof(uintptr_t)); | 70 | __CTASSERT(sizeof(struct kmutex) == sizeof(uintptr_t)); | |
70 | 71 | |||
71 | #endif /* _ALPHA_MUTEX_H_ */ | 72 | #endif /* _ALPHA_MUTEX_H_ */ |