Sun Feb 13 13:41:18 2022 UTC
sparc: __cpu_simple_lock membar audit.

No functional change -- comments only, to justify the absence of
barriers with reference to chapter & verse.


(riastradh)
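In portable terms, the ordering these new comments justify is the usual
acquire/release pairing for a test-and-test-and-set lock.  As a rough
illustration only -- a hypothetical C11 sketch, not NetBSD code; the
names tts_lock/tts_unlock and the TTS_* constants are invented here:

	#include <stdatomic.h>

	typedef atomic_uchar tts_lock_t;

	#define	TTS_UNLOCKED	0x00
	#define	TTS_LOCKED	0xff	/* LDSTUB stores all-ones */

	static inline void
	tts_lock(tts_lock_t *alp)
	{
		/*
		 * atomic_exchange(acquire) plays the role of LDSTUB:
		 * it atomically fetches the old value while storing
		 * 0xff, and acquire ordering is what the SPARCv8
		 * manual guarantees for LDSTUB used this way.
		 */
		while (atomic_exchange_explicit(alp, TTS_LOCKED,
		    memory_order_acquire) != TTS_UNLOCKED) {
			/* Spin with relaxed loads to cut bus traffic. */
			while (atomic_load_explicit(alp,
			    memory_order_relaxed) != TTS_UNLOCKED)
				/* spin */;
		}
	}

	static inline void
	tts_unlock(tts_lock_t *alp)
	{
		/*
		 * A release store is the portable analogue of the
		 * plain store in __cpu_simple_unlock, which suffices
		 * there only because the kernel runs in TSO.
		 */
		atomic_store_explicit(alp, TTS_UNLOCKED,
		    memory_order_release);
	}
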
diff -r1.33 -r1.34 src/sys/arch/sparc/include/lock.h


--- src/sys/arch/sparc/include/lock.h 2019/11/29 20:06:34 1.33
+++ src/sys/arch/sparc/include/lock.h 2022/02/13 13:41:17 1.34
@@ -1,14 +1,14 @@
-/*	$NetBSD: lock.h,v 1.33 2019/11/29 20:06:34 riastradh Exp $ */
+/*	$NetBSD: lock.h,v 1.34 2022/02/13 13:41:17 riastradh Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Paul Kranenburg and Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -108,36 +108,49 @@ __cpu_simple_lock(__cpu_simple_lock_t *a
 {
 
 	/*
 	 * If someone else holds the lock use simple reads until it
 	 * is released, then retry the atomic operation.  This reduces
 	 * memory bus contention because the cache-coherency logic
 	 * does not have to broadcast invalidates on the lock while
 	 * we spin on it.
 	 */
 	while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
 		while (*alp != __SIMPLELOCK_UNLOCKED)
 			/* spin */ ;
 	}
+
+	/*
+	 * No memory barrier needed here to make this a load-acquire
+	 * operation because LDSTUB already implies that.  See SPARCv8
+	 * Reference Manual, Appendix J.4 `Spin Locks', p. 271.
+	 */
 }
 #endif /* __CPU_SIMPLE_LOCK_NOINLINE */
 
 static __inline int
 __cpu_simple_lock_try(__cpu_simple_lock_t *alp)
 {
 
+	/*
+	 * No memory barrier needed for LDSTUB to be a load-acquire --
+	 * see __cpu_simple_lock.
+	 */
 	return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
 }
 
 static __inline void
 __cpu_simple_unlock(__cpu_simple_lock_t *alp)
 {
 
 	/*
 	 * Insert compiler barrier to prevent instruction re-ordering
 	 * around the lock release.
+	 *
+	 * No memory barrier needed because we run the kernel in TSO.
+	 * If we ran the kernel in PSO, this would require STBAR.
+	 */
 	__insn_barrier();
 	*alp = __SIMPLELOCK_UNLOCKED;
 }
 
 #endif /* _MACHINE_LOCK_H */
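
For contrast, the PSO case that the last new comment mentions would need
an explicit STBAR between the critical section's stores and the store
that releases the lock.  A hypothetical sketch only, assuming the
declarations from this header and a SPARC target compiler;
__cpu_simple_unlock_pso is an invented name and is not in the tree:

	static __inline void
	__cpu_simple_unlock_pso(__cpu_simple_lock_t *alp)
	{
		__insn_barrier();	/* compiler barrier, as in the real code */
		/* Drain prior stores before the lock word is cleared. */
		__asm volatile("stbar" : : : "memory");
		*alp = __SIMPLELOCK_UNLOCKED;
	}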