| @@ -1,136 +1,149 @@ | | | @@ -1,136 +1,149 @@ |
1 | /* $NetBSD: lock.h,v 1.2 2017/09/17 00:01:08 christos Exp $ */ | | 1 | /* $NetBSD: lock.h,v 1.3 2022/02/13 13:42:12 riastradh Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. | | 4 | * Copyright (c) 2014 The NetBSD Foundation, Inc. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to The NetBSD Foundation | | 7 | * This code is derived from software contributed to The NetBSD Foundation |
8 | * by Matt Thomas of 3am Software Foundry. | | 8 | * by Matt Thomas of 3am Software Foundry. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * | | 18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS | | 19 | * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS |
20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | | 20 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED |
21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | | 21 | * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS | | 22 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS |
23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | | 23 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | | 24 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | | 25 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | | 26 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | | 27 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | | 28 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
29 | * POSSIBILITY OF SUCH DAMAGE. | | 29 | * POSSIBILITY OF SUCH DAMAGE. |
30 | */ | | 30 | */ |
31 | /* | | 31 | /* |
32 | * Machine-dependent spin lock operations. | | 32 | * Machine-dependent spin lock operations. |
33 | */ | | 33 | */ |
34 | | | 34 | |
35 | #ifndef _OR1K_LOCK_H_ | | 35 | #ifndef _OR1K_LOCK_H_ |
36 | #define _OR1K_LOCK_H_ | | 36 | #define _OR1K_LOCK_H_ |
37 | | | 37 | |
38 | static __inline int | | 38 | static __inline int |
39 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) | | 39 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) |
40 | { | | 40 | { |
41 | return *__ptr != __SIMPLELOCK_UNLOCKED; | | 41 | return *__ptr != __SIMPLELOCK_UNLOCKED; |
42 | } | | 42 | } |
43 | | | 43 | |
44 | static __inline int | | 44 | static __inline int |
45 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) | | 45 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) |
46 | { | | 46 | { |
47 | return *__ptr == __SIMPLELOCK_UNLOCKED; | | 47 | return *__ptr == __SIMPLELOCK_UNLOCKED; |
48 | } | | 48 | } |
49 | | | 49 | |
50 | static __inline void | | 50 | static __inline void |
51 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) | | 51 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) |
52 | { | | 52 | { |
53 | #if 0 | | 53 | #if 0 |
54 | __atomic_clear(__ptr, __ATOMIC_RELAXED); | | 54 | __atomic_clear(__ptr, __ATOMIC_RELAXED); |
55 | #else | | 55 | #else |
56 | *__ptr = __SIMPLELOCK_UNLOCKED; | | 56 | *__ptr = __SIMPLELOCK_UNLOCKED; |
57 | #endif | | 57 | #endif |
58 | } | | 58 | } |
59 | | | 59 | |
60 | static __inline void | | 60 | static __inline void |
61 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) | | 61 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) |
62 | { | | 62 | { |
63 | #if 0 | | 63 | #if 0 |
64 | (void)__atomic_test_and_set(__ptr, __ATOMIC_RELAXED); | | 64 | (void)__atomic_test_and_set(__ptr, __ATOMIC_RELAXED); |
65 | #else | | 65 | #else |
66 | *__ptr = __SIMPLELOCK_LOCKED; | | 66 | *__ptr = __SIMPLELOCK_LOCKED; |
67 | #endif | | 67 | #endif |
68 | } | | 68 | } |
69 | | | 69 | |
70 | static __inline void __unused | | 70 | static __inline void __unused |
71 | __cpu_simple_lock_init(__cpu_simple_lock_t *__ptr) | | 71 | __cpu_simple_lock_init(__cpu_simple_lock_t *__ptr) |
72 | { | | 72 | { |
73 | #if 0 | | 73 | #if 0 |
74 | __atomic_clear(__ptr, __ATOMIC_RELAXED); | | 74 | __atomic_clear(__ptr, __ATOMIC_RELAXED); |
75 | #else | | 75 | #else |
76 | *__ptr = __SIMPLELOCK_UNLOCKED; | | 76 | *__ptr = __SIMPLELOCK_UNLOCKED; |
77 | #endif | | 77 | #endif |
78 | } | | 78 | } |
79 | | | 79 | |
80 | static __inline void __unused | | 80 | static __inline void __unused |
81 | __cpu_simple_lock(__cpu_simple_lock_t *__ptr) | | 81 | __cpu_simple_lock(__cpu_simple_lock_t *__ptr) |
82 | { | | 82 | { |
83 | #if 0 | | 83 | #if 0 |
84 | while (__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE)) { | | 84 | while (__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE)) { |
85 | /* do nothing */ | | 85 | /* do nothing */ |
86 | } | | 86 | } |
87 | #else | | 87 | #else |
88 | int tmp; | | 88 | int tmp; |
89 | __asm( | | 89 | /* |
| | | 90 | * No explicit memory barrier needed around ll/sc: |
| | | 91 | * |
| | | 92 | * `In implementations that use a weakly-ordered memory model, |
| | | 93 | * l.swa and l.lwa will serve as synchronization points, |
| | | 94 | * similar to lsync.' |
| | | 95 | * |
| | | 96 | * https://openrisc.io/or1k.html#__RefHeading__341344_552419154 |
| | | 97 | */ |
| | | 98 | __asm volatile( |
90 | "1:" | | 99 | "1:" |
91 | "\t" "l.lwa %[tmp],0(%[ptr])" | | 100 | "\t" "l.lwa %[tmp],0(%[ptr])" |
92 | "\n\t" "l.sfeqi\t%[tmp],%[unlocked]" | | 101 | "\n\t" "l.sfeqi\t%[tmp],%[unlocked]" |
93 | "\n\t" "l.bnf 1b" | | 102 | "\n\t" "l.bnf 1b" |
94 | "\n\t" "l.nop" | | 103 | "\n\t" "l.nop" |
95 | | | 104 | |
96 | "\n\t" "l.swa 0(%[ptr]),%[newval]" | | 105 | "\n\t" "l.swa 0(%[ptr]),%[newval]" |
97 | "\n\t" "l.bnf 1b" | | 106 | "\n\t" "l.bnf 1b" |
98 | "\n\t" "l.nop" | | 107 | "\n\t" "l.nop" |
99 | : [tmp] "=&r" (tmp) | | 108 | : [tmp] "=&r" (tmp) |
100 | : [newval] "r" (__SIMPLELOCK_LOCKED), | | 109 | : [newval] "r" (__SIMPLELOCK_LOCKED), |
101 | [ptr] "r" (__ptr), | | 110 | [ptr] "r" (__ptr), |
102 | [unlocked] "n" (__SIMPLELOCK_UNLOCKED)); | | 111 | [unlocked] "n" (__SIMPLELOCK_UNLOCKED) |
| | | 112 | : "cc", "memory"); |
103 | #endif | | 113 | #endif |
104 | } | | 114 | } |
105 | | | 115 | |
106 | static __inline int __unused | | 116 | static __inline int __unused |
107 | __cpu_simple_lock_try(__cpu_simple_lock_t *__ptr) | | 117 | __cpu_simple_lock_try(__cpu_simple_lock_t *__ptr) |
108 | { | | 118 | { |
109 | #if 0 | | 119 | #if 0 |
110 | return !__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE); | | 120 | return !__atomic_test_and_set(__ptr, __ATOMIC_ACQUIRE); |
111 | #else | | 121 | #else |
112 | int oldval; | | 122 | int oldval; |
113 | __asm( | | 123 | /* No explicit memory barrier needed, as in __cpu_simple_lock. */ |
| | | 124 | __asm volatile( |
114 | "1:" | | 125 | "1:" |
115 | "\t" "l.lwa %[oldval],0(%[ptr])" | | 126 | "\t" "l.lwa %[oldval],0(%[ptr])" |
116 | "\n\t" "l.swa 0(%[ptr]),%[newval]" | | 127 | "\n\t" "l.swa 0(%[ptr]),%[newval]" |
117 | "\n\t" "l.bnf 1b" | | 128 | "\n\t" "l.bnf 1b" |
118 | "\n\t" "l.nop" | | 129 | "\n\t" "l.nop" |
119 | : [oldval] "=&r" (oldval) | | 130 | : [oldval] "=&r" (oldval) |
120 | : [newval] "r" (__SIMPLELOCK_LOCKED), | | 131 | : [newval] "r" (__SIMPLELOCK_LOCKED), |
121 | [ptr] "r" (__ptr)); | | 132 | [ptr] "r" (__ptr) |
| | | 133 | : "cc", "memory"); |
122 | return oldval == __SIMPLELOCK_UNLOCKED; | | 134 | return oldval == __SIMPLELOCK_UNLOCKED; |
123 | #endif | | 135 | #endif |
124 | } | | 136 | } |
125 | | | 137 | |
126 | static __inline void __unused | | 138 | static __inline void __unused |
127 | __cpu_simple_unlock(__cpu_simple_lock_t *__ptr) | | 139 | __cpu_simple_unlock(__cpu_simple_lock_t *__ptr) |
128 | { | | 140 | { |
129 | #if 0 | | 141 | #if 0 |
130 | __atomic_clear(__ptr, __ATOMIC_RELEASE); | | 142 | __atomic_clear(__ptr, __ATOMIC_RELEASE); |
131 | #else | | 143 | #else |
| | | 144 | __asm volatile("l.msync" ::: ""); |
132 | *__ptr = __SIMPLELOCK_UNLOCKED; | | 145 | *__ptr = __SIMPLELOCK_UNLOCKED; |
133 | #endif | | 146 | #endif |
134 | } | | 147 | } |
135 | | | 148 | |
136 | #endif /* _OR1K_LOCK_H_ */ | | 149 | #endif /* _OR1K_LOCK_H_ */ |
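
For readers less familiar with the l.lwa/l.swa idiom, the following is a rough C11 <stdatomic.h> equivalent of the ordering the new assembly provides. This is an illustrative sketch only, not part of the NetBSD sources: the demo_* names are hypothetical stand-ins for the __cpu_simple_lock API, and the real header keeps the hand-written sequences (the __atomic_* alternatives stay under #if 0).

	#include <stdatomic.h>
	#include <stdbool.h>

	typedef atomic_uchar demo_simple_lock_t;	/* stand-in for __cpu_simple_lock_t */
	#define	DEMO_UNLOCKED	0
	#define	DEMO_LOCKED	1

	/*
	 * Spin until the exchange observes the unlocked value.  The acquire
	 * ordering plays the role of the l.lwa/l.swa pair together with the
	 * "memory" clobber added in the new version.
	 */
	static inline void
	demo_simple_lock(demo_simple_lock_t *p)
	{
		while (atomic_exchange_explicit(p, DEMO_LOCKED,
		    memory_order_acquire) != DEMO_UNLOCKED)
			continue;
	}

	/* Single attempt: succeeds only if the lock was observed unlocked. */
	static inline bool
	demo_simple_lock_try(demo_simple_lock_t *p)
	{
		return atomic_exchange_explicit(p, DEMO_LOCKED,
		    memory_order_acquire) == DEMO_UNLOCKED;
	}

	/*
	 * Release ordering plays the role of the l.msync issued before the
	 * plain store of __SIMPLELOCK_UNLOCKED in the new version.
	 */
	static inline void
	demo_simple_unlock(demo_simple_lock_t *p)
	{
		atomic_store_explicit(p, DEMO_UNLOCKED, memory_order_release);
	}

The sketch is only meant to make the intended acquire/release semantics of the change explicit; it is not a drop-in replacement for the header above.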