| @@ -1,153 +1,153 @@ | | | @@ -1,153 +1,153 @@ |
1 | /* $NetBSD: lock.h,v 1.33 2022/02/12 17:17:53 riastradh Exp $ */ | | 1 | /* $NetBSD: lock.h,v 1.34 2022/02/13 13:42:21 riastradh Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2000 Ludd, University of Lule}, Sweden. | | 4 | * Copyright (c) 2000 Ludd, University of Lule}, Sweden. |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * 1. Redistributions of source code must retain the above copyright | | 10 | * 1. Redistributions of source code must retain the above copyright |
11 | * notice, this list of conditions and the following disclaimer. | | 11 | * notice, this list of conditions and the following disclaimer. |
12 | * 2. Redistributions in binary form must reproduce the above copyright | | 12 | * 2. Redistributions in binary form must reproduce the above copyright |
13 | * notice, this list of conditions and the following disclaimer in the | | 13 | * notice, this list of conditions and the following disclaimer in the |
14 | * documentation and/or other materials provided with the distribution. | | 14 | * documentation and/or other materials provided with the distribution. |
15 | * | | 15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | | 16 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | | 17 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | | 18 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | | 19 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | | 20 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | | 21 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | | 22 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | | 23 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | | 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | | 25 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | */ | | 26 | */ |
27 | | | 27 | |
28 | #ifndef _VAX_LOCK_H_ | | 28 | #ifndef _VAX_LOCK_H_ |
29 | #define _VAX_LOCK_H_ | | 29 | #define _VAX_LOCK_H_ |
30 | | | 30 | |
31 | #include <sys/param.h> | | 31 | #include <sys/param.h> |
32 | | | 32 | |
33 | #ifdef _KERNEL | | 33 | #ifdef _KERNEL |
34 | #ifdef _KERNEL_OPT | | 34 | #ifdef _KERNEL_OPT |
35 | #include "opt_multiprocessor.h" | | 35 | #include "opt_multiprocessor.h" |
36 | #include <machine/intr.h> | | 36 | #include <machine/intr.h> |
37 | #endif | | 37 | #endif |
38 | #include <machine/cpu.h> | | 38 | #include <machine/cpu.h> |
39 | #endif | | 39 | #endif |
40 | | | 40 | |
41 | static __inline int | | 41 | static __inline int |
42 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) | | 42 | __SIMPLELOCK_LOCKED_P(const __cpu_simple_lock_t *__ptr) |
43 | { | | 43 | { |
44 | return *__ptr == __SIMPLELOCK_LOCKED; | | 44 | return *__ptr == __SIMPLELOCK_LOCKED; |
45 | } | | 45 | } |
46 | | | 46 | |
47 | static __inline int | | 47 | static __inline int |
48 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) | | 48 | __SIMPLELOCK_UNLOCKED_P(const __cpu_simple_lock_t *__ptr) |
49 | { | | 49 | { |
50 | return *__ptr == __SIMPLELOCK_UNLOCKED; | | 50 | return *__ptr == __SIMPLELOCK_UNLOCKED; |
51 | } | | 51 | } |
52 | | | 52 | |
53 | static __inline void | | 53 | static __inline void |
54 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) | | 54 | __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr) |
55 | { | | 55 | { |
56 | *__ptr = __SIMPLELOCK_UNLOCKED; | | 56 | *__ptr = __SIMPLELOCK_UNLOCKED; |
57 | } | | 57 | } |
58 | | | 58 | |
59 | static __inline void | | 59 | static __inline void |
60 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) | | 60 | __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr) |
61 | { | | 61 | { |
62 | *__ptr = __SIMPLELOCK_LOCKED; | | 62 | *__ptr = __SIMPLELOCK_LOCKED; |
63 | } | | 63 | } |
64 | | | 64 | |
65 | static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *); | | 65 | static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *); |
66 | static __inline void | | 66 | static __inline void |
67 | __cpu_simple_lock_init(__cpu_simple_lock_t *__alp) | | 67 | __cpu_simple_lock_init(__cpu_simple_lock_t *__alp) |
68 | { | | 68 | { |
69 | *__alp = __SIMPLELOCK_UNLOCKED; | | 69 | *__alp = __SIMPLELOCK_UNLOCKED; |
70 | } | | 70 | } |
71 | | | 71 | |
72 | static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *); | | 72 | static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *); |
73 | static __inline int | | 73 | static __inline int |
74 | __cpu_simple_lock_try(__cpu_simple_lock_t *__alp) | | 74 | __cpu_simple_lock_try(__cpu_simple_lock_t *__alp) |
75 | { | | 75 | { |
76 | int ret; | | 76 | int ret; |
77 | | | 77 | |
78 | #ifdef _HARDKERNEL | | 78 | #ifdef _HARDKERNEL |
79 | __asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0" | | 79 | __asm __volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0" |
80 | : "=&r"(ret) | | 80 | : "=&r"(ret) |
81 | : "g"(__alp) | | 81 | : "g"(__alp) |
82 | : "r0","r1","cc","memory"); | | 82 | : "r0","r1","cc","memory"); |
83 | #else | | 83 | #else |
84 | __asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:" | | 84 | __asm __volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:" |
85 | : "=&r"(ret) | | 85 | : "=&r"(ret) |
86 | : "m"(*__alp) | | 86 | : "m"(*__alp) |
87 | : "cc"); | | 87 | : "cc", "memory"); |
88 | #endif | | 88 | #endif |
89 | | | 89 | |
90 | return ret; | | 90 | return ret; |
91 | } | | 91 | } |
92 | | | 92 | |
93 | static __inline void __cpu_simple_lock(__cpu_simple_lock_t *); | | 93 | static __inline void __cpu_simple_lock(__cpu_simple_lock_t *); |
94 | static __inline void | | 94 | static __inline void |
95 | __cpu_simple_lock(__cpu_simple_lock_t *__alp) | | 95 | __cpu_simple_lock(__cpu_simple_lock_t *__alp) |
96 | { | | 96 | { |
97 | #if defined(_HARDKERNEL) && defined(MULTIPROCESSOR) | | 97 | #if defined(_HARDKERNEL) && defined(MULTIPROCESSOR) |
98 | struct cpu_info * const __ci = curcpu(); | | 98 | struct cpu_info * const __ci = curcpu(); |
99 | | | 99 | |
100 | while (__cpu_simple_lock_try(__alp) == 0) { | | 100 | while (__cpu_simple_lock_try(__alp) == 0) { |
101 | #define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB)) | | 101 | #define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB)) |
102 | if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) { | | 102 | if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) { |
103 | cpu_handle_ipi(); | | 103 | cpu_handle_ipi(); |
104 | } | | 104 | } |
105 | } | | 105 | } |
106 | #else /* _HARDKERNEL && MULTIPROCESSOR */ | | 106 | #else /* _HARDKERNEL && MULTIPROCESSOR */ |
107 | __asm __volatile ("1:bbssi $0,%0,1b" | | 107 | __asm __volatile ("1:bbssi $0,%0,1b" |
108 | : /* No outputs */ | | 108 | : /* No outputs */ |
109 | : "m"(*__alp) | | 109 | : "m"(*__alp) |
110 | : "cc"); | | 110 | : "cc", "memory"); |
111 | #endif /* _HARDKERNEL && MULTIPROCESSOR */ | | 111 | #endif /* _HARDKERNEL && MULTIPROCESSOR */ |
112 | } | | 112 | } |
113 | | | 113 | |
114 | static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *); | | 114 | static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *); |
115 | static __inline void | | 115 | static __inline void |
116 | __cpu_simple_unlock(__cpu_simple_lock_t *__alp) | | 116 | __cpu_simple_unlock(__cpu_simple_lock_t *__alp) |
117 | { | | 117 | { |
118 | #ifdef _HARDKERNEL | | 118 | #ifdef _HARDKERNEL |
119 | __asm __volatile ("movl %0,%%r1;jsb Sunlock" | | 119 | __asm __volatile ("movl %0,%%r1;jsb Sunlock" |
120 | : /* No output */ | | 120 | : /* No output */ |
121 | : "g"(__alp) | | 121 | : "g"(__alp) |
122 | : "r1","cc","memory"); | | 122 | : "r1","cc","memory"); |
123 | #else | | 123 | #else |
124 | __asm __volatile ("bbcci $0,%0,1f;1:" | | 124 | __asm __volatile ("bbcci $0,%0,1f;1:" |
125 | : /* No output */ | | 125 | : /* No output */ |
126 | : "m"(*__alp) | | 126 | : "m"(*__alp) |
127 | : "cc"); | | 127 | : "cc", "memory"); |
128 | #endif | | 128 | #endif |
129 | } | | 129 | } |
130 | | | 130 | |
#if defined(MULTIPROCESSOR)
/*
 * On the VAX, interprocessor interrupts can come in at device priority
 * level or lower.  This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed.  This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.  It is invoked from the generic spin loop; it checks
 * this CPU's pending-IPI word and dispatches any messages found.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info * const __ci = curcpu();			\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
			__ci->ci_cpuid); */				\
		cpu_handle_ipi();					\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
152 | | | 152 | |
153 | #endif /* _VAX_LOCK_H_ */ | | 153 | #endif /* _VAX_LOCK_H_ */ |