| @@ -1,408 +1,405 @@ | | | @@ -1,408 +1,405 @@ |
1 | /* $NetBSD: locks.c,v 1.54 2011/03/21 16:41:08 pooka Exp $ */ | | 1 | /* $NetBSD: locks.c,v 1.55 2011/12/06 18:04:31 njoly Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved. | | 4 | * Copyright (c) 2007-2011 Antti Kantee. All Rights Reserved. |
5 | * | | 5 | * |
6 | * Redistribution and use in source and binary forms, with or without | | 6 | * Redistribution and use in source and binary forms, with or without |
7 | * modification, are permitted provided that the following conditions | | 7 | * modification, are permitted provided that the following conditions |
8 | * are met: | | 8 | * are met: |
9 | * 1. Redistributions of source code must retain the above copyright | | 9 | * 1. Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. | | 10 | * notice, this list of conditions and the following disclaimer. |
11 | * 2. Redistributions in binary form must reproduce the above copyright | | 11 | * 2. Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the | | 12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. | | 13 | * documentation and/or other materials provided with the distribution. |
14 | * | | 14 | * |
15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS | | 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS |
16 | * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | | 16 | * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | | 17 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
18 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | | 18 | * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
19 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 19 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
20 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR | | 20 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
21 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 21 | * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
22 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 22 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
23 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 23 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
24 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 24 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
25 | * SUCH DAMAGE. | | 25 | * SUCH DAMAGE. |
26 | */ | | 26 | */ |
27 | | | 27 | |
28 | #include <sys/cdefs.h> | | 28 | #include <sys/cdefs.h> |
29 | __KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.54 2011/03/21 16:41:08 pooka Exp $"); | | 29 | __KERNEL_RCSID(0, "$NetBSD: locks.c,v 1.55 2011/12/06 18:04:31 njoly Exp $"); |
30 | | | 30 | |
31 | #include <sys/param.h> | | 31 | #include <sys/param.h> |
32 | #include <sys/kmem.h> | | 32 | #include <sys/kmem.h> |
33 | #include <sys/mutex.h> | | 33 | #include <sys/mutex.h> |
34 | #include <sys/rwlock.h> | | 34 | #include <sys/rwlock.h> |
35 | | | 35 | |
36 | #include <rump/rumpuser.h> | | 36 | #include <rump/rumpuser.h> |
37 | | | 37 | |
38 | #include "rump_private.h" | | 38 | #include "rump_private.h" |
39 | | | 39 | |
40 | /* | | 40 | /* |
41 | * Simple lockdebug. If it's compiled in, it's always active. | | 41 | * Simple lockdebug. If it's compiled in, it's always active. |
42 | * Currently available only for mtx/rwlock. | | 42 | * Currently available only for mtx/rwlock. |
43 | */ | | 43 | */ |
44 | #ifdef LOCKDEBUG | | 44 | #ifdef LOCKDEBUG |
45 | #include <sys/lockdebug.h> | | 45 | #include <sys/lockdebug.h> |
46 | | | 46 | |
47 | static lockops_t mutex_lockops = { | | 47 | static lockops_t mutex_lockops = { |
48 | "mutex", | | 48 | "mutex", |
49 | LOCKOPS_SLEEP, | | 49 | LOCKOPS_SLEEP, |
50 | NULL | | 50 | NULL |
51 | }; | | 51 | }; |
52 | static lockops_t rw_lockops = { | | 52 | static lockops_t rw_lockops = { |
53 | "rwlock", | | 53 | "rwlock", |
54 | LOCKOPS_SLEEP, | | 54 | LOCKOPS_SLEEP, |
55 | NULL | | 55 | NULL |
56 | }; | | 56 | }; |
57 | | | 57 | |
58 | #define ALLOCK(lock, ops) \ | | 58 | #define ALLOCK(lock, ops) \ |
59 | lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0)) | | 59 | lockdebug_alloc(lock, ops, (uintptr_t)__builtin_return_address(0)) |
60 | #define FREELOCK(lock) \ | | 60 | #define FREELOCK(lock) \ |
61 | lockdebug_free(lock) | | 61 | lockdebug_free(lock) |
62 | #define WANTLOCK(lock, shar, try) \ | | 62 | #define WANTLOCK(lock, shar, try) \ |
63 | lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try) | | 63 | lockdebug_wantlock(lock, (uintptr_t)__builtin_return_address(0), shar, try) |
64 | #define LOCKED(lock, shar) \ | | 64 | #define LOCKED(lock, shar) \ |
65 | lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar) | | 65 | lockdebug_locked(lock, NULL, (uintptr_t)__builtin_return_address(0), shar) |
66 | #define UNLOCKED(lock, shar) \ | | 66 | #define UNLOCKED(lock, shar) \ |
67 | lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar) | | 67 | lockdebug_unlocked(lock, (uintptr_t)__builtin_return_address(0), shar) |
68 | #else | | 68 | #else |
69 | #define ALLOCK(a, b) | | 69 | #define ALLOCK(a, b) |
70 | #define FREELOCK(a) | | 70 | #define FREELOCK(a) |
71 | #define WANTLOCK(a, b, c) | | 71 | #define WANTLOCK(a, b, c) |
72 | #define LOCKED(a, b) | | 72 | #define LOCKED(a, b) |
73 | #define UNLOCKED(a, b) | | 73 | #define UNLOCKED(a, b) |
74 | #endif | | 74 | #endif |
75 | | | 75 | |
76 | /* | | 76 | /* |
77 | * We map locks to pthread routines. The difference between kernel | | 77 | * We map locks to pthread routines. The difference between kernel |
78 | * and rumpuser routines is that while the kernel uses static | | 78 | * and rumpuser routines is that while the kernel uses static |
79 | * storage, rumpuser allocates the object from the heap. This | | 79 | * storage, rumpuser allocates the object from the heap. This |
80 | * indirection is necessary because we don't know the size of | | 80 | * indirection is necessary because we don't know the size of |
81 | * pthread objects here. It is also beneficial, since we can | | 81 | * pthread objects here. It is also beneficial, since we can |
82 | * be easily compatible with the kernel ABI because all kernel | | 82 | * be easily compatible with the kernel ABI because all kernel |
83 | * objects regardless of machine architecture are always at least | | 83 | * objects regardless of machine architecture are always at least |
84 | * the size of a pointer. The downside, of course, is a performance | | 84 | * the size of a pointer. The downside, of course, is a performance |
85 | * penalty. | | 85 | * penalty. |
86 | */ | | 86 | */ |
87 | | | 87 | |
88 | #define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx)) | | 88 | #define RUMPMTX(mtx) (*(struct rumpuser_mtx **)(mtx)) |
89 | | | 89 | |
/*
 * Initialize a kernel mutex.  The type and ipl arguments are accepted
 * for ABI compatibility but not used: the lock is always backed by a
 * rumpuser (host pthread) mutex allocated from the heap.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{

	/* a kmutex_t must be large enough to hold the backend pointer */
	CTASSERT(sizeof(kmutex_t) >= sizeof(void *));

	rumpuser_mutex_init_kmutex((struct rumpuser_mtx **)mtx);
	ALLOCK(mtx, &mutex_lockops);
}
99 | | | 99 | |
/* Release lockdebug state and free the backend rumpuser mutex. */
void
mutex_destroy(kmutex_t *mtx)
{

	FREELOCK(mtx);
	rumpuser_mutex_destroy(RUMPMTX(mtx));
}
107 | | | 107 | |
/*
 * Acquire a mutex, blocking until it is available.
 * mutex_spin_enter is a strong alias of this routine.
 */
void
mutex_enter(kmutex_t *mtx)
{

	WANTLOCK(mtx, false, false);
	rumpuser_mutex_enter(RUMPMTX(mtx));
	LOCKED(mtx, false);
}
__strong_alias(mutex_spin_enter,mutex_enter);
117 | | | 117 | |
118 | int | | 118 | int |
119 | mutex_tryenter(kmutex_t *mtx) | | 119 | mutex_tryenter(kmutex_t *mtx) |
120 | { | | 120 | { |
121 | int rv; | | 121 | int rv; |
122 | | | 122 | |
123 | rv = rumpuser_mutex_tryenter(RUMPMTX(mtx)); | | 123 | rv = rumpuser_mutex_tryenter(RUMPMTX(mtx)); |
124 | if (rv) { | | 124 | if (rv) { |
125 | WANTLOCK(mtx, false, true); | | 125 | WANTLOCK(mtx, false, true); |
126 | LOCKED(mtx, false); | | 126 | LOCKED(mtx, false); |
127 | } | | 127 | } |
128 | return rv; | | 128 | return rv; |
129 | } | | 129 | } |
130 | | | 130 | |
/*
 * Release a mutex.
 * mutex_spin_exit is a strong alias of this routine.
 */
void
mutex_exit(kmutex_t *mtx)
{

	UNLOCKED(mtx, false);
	rumpuser_mutex_exit(RUMPMTX(mtx));
}
__strong_alias(mutex_spin_exit,mutex_exit);
139 | | | 139 | |
/* Return non-zero iff the calling lwp is the owner of the mutex. */
int
mutex_owned(kmutex_t *mtx)
{

	return mutex_owner(mtx) == curlwp;
}
146 | | | 146 | |
/* Return the lwp the rumpuser backend records as the mutex owner. */
struct lwp *
mutex_owner(kmutex_t *mtx)
{

	return rumpuser_mutex_owner(RUMPMTX(mtx));
}
153 | | | 153 | |
154 | #define RUMPRW(rw) (*(struct rumpuser_rw **)(rw)) | | 154 | #define RUMPRW(rw) (*(struct rumpuser_rw **)(rw)) |
155 | | | 155 | |
156 | /* reader/writer locks */ | | 156 | /* reader/writer locks */ |
157 | | | 157 | |
/* Initialize a reader/writer lock backed by a rumpuser rwlock. */
void
rw_init(krwlock_t *rw)
{

	/* a krwlock_t must be large enough to hold the backend pointer */
	CTASSERT(sizeof(krwlock_t) >= sizeof(void *));

	rumpuser_rw_init((struct rumpuser_rw **)rw);
	ALLOCK(rw, &rw_lockops);
}
167 | | | 167 | |
/* Release lockdebug state and free the backend rumpuser rwlock. */
void
rw_destroy(krwlock_t *rw)
{

	FREELOCK(rw);
	rumpuser_rw_destroy(RUMPRW(rw));
}
175 | | | 175 | |
/*
 * Acquire a reader/writer lock in the mode given by op (RW_READER for
 * shared, RW_WRITER for exclusive), blocking until it is available.
 */
void
rw_enter(krwlock_t *rw, const krw_t op)
{


	WANTLOCK(rw, op == RW_READER, false);
	rumpuser_rw_enter(RUMPRW(rw), op == RW_WRITER);
	LOCKED(rw, op == RW_READER);
}
185 | | | 185 | |
186 | int | | 186 | int |
187 | rw_tryenter(krwlock_t *rw, const krw_t op) | | 187 | rw_tryenter(krwlock_t *rw, const krw_t op) |
188 | { | | 188 | { |
189 | int rv; | | 189 | int rv; |
190 | | | 190 | |
191 | rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER); | | 191 | rv = rumpuser_rw_tryenter(RUMPRW(rw), op == RW_WRITER); |
192 | if (rv) { | | 192 | if (rv) { |
193 | WANTLOCK(rw, op == RW_READER, true); | | 193 | WANTLOCK(rw, op == RW_READER, true); |
194 | LOCKED(rw, op == RW_READER); | | 194 | LOCKED(rw, op == RW_READER); |
195 | } | | 195 | } |
196 | return rv; | | 196 | return rv; |
197 | } | | 197 | } |
198 | | | 198 | |
/*
 * Release a reader/writer lock held in either mode.  Under LOCKDEBUG
 * the held mode is inferred from the lock state, since the caller
 * does not pass it in.
 */
void
rw_exit(krwlock_t *rw)
{

#ifdef LOCKDEBUG
	/* not write-held must mean read-held; verify before recording */
	bool shared = !rw_write_held(rw);

	if (shared)
		KASSERT(rw_read_held(rw));
	UNLOCKED(rw, shared);
#endif
	rumpuser_rw_exit(RUMPRW(rw));
}
212 | | | 212 | |
/*
 * Try to upgrade a read hold to a write hold.  The rumpuser backend
 * has no upgrade primitive, so this always fails; callers must cope,
 * as the rw_tryupgrade(9) contract permits.
 */
int
rw_tryupgrade(krwlock_t *rw)
{

	return 0;
}
220 | | | 220 | |
/*
 * Downgrade a write hold to a read hold.
 *
 * XXX HACK: the rumpuser backend has no downgrade primitive, so
 * emulate it by dropping the write lock and reacquiring as a reader.
 * This is not atomic: another thread may take the write lock in the
 * window between the two calls.
 */
void
rw_downgrade(krwlock_t *rw)
{

	rw_exit(rw);
	rw_enter(rw, RW_READER);
	return;
}
235 | | | 232 | |
/* Return non-zero iff the lock is currently held for writing. */
int
rw_write_held(krwlock_t *rw)
{

	return rumpuser_rw_wrheld(RUMPRW(rw));
}
242 | | | 239 | |
/* Return non-zero iff the lock is currently held for reading. */
int
rw_read_held(krwlock_t *rw)
{

	return rumpuser_rw_rdheld(RUMPRW(rw));
}
249 | | | 246 | |
/* Return non-zero iff the lock is currently held in either mode. */
int
rw_lock_held(krwlock_t *rw)
{

	return rumpuser_rw_held(RUMPRW(rw));
}
256 | | | 253 | |
257 | /* curriculum vitaes */ | | 254 | /* curriculum vitaes */ |
258 | | | 255 | |
259 | #define RUMPCV(cv) (*(struct rumpuser_cv **)(cv)) | | 256 | #define RUMPCV(cv) (*(struct rumpuser_cv **)(cv)) |
260 | | | 257 | |
/*
 * Initialize a condition variable.  The wait message is unused:
 * the rumpuser backend keeps no wmesg.
 */
void
cv_init(kcondvar_t *cv, const char *msg)
{

	/* a kcondvar_t must be large enough to hold the backend pointer */
	CTASSERT(sizeof(kcondvar_t) >= sizeof(void *));

	rumpuser_cv_init((struct rumpuser_cv **)cv);
}
269 | | | 266 | |
/* Free the backend rumpuser condition variable. */
void
cv_destroy(kcondvar_t *cv)
{

	rumpuser_cv_destroy(RUMPCV(cv));
}
276 | | | 273 | |
/*
 * Common backend for the cv_wait*() family: wait on cv with mtx held,
 * optionally until the absolute timeout ts (NULL means no timeout).
 *
 * Returns 0 on wakeup, EWOULDBLOCK if the timed wait expired, or
 * EINTR when the lwp is being torn down (LW_RUMP_QEXIT).
 */
static int
docvwait(kcondvar_t *cv, kmutex_t *mtx, struct timespec *ts)
{
	struct lwp *l = curlwp;
	int rv;

	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		/*
		 * yield() here, someone might want the cpu
		 * to set a condition.  otherwise we'll just
		 * loop forever.
		 */
		yield();
		return EINTR;
	}

	UNLOCKED(mtx, false);

	/* publish the cv being slept on (cleared again below) */
	l->l_private = cv;
	rv = 0;
	if (ts) {
		if (rumpuser_cv_timedwait(RUMPCV(cv), RUMPMTX(mtx),
		    ts->tv_sec, ts->tv_nsec))
			rv = EWOULDBLOCK;
	} else {
		rumpuser_cv_wait(RUMPCV(cv), RUMPMTX(mtx));
	}

	LOCKED(mtx, false);

	/*
	 * Check for QEXIT.  if so, we need to wait here until we
	 * are allowed to exit.
	 */
	if (__predict_false(l->l_flag & LW_RUMP_QEXIT)) {
		struct proc *p = l->l_proc;

		UNLOCKED(mtx, false);
		mutex_exit(mtx); /* drop and retake later */

		mutex_enter(p->p_lock);
		while ((p->p_sflag & PS_RUMP_LWPEXIT) == 0) {
			/* avoid recursion */
			rumpuser_cv_wait(RUMPCV(&p->p_waitcv),
			    RUMPMTX(p->p_lock));
		}
		KASSERT(p->p_sflag & PS_RUMP_LWPEXIT);
		mutex_exit(p->p_lock);

		/* ok, we can exit and remove "reference" to l->private */

		mutex_enter(mtx);
		LOCKED(mtx, false);
		rv = EINTR;
	}
	l->l_private = NULL;

	return rv;
}
336 | | | 333 | |
/* Wait on a condition variable with no timeout; requires rump threads. */
void
cv_wait(kcondvar_t *cv, kmutex_t *mtx)
{

	/* without threads nothing could ever signal us: refuse to hang */
	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	(void) docvwait(cv, mtx, NULL);
}
345 | | | 342 | |
/*
 * Wait on a condition variable with no timeout; returns docvwait()'s
 * status (0, or EINTR on lwp teardown).  Requires rump threads.
 */
int
cv_wait_sig(kcondvar_t *cv, kmutex_t *mtx)
{

	/* without threads nothing could ever signal us: refuse to hang */
	if (__predict_false(rump_threads == 0))
		panic("cv_wait without threads");
	return docvwait(cv, mtx, NULL);
}
354 | | | 351 | |
355 | int | | 352 | int |
356 | cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks) | | 353 | cv_timedwait(kcondvar_t *cv, kmutex_t *mtx, int ticks) |
357 | { | | 354 | { |
358 | struct timespec ts, tick; | | 355 | struct timespec ts, tick; |
359 | extern int hz; | | 356 | extern int hz; |
360 | int rv; | | 357 | int rv; |
361 | | | 358 | |
362 | if (ticks == 0) { | | 359 | if (ticks == 0) { |
363 | rv = cv_wait_sig(cv, mtx); | | 360 | rv = cv_wait_sig(cv, mtx); |
364 | } else { | | 361 | } else { |
365 | /* | | 362 | /* |
366 | * XXX: this fetches rump kernel time, but | | 363 | * XXX: this fetches rump kernel time, but |
367 | * rumpuser_cv_timedwait uses host time. | | 364 | * rumpuser_cv_timedwait uses host time. |
368 | */ | | 365 | */ |
369 | nanotime(&ts); | | 366 | nanotime(&ts); |
370 | tick.tv_sec = ticks / hz; | | 367 | tick.tv_sec = ticks / hz; |
371 | tick.tv_nsec = (ticks % hz) * (1000000000/hz); | | 368 | tick.tv_nsec = (ticks % hz) * (1000000000/hz); |
372 | timespecadd(&ts, &tick, &ts); | | 369 | timespecadd(&ts, &tick, &ts); |
373 | | | 370 | |
374 | rv = docvwait(cv, mtx, &ts); | | 371 | rv = docvwait(cv, mtx, &ts); |
375 | } | | 372 | } |
376 | | | 373 | |
377 | return rv; | | 374 | return rv; |
378 | } | | 375 | } |
379 | __strong_alias(cv_timedwait_sig,cv_timedwait); | | 376 | __strong_alias(cv_timedwait_sig,cv_timedwait); |
380 | | | 377 | |
/* Wake one waiter on the condition variable. */
void
cv_signal(kcondvar_t *cv)
{

	rumpuser_cv_signal(RUMPCV(cv));
}
387 | | | 384 | |
/* Wake all waiters on the condition variable. */
void
cv_broadcast(kcondvar_t *cv)
{

	rumpuser_cv_broadcast(RUMPCV(cv));
}
394 | | | 391 | |
/* Return true iff the backend reports waiters on the condition variable. */
bool
cv_has_waiters(kcondvar_t *cv)
{

	return rumpuser_cv_has_waiters(RUMPCV(cv));
}
401 | | | 398 | |
/*
 * this is not much of an attempt, but ...
 * a non-NULL backend pointer is the best validity check available here.
 */
bool
cv_is_valid(kcondvar_t *cv)
{

	return RUMPCV(cv) != NULL;
}
408 | } | | 405 | } |