Tue Aug 1 17:16:26 2023 UTC
Pull up following revision(s) (requested by riastradh in ticket #1695):
	sys/external/bsd/drm2/linux/linux_ww_mutex.c: revision 1.10
drm: Spruce up ww_mutex comments.  Audit return values.


(martin)
diff -r1.7.2.1 -r1.7.2.2 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c

cvs diff -r1.7.2.1 -r1.7.2.2 src/sys/external/bsd/drm2/linux/linux_ww_mutex.c

--- src/sys/external/bsd/drm2/linux/linux_ww_mutex.c 2023/08/01 16:56:55 1.7.2.1
+++ src/sys/external/bsd/drm2/linux/linux_ww_mutex.c 2023/08/01 17:16:26 1.7.2.2
@@ -1,14 +1,14 @@
1/* $NetBSD: linux_ww_mutex.c,v 1.7.2.1 2023/08/01 16:56:55 martin Exp $ */ 1/* $NetBSD: linux_ww_mutex.c,v 1.7.2.2 2023/08/01 17:16:26 martin Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2014 The NetBSD Foundation, Inc. 4 * Copyright (c) 2014 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.7.2.1 2023/08/01 16:56:55 martin Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: linux_ww_mutex.c,v 1.7.2.2 2023/08/01 17:16:26 martin Exp $");
34 34
35#include <sys/types.h> 35#include <sys/types.h>
36#include <sys/atomic.h> 36#include <sys/atomic.h>
37#include <sys/condvar.h> 37#include <sys/condvar.h>
38#include <sys/lockdebug.h> 38#include <sys/lockdebug.h>
39#include <sys/lwp.h> 39#include <sys/lwp.h>
40#include <sys/mutex.h> 40#include <sys/mutex.h>
41#include <sys/rbtree.h> 41#include <sys/rbtree.h>
42 42
43#include <linux/ww_mutex.h> 43#include <linux/ww_mutex.h>
44#include <linux/errno.h> 44#include <linux/errno.h>
45 45
46#define WW_WANTLOCK(WW) \ 46#define WW_WANTLOCK(WW) \
@@ -158,118 +158,183 @@ ww_dump(const volatile void *cookie, loc
158 default: 158 default:
159 pr("unknown\n"); 159 pr("unknown\n");
160 break; 160 break;
161 } 161 }
162} 162}
163 163
164static lockops_t ww_lockops = { 164static lockops_t ww_lockops = {
165 .lo_name = "Wait/wound mutex", 165 .lo_name = "Wait/wound mutex",
166 .lo_type = LOCKOPS_SLEEP, 166 .lo_type = LOCKOPS_SLEEP,
167 .lo_dump = ww_dump, 167 .lo_dump = ww_dump,
168}; 168};
169#endif 169#endif
170 170
 171/*
 172 * ww_mutex_init(mutex, class)
 173 *
 174 * Initialize mutex in the given class. Must precede any other
 175 * ww_mutex_* operations. After done, mutex must be destroyed
 176 * with ww_mutex_destroy.
 177 */
171void 178void
172ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class) 179ww_mutex_init(struct ww_mutex *mutex, struct ww_class *class)
173{ 180{
174 181
175 /* 182 /*
176 * XXX Apparently Linux takes these with spin locks held. That 183 * XXX Apparently Linux takes these with spin locks held. That
177 * strikes me as a bad idea, but so it is... 184 * strikes me as a bad idea, but so it is...
178 */ 185 */
179 mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM); 186 mutex_init(&mutex->wwm_lock, MUTEX_DEFAULT, IPL_VM);
180 mutex->wwm_state = WW_UNLOCKED; 187 mutex->wwm_state = WW_UNLOCKED;
181 mutex->wwm_class = class; 188 mutex->wwm_class = class;
182 rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops); 189 rb_tree_init(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
183 cv_init(&mutex->wwm_cv, "linuxwwm"); 190 cv_init(&mutex->wwm_cv, "linuxwwm");
184#ifdef LOCKDEBUG 191#ifdef LOCKDEBUG
185 mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops, 192 mutex->wwm_debug = LOCKDEBUG_ALLOC(mutex, &ww_lockops,
186 (uintptr_t)__builtin_return_address(0)); 193 (uintptr_t)__builtin_return_address(0));
187#endif 194#endif
188} 195}
189 196
 197/*
 198 * ww_mutex_destroy(mutex)
 199 *
 200 * Destroy mutex initialized by ww_mutex_init. Caller must not be
 201 * using it with any other ww_mutex_* operations except after
 202 * reinitializing with ww_mutex_init.
 203 */
190void 204void
191ww_mutex_destroy(struct ww_mutex *mutex) 205ww_mutex_destroy(struct ww_mutex *mutex)
192{ 206{
193 207
194 KASSERT(mutex->wwm_state == WW_UNLOCKED); 208 KASSERT(mutex->wwm_state == WW_UNLOCKED);
195 209
196#ifdef LOCKDEBUG 210#ifdef LOCKDEBUG
197 LOCKDEBUG_FREE(mutex->wwm_debug, mutex); 211 LOCKDEBUG_FREE(mutex->wwm_debug, mutex);
198#endif 212#endif
199 cv_destroy(&mutex->wwm_cv); 213 cv_destroy(&mutex->wwm_cv);
200#if 0 214#if 0
201 rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops); 215 rb_tree_destroy(&mutex->wwm_waiters, &ww_acquire_ctx_rb_ops);
202#endif 216#endif
203 KASSERT(mutex->wwm_state == WW_UNLOCKED); 217 KASSERT(mutex->wwm_state == WW_UNLOCKED);
204 mutex_destroy(&mutex->wwm_lock); 218 mutex_destroy(&mutex->wwm_lock);
205} 219}
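
A minimal lifecycle sketch for the two routines above (illustrative, not part of the diff; struct my_object and its field are hypothetical names):

struct my_object {
	struct ww_mutex	mo_lock;	/* hypothetical embedding */
};

static void
my_object_setup(struct my_object *obj, struct ww_class *class)
{

	/* Must come before any other ww_mutex_* operation on mo_lock. */
	ww_mutex_init(&obj->mo_lock, class);
}

static void
my_object_teardown(struct my_object *obj)
{

	/* mo_lock must be unlocked and no longer in use at this point. */
	ww_mutex_destroy(&obj->mo_lock);
}
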
206 220
207/* 221/*
208 * XXX WARNING: This returns true if it is locked by ANYONE. Does not 222 * ww_mutex_is_locked(mutex)
209 * mean `Do I hold this lock?' (answering which really requires an 223 *
210 * acquire context). 224 * True if anyone holds mutex locked at the moment, false if not.
 225 * Answer is stale as soon as returned unless mutex is held by
 226 * caller.
 227 *
 228 * XXX WARNING: This returns true if it is locked by ANYONE. Does
 229 * not mean `Do I hold this lock?' (answering which really
 230 * requires an acquire context).
211 */ 231 */
212bool 232bool
213ww_mutex_is_locked(struct ww_mutex *mutex) 233ww_mutex_is_locked(struct ww_mutex *mutex)
214{ 234{
215 int locked; 235 int locked;
216 236
217 mutex_enter(&mutex->wwm_lock); 237 mutex_enter(&mutex->wwm_lock);
218 switch (mutex->wwm_state) { 238 switch (mutex->wwm_state) {
219 case WW_UNLOCKED: 239 case WW_UNLOCKED:
220 locked = false; 240 locked = false;
221 break; 241 break;
222 case WW_OWNED: 242 case WW_OWNED:
223 case WW_CTX: 243 case WW_CTX:
224 case WW_WANTOWN: 244 case WW_WANTOWN:
225 locked = true; 245 locked = true;
226 break; 246 break;
227 default: 247 default:
228 panic("wait/wound mutex %p in bad state: %d", mutex, 248 panic("wait/wound mutex %p in bad state: %d", mutex,
229 (int)mutex->wwm_state); 249 (int)mutex->wwm_state);
230 } 250 }
231 mutex_exit(&mutex->wwm_lock); 251 mutex_exit(&mutex->wwm_lock);
232 252
233 return locked; 253 return locked;
234} 254}
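
For example (illustrative, reusing the hypothetical my_object above), the result is only good for a `held by somebody' sanity check, such as an assertion around code that expects the lock to be taken:

	KASSERT(ww_mutex_is_locked(&obj->mo_lock));	/* somebody holds it */

It cannot tell the caller whether the caller itself is the holder.
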
235 255
 256/*
 257 * ww_mutex_state_wait(mutex, state)
 258 *
 259 * Wait for mutex, which must be in the given state, to transition
 260 * to another state. Uninterruptible; never fails.
 261 *
 262 * Caller must hold mutex's internal lock.
 263 *
 264 * May sleep.
 265 *
 266 * Internal subroutine.
 267 */
236static void 268static void
237ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state) 269ww_mutex_state_wait(struct ww_mutex *mutex, enum ww_mutex_state state)
238{ 270{
239 271
 272 KASSERT(mutex_owned(&mutex->wwm_lock));
240 KASSERT(mutex->wwm_state == state); 273 KASSERT(mutex->wwm_state == state);
241 do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock); 274 do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
242 while (mutex->wwm_state == state); 275 while (mutex->wwm_state == state);
243} 276}
244 277
 278/*
 279 * ww_mutex_state_wait_sig(mutex, state)
 280 *
 281 * Wait for mutex, which must be in the given state, to transition
 282 * to another state, or fail if interrupted by a signal. Return 0
 283 * on success, -EINTR if interrupted by a signal.
 284 *
 285 * Caller must hold mutex's internal lock.
 286 *
 287 * May sleep.
 288 *
 289 * Internal subroutine.
 290 */
245static int 291static int
246ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state) 292ww_mutex_state_wait_sig(struct ww_mutex *mutex, enum ww_mutex_state state)
247{ 293{
248 int ret; 294 int ret;
249 295
 296 KASSERT(mutex_owned(&mutex->wwm_lock));
250 KASSERT(mutex->wwm_state == state); 297 KASSERT(mutex->wwm_state == state);
251 do { 298 do {
252 /* XXX errno NetBSD->Linux */ 299 /* XXX errno NetBSD->Linux */
253 ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock); 300 ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
254 if (ret == -ERESTART) 301 if (ret) {
255 ret = -ERESTARTSYS; 302 KASSERTMSG((ret == -EINTR || ret == -ERESTART),
256 if (ret) 303 "ret=%d", ret);
 304 ret = -EINTR;
257 break; 305 break;
 306 }
258 } while (mutex->wwm_state == state); 307 } while (mutex->wwm_state == state);
259 308
 309 KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
260 return ret; 310 return ret;
261} 311}
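
The return-value audit above leans on the NetBSD-to-Linux errno convention; a standalone sketch of the mapping (errno_netbsd_to_linux_intr is a hypothetical helper, not in the file):

/*
 * cv_wait_sig(9) reports a signal as EINTR or ERESTART; Linux callers
 * of these routines expect only 0 or -EINTR, so both signal cases are
 * collapsed to -EINTR in the Linux (negative errno) convention.
 */
static int
errno_netbsd_to_linux_intr(int error)
{

	KASSERT(error == 0 || error == EINTR || error == ERESTART);
	return error == 0 ? 0 : -EINTR;
}
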
262 312
 313/*
 314 * ww_mutex_lock_wait(mutex, ctx)
 315 *
 316 * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 317 * by another thread with an acquire context, wait to acquire
 318 * mutex. While waiting, record ctx in the tree of waiters. Does
 319 * not update the mutex state otherwise.
 320 *
 321 * Caller must not already hold mutex. Caller must hold mutex's
 322 * internal lock. Uninterruptible; never fails.
 323 *
 324 * May sleep.
 325 *
 326 * Internal subroutine.
 327 */
263static void 328static void
264ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx) 329ww_mutex_lock_wait(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
265{ 330{
266 struct ww_acquire_ctx *collision __diagused; 331 struct ww_acquire_ctx *collision __diagused;
267 332
268 KASSERT(mutex_owned(&mutex->wwm_lock)); 333 KASSERT(mutex_owned(&mutex->wwm_lock));
269 334
270 KASSERT((mutex->wwm_state == WW_CTX) || 335 KASSERT((mutex->wwm_state == WW_CTX) ||
271 (mutex->wwm_state == WW_WANTOWN)); 336 (mutex->wwm_state == WW_WANTOWN));
272 KASSERT(mutex->wwm_u.ctx != ctx); 337 KASSERT(mutex->wwm_u.ctx != ctx);
273 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class), 338 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
274 "ww mutex class mismatch: %p != %p", 339 "ww mutex class mismatch: %p != %p",
275 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class); 340 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
@@ -281,26 +346,42 @@ ww_mutex_lock_wait(struct ww_mutex *mute
281 collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx); 346 collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
282 KASSERTMSG((collision == ctx), 347 KASSERTMSG((collision == ctx),
283 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)", 348 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
284 ctx->wwx_ticket, ctx, collision->wwx_ticket, collision); 349 ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);
285 350
286 do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock); 351 do cv_wait(&mutex->wwm_cv, &mutex->wwm_lock);
287 while (!(((mutex->wwm_state == WW_CTX) || 352 while (!(((mutex->wwm_state == WW_CTX) ||
288 (mutex->wwm_state == WW_WANTOWN)) && 353 (mutex->wwm_state == WW_WANTOWN)) &&
289 (mutex->wwm_u.ctx == ctx))); 354 (mutex->wwm_u.ctx == ctx)));
290 355
291 rb_tree_remove_node(&mutex->wwm_waiters, ctx); 356 rb_tree_remove_node(&mutex->wwm_waiters, ctx);
292} 357}
293 358
 359/*
 360 * ww_mutex_lock_wait_sig(mutex, ctx)
 361 *
 362 * With mutex locked and in the WW_CTX or WW_WANTOWN state, owned
 363 * by another thread with an acquire context, wait to acquire
 364 * mutex and return 0, or return -EINTR if interrupted by a
 365 * signal. While waiting, record ctx in the tree of waiters.
 366 * Does not update the mutex state otherwise.
 367 *
 368 * Caller must not already hold mutex. Caller must hold mutex's
 369 * internal lock.
 370 *
 371 * May sleep.
 372 *
 373 * Internal subroutine.
 374 */
294static int 375static int
295ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx) 376ww_mutex_lock_wait_sig(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
296{ 377{
297 struct ww_acquire_ctx *collision __diagused; 378 struct ww_acquire_ctx *collision __diagused;
298 int ret; 379 int ret;
299 380
300 KASSERT(mutex_owned(&mutex->wwm_lock)); 381 KASSERT(mutex_owned(&mutex->wwm_lock));
301 382
302 KASSERT((mutex->wwm_state == WW_CTX) || 383 KASSERT((mutex->wwm_state == WW_CTX) ||
303 (mutex->wwm_state == WW_WANTOWN)); 384 (mutex->wwm_state == WW_WANTOWN));
304 KASSERT(mutex->wwm_u.ctx != ctx); 385 KASSERT(mutex->wwm_u.ctx != ctx);
305 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class), 386 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
306 "ww mutex class mismatch: %p != %p", 387 "ww mutex class mismatch: %p != %p",
@@ -308,38 +389,51 @@ ww_mutex_lock_wait_sig(struct ww_mutex *
308 KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket), 389 KASSERTMSG((mutex->wwm_u.ctx->wwx_ticket != ctx->wwx_ticket),
309 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)", 390 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
310 ctx->wwx_ticket, ctx, 391 ctx->wwx_ticket, ctx,
311 mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx); 392 mutex->wwm_u.ctx->wwx_ticket, mutex->wwm_u.ctx);
312 393
313 collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx); 394 collision = rb_tree_insert_node(&mutex->wwm_waiters, ctx);
314 KASSERTMSG((collision == ctx), 395 KASSERTMSG((collision == ctx),
315 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)", 396 "ticket number reused: %"PRId64" (%p) %"PRId64" (%p)",
316 ctx->wwx_ticket, ctx, collision->wwx_ticket, collision); 397 ctx->wwx_ticket, ctx, collision->wwx_ticket, collision);
317 398
318 do { 399 do {
319 /* XXX errno NetBSD->Linux */ 400 /* XXX errno NetBSD->Linux */
320 ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock); 401 ret = -cv_wait_sig(&mutex->wwm_cv, &mutex->wwm_lock);
321 if (ret == -ERESTART) 402 if (ret) {
322 ret = -ERESTARTSYS; 403 KASSERTMSG((ret == -EINTR || ret == -ERESTART),
323 if (ret) 404 "ret=%d", ret);
 405 ret = -EINTR;
324 goto out; 406 goto out;
 407 }
325 } while (!(((mutex->wwm_state == WW_CTX) || 408 } while (!(((mutex->wwm_state == WW_CTX) ||
326 (mutex->wwm_state == WW_WANTOWN)) && 409 (mutex->wwm_state == WW_WANTOWN)) &&
327 (mutex->wwm_u.ctx == ctx))); 410 (mutex->wwm_u.ctx == ctx)));
328 411
329out: rb_tree_remove_node(&mutex->wwm_waiters, ctx); 412out: rb_tree_remove_node(&mutex->wwm_waiters, ctx);
 413 KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
330 return ret; 414 return ret;
331} 415}
332 416
 417/*
 418 * ww_mutex_lock_noctx(mutex)
 419 *
 420 * Acquire mutex without an acquire context. Caller must not
 421 * already hold the mutex. Uninterruptible; never fails.
 422 *
 423 * May sleep.
 424 *
 425 * Internal subroutine, implementing ww_mutex_lock(..., NULL).
 426 */
333static void 427static void
334ww_mutex_lock_noctx(struct ww_mutex *mutex) 428ww_mutex_lock_noctx(struct ww_mutex *mutex)
335{ 429{
336 430
337 mutex_enter(&mutex->wwm_lock); 431 mutex_enter(&mutex->wwm_lock);
338retry: switch (mutex->wwm_state) { 432retry: switch (mutex->wwm_state) {
339 case WW_UNLOCKED: 433 case WW_UNLOCKED:
340 mutex->wwm_state = WW_OWNED; 434 mutex->wwm_state = WW_OWNED;
341 mutex->wwm_u.owner = curlwp; 435 mutex->wwm_u.owner = curlwp;
342 break; 436 break;
343 case WW_OWNED: 437 case WW_OWNED:
344 KASSERTMSG((mutex->wwm_u.owner != curlwp), 438 KASSERTMSG((mutex->wwm_u.owner != curlwp),
345 "locking %p against myself: %p", mutex, curlwp); 439 "locking %p against myself: %p", mutex, curlwp);
@@ -354,82 +448,116 @@ retry: switch (mutex->wwm_state) {
354 "locking %p against myself: %p", mutex, curlwp); 448 "locking %p against myself: %p", mutex, curlwp);
355 ww_mutex_state_wait(mutex, WW_WANTOWN); 449 ww_mutex_state_wait(mutex, WW_WANTOWN);
356 goto retry; 450 goto retry;
357 default: 451 default:
358 panic("wait/wound mutex %p in bad state: %d", 452 panic("wait/wound mutex %p in bad state: %d",
359 mutex, (int)mutex->wwm_state); 453 mutex, (int)mutex->wwm_state);
360 } 454 }
361 KASSERT(mutex->wwm_state == WW_OWNED); 455 KASSERT(mutex->wwm_state == WW_OWNED);
362 KASSERT(mutex->wwm_u.owner == curlwp); 456 KASSERT(mutex->wwm_u.owner == curlwp);
363 WW_LOCKED(mutex); 457 WW_LOCKED(mutex);
364 mutex_exit(&mutex->wwm_lock); 458 mutex_exit(&mutex->wwm_lock);
365} 459}
366 460
 461/*
 462 * ww_mutex_lock_noctx_sig(mutex)
 463 *
 464 * Acquire mutex without an acquire context and return 0, or fail
 465 * and return -EINTR if interrupted by a signal. Caller must not
 466 * already hold the mutex.
 467 *
 468 * May sleep.
 469 *
 470 * Internal subroutine, implementing
 471 * ww_mutex_lock_interruptible(..., NULL).
 472 */
367static int 473static int
368ww_mutex_lock_noctx_sig(struct ww_mutex *mutex) 474ww_mutex_lock_noctx_sig(struct ww_mutex *mutex)
369{ 475{
370 int ret; 476 int ret;
371 477
372 mutex_enter(&mutex->wwm_lock); 478 mutex_enter(&mutex->wwm_lock);
373retry: switch (mutex->wwm_state) { 479retry: switch (mutex->wwm_state) {
374 case WW_UNLOCKED: 480 case WW_UNLOCKED:
375 mutex->wwm_state = WW_OWNED; 481 mutex->wwm_state = WW_OWNED;
376 mutex->wwm_u.owner = curlwp; 482 mutex->wwm_u.owner = curlwp;
377 break; 483 break;
378 case WW_OWNED: 484 case WW_OWNED:
379 KASSERTMSG((mutex->wwm_u.owner != curlwp), 485 KASSERTMSG((mutex->wwm_u.owner != curlwp),
380 "locking %p against myself: %p", mutex, curlwp); 486 "locking %p against myself: %p", mutex, curlwp);
381 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED); 487 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
382 if (ret) 488 if (ret) {
 489 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
383 goto out; 490 goto out;
 491 }
384 goto retry; 492 goto retry;
385 case WW_CTX: 493 case WW_CTX:
386 KASSERT(mutex->wwm_u.ctx != NULL); 494 KASSERT(mutex->wwm_u.ctx != NULL);
387 mutex->wwm_state = WW_WANTOWN; 495 mutex->wwm_state = WW_WANTOWN;
388 /* FALLTHROUGH */ 496 /* FALLTHROUGH */
389 case WW_WANTOWN: 497 case WW_WANTOWN:
390 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp), 498 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
391 "locking %p against myself: %p", mutex, curlwp); 499 "locking %p against myself: %p", mutex, curlwp);
392 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN); 500 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
393 if (ret) 501 if (ret) {
 502 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
394 goto out; 503 goto out;
 504 }
395 goto retry; 505 goto retry;
396 default: 506 default:
397 panic("wait/wound mutex %p in bad state: %d", 507 panic("wait/wound mutex %p in bad state: %d",
398 mutex, (int)mutex->wwm_state); 508 mutex, (int)mutex->wwm_state);
399 } 509 }
400 KASSERT(mutex->wwm_state == WW_OWNED); 510 KASSERT(mutex->wwm_state == WW_OWNED);
401 KASSERT(mutex->wwm_u.owner == curlwp); 511 KASSERT(mutex->wwm_u.owner == curlwp);
402 WW_LOCKED(mutex); 512 WW_LOCKED(mutex);
403 ret = 0; 513 ret = 0;
404out: mutex_exit(&mutex->wwm_lock); 514out: mutex_exit(&mutex->wwm_lock);
 515 KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
405 return ret; 516 return ret;
406} 517}
407 518
 519/*
 520 * ww_mutex_lock(mutex, ctx)
 521 *
 522 * Lock the mutex and return 0, or fail if impossible.
 523 *
 524 * - If ctx is null, caller must not hold mutex, and ww_mutex_lock
 525 * always succeeds and returns 0.
 526 *
 527 * - If ctx is nonnull, then:
 528 * . Fail with -EALREADY if caller already holds mutex.
 529 * . Fail with -EDEADLK if someone else holds mutex but there is
 530 * a cycle.
 531 *
 532 * May sleep.
 533 */
408int 534int
409ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx) 535ww_mutex_lock(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
410{ 536{
 537 int ret;
411 538
412 /* 539 /*
413 * We do not WW_WANTLOCK at the beginning because we may 540 * We do not WW_WANTLOCK at the beginning because we may
414 * correctly already hold it, if we have a context, in which 541 * correctly already hold it, if we have a context, in which
415 * case we must return EALREADY to the caller. 542 * case we must return EALREADY to the caller.
416 */ 543 */
417 ASSERT_SLEEPABLE(); 544 ASSERT_SLEEPABLE();
418 545
419 if (ctx == NULL) { 546 if (ctx == NULL) {
420 WW_WANTLOCK(mutex); 547 WW_WANTLOCK(mutex);
421 ww_mutex_lock_noctx(mutex); 548 ww_mutex_lock_noctx(mutex);
422 return 0; 549 ret = 0;
 550 goto out;
423 } 551 }
424 552
425 KASSERTMSG((ctx->wwx_owner == curlwp), 553 KASSERTMSG((ctx->wwx_owner == curlwp),
426 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp); 554 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
427 KASSERTMSG(!ctx->wwx_acquire_done, 555 KASSERTMSG(!ctx->wwx_acquire_done,
428 "ctx %p done acquiring locks, can't acquire more", ctx); 556 "ctx %p done acquiring locks, can't acquire more", ctx);
429 KASSERTMSG((ctx->wwx_acquired != ~0U), 557 KASSERTMSG((ctx->wwx_acquired != ~0U),
430 "ctx %p finished, can't be used any more", ctx); 558 "ctx %p finished, can't be used any more", ctx);
431 KASSERTMSG((ctx->wwx_class == mutex->wwm_class), 559 KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
432 "ctx %p in class %p, mutex %p in class %p", 560 "ctx %p in class %p, mutex %p in class %p",
433 ctx, ctx->wwx_class, mutex, mutex->wwm_class); 561 ctx, ctx->wwx_class, mutex, mutex->wwm_class);
434 562
435 mutex_enter(&mutex->wwm_lock); 563 mutex_enter(&mutex->wwm_lock);
@@ -456,168 +584,212 @@ retry: switch (mutex->wwm_state) {
456 } 584 }
457 585
458 KASSERT(mutex->wwm_state == WW_CTX); 586 KASSERT(mutex->wwm_state == WW_CTX);
459 KASSERT(mutex->wwm_u.ctx != NULL); 587 KASSERT(mutex->wwm_u.ctx != NULL);
460 KASSERT((mutex->wwm_u.ctx == ctx) || 588 KASSERT((mutex->wwm_u.ctx == ctx) ||
461 (mutex->wwm_u.ctx->wwx_owner != curlwp)); 589 (mutex->wwm_u.ctx->wwx_owner != curlwp));
462 590
463 if (mutex->wwm_u.ctx == ctx) { 591 if (mutex->wwm_u.ctx == ctx) {
464 /* 592 /*
465 * We already own it. Yes, this can happen correctly 593 * We already own it. Yes, this can happen correctly
466 * for objects whose locking order is determined by 594 * for objects whose locking order is determined by
467 * userland. 595 * userland.
468 */ 596 */
469 mutex_exit(&mutex->wwm_lock); 597 ret = -EALREADY;
470 return -EALREADY; 598 goto out_unlock;
471 } 599 }
472 600
473 /* 601 /*
474 * We do not own it. We can safely assert to LOCKDEBUG that we 602 * We do not own it. We can safely assert to LOCKDEBUG that we
475 * want it. 603 * want it.
476 */ 604 */
477 WW_WANTLOCK(mutex); 605 WW_WANTLOCK(mutex);
478 606
479 if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) { 607 if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
480 /* 608 /*
481 * Owned by a higher-priority party. Tell the caller 609 * Owned by a higher-priority party. Tell the caller
482 * to unlock everything and start over. 610 * to unlock everything and start over.
483 */ 611 */
484 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class), 612 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
485 "ww mutex class mismatch: %p != %p", 613 "ww mutex class mismatch: %p != %p",
486 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class); 614 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
487 mutex_exit(&mutex->wwm_lock); 615 ret = -EDEADLK;
488 return -EDEADLK; 616 goto out_unlock;
489 } 617 }
490 618
491 /* 619 /*
492 * Owned by a lower-priority party. Ask that party to wake us 620 * Owned by a lower-priority party. Ask that party to wake us
493 * when it is done or it realizes it needs to back off. 621 * when it is done or it realizes it needs to back off.
494 */ 622 */
495 ww_mutex_lock_wait(mutex, ctx); 623 ww_mutex_lock_wait(mutex, ctx);
496 624
497locked: KASSERT((mutex->wwm_state == WW_CTX) || 625locked: KASSERT((mutex->wwm_state == WW_CTX) ||
498 (mutex->wwm_state == WW_WANTOWN)); 626 (mutex->wwm_state == WW_WANTOWN));
499 KASSERT(mutex->wwm_u.ctx == ctx); 627 KASSERT(mutex->wwm_u.ctx == ctx);
500 WW_LOCKED(mutex); 628 WW_LOCKED(mutex);
501 ctx->wwx_acquired++; 629 ctx->wwx_acquired++;
 630 ret = 0;
 631out_unlock:
502 mutex_exit(&mutex->wwm_lock); 632 mutex_exit(&mutex->wwm_lock);
503 return 0; 633out: KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK),
 634 "ret=%d", ret);
 635 return ret;
504} 636}
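
A sketch of the intended calling convention for the fast path and the -EDEADLK backoff (illustrative only; lock_pair and its arguments are hypothetical, ma and mb are distinct, and the caller is assumed to have called ww_acquire_init on ctx beforehand and to call ww_acquire_done once both locks are held):

static void
lock_pair(struct ww_mutex *ma, struct ww_mutex *mb, struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = ww_mutex_lock(ma, ctx);
	if (ret == -EDEADLK)
		goto slow_a;
	KASSERT(ret == 0);
lock_b:	ret = ww_mutex_lock(mb, ctx);
	if (ret == -EDEADLK) {
		ww_mutex_unlock(ma);		/* ditch everything first */
		goto slow_b;
	}
	KASSERT(ret == 0);
	return;					/* both locks held */

slow_a:	ww_mutex_lock_slow(ma, ctx);		/* holding nothing: wait for ma */
	goto lock_b;

slow_b:	ww_mutex_lock_slow(mb, ctx);		/* holding nothing: wait for mb */
	ret = ww_mutex_lock(ma, ctx);
	if (ret == -EDEADLK) {
		ww_mutex_unlock(mb);
		goto slow_a;
	}
	KASSERT(ret == 0);
	return;					/* both locks held */
}

On success the caller eventually unlocks both mutexes with ww_mutex_unlock and retires the context with ww_acquire_fini; -EALREADY does not arise here because no mutex is locked twice against the same context.
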
505 637
 638/*
 639 * ww_mutex_lock_interruptible(mutex, ctx)
 640 *
 641 * Lock the mutex and return 0, or fail if impossible or
 642 * interrupted.
 643 *
 644 * - If ctx is null, caller must not hold mutex, and the call
 645 * returns 0 unless interrupted by a signal (-EINTR).
 646 *
 647 * - If ctx is nonnull, then:
 648 * . Fail with -EALREADY if caller already holds mutex.
 649 * . Fail with -EDEADLK if someone else holds mutex but there is
 650 * a cycle.
 651 * . Fail with -EINTR if interrupted by a signal.
 652 *
 653 * May sleep.
 654 */
506int 655int
507ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx) 656ww_mutex_lock_interruptible(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
508{ 657{
509 int ret; 658 int ret;
510 659
511 /* 660 /*
512 * We do not WW_WANTLOCK at the beginning because we may 661 * We do not WW_WANTLOCK at the beginning because we may
513 * correctly already hold it, if we have a context, in which 662 * correctly already hold it, if we have a context, in which
514 * case we must return EALREADY to the caller. 663 * case we must return EALREADY to the caller.
515 */ 664 */
516 ASSERT_SLEEPABLE(); 665 ASSERT_SLEEPABLE();
517 666
518 if (ctx == NULL) { 667 if (ctx == NULL) {
519 WW_WANTLOCK(mutex); 668 WW_WANTLOCK(mutex);
520 return ww_mutex_lock_noctx_sig(mutex); 669 ret = ww_mutex_lock_noctx_sig(mutex);
 670 KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 671 goto out;
521 } 672 }
522 673
523 KASSERTMSG((ctx->wwx_owner == curlwp), 674 KASSERTMSG((ctx->wwx_owner == curlwp),
524 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp); 675 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
525 KASSERTMSG(!ctx->wwx_acquire_done, 676 KASSERTMSG(!ctx->wwx_acquire_done,
526 "ctx %p done acquiring locks, can't acquire more", ctx); 677 "ctx %p done acquiring locks, can't acquire more", ctx);
527 KASSERTMSG((ctx->wwx_acquired != ~0U), 678 KASSERTMSG((ctx->wwx_acquired != ~0U),
528 "ctx %p finished, can't be used any more", ctx); 679 "ctx %p finished, can't be used any more", ctx);
529 KASSERTMSG((ctx->wwx_class == mutex->wwm_class), 680 KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
530 "ctx %p in class %p, mutex %p in class %p", 681 "ctx %p in class %p, mutex %p in class %p",
531 ctx, ctx->wwx_class, mutex, mutex->wwm_class); 682 ctx, ctx->wwx_class, mutex, mutex->wwm_class);
532 683
533 mutex_enter(&mutex->wwm_lock); 684 mutex_enter(&mutex->wwm_lock);
534retry: switch (mutex->wwm_state) { 685retry: switch (mutex->wwm_state) {
535 case WW_UNLOCKED: 686 case WW_UNLOCKED:
536 WW_WANTLOCK(mutex); 687 WW_WANTLOCK(mutex);
537 mutex->wwm_state = WW_CTX; 688 mutex->wwm_state = WW_CTX;
538 mutex->wwm_u.ctx = ctx; 689 mutex->wwm_u.ctx = ctx;
539 goto locked; 690 goto locked;
540 case WW_OWNED: 691 case WW_OWNED:
541 WW_WANTLOCK(mutex); 692 WW_WANTLOCK(mutex);
542 KASSERTMSG((mutex->wwm_u.owner != curlwp), 693 KASSERTMSG((mutex->wwm_u.owner != curlwp),
543 "locking %p against myself: %p", mutex, curlwp); 694 "locking %p against myself: %p", mutex, curlwp);
544 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED); 695 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
545 if (ret) 696 if (ret) {
546 goto out; 697 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 698 goto out_unlock;
 699 }
547 goto retry; 700 goto retry;
548 case WW_CTX: 701 case WW_CTX:
549 break; 702 break;
550 case WW_WANTOWN: 703 case WW_WANTOWN:
551 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN); 704 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
552 if (ret) 705 if (ret) {
553 goto out; 706 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 707 goto out_unlock;
 708 }
554 goto retry; 709 goto retry;
555 default: 710 default:
556 panic("wait/wound mutex %p in bad state: %d", 711 panic("wait/wound mutex %p in bad state: %d",
557 mutex, (int)mutex->wwm_state); 712 mutex, (int)mutex->wwm_state);
558 } 713 }
559 714
560 KASSERT(mutex->wwm_state == WW_CTX); 715 KASSERT(mutex->wwm_state == WW_CTX);
561 KASSERT(mutex->wwm_u.ctx != NULL); 716 KASSERT(mutex->wwm_u.ctx != NULL);
562 KASSERT((mutex->wwm_u.ctx == ctx) || 717 KASSERT((mutex->wwm_u.ctx == ctx) ||
563 (mutex->wwm_u.ctx->wwx_owner != curlwp)); 718 (mutex->wwm_u.ctx->wwx_owner != curlwp));
564 719
565 if (mutex->wwm_u.ctx == ctx) { 720 if (mutex->wwm_u.ctx == ctx) {
566 /* 721 /*
567 * We already own it. Yes, this can happen correctly 722 * We already own it. Yes, this can happen correctly
568 * for objects whose locking order is determined by 723 * for objects whose locking order is determined by
569 * userland. 724 * userland.
570 */ 725 */
571 mutex_exit(&mutex->wwm_lock); 726 ret = -EALREADY;
572 return -EALREADY; 727 goto out_unlock;
573 } 728 }
574 729
575 /* 730 /*
576 * We do not own it. We can safely assert to LOCKDEBUG that we 731 * We do not own it. We can safely assert to LOCKDEBUG that we
577 * want it. 732 * want it.
578 */ 733 */
579 WW_WANTLOCK(mutex); 734 WW_WANTLOCK(mutex);
580 735
581 if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) { 736 if (mutex->wwm_u.ctx->wwx_ticket < ctx->wwx_ticket) {
582 /* 737 /*
583 * Owned by a higher-priority party. Tell the caller 738 * Owned by a higher-priority party. Tell the caller
584 * to unlock everything and start over. 739 * to unlock everything and start over.
585 */ 740 */
586 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class), 741 KASSERTMSG((ctx->wwx_class == mutex->wwm_u.ctx->wwx_class),
587 "ww mutex class mismatch: %p != %p", 742 "ww mutex class mismatch: %p != %p",
588 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class); 743 ctx->wwx_class, mutex->wwm_u.ctx->wwx_class);
589 mutex_exit(&mutex->wwm_lock); 744 ret = -EDEADLK;
590 return -EDEADLK; 745 goto out_unlock;
591 } 746 }
592 747
593 /* 748 /*
594 * Owned by a lower-priority party. Ask that party to wake us 749 * Owned by a lower-priority party. Ask that party to wake us
595 * when it is done or it realizes it needs to back off. 750 * when it is done or it realizes it needs to back off.
596 */ 751 */
597 ret = ww_mutex_lock_wait_sig(mutex, ctx); 752 ret = ww_mutex_lock_wait_sig(mutex, ctx);
598 if (ret) 753 if (ret) {
599 goto out; 754 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 755 goto out_unlock;
 756 }
600 757
601locked: KASSERT((mutex->wwm_state == WW_CTX) || 758locked: KASSERT((mutex->wwm_state == WW_CTX) ||
602 (mutex->wwm_state == WW_WANTOWN)); 759 (mutex->wwm_state == WW_WANTOWN));
603 KASSERT(mutex->wwm_u.ctx == ctx); 760 KASSERT(mutex->wwm_u.ctx == ctx);
604 WW_LOCKED(mutex); 761 WW_LOCKED(mutex);
605 ctx->wwx_acquired++; 762 ctx->wwx_acquired++;
606 ret = 0; 763 ret = 0;
607out: mutex_exit(&mutex->wwm_lock); 764out_unlock:
 765 mutex_exit(&mutex->wwm_lock);
 766out: KASSERTMSG((ret == 0 || ret == -EALREADY || ret == -EDEADLK ||
 767 ret == -EINTR), "ret=%d", ret);
608 return ret; 768 return ret;
609} 769}
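
A single-lock interruptible variant (illustrative; lock_one_intr is a hypothetical helper, and nothing else is held, so -EDEADLK can simply be waited out with the slow path):

static int
lock_one_intr(struct ww_mutex *m, struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = ww_mutex_lock_interruptible(m, ctx);
	if (ret == -EDEADLK) {
		/* We hold nothing else, so just wait our turn. */
		ret = ww_mutex_lock_slow_interruptible(m, ctx);
	}
	if (ret == -EINTR)
		return ret;	/* interrupted by a signal: propagate */
	KASSERT(ret == 0);
	return 0;
}
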
610 770
 771/*
 772 * ww_mutex_lock_slow(mutex, ctx)
 773 *
 774 * Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 775 * after the caller has ditched all its locks, wait for the owner
 776 * of mutex to relinquish mutex before the caller can start over
 777 * acquiring locks again.
 778 *
 779 * Uninterruptible; never fails.
 780 *
 781 * May sleep.
 782 */
611void 783void
612ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx) 784ww_mutex_lock_slow(struct ww_mutex *mutex, struct ww_acquire_ctx *ctx)
613{ 785{
614 786
615 /* Caller must not try to lock against self here. */ 787 /* Caller must not try to lock against self here. */
616 WW_WANTLOCK(mutex); 788 WW_WANTLOCK(mutex);
617 ASSERT_SLEEPABLE(); 789 ASSERT_SLEEPABLE();
618 790
619 if (ctx == NULL) { 791 if (ctx == NULL) {
620 ww_mutex_lock_noctx(mutex); 792 ww_mutex_lock_noctx(mutex);
621 return; 793 return;
622 } 794 }
623 795
@@ -664,99 +836,127 @@ retry: switch (mutex->wwm_state) {
664 * Owned by another party, of any priority. Ask that party to 836 * Owned by another party, of any priority. Ask that party to
665 * wake us when it's done. 837 * wake us when it's done.
666 */ 838 */
667 ww_mutex_lock_wait(mutex, ctx); 839 ww_mutex_lock_wait(mutex, ctx);
668 840
669locked: KASSERT((mutex->wwm_state == WW_CTX) || 841locked: KASSERT((mutex->wwm_state == WW_CTX) ||
670 (mutex->wwm_state == WW_WANTOWN)); 842 (mutex->wwm_state == WW_WANTOWN));
671 KASSERT(mutex->wwm_u.ctx == ctx); 843 KASSERT(mutex->wwm_u.ctx == ctx);
672 WW_LOCKED(mutex); 844 WW_LOCKED(mutex);
673 ctx->wwx_acquired++; 845 ctx->wwx_acquired++;
674 mutex_exit(&mutex->wwm_lock); 846 mutex_exit(&mutex->wwm_lock);
675} 847}
676 848
 849/*
 850 * ww_mutex_lock_slow(mutex, ctx)
 851 * ww_mutex_lock_slow_interruptible(mutex, ctx)
 852 * Slow path: After ww_mutex_lock* has failed with -EDEADLK, and
 853 * after the caller has ditched all its locks, wait for the owner
 854 * of mutex to relinquish mutex before the caller can start over
 855 * acquiring locks again, or fail with -EINTR if interrupted by a
 856 * signal.
 857 *
 858 * May sleep.
 859 */
677int 860int
678ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex, 861ww_mutex_lock_slow_interruptible(struct ww_mutex *mutex,
679 struct ww_acquire_ctx *ctx) 862 struct ww_acquire_ctx *ctx)
680{ 863{
681 int ret; 864 int ret;
682 865
683 WW_WANTLOCK(mutex); 866 WW_WANTLOCK(mutex);
684 ASSERT_SLEEPABLE(); 867 ASSERT_SLEEPABLE();
685 868
686 if (ctx == NULL) 869 if (ctx == NULL) {
687 return ww_mutex_lock_noctx_sig(mutex); 870 ret = ww_mutex_lock_noctx_sig(mutex);
 871 KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
 872 goto out;
 873 }
688 874
689 KASSERTMSG((ctx->wwx_owner == curlwp), 875 KASSERTMSG((ctx->wwx_owner == curlwp),
690 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp); 876 "ctx %p owned by %p, not self (%p)", ctx, ctx->wwx_owner, curlwp);
691 KASSERTMSG(!ctx->wwx_acquire_done, 877 KASSERTMSG(!ctx->wwx_acquire_done,
692 "ctx %p done acquiring locks, can't acquire more", ctx); 878 "ctx %p done acquiring locks, can't acquire more", ctx);
693 KASSERTMSG((ctx->wwx_acquired != ~0U), 879 KASSERTMSG((ctx->wwx_acquired != ~0U),
694 "ctx %p finished, can't be used any more", ctx); 880 "ctx %p finished, can't be used any more", ctx);
695 KASSERTMSG((ctx->wwx_acquired == 0), 881 KASSERTMSG((ctx->wwx_acquired == 0),
696 "ctx %p still holds %u locks, not allowed in slow path", 882 "ctx %p still holds %u locks, not allowed in slow path",
697 ctx, ctx->wwx_acquired); 883 ctx, ctx->wwx_acquired);
698 KASSERTMSG((ctx->wwx_class == mutex->wwm_class), 884 KASSERTMSG((ctx->wwx_class == mutex->wwm_class),
699 "ctx %p in class %p, mutex %p in class %p", 885 "ctx %p in class %p, mutex %p in class %p",
700 ctx, ctx->wwx_class, mutex, mutex->wwm_class); 886 ctx, ctx->wwx_class, mutex, mutex->wwm_class);
701 887
702 mutex_enter(&mutex->wwm_lock); 888 mutex_enter(&mutex->wwm_lock);
703retry: switch (mutex->wwm_state) { 889retry: switch (mutex->wwm_state) {
704 case WW_UNLOCKED: 890 case WW_UNLOCKED:
705 mutex->wwm_state = WW_CTX; 891 mutex->wwm_state = WW_CTX;
706 mutex->wwm_u.ctx = ctx; 892 mutex->wwm_u.ctx = ctx;
707 goto locked; 893 goto locked;
708 case WW_OWNED: 894 case WW_OWNED:
709 KASSERTMSG((mutex->wwm_u.owner != curlwp), 895 KASSERTMSG((mutex->wwm_u.owner != curlwp),
710 "locking %p against myself: %p", mutex, curlwp); 896 "locking %p against myself: %p", mutex, curlwp);
711 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED); 897 ret = ww_mutex_state_wait_sig(mutex, WW_OWNED);
712 if (ret) 898 if (ret) {
713 goto out; 899 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 900 goto out_unlock;
 901 }
714 goto retry; 902 goto retry;
715 case WW_CTX: 903 case WW_CTX:
716 break; 904 break;
717 case WW_WANTOWN: 905 case WW_WANTOWN:
718 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN); 906 ret = ww_mutex_state_wait_sig(mutex, WW_WANTOWN);
719 if (ret) 907 if (ret) {
720 goto out; 908 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 909 goto out_unlock;
 910 }
721 goto retry; 911 goto retry;
722 default: 912 default:
723 panic("wait/wound mutex %p in bad state: %d", 913 panic("wait/wound mutex %p in bad state: %d",
724 mutex, (int)mutex->wwm_state); 914 mutex, (int)mutex->wwm_state);
725 } 915 }
726 916
727 KASSERT(mutex->wwm_state == WW_CTX); 917 KASSERT(mutex->wwm_state == WW_CTX);
728 KASSERT(mutex->wwm_u.ctx != NULL); 918 KASSERT(mutex->wwm_u.ctx != NULL);
729 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp), 919 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner != curlwp),
730 "locking %p against myself: %p", mutex, curlwp); 920 "locking %p against myself: %p", mutex, curlwp);
731 921
732 /* 922 /*
733 * Owned by another party, of any priority. Ask that party to 923 * Owned by another party, of any priority. Ask that party to
734 * wake us when it's done. 924 * wake us when it's done.
735 */ 925 */
736 ret = ww_mutex_lock_wait_sig(mutex, ctx); 926 ret = ww_mutex_lock_wait_sig(mutex, ctx);
737 if (ret) 927 if (ret) {
738 goto out; 928 KASSERTMSG(ret == -EINTR, "ret=%d", ret);
 929 goto out_unlock;
 930 }
739 931
740locked: KASSERT((mutex->wwm_state == WW_CTX) || 932locked: KASSERT((mutex->wwm_state == WW_CTX) ||
741 (mutex->wwm_state == WW_WANTOWN)); 933 (mutex->wwm_state == WW_WANTOWN));
742 KASSERT(mutex->wwm_u.ctx == ctx); 934 KASSERT(mutex->wwm_u.ctx == ctx);
743 WW_LOCKED(mutex); 935 WW_LOCKED(mutex);
744 ctx->wwx_acquired++; 936 ctx->wwx_acquired++;
745 ret = 0; 937 ret = 0;
746out: mutex_exit(&mutex->wwm_lock); 938out_unlock:
 939 mutex_exit(&mutex->wwm_lock);
 940out: KASSERTMSG((ret == 0 || ret == -EINTR), "ret=%d", ret);
747 return ret; 941 return ret;
748} 942}
749 943
 944/*
 945 * ww_mutex_trylock(mutex)
 946 *
 947 * Try to acquire mutex and return 1, but if it can't be done
 948 * immediately, return 0.
 949 */
750int 950int
751ww_mutex_trylock(struct ww_mutex *mutex) 951ww_mutex_trylock(struct ww_mutex *mutex)
752{ 952{
753 int ret; 953 int ret;
754 954
755 mutex_enter(&mutex->wwm_lock); 955 mutex_enter(&mutex->wwm_lock);
756 if (mutex->wwm_state == WW_UNLOCKED) { 956 if (mutex->wwm_state == WW_UNLOCKED) {
757 mutex->wwm_state = WW_OWNED; 957 mutex->wwm_state = WW_OWNED;
758 mutex->wwm_u.owner = curlwp; 958 mutex->wwm_u.owner = curlwp;
759 WW_WANTLOCK(mutex); 959 WW_WANTLOCK(mutex);
760 WW_LOCKED(mutex); 960 WW_LOCKED(mutex);
761 ret = 1; 961 ret = 1;
762 } else { 962 } else {
@@ -766,43 +966,59 @@ ww_mutex_trylock(struct ww_mutex *mutex)
766 KASSERTMSG(((mutex->wwm_state != WW_CTX) || 966 KASSERTMSG(((mutex->wwm_state != WW_CTX) ||
767 (mutex->wwm_u.ctx->wwx_owner != curlwp)), 967 (mutex->wwm_u.ctx->wwx_owner != curlwp)),
768 "locking %p against myself: %p", mutex, curlwp); 968 "locking %p against myself: %p", mutex, curlwp);
769 KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) || 969 KASSERTMSG(((mutex->wwm_state != WW_WANTOWN) ||
770 (mutex->wwm_u.ctx->wwx_owner != curlwp)), 970 (mutex->wwm_u.ctx->wwx_owner != curlwp)),
771 "locking %p against myself: %p", mutex, curlwp); 971 "locking %p against myself: %p", mutex, curlwp);
772 ret = 0; 972 ret = 0;
773 } 973 }
774 mutex_exit(&mutex->wwm_lock); 974 mutex_exit(&mutex->wwm_lock);
775 975
776 return ret; 976 return ret;
777} 977}
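
A short trylock sketch (illustrative, reusing the hypothetical my_object above); note the Linux-style 1-on-success/0-on-contention convention rather than 0/-errno, and that no acquire context is involved:

static bool
poke_object(struct my_object *obj)
{

	if (!ww_mutex_trylock(&obj->mo_lock))
		return false;		/* contended: caller backs off or retries */
	/* ...touch the data protected by mo_lock... */
	ww_mutex_unlock(&obj->mo_lock);
	return true;
}
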
778 978
 979/*
 980 * ww_mutex_unlock_release(mutex)
 981 *
 982 * Decrement the number of mutexes acquired in the current locking
 983 * context of mutex, which must be held by the caller and in
 984 * WW_CTX or WW_WANTOWN state, and clear the mutex's reference.
 985 * Caller must hold the internal lock of mutex, and is responsible
 986 * for notifying waiters.
 987 *
 988 * Internal subroutine.
 989 */
779static void 990static void
780ww_mutex_unlock_release(struct ww_mutex *mutex) 991ww_mutex_unlock_release(struct ww_mutex *mutex)
781{ 992{
782 993
783 KASSERT(mutex_owned(&mutex->wwm_lock)); 994 KASSERT(mutex_owned(&mutex->wwm_lock));
784 KASSERT((mutex->wwm_state == WW_CTX) || 995 KASSERT((mutex->wwm_state == WW_CTX) ||
785 (mutex->wwm_state == WW_WANTOWN)); 996 (mutex->wwm_state == WW_WANTOWN));
786 KASSERT(mutex->wwm_u.ctx != NULL); 997 KASSERT(mutex->wwm_u.ctx != NULL);
787 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp), 998 KASSERTMSG((mutex->wwm_u.ctx->wwx_owner == curlwp),
788 "ww_mutex %p ctx %p held by %p, not by self (%p)", 999 "ww_mutex %p ctx %p held by %p, not by self (%p)",
789 mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner, 1000 mutex, mutex->wwm_u.ctx, mutex->wwm_u.ctx->wwx_owner,
790 curlwp); 1001 curlwp);
791 KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U); 1002 KASSERT(mutex->wwm_u.ctx->wwx_acquired != ~0U);
792 mutex->wwm_u.ctx->wwx_acquired--; 1003 mutex->wwm_u.ctx->wwx_acquired--;
793 mutex->wwm_u.ctx = NULL; 1004 mutex->wwm_u.ctx = NULL;
794} 1005}
795 1006
 1007/*
 1008 * ww_mutex_unlock(mutex)
 1009 *
 1010 * Release mutex and wake the next caller waiting, if any.
 1011 */
796void 1012void
797ww_mutex_unlock(struct ww_mutex *mutex) 1013ww_mutex_unlock(struct ww_mutex *mutex)
798{ 1014{
799 struct ww_acquire_ctx *ctx; 1015 struct ww_acquire_ctx *ctx;
800 1016
801 mutex_enter(&mutex->wwm_lock); 1017 mutex_enter(&mutex->wwm_lock);
802 KASSERT(mutex->wwm_state != WW_UNLOCKED); 1018 KASSERT(mutex->wwm_state != WW_UNLOCKED);
803 switch (mutex->wwm_state) { 1019 switch (mutex->wwm_state) {
804 case WW_UNLOCKED: 1020 case WW_UNLOCKED:
805 panic("unlocking unlocked wait/wound mutex: %p", mutex); 1021 panic("unlocking unlocked wait/wound mutex: %p", mutex);
806 case WW_OWNED: 1022 case WW_OWNED:
807 /* Let the context lockers fight over it. */ 1023 /* Let the context lockers fight over it. */
808 mutex->wwm_u.owner = NULL; 1024 mutex->wwm_u.owner = NULL;