Sun Mar 16 23:11:30 2008 UTC
Work around the case where l_cpu changes to l_target_cpu and causes
locking against oneself. Will be revisited. OK by <ad>.


(rmind)
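
The change below clears l_target_cpu when it already points at the LWP's
current CPU, so that mi_switch() never tries to double-lock one and the
same run queue. As a minimal sketch of that guard's intent, using made-up
stub types and names (cpu_info_stub, effective_target) rather than the
kernel's own structures:

    /*
     * Illustrative sketch only: hypothetical stand-ins, not the NetBSD
     * scheduler API.  The point is the guard: a migration target equal
     * to the current CPU must be dropped, otherwise the caller would
     * take the same run-queue lock twice ("locking against oneself").
     */
    #include <stddef.h>
    #include <stdio.h>

    struct cpu_info_stub {
    	int ci_index;		/* stand-in for a per-CPU structure */
    };

    /*
     * Return the CPU whose run queue must be locked in addition to the
     * current one, or NULL when a single lock suffices.
     */
    static struct cpu_info_stub *
    effective_target(struct cpu_info_stub *cur, struct cpu_info_stub *target)
    {
    	if (target == cur)
    		return NULL;	/* same CPU: forget the "migration" */
    	return target;
    }

    int
    main(void)
    {
    	struct cpu_info_stub c0 = { 0 }, c1 = { 1 };

    	printf("target == self -> %s\n",
    	    effective_target(&c0, &c0) ? "double lock" : "single lock");
    	printf("target != self -> %s\n",
    	    effective_target(&c0, &c1) ? "double lock" : "single lock");
    	return 0;
    }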
cvs diff -r1.219 -r1.220 src/sys/kern/kern_synch.c

--- src/sys/kern/kern_synch.c 2008/03/12 11:00:43 1.219
+++ src/sys/kern/kern_synch.c 2008/03/16 23:11:30 1.220
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_synch.c,v 1.219 2008/03/12 11:00:43 ad Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.220 2008/03/16 23:11:30 rmind Exp $	*/
 
 /*-
  * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
  * Daniel Sieger.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -65,27 +65,27 @@
  * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.219 2008/03/12 11:00:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.220 2008/03/16 23:11:30 rmind Exp $");
 
 #include "opt_kstack.h"
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 #include "opt_perfctrs.h"
 
 #define	__MUTEX_PRIVATE
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #if defined(PERFCTRS)
@@ -415,27 +415,32 @@ mi_switch(lwp_t *l)
 			pmc_save_context(l->l_proc);
 		}
 #endif
 		updatertime(l, &bt);
 	}
 
 	/*
 	 * If on the CPU and we have gotten this far, then we must yield.
 	 */
 	KASSERT(l->l_stat != LSRUN);
 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
 		KASSERT(lwp_locked(l, spc->spc_lwplock));
 
-		tci = l->l_target_cpu;
+		if (l->l_target_cpu == l->l_cpu) {
+			l->l_target_cpu = NULL;
+		} else {
+			tci = l->l_target_cpu;
+		}
+
 		if (__predict_false(tci != NULL)) {
 			/* Double-lock the runqueues */
 			spc_dlock(ci, tci);
 		} else {
 			/* Lock the runqueue */
 			spc_lock(ci);
 		}
 
 		if ((l->l_flag & LW_IDLE) == 0) {
 			l->l_stat = LSRUN;
 			if (__predict_false(tci != NULL)) {
 				/*
 				 * Set the new CPU, lock and unset the
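
For context on the hazard being worked around: when an LWP migrates,
mi_switch() has to hold the run-queue locks of two CPUs at once
(spc_dlock in the diff above). The user-space analogue below, built on
POSIX mutexes, sketches the usual shape of such a double-lock helper;
the names (runqueue_dlock, runqueue_dunlock) are invented and this is
not the kernel's spc_dlock implementation:

    /*
     * Sketch of a double-lock helper: take two run-queue locks in a
     * fixed (address) order to avoid AB/BA deadlock, and never try to
     * take the same lock twice -- the "locking against oneself" case
     * the commit above avoids by clearing l_target_cpu.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rq_lock[2] = {
    	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    static void
    runqueue_dlock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
    	if (a == b) {
    		/*
    		 * Same run queue: one lock is enough.  Relocking a
    		 * default (non-recursive) mutex deadlocks or is
    		 * undefined.
    		 */
    		pthread_mutex_lock(a);
    		return;
    	}
    	/* Fixed order prevents deadlock between two CPUs. */
    	if (a < b) {
    		pthread_mutex_lock(a);
    		pthread_mutex_lock(b);
    	} else {
    		pthread_mutex_lock(b);
    		pthread_mutex_lock(a);
    	}
    }

    static void
    runqueue_dunlock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
    	pthread_mutex_unlock(a);
    	if (b != a)
    		pthread_mutex_unlock(b);
    }

    int
    main(void)
    {
    	/* Migrating to another CPU: two locks, ordered. */
    	runqueue_dlock(&rq_lock[0], &rq_lock[1]);
    	runqueue_dunlock(&rq_lock[0], &rq_lock[1]);

    	/* "Target" is the current CPU: single lock only. */
    	runqueue_dlock(&rq_lock[0], &rq_lock[0]);
    	runqueue_dunlock(&rq_lock[0], &rq_lock[0]);

    	puts("ok");
    	return 0;
    }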