Sun Mar 8 15:00:31 2020 UTC
sched_preempted(): always clear LP_TELEPORT.


(ad)
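The diff below shows the change: the SMT fast path in sched_preempted() now consumes the one-shot LP_TELEPORT flag, just as the teleport path already did. Otherwise a vfork() child that happened to take the fast path would keep the flag set and scatter to another package on some later, unrelated preemption. Below is a minimal userland sketch of that failure mode, not kernel code; the struct, flag value, and helper are simplified stand-ins for the real ones in kern_runq.c, and the idle-sibling test is reduced to a boolean parameter.

/*
 * Minimal userland model of the LP_TELEPORT fix (not NetBSD kernel
 * code; names mirror the kernel's, logic is heavily simplified).
 */
#include <stdbool.h>
#include <stdio.h>

#define LP_TELEPORT 0x01        /* one-shot "scatter on next preemption" */

struct lwp {
        int l_pflag;
};

/* Returns true if the LWP took the "scatter far and wide" path. */
static bool
preempted(struct lwp *l, bool idle_sibling, bool clear_on_fast_path)
{
        if (idle_sibling) {
                /* Fast path: send the LWP back to the idle SMT sibling. */
                if (clear_on_fast_path)
                        l->l_pflag &= ~LP_TELEPORT;     /* the 1.63 fix */
                return false;
        }
        if ((l->l_pflag & LP_TELEPORT) != 0) {
                /* Teleport path: meant to fire once, after vfork(). */
                l->l_pflag &= ~LP_TELEPORT;
                return true;
        }
        return false;
}

int
main(void)
{
        struct lwp l = { .l_pflag = LP_TELEPORT };

        /* Without the fix the fast path leaves the flag set... */
        preempted(&l, true, false);
        /* ...so a later preemption scatters, long after vfork(). */
        printf("stale teleport: %d\n", preempted(&l, false, false));

        l.l_pflag = LP_TELEPORT;
        /* With the fix the one-shot flag is consumed on the fast path. */
        preempted(&l, true, true);
        printf("stale teleport: %d\n", preempted(&l, false, true));
        return 0;
}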

cvs diff -r1.62 -r1.63 src/sys/kern/kern_runq.c

--- src/sys/kern/kern_runq.c 2020/01/25 15:09:54 1.62
+++ src/sys/kern/kern_runq.c 2020/03/08 15:00:31 1.63
@@ -1,14 +1,14 @@
-/* $NetBSD: kern_runq.c,v 1.62 2020/01/25 15:09:54 ad Exp $ */
+/* $NetBSD: kern_runq.c,v 1.63 2020/03/08 15:00:31 ad Exp $ */
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -46,27 +46,27 @@
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.62 2020/01/25 15:09:54 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.63 2020/03/08 15:00:31 ad Exp $");
 
 #include "opt_dtrace.h"
 
 #include <sys/param.h>
 #include <sys/kernel.h>
 #include <sys/bitops.h>
 #include <sys/cpu.h>
 #include <sys/idle.h>
 #include <sys/intr.h>
 #include <sys/kmem.h>
 #include <sys/lwp.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
@@ -898,26 +898,27 @@ sched_preempted(struct lwp *l)
 	/*
 	 * Fast path: if the first SMT in the core is idle, send it back
 	 * there, because the cache is shared (cheap) and we want all LWPs
 	 * to be clustered on 1st class CPUs (either running there or on
 	 * their runqueues).
 	 */
 	tci = ci->ci_sibling[CPUREL_CORE];
 	while (tci != ci) {
 		const int flags = SPCF_IDLE | SPCF_1STCLASS;
 		tspc = &tci->ci_schedstate;
 		if ((tspc->spc_flags & flags) == flags &&
 		    sched_migratable(l, tci)) {
 			l->l_target_cpu = tci;
+			l->l_pflag &= ~LP_TELEPORT;
 			return;
 		}
 		tci = tci->ci_sibling[CPUREL_CORE];
 	}
 
 	if ((l->l_pflag & LP_TELEPORT) != 0) {
 		/*
 		 * A child of vfork(): now that the parent is released,
 		 * scatter far and wide, to match the LSIDL distribution
 		 * done in sched_takecpu().
 		 */
 		l->l_pflag &= ~LP_TELEPORT;
 		tci = sched_bestcpu(l, sched_nextpkg());