Mon Jul 20 16:43:03 2020 UTC
Fix fpu_kern_enter in a softint that interrupted a softint.

We need to find the lwp that was originally interrupted to save its
fpu state.

With this, fpu-heavy programs (like firefox) are once again stable,
at least under modest stress testing, on systems configured to use
wifi with WPA2 and CCMP.


(riastradh)
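For illustration only, here is a standalone sketch of the chain walk the new code performs. This is a toy model, not kernel code: toy_lwp, is_softint, switchto and find_interrupted_lwp are invented stand-ins for struct lwp, LP_INTR and l_switchto. It shows why stopping after one step returns the wrong lwp when a softint interrupts another softint, and how walking the chain recovers the originally interrupted lwp, whose FPU state is the one live on the CPU.

/*
 * Toy model of the fix: walk the chain of interrupted lwps until we
 * reach one that is not a soft interrupt thread.  Names are invented
 * for illustration; the kernel uses struct lwp, LP_INTR and l_switchto.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_lwp {
	const char *name;
	bool is_softint;		/* models (l->l_pflag & LP_INTR) */
	struct toy_lwp *switchto;	/* models l->l_switchto: lwp we interrupted */
};

/* Find the lwp whose FPU state is live on the CPU. */
static struct toy_lwp *
find_interrupted_lwp(struct toy_lwp *l)
{
	/*
	 * The old (buggy) logic stopped after one step:
	 *	if (l->is_softint && l->switchto != NULL)
	 *		return l->switchto;
	 * which returns the inner softint when softints nest.
	 */
	while (l->is_softint && l->switchto != NULL)
		l = l->switchto;
	return l;
}

int
main(void)
{
	struct toy_lwp user  = { "user thread", false, NULL };
	struct toy_lwp soft1 = { "softint 1",   true,  &user };
	struct toy_lwp soft2 = { "softint 2",   true,  &soft1 };

	/*
	 * softint 2 interrupted softint 1, which interrupted the user
	 * thread: the FPU state belongs to the user thread.
	 */
	assert(find_interrupted_lwp(&soft2) == &user);
	printf("FPU state belongs to: %s\n", find_interrupted_lwp(&soft2)->name);
	return 0;
}

Running this prints the user thread's name; with the old one-step logic, soft2 would resolve to soft1, and the actually interrupted user thread's FPU state would never be saved.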
diff -r1.71 -r1.72 src/sys/arch/x86/x86/fpu.c

cvs diff -r1.71 -r1.72 src/sys/arch/x86/x86/fpu.c

--- src/sys/arch/x86/x86/fpu.c 2020/07/20 16:41:18 1.71
+++ src/sys/arch/x86/x86/fpu.c 2020/07/20 16:43:03 1.72
@@ -1,14 +1,14 @@
-/*	$NetBSD: fpu.c,v 1.71 2020/07/20 16:41:18 riastradh Exp $	*/
+/*	$NetBSD: fpu.c,v 1.72 2020/07/20 16:43:03 riastradh Exp $	*/
 
 /*
  * Copyright (c) 2008, 2019 The NetBSD Foundation, Inc.  All
  * rights reserved.
  *
  * This code is derived from software developed for The NetBSD Foundation
  * by Andrew Doran and Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -86,27 +86,27 @@
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  *	@(#)npx.c	7.2 (Berkeley) 5/12/91
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.71 2020/07/20 16:41:18 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: fpu.c,v 1.72 2020/07/20 16:43:03 riastradh Exp $");
 
 #include "opt_multiprocessor.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/conf.h>
 #include <sys/cpu.h>
 #include <sys/file.h>
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/sysctl.h>
 #include <sys/xcall.h>
 
@@ -370,31 +370,29 @@ fpu_kern_enter(void)
 	int s;
 
 	s = splvm();
 
 	ci = curcpu();
 	KASSERTMSG(ci->ci_ilevel <= IPL_VM, "ilevel=%d", ci->ci_ilevel);
 	KASSERT(ci->ci_kfpu_spl == -1);
 	ci->ci_kfpu_spl = s;
 
 	/*
 	 * If we are in a softint and have a pinned lwp, the fpu state is that
 	 * of the pinned lwp, so save it there.
 	 */
-	if ((l->l_pflag & LP_INTR) && (l->l_switchto != NULL)) {
-		fpu_save_lwp(l->l_switchto);
-	} else {
-		fpu_save_lwp(l);
-	}
+	while ((l->l_pflag & LP_INTR) && (l->l_switchto != NULL))
+		l = l->l_switchto;
+	fpu_save_lwp(l);
 
 	/*
 	 * Clear CR0_TS, which fpu_save_lwp set if it saved anything --
 	 * otherwise the CPU will trap if we try to use the FPU under
 	 * the false impression that there has been a task switch since
 	 * the last FPU usage requiring that we save the FPU state.
 	 */
 	clts();
 }
 
 /*
  * fpu_kern_leave()
  *