Tue Mar 13 16:45:52 2018 UTC
Mmh, add a missing x86_disable_intr(). My intention there was to ensure
interrupts were disabled before the barriers.


(maxv)
diff -r1.13 -r1.14 src/sys/arch/x86/x86/svs.c

--- src/sys/arch/x86/x86/svs.c 2018/03/01 16:49:06 1.13
+++ src/sys/arch/x86/x86/svs.c 2018/03/13 16:45:52 1.14
@@ -1,14 +1,14 @@
-/*	$NetBSD: svs.c,v 1.13 2018/03/01 16:49:06 maxv Exp $	*/
+/*	$NetBSD: svs.c,v 1.14 2018/03/13 16:45:52 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Maxime Villard.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.13 2018/03/01 16:49:06 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: svs.c,v 1.14 2018/03/13 16:45:52 maxv Exp $");
 
 #include "opt_svs.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 #include <sys/xcall.h>
 
 #include <x86/cputypes.h>
 #include <machine/cpuvar.h>
 #include <machine/frameasm.h>
@@ -637,26 +637,27 @@ svs_disable_hotpatch(void)
 
 static volatile unsigned long svs_cpu_barrier1 __cacheline_aligned;
 static volatile unsigned long svs_cpu_barrier2 __cacheline_aligned;
 typedef void (vector)(void);
 
 static void
 svs_disable_cpu(void *arg1, void *arg2)
 {
 	struct cpu_info *ci = curcpu();
 	extern vector Xsyscall;
 	u_long psl;
 
 	psl = x86_read_psl();
+	x86_disable_intr();
 
 	atomic_dec_ulong(&svs_cpu_barrier1);
 	while (atomic_cas_ulong(&svs_cpu_barrier1, 0, 0) != 0) {
 		x86_pause();
 	}
 
 	/* cpu0 is the one that does the hotpatch job */
 	if (ci == &cpu_info_primary) {
 		svs_enabled = false;
 		svs_disable_hotpatch();
 	}
 
 	/* put back the non-SVS syscall entry point */
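For readers following the reasoning in the commit message, here is a sketch of how svs_disable_cpu() sequences things after this change, reconstructed from the hunk above: save the caller's PSL, mask interrupts, rendezvous with the other CPUs on the first barrier, and let cpu0 do the hotpatching. The tail of the function is not part of this excerpt, so the MSR_LSTAR write, the second barrier, and the final x86_write_psl(psl) restore are assumptions inferred from the saved psl and the svs_cpu_barrier2 declaration.

static void
svs_disable_cpu(void *arg1, void *arg2)
{
	struct cpu_info *ci = curcpu();
	extern vector Xsyscall;
	u_long psl;

	psl = x86_read_psl();	/* remember the caller's interrupt state */
	x86_disable_intr();	/* the added call: interrupts off before the barriers */

	/* rendezvous: wait until every CPU has reached this point */
	atomic_dec_ulong(&svs_cpu_barrier1);
	while (atomic_cas_ulong(&svs_cpu_barrier1, 0, 0) != 0) {
		x86_pause();
	}

	/* cpu0 is the one that does the hotpatch job */
	if (ci == &cpu_info_primary) {
		svs_enabled = false;
		svs_disable_hotpatch();
	}

	/* put back the non-SVS syscall entry point (exact code not in this excerpt) */
	wrmsr(MSR_LSTAR, (uint64_t)Xsyscall);	/* assumed */

	/* assumed tail: second rendezvous, then restore the saved interrupt state */
	atomic_dec_ulong(&svs_cpu_barrier2);
	while (atomic_cas_ulong(&svs_cpu_barrier2, 0, 0) != 0) {
		x86_pause();
	}
	x86_write_psl(psl);
}

Reading the PSL before masking interrupts preserves the original state so it can be put back once the rendezvous completes; doing the masking before the first barrier, rather than relying on the callers, is exactly what this revision adds.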