Wed Jun 14 17:21:04 2017 UTC
Disable interrupts for T_NMI (inline calltrap). Note that there is still a
way to evade the NMI mode here: if a segment register faults in
INTRFASTEXIT, the nested trap's 'iret' ends the NMI blocking early; but we
don't care. I didn't test this change, but it seems fine enough.


(maxv)
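
For context on the caveat above: x86 blocks further NMI delivery from the
moment an NMI is taken until the CPU executes an 'iret' -- any 'iret', not
only the one that ends the NMI handler. INTRFASTEXIT reloads the saved
segment registers before its final 'iret'; if one of those selector loads
faults, the nested fault is handled and dismissed with its own 'iret',
which unmasks NMIs while the original NMI handler is still running. A
hypothetical sketch of such an exit path (illustrative only, not the
actual INTRFASTEXIT macro or trapframe layout):

	popal			/* restore general-purpose registers */
	popl	%gs		/* any of these selector loads can fault */
	popl	%fs		/* (#GP/#NP) if a bogus value was saved; */
	popl	%es		/* the nested fault's 'iret' then ends the */
	popl	%ds		/* CPU's NMI blocking prematurely */
	addl	$8,%esp		/* skip the trap number and error code */
	iret			/* the first 'iret' executed re-enables NMIs */
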
cvs diff -r1.7 -r1.8 src/sys/arch/i386/i386/i386_trap.S

--- src/sys/arch/i386/i386/i386_trap.S 2017/06/14 17:02:16 1.7
+++ src/sys/arch/i386/i386/i386_trap.S 2017/06/14 17:21:04 1.8
@@ -1,14 +1,14 @@
-/* $NetBSD: i386_trap.S,v 1.7 2017/06/14 17:02:16 maxv Exp $ */
+/* $NetBSD: i386_trap.S,v 1.8 2017/06/14 17:21:04 maxv Exp $ */
 
 /*
  * Copyright 2002 (c) Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Frank van der Linden for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -56,27 +56,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #if 0
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.7 2017/06/14 17:02:16 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.8 2017/06/14 17:21:04 maxv Exp $");
 #endif
 
 /*
  * Trap and fault vector routines
  *
  * On exit from the kernel to user mode, we always need to check for ASTs. In
  * addition, we need to do this atomically; otherwise an interrupt may occur
  * which causes an AST, but it won't get processed until the next kernel entry
  * (possibly the next clock tick). Thus, we disable interrupts before checking,
  * and only enable them again on the final `iret' or before calling the AST
  * handler.
  */
 
@@ -88,31 +88,46 @@
 #else
 #define BPTTRAP(a) ZTRAP(a)
 #endif
 
 	.text
 IDTVEC(trap00)
 	ZTRAP(T_DIVIDE)
 IDTVEC_END(trap00)
 
 IDTVEC(trap01)
 	BPTTRAP(T_TRCTRAP)
 IDTVEC_END(trap01)
 
+/*
+ * Non Maskable Interrupts are a special case: they can be triggered even
+ * with interrupts disabled, and once triggered they block further NMIs
+ * until an 'iret' instruction is executed.
+ *
+ * Therefore we don't enable interrupts, because the CPU could switch to
+ * another LWP, call 'iret' and unintentionally leave the NMI mode.
+ */
 IDTVEC(trap02)
 	pushl $0
 	pushl $(T_NMI)
 	INTRENTRY
-	jmp _C_LABEL(calltrap)
+
+	addl	$1,CPUVAR(NTRAP)	/* statistical info */
+	adcl	$0,CPUVAR(NTRAP)+4
+	pushl	%esp
+	call	_C_LABEL(trap)
+	addl	$4,%esp
+
+	INTRFASTEXIT
 IDTVEC_END(trap02)
 
 IDTVEC(trap03)
 	BPTTRAP(T_BPTFLT)
 IDTVEC_END(trap03)
 
 IDTVEC(trap04)
 	ZTRAP(T_OFLOW)
 IDTVEC_END(trap04)
 
 IDTVEC(trap05)
 	ZTRAP(T_BOUND)
 IDTVEC_END(trap05)
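
Two details of the inlined body are worth spelling out. 'pushl %esp' passes
the address of the just-built trapframe as the argument to trap(), and
'addl $4,%esp' pops that argument again afterwards, as cdecl requires. The
NTRAP bump is the usual 32-bit idiom for a 64-bit counter: 'addl' increments
the low word and sets the carry flag if it wraps, and 'adcl $0' folds that
carry into the high word. A standalone sketch of the same idiom (the
'nmi_count' symbol is made up for illustration):

	.data
nmi_count:
	.long	0		/* low 32 bits of the 64-bit counter */
	.long	0		/* high 32 bits */

	.text
bump_nmi_count:
	addl	$1,nmi_count	/* increment low word; CF set on wrap */
	adcl	$0,nmi_count+4	/* propagate carry into the high word */
	ret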