Tue Jun 25 00:27:22 2013 UTC ()
Split these to improve diffability.


(uebayasi)
diff -r0 -r1.1 src/sys/arch/amd64/amd64/amd64_trap.S
diff -r1.43 -r1.44 src/sys/arch/amd64/amd64/vector.S
diff -r0 -r1.1 src/sys/arch/i386/i386/i386_trap.S
diff -r0 -r1.1 src/sys/arch/i386/i386/i386_trap_ipkdb.S
diff -r1.61 -r1.62 src/sys/arch/i386/i386/vector.S

File Added: src/sys/arch/amd64/amd64/amd64_trap.S
/*	$NetBSD: amd64_trap.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $	*/

/*-
 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: amd64_trap.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $");
#endif

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupt before checking,
 * and only enable them again on the final `iret' or before calling the AST
 * handler.
 */ 

/*****************************************************************************/

/*
 * Under Xen the hypervisor leaves two extra words (%rcx and %r11,
 * presumably mirroring the syscall-clobbered registers -- TODO confirm
 * against the Xen PV ABI) on the trap stack.  PRE_TRAP restores them
 * and drops the words so the frame matches the native hardware layout.
 */
#ifdef	XEN
#define	PRE_TRAP	movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp 
#else
#define	PRE_TRAP
#endif

/*
 * TRAP(a):  entry for exceptions where the CPU pushes an error code;
 *           push the trap number and join the common handler.
 * ZTRAP(a): entry for exceptions without an error code; push a zero
 *           placeholder first so all trap frames share one layout.
 * The *_NJ ("no jump") variants omit the jump to alltraps so a vector
 * can insert extra work before (or instead of) the common path.
 */
#define	TRAP_NJ(a)	PRE_TRAP ; pushq $(a)
#define	ZTRAP_NJ(a)	PRE_TRAP ; pushq $0 ; pushq $(a)
#define	TRAP(a)		TRAP_NJ(a) ; jmp _C_LABEL(alltraps)
#define	ZTRAP(a)	ZTRAP_NJ(a) ; jmp _C_LABEL(alltraps)

	.text

/* Vector 0x00: #DE, integer divide fault (no error code). */
IDTVEC(trap00)
	ZTRAP(T_DIVIDE)
IDTVEC_END(trap00)

/* Vector 0x01: #DB, debug/trace trap (no error code). */
IDTVEC(trap01)
	ZTRAP(T_TRCTRAP)
IDTVEC_END(trap01)

/*
 * Vector 0x02: NMI.  An NMI can arrive at any moment, including the
 * windows where the kernel has not yet executed (or has already undone)
 * the swapgs that installs the kernel %gs base, so the normal INTRENTRY
 * path cannot be used.  Instead the current GSBASE MSR is inspected: if
 * it does not already hold a kernel address, swapgs is done here and
 * undone again on the way out.  Under Xen, NMIs are delivered like
 * ordinary traps, so the generic ZTRAP path suffices.
 */
IDTVEC(trap02)
#if defined(XEN)
	ZTRAP(T_NMI)
#else /* defined(XEN) */
	pushq $0
	pushq $T_NMI
	subq	$TF_REGSIZE,%rsp
	INTR_SAVE_GPRS
	movl	$MSR_GSBASE,%ecx
	rdmsr
	cmpl	$VM_MIN_KERNEL_ADDRESS_HIGH32,%edx
	jae	1f			/* GSBASE already a kernel address */
	swapgs
	movw	%gs,TF_GS(%rsp)
	movw	%fs,TF_FS(%rsp)
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
	movq	%rsp,%rdi		/* arg: trap frame */
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
	movw	TF_ES(%rsp),%es
	movw	TF_DS(%rsp),%ds
	swapgs				/* restore the interrupted %gs base */
	jmp	2f
1:
	movq	%rsp,%rdi		/* arg: trap frame */
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
2:
	INTR_RESTORE_GPRS
	addq	$TF_REGSIZE+16,%rsp	/* pop GPRs, trapno and error code */
	iretq
#endif /* defined(XEN) */
IDTVEC_END(trap02)

/*
 * Vector 0x03: #BP, breakpoint (int3).  When DTrace is compiled in,
 * the registered fbt hook is given first chance to claim the trap;
 * otherwise (or with no hook registered) it is handled as an ordinary
 * T_BPTFLT via the common trap path.
 */
IDTVEC(trap03)
#ifndef KDTRACE_HOOKS
	ZTRAP(T_BPTFLT)
#else
	ZTRAP_NJ(T_BPTFLT)
	INTRENTRY
  	STI(si)
	/*
	 * DTrace Function Boundary Trace (fbt) probes are triggered
	 * by int3 (0xcc).
	 */
	/* Check if there is no DTrace hook registered. */
	cmpq	$0,dtrace_invop_jump_addr
	je	calltrap

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	/* XXX: This doesn't look right for SMP - unless it is a
	 * constant - so why set it everytime. (dsl) */
	movq	$calltrap, dtrace_invop_calltrap_addr(%rip)

	/* Jump to the code hooked in by DTrace. */
	movq	dtrace_invop_jump_addr, %rax
	jmpq	*dtrace_invop_jump_addr

	/* Storage for the DTrace hook address and the fallback target. */
	.bss
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr, @object
	.size	dtrace_invop_jump_addr, 8
dtrace_invop_jump_addr:
	.zero	8
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.zero	8
	.text
#endif
IDTVEC_END(trap03)

/* Vector 0x04: #OF, overflow (INTO instruction). */
IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC_END(trap04)

/* Vector 0x05: #BR, BOUND range exceeded. */
IDTVEC(trap05)
	ZTRAP(T_BOUND)
IDTVEC_END(trap05)

/* Vector 0x06: #UD, invalid/privileged opcode. */
IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)

/*
 * Vector 0x07: #NM, device not available (lazy FPU switch).  Calls
 * fpudna() directly instead of the generic trap() -- the frame carries
 * T_ASTFLT since only the common AST/return path is used afterwards.
 */
IDTVEC(trap07)
	ZTRAP_NJ(T_ASTFLT)
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	movq	CPUVAR(SELF),%rdi	/* arg: this CPU's cpu_info */
	call	_C_LABEL(fpudna)
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap07)

/* Vector 0x08: #DF, double fault (CPU pushes an error code). */
IDTVEC(trap08)
	TRAP(T_DOUBLEFLT)
IDTVEC_END(trap08)

/* Vector 0x09: coprocessor segment overrun (legacy, no error code). */
IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC_END(trap09)

/* Vector 0x0a: #TS, invalid TSS (CPU pushes an error code). */
IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC_END(trap0a)

#ifdef XEN
/*
 * I don't believe XEN generates in-kernel traps for the
 * equivalent of iret, if it does this code would be needed
 * in order to copy the user segment registers into the fault frame.
 * Under Xen, check_swapgs is therefore just an alias for alltraps;
 * natively it is defined below in the trap0d handler.
 */
#define check_swapgs alltraps
#endif

/* Vector 0x0b: may be raised mid-return-to-user; see check_swapgs. */
IDTVEC(trap0b)		/* #NP() Segment not present */
	TRAP_NJ(T_SEGNPFLT)
	jmp	check_swapgs
IDTVEC_END(trap0b)		/* #NP() Segment not present */

/* Vector 0x0c: may be raised mid-return-to-user; see check_swapgs. */
IDTVEC(trap0c)		/* #SS() Stack exception */
	TRAP_NJ(T_STKFLT)
	jmp	check_swapgs
IDTVEC_END(trap0c)		/* #SS() Stack exception */

/*
 * Vector 0x0d: #GP, general protection (CPU pushes an error code).
 * The natively-defined check_swapgs label below decides whether the
 * fault happened while the *user* %gs base was still active (a fault
 * on the final iretq to user mode, or on a mov to %gs), in which case
 * INTRENTRY_L's user path must run swapgs before the kernel can use
 * %gs-relative data.
 */
IDTVEC(trap0d)		/* #GP() General protection */
	TRAP_NJ(T_PROTFLT)
#ifdef check_swapgs
	jmp	check_swapgs
#else
/* We need to worry about traps while the kernel %gs_base isn't loaded.
 * These are either loads to %gs (only 32bit) or faults on iret during
 * return to user. */
check_swapgs:
	INTRENTRY_L(3f,1:)
2:	sti
	jmp	calltrap
3:
	/* Trap in kernel mode. */
	/* If faulting instruction is 'iret' we may need to do a 'swapgs'. */
	movq	TF_RIP(%rsp),%rax
	cmpw	$0xcf48,(%rax)		/* Faulting instruction is iretq ? */
	jne	5f			/* Jump if not */
	movq	TF_RSP(%rsp),%rax	/* Must read %rsp, may be a pad word */
	testb	$SEL_UPL,8(%rax)	/* Check %cs of outer iret frame */
	je	2b			/* jump if iret was to kernel  */
	jmp	1b			/* to user - must restore %gs */
5:
	/* Not 'iret', all moves to %gs also need a swapgs */
	movw	(%rax),%ax
	andb	$070,%ah		/* mask mod/rm from mod/reg/rm */
	cmpw	$0x8e+050*256,%ax	/* Any move to %gs (reg 5) */
	jne	2b			/* No - normal kernel fault */
	jmp	1b			/* Yes - restore %gs */
#endif
IDTVEC_END(trap0d)

/* Vector 0x0e: #PF, page fault (CPU pushes an error code). */
IDTVEC(trap0e)
	TRAP(T_PAGEFLT)
IDTVEC_END(trap0e)

/*
 * Vector 0x0f: reserved vector, also used as the spurious-interrupt
 * entry.  Nothing to handle; build a frame and fall into the common
 * AST-check/return path.
 */
IDTVEC(intrspurious)
IDTVEC(trap0f)
	ZTRAP_NJ(T_ASTFLT)
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	jmp	.Lalltraps_checkusr
IDTVEC_END(trap0f)
IDTVEC_END(intrspurious)

/*
 * Vector 0x10: #MF, x87 floating point error.  User-mode faults are
 * handed straight to fputrap(); kernel-mode faults go through the
 * generic trap() path.  .Ldo_fputrap is shared with the SSE exception
 * vector (trap13).
 */
IDTVEC(trap10)
	ZTRAP_NJ(T_ARITHTRAP)
.Ldo_fputrap:
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	testb	$SEL_RPL,TF_CS(%rsp)	/* trapped from user mode? */
	jz	1f
	movq	%rsp,%rdi		/* arg: trap frame */
	call	_C_LABEL(fputrap)
	jmp	.Lalltraps_checkusr
1:
  	STI(si)
	jmp	calltrap
IDTVEC_END(trap10)

/* Vector 0x11: #AC, alignment check (CPU pushes an error code). */
IDTVEC(trap11)
	TRAP(T_ALIGNFLT)
IDTVEC_END(trap11)

/* Vector 0x12: #MC, machine check. */
IDTVEC(trap12)
	ZTRAP(T_MCA)
IDTVEC_END(trap12)

/* Vector 0x13: #XM, SSE floating point exception; shares trap10's path. */
IDTVEC(trap13)
	ZTRAP_NJ(T_XMM)
	jmp	.Ldo_fputrap
IDTVEC_END(trap13)

/*
 * Vectors 0x14-0x1f are architecturally reserved; all their entry
 * labels fall through to a single T_RESERVED trap.
 */
IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* 20 - 31 reserved for future exp */
	ZTRAP(T_RESERVED)
IDTVEC_END(trap1f)
IDTVEC_END(trap1e)
IDTVEC_END(trap1d)
IDTVEC_END(trap1c)
IDTVEC_END(trap1b)
IDTVEC_END(trap1a)
IDTVEC_END(trap19)
IDTVEC_END(trap18)
IDTVEC_END(trap17)
IDTVEC_END(trap16)
IDTVEC_END(trap15)
IDTVEC_END(trap14)

/*
 * Table of the 32 exception entry points above, indexed by vector
 * number; used by the IDT setup code.
 */
IDTVEC(exceptions)
	.quad	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.quad	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.quad	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.quad	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.quad	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.quad	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.quad	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.quad	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.quad	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.quad	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.quad	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.quad	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.quad	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.quad	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.quad	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.quad	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)
IDTVEC_END(exceptions)

/*
 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
 * segment registers or during the iret itself).
 * The address of the (possibly reconstructed) user trap frame is
 * passed as an argument.
 * Typically the code will have raised a SIGSEGV which will be actioned
 * by the code below.
 */
	.type	_C_LABEL(trap_return_fault_return), @function
LABEL(trap_return_fault_return)
	mov	%rdi,%rsp		/* frame for user return */
#ifdef DIAGNOSTIC
	/* We can't recover the saved %rbx, so suppress warning */
	movl	CPUVAR(ILEVEL),%ebx
#endif /* DIAGNOSTIC */
	jmp	.Lalltraps_checkusr
END(trap_return_fault_return)

/*
 * All traps go through here. Call the generic trap handler, and
 * check for ASTs afterwards.
 *
 * On the way back to user mode, interrupts are disabled around the
 * AST check so a freshly posted AST cannot be missed before the iret.
 * With DIAGNOSTIC, %ebx remembers the spl at entry so an unlowered
 * spl on exit can be detected and reported.
 */
NENTRY(alltraps)
	INTRENTRY
  	STI(si)

calltrap:
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx	/* remember entry spl for exit check */
#endif /* DIAGNOSTIC */
	movq	%rsp,%rdi		/* arg: trap frame */
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
.Lalltraps_checkusr:
	testb	$SEL_RPL,TF_CS(%rsp)	/* returning to user mode? */
	jz	6f
.Lalltraps_checkast:
	movq	CPUVAR(CURLWP),%r14
	/* Check for ASTs on exit to user mode. */
  	CLI(si)
	CHECK_ASTPENDING(%r14)
	je	3f
	CLEAR_ASTPENDING(%r14)
  	STI(si)
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi		/* arg: trap frame */
	incq	CPUVAR(NTRAP)
	call	_C_LABEL(trap)
	jmp	.Lalltraps_checkast	/* re-check ASTs */
3:	CHECK_DEFERRED_SWITCH
	jnz	9f			/* deferred pmap load pending */
#ifndef DIAGNOSTIC
6:	INTRFASTEXIT
#else /* DIAGNOSTIC */
6:	cmpl	CPUVAR(ILEVEL),%ebx	/* spl back at its entry value? */
	jne	3f
	INTRFASTEXIT
3:  	STI(si)
	movabsq	$4f,%rdi
	movl	CPUVAR(ILEVEL),%esi
	movl	%ebx,%edx
	xorq	%rax,%rax		/* variadic call: no vector args */
	call	_C_LABEL(printf)
	movl	%ebx,%edi
	call	_C_LABEL(spllower)
	jmp	.Lalltraps_checkast
4:	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n"
#endif /* DIAGNOSTIC */
9:	STI(si)
	call	_C_LABEL(do_pmap_load)
	jmp	.Lalltraps_checkast	/* re-check ASTs */
END(alltraps)

cvs diff -r1.43 -r1.44 src/sys/arch/amd64/amd64/vector.S (expand / switch to unified diff)

--- src/sys/arch/amd64/amd64/vector.S 2013/06/22 08:48:48 1.43
+++ src/sys/arch/amd64/amd64/vector.S 2013/06/25 00:27:22 1.44
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vector.S,v 1.43 2013/06/22 08:48:48 uebayasi Exp $ */ 1/* $NetBSD: vector.S,v 1.44 2013/06/25 00:27:22 uebayasi Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Andrew Doran. 8 * by Charles M. Hannum and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -75,395 +75,32 @@ @@ -75,395 +75,32 @@
75 75
76#include <machine/i8259.h> 76#include <machine/i8259.h>
77#include <machine/i82093reg.h> 77#include <machine/i82093reg.h>
78#include <machine/i82489reg.h> 78#include <machine/i82489reg.h>
79#include <machine/frameasm.h> 79#include <machine/frameasm.h>
80#include <machine/segments.h> 80#include <machine/segments.h>
81#include <machine/trap.h> 81#include <machine/trap.h>
82#include <machine/specialreg.h> 82#include <machine/specialreg.h>
83 83
84#include "ioapic.h" 84#include "ioapic.h"
85#include "lapic.h" 85#include "lapic.h"
86#include "assym.h" 86#include "assym.h"
87 87
88/*****************************************************************************/ 88#include "amd64_trap.S"
89 
90/* 
91 * Trap and fault vector routines 
92 * 
93 * On exit from the kernel to user mode, we always need to check for ASTs. In 
94 * addition, we need to do this atomically; otherwise an interrupt may occur 
95 * which causes an AST, but it won't get processed until the next kernel entry 
96 * (possibly the next clock tick). Thus, we disable interrupt before checking, 
97 * and only enable them again on the final `iret' or before calling the AST 
98 * handler. 
99 */  
100 89
101/*****************************************************************************/ 90/*****************************************************************************/
102 91
103#ifdef XEN 
104#define PRE_TRAP movq (%rsp),%rcx ; movq 8(%rsp),%r11 ; addq $0x10,%rsp  
105#else 
106#define PRE_TRAP 
107#endif 
108 
109#define TRAP_NJ(a) PRE_TRAP ; pushq $(a) 
110#define ZTRAP_NJ(a) PRE_TRAP ; pushq $0 ; pushq $(a) 
111#define TRAP(a) TRAP_NJ(a) ; jmp _C_LABEL(alltraps) 
112#define ZTRAP(a) ZTRAP_NJ(a) ; jmp _C_LABEL(alltraps) 
113 
114 .text 
115 
116IDTVEC(trap00) 
117 ZTRAP(T_DIVIDE) 
118IDTVEC_END(trap00) 
119 
120IDTVEC(trap01) 
121 ZTRAP(T_TRCTRAP) 
122IDTVEC_END(trap01) 
123 
124IDTVEC(trap02) 
125#if defined(XEN) 
126 ZTRAP(T_NMI) 
127#else /* defined(XEN) */ 
128 pushq $0 
129 pushq $T_NMI 
130 subq $TF_REGSIZE,%rsp 
131 INTR_SAVE_GPRS 
132 movl $MSR_GSBASE,%ecx 
133 rdmsr 
134 cmpl $VM_MIN_KERNEL_ADDRESS_HIGH32,%edx 
135 jae 1f 
136 swapgs 
137 movw %gs,TF_GS(%rsp) 
138 movw %fs,TF_FS(%rsp) 
139 movw %es,TF_ES(%rsp) 
140 movw %ds,TF_DS(%rsp) 
141 movq %rsp,%rdi 
142 incq CPUVAR(NTRAP) 
143 call _C_LABEL(trap) 
144 movw TF_ES(%rsp),%es 
145 movw TF_DS(%rsp),%ds 
146 swapgs 
147 jmp 2f 
1481: 
149 movq %rsp,%rdi 
150 incq CPUVAR(NTRAP) 
151 call _C_LABEL(trap) 
1522: 
153 INTR_RESTORE_GPRS 
154 addq $TF_REGSIZE+16,%rsp 
155 iretq 
156#endif /* defined(XEN) */ 
157IDTVEC_END(trap02) 
158 
159IDTVEC(trap03) 
160#ifndef KDTRACE_HOOKS 
161 ZTRAP(T_BPTFLT) 
162#else 
163 ZTRAP_NJ(T_BPTFLT) 
164 INTRENTRY 
165 STI(si) 
166 /* 
167 * DTrace Function Boundary Trace (fbt) probes are triggered 
168 * by int3 (0xcc). 
169 */ 
170 /* Check if there is no DTrace hook registered. */ 
171 cmpq $0,dtrace_invop_jump_addr 
172 je calltrap 
173 
174 /* 
175 * Set our jump address for the jump back in the event that 
176 * the exception wasn't caused by DTrace at all. 
177 */ 
178 /* XXX: This doesn't look right for SMP - unless it is a 
179 * constant - so why set it everytime. (dsl) */ 
180 movq $calltrap, dtrace_invop_calltrap_addr(%rip) 
181 
182 /* Jump to the code hooked in by DTrace. */ 
183 movq dtrace_invop_jump_addr, %rax 
184 jmpq *dtrace_invop_jump_addr 
185 
186 .bss 
187 .globl dtrace_invop_jump_addr 
188 .align 8 
189 .type dtrace_invop_jump_addr, @object 
190 .size dtrace_invop_jump_addr, 8 
191dtrace_invop_jump_addr: 
192 .zero 8 
193 .globl dtrace_invop_calltrap_addr 
194 .align 8 
195 .type dtrace_invop_calltrap_addr, @object 
196 .size dtrace_invop_calltrap_addr, 8 
197dtrace_invop_calltrap_addr: 
198 .zero 8 
199 .text 
200#endif 
201IDTVEC_END(trap03) 
202 
203IDTVEC(trap04) 
204 ZTRAP(T_OFLOW) 
205IDTVEC_END(trap04) 
206 
207IDTVEC(trap05) 
208 ZTRAP(T_BOUND) 
209IDTVEC_END(trap05) 
210 
211IDTVEC(trap06) 
212 ZTRAP(T_PRIVINFLT) 
213IDTVEC_END(trap06) 
214 
215IDTVEC(trap07) 
216 ZTRAP_NJ(T_ASTFLT) 
217 INTRENTRY 
218#ifdef DIAGNOSTIC 
219 movl CPUVAR(ILEVEL),%ebx 
220#endif /* DIAGNOSTIC */ 
221 movq CPUVAR(SELF),%rdi 
222 call _C_LABEL(fpudna) 
223 jmp .Lalltraps_checkusr 
224IDTVEC_END(trap07) 
225 
226IDTVEC(trap08) 
227 TRAP(T_DOUBLEFLT) 
228IDTVEC_END(trap08) 
229 
230IDTVEC(trap09) 
231 ZTRAP(T_FPOPFLT) 
232IDTVEC_END(trap09) 
233 
234IDTVEC(trap0a) 
235 TRAP(T_TSSFLT) 
236IDTVEC_END(trap0a) 
237 
238#ifdef XEN 
239/* 
240 * I don't believe XEN generates in-kernel traps for the 
241 * equivalent of iret, if it does this code would be needed 
242 * in order to copy the user segment registers into the fault frame. 
243 */ 
244#define check_swapgs alltraps 
245#endif 
246 
247IDTVEC(trap0b) /* #NP() Segment not present */ 
248 TRAP_NJ(T_SEGNPFLT) 
249 jmp check_swapgs 
250IDTVEC_END(trap0b) /* #NP() Segment not present */ 
251 
252IDTVEC(trap0c) /* #SS() Stack exception */ 
253 TRAP_NJ(T_STKFLT) 
254 jmp check_swapgs 
255IDTVEC_END(trap0c) /* #SS() Stack exception */ 
256 
257IDTVEC(trap0d) /* #GP() General protection */ 
258 TRAP_NJ(T_PROTFLT) 
259#ifdef check_swapgs 
260 jmp check_swapgs 
261#else 
262/* We need to worry about traps while the kernel %gs_base isn't loaded. 
263 * These are either loads to %gs (only 32bit) or faults on iret during 
264 * return to user. */ 
265check_swapgs: 
266 INTRENTRY_L(3f,1:) 
2672: sti 
268 jmp calltrap 
2693: 
270 /* Trap in kernel mode. */ 
271 /* If faulting instruction is 'iret' we may need to do a 'swapgs'. */ 
272 movq TF_RIP(%rsp),%rax 
273 cmpw $0xcf48,(%rax) /* Faulting instruction is iretq ? */ 
274 jne 5f /* Jump if not */ 
275 movq TF_RSP(%rsp),%rax /* Must read %rsp, may be a pad word */ 
276 testb $SEL_UPL,8(%rax) /* Check %cs of outer iret frame */ 
277 je 2b /* jump if iret was to kernel */ 
278 jmp 1b /* to user - must restore %gs */ 
2795: 
280 /* Not 'iret', all moves to %gs also need a swapgs */ 
281 movw (%rax),%ax 
282 andb $070,%ah /* mask mod/rm from mod/reg/rm */ 
283 cmpw $0x8e+050*256,%ax /* Any move to %gs (reg 5) */ 
284 jne 2b /* No - normal kernel fault */ 
285 jmp 1b /* Yes - restore %gs */ 
286#endif 
287IDTVEC_END(trap0d) 
288 
289IDTVEC(trap0e) 
290 TRAP(T_PAGEFLT) 
291IDTVEC_END(trap0e) 
292 
293IDTVEC(intrspurious) 
294IDTVEC(trap0f) 
295 ZTRAP_NJ(T_ASTFLT) 
296 INTRENTRY 
297#ifdef DIAGNOSTIC 
298 movl CPUVAR(ILEVEL),%ebx 
299#endif /* DIAGNOSTIC */ 
300 jmp .Lalltraps_checkusr 
301IDTVEC_END(trap0f) 
302IDTVEC_END(intrspurious) 
303 
304IDTVEC(trap10) 
305 ZTRAP_NJ(T_ARITHTRAP) 
306.Ldo_fputrap: 
307 INTRENTRY 
308#ifdef DIAGNOSTIC 
309 movl CPUVAR(ILEVEL),%ebx 
310#endif /* DIAGNOSTIC */ 
311 testb $SEL_RPL,TF_CS(%rsp) 
312 jz 1f 
313 movq %rsp,%rdi 
314 call _C_LABEL(fputrap) 
315 jmp .Lalltraps_checkusr 
3161: 
317 STI(si) 
318 jmp calltrap 
319IDTVEC_END(trap10) 
320 
321IDTVEC(trap11) 
322 TRAP(T_ALIGNFLT) 
323IDTVEC_END(trap11) 
324 
325IDTVEC(trap12) 
326 ZTRAP(T_MCA) 
327IDTVEC_END(trap12) 
328 
329IDTVEC(trap13) 
330 ZTRAP_NJ(T_XMM) 
331 jmp .Ldo_fputrap 
332IDTVEC_END(trap13) 
333 
334IDTVEC(trap14) 
335IDTVEC(trap15) 
336IDTVEC(trap16) 
337IDTVEC(trap17) 
338IDTVEC(trap18) 
339IDTVEC(trap19) 
340IDTVEC(trap1a) 
341IDTVEC(trap1b) 
342IDTVEC(trap1c) 
343IDTVEC(trap1d) 
344IDTVEC(trap1e) 
345IDTVEC(trap1f) 
346 /* 20 - 31 reserved for future exp */ 
347 ZTRAP(T_RESERVED) 
348IDTVEC_END(trap1f) 
349IDTVEC_END(trap1e) 
350IDTVEC_END(trap1d) 
351IDTVEC_END(trap1c) 
352IDTVEC_END(trap1b) 
353IDTVEC_END(trap1a) 
354IDTVEC_END(trap19) 
355IDTVEC_END(trap18) 
356IDTVEC_END(trap17) 
357IDTVEC_END(trap16) 
358IDTVEC_END(trap15) 
359IDTVEC_END(trap14) 
360 
361IDTVEC(exceptions) 
362 .quad _C_LABEL(Xtrap00), _C_LABEL(Xtrap01) 
363 .quad _C_LABEL(Xtrap02), _C_LABEL(Xtrap03) 
364 .quad _C_LABEL(Xtrap04), _C_LABEL(Xtrap05) 
365 .quad _C_LABEL(Xtrap06), _C_LABEL(Xtrap07) 
366 .quad _C_LABEL(Xtrap08), _C_LABEL(Xtrap09) 
367 .quad _C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b) 
368 .quad _C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d) 
369 .quad _C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f) 
370 .quad _C_LABEL(Xtrap10), _C_LABEL(Xtrap11) 
371 .quad _C_LABEL(Xtrap12), _C_LABEL(Xtrap13) 
372 .quad _C_LABEL(Xtrap14), _C_LABEL(Xtrap15) 
373 .quad _C_LABEL(Xtrap16), _C_LABEL(Xtrap17) 
374 .quad _C_LABEL(Xtrap18), _C_LABEL(Xtrap19) 
375 .quad _C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b) 
376 .quad _C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d) 
377 .quad _C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f) 
378IDTVEC_END(exceptions) 
379 
380/* 
381 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the 
382 * segment registers or during the iret itself). 
383 * The address of the (possibly reconstructed) user trap frame is 
384 * passed as an argument. 
385 * Typically the code will have raised a SIGSEGV which will be actioned 
386 * by the code below. 
387 */ 
388 .type _C_LABEL(trap_return_fault_return), @function 
389LABEL(trap_return_fault_return) 
390 mov %rdi,%rsp /* frame for user return */ 
391#ifdef DIAGNOSTIC 
392 /* We can't recover the saved %rbx, so suppress warning */ 
393 movl CPUVAR(ILEVEL),%ebx 
394#endif /* DIAGNOSTIC */ 
395 jmp .Lalltraps_checkusr 
396END(trap_return_fault_return) 
397 
398/* 
399 * All traps go through here. Call the generic trap handler, and 
400 * check for ASTs afterwards. 
401 */ 
402NENTRY(alltraps) 
403 INTRENTRY 
404 STI(si) 
405 
406calltrap: 
407#ifdef DIAGNOSTIC 
408 movl CPUVAR(ILEVEL),%ebx 
409#endif /* DIAGNOSTIC */ 
410 movq %rsp,%rdi 
411 incq CPUVAR(NTRAP) 
412 call _C_LABEL(trap) 
413.Lalltraps_checkusr: 
414 testb $SEL_RPL,TF_CS(%rsp) 
415 jz 6f 
416.Lalltraps_checkast: 
417 movq CPUVAR(CURLWP),%r14 
418 /* Check for ASTs on exit to user mode. */ 
419 CLI(si) 
420 CHECK_ASTPENDING(%r14) 
421 je 3f 
422 CLEAR_ASTPENDING(%r14) 
423 STI(si) 
424 movl $T_ASTFLT,TF_TRAPNO(%rsp) 
425 movq %rsp,%rdi 
426 incq CPUVAR(NTRAP) 
427 call _C_LABEL(trap) 
428 jmp .Lalltraps_checkast /* re-check ASTs */ 
4293: CHECK_DEFERRED_SWITCH 
430 jnz 9f 
431#ifndef DIAGNOSTIC 
4326: INTRFASTEXIT 
433#else /* DIAGNOSTIC */ 
4346: cmpl CPUVAR(ILEVEL),%ebx 
435 jne 3f 
436 INTRFASTEXIT 
4373: STI(si) 
438 movabsq $4f,%rdi 
439 movl CPUVAR(ILEVEL),%esi 
440 movl %ebx,%edx 
441 xorq %rax,%rax 
442 call _C_LABEL(printf) 
443 movl %ebx,%edi 
444 call _C_LABEL(spllower) 
445 jmp .Lalltraps_checkast 
4464: .asciz "WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n" 
447#endif /* DIAGNOSTIC */ 
4489: STI(si) 
449 call _C_LABEL(do_pmap_load) 
450 jmp .Lalltraps_checkast /* re-check ASTs */ 
451END(alltraps) 
452 
453 
454#define __HAVE_GENERIC_SOFT_INTERRUPTS /* XXX */ 92#define __HAVE_GENERIC_SOFT_INTERRUPTS /* XXX */
455 93
456 
457/* 94/*
458 * Macros for interrupt entry, call to handler, and exit. 95 * Macros for interrupt entry, call to handler, and exit.
459 * 96 *
460 * XXX 97 * XXX
461 * The interrupt frame is set up to look like a trap frame. This may be a 98 * The interrupt frame is set up to look like a trap frame. This may be a
462 * waste. The only handler which needs a frame is the clock handler, and it 99 * waste. The only handler which needs a frame is the clock handler, and it
463 * only needs a few bits. Xdoreti() needs a trap frame for handling ASTs, but 100 * only needs a few bits. Xdoreti() needs a trap frame for handling ASTs, but
464 * it could easily convert the frame on demand. 101 * it could easily convert the frame on demand.
465 * 102 *
466 * The direct costs of setting up a trap frame are two pushq's (error code and 103 * The direct costs of setting up a trap frame are two pushq's (error code and
467 * trap number), an addl to get rid of these, and pushing and popping the 104 * trap number), an addl to get rid of these, and pushing and popping the
468 * callee-saved registers %esi, %edi, %ebx, and %ebp twice. 105 * callee-saved registers %esi, %edi, %ebx, and %ebp twice.
469 * 106 *

File Added: src/sys/arch/i386/i386/i386_trap.S
/*	$NetBSD: i386_trap.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: i386_trap.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $");
#endif

/*
 * Trap and fault vector routines
 *
 * On exit from the kernel to user mode, we always need to check for ASTs.  In
 * addition, we need to do this atomically; otherwise an interrupt may occur
 * which causes an AST, but it won't get processed until the next kernel entry
 * (possibly the next clock tick).  Thus, we disable interrupt before checking,
 * and only enable them again on the final `iret' or before calling the AST
 * handler.
 */ 

/*
 * TRAP(a):  entry for exceptions where the CPU pushes an error code;
 *           push the trap number and join the common handler.
 * ZTRAP(a): entry for exceptions without one; push a zero placeholder
 *           first so all trap frames share one layout.
 */
#define TRAP(a)			pushl $(a) ; jmp _C_LABEL(alltraps)
#define ZTRAP(a)		pushl $0 ; TRAP(a)

/* Breakpoint/trace vectors divert to IPKDB when it is configured in. */
#ifdef IPKDB
#define BPTTRAP(a)	pushl $0; pushl $(a); jmp _C_LABEL(bpttraps)
#else
#define BPTTRAP(a)	ZTRAP(a)
#endif


	.text
/* Vector 0x00: #DE, integer divide fault. */
IDTVEC(trap00)
	ZTRAP(T_DIVIDE)
IDTVEC_END(trap00)
/* Vector 0x01: #DB, debug/trace trap (debugger hook). */
IDTVEC(trap01)
	BPTTRAP(T_TRCTRAP)
IDTVEC_END(trap01)
/* Vector 0x02: NMI; build a frame and use the common handler. */
IDTVEC(trap02)
	pushl $0
	pushl $(T_NMI)
	INTRENTRY
	jmp _C_LABEL(calltrap)
IDTVEC_END(trap02)
/* Vector 0x03: #BP, breakpoint (debugger hook). */
IDTVEC(trap03)
	BPTTRAP(T_BPTFLT)
IDTVEC_END(trap03)
/* Vector 0x04: #OF, overflow (INTO instruction). */
IDTVEC(trap04)
	ZTRAP(T_OFLOW)
IDTVEC_END(trap04)
/* Vector 0x05: #BR, BOUND range exceeded. */
IDTVEC(trap05)
	ZTRAP(T_BOUND)
IDTVEC_END(trap05)
/*
 * Privileged instruction fault.
 *
 * Vector 0x06 (#UD).  With DTrace compiled in, a kernel-mode fault is
 * first offered to the registered invop hook (fbt probes patch kernel
 * text with illegal instructions); user faults and unclaimed faults
 * are handled as ordinary T_PRIVINFLT traps.
 */
#ifdef KDTRACE_HOOKS
	SUPERALIGN_TEXT
IDTVEC(trap06)
	/* Check if there is no DTrace hook registered. */
	cmpl	$0,dtrace_invop_jump_addr
	je	norm_ill

	/* Check if this is a user fault. */
	/* XXX this was 0x0020 in FreeBSD */
	cmpl	$GSEL(GCODE_SEL, SEL_KPL), 4(%esp)   /* Check code segment. */

	/* If so, just handle it as a normal trap. */
	jne	norm_ill
              
	/*
	 * This is a kernel instruction fault that might have been caused
	 * by a DTrace provider.
	 */
	pushal				/* Push all registers onto the stack. */

	/*
	 * Set our jump address for the jump back in the event that
	 * the exception wasn't caused by DTrace at all.
	 */
	movl	$norm_ill, dtrace_invop_calltrap_addr

	/* Jump to the code hooked in by DTrace. */
	jmpl	*dtrace_invop_jump_addr

	/*
	 * Process the instruction fault in the normal way.
	 */
norm_ill:
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)
#else
IDTVEC(trap06)
	ZTRAP(T_PRIVINFLT)
IDTVEC_END(trap06)
#endif
/*
 * Vector 0x07: #NM, device not available.  With an npx configured the
 * lazy FPU switch handler (npxdna_func) gets first crack; it returns
 * non-zero in %eax when it handled the fault.  Without npx the fault
 * is handled as a normal T_DNA trap.
 */
IDTVEC(trap07)
#if NNPX > 0
	pushl	$0			# dummy error code
	pushl	$T_DNA
	INTRENTRY
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
#endif
	pushl	CPUVAR(SELF)
	call	*_C_LABEL(npxdna_func)
	addl	$4,%esp
	testl	%eax,%eax
	jz	calltrap		# not handled; generic trap path
	jmp	_C_LABEL(trapreturn)
#else
#ifndef XEN
	sti
#endif
	ZTRAP(T_DNA)
#endif
IDTVEC_END(trap07)
/* Vector 0x08: #DF, double fault (CPU pushes an error code). */
IDTVEC(trap08)
	TRAP(T_DOUBLEFLT)
IDTVEC_END(trap08)
/* Vector 0x09: coprocessor segment overrun (legacy, no error code). */
IDTVEC(trap09)
	ZTRAP(T_FPOPFLT)
IDTVEC_END(trap09)
/* Vector 0x0a: #TS, invalid TSS (CPU pushes an error code). */
IDTVEC(trap0a)
	TRAP(T_TSSFLT)
IDTVEC_END(trap0a)
/* Vector 0x0b: #NP, segment not present (CPU pushes an error code). */
IDTVEC(trap0b)
	TRAP(T_SEGNPFLT)
IDTVEC_END(trap0b)
/* Vector 0x0c: #SS, stack fault (CPU pushes an error code). */
IDTVEC(trap0c)
	TRAP(T_STKFLT)
IDTVEC_END(trap0c)
/* Vector 0x0d: #GP, general protection (CPU pushes an error code). */
IDTVEC(trap0d)
	TRAP(T_PROTFLT)
IDTVEC_END(trap0d)
/*
 * Vector 0x0e: #PF, page fault (CPU pushes an error code).  Natively,
 * a kernel-mode read fault whose %cr2 lands on entry 6 of pentium_idt
 * is rewritten into T_PRIVINFLT -- presumably the Pentium F00F-bug
 * workaround (read-only aliased IDT turning the errant access into a
 * page fault); TODO confirm against the pentium_idt setup code.
 */
IDTVEC(trap0e)
#ifndef XEN
	pushl	$T_PAGEFLT
	INTRENTRY
	STI(%eax)
	testb	$PGEX_U,TF_ERR(%esp)	# fault from user mode?
	jnz	calltrap
	movl	%cr2,%eax
	subl	_C_LABEL(pentium_idt),%eax
	cmpl	$(6*8),%eax		# fault on IDT entry 6 (#UD)?
	jne	calltrap
	movb	$T_PRIVINFLT,TF_TRAPNO(%esp)
	jmp	calltrap
#else /* !XEN */
	TRAP(T_PAGEFLT)
#endif /* !XEN */
IDTVEC_END(trap0e)

/*
 * Vector 15 doubles as the spurious-interrupt entry point.  A frame
 * is built with T_ASTFLT and the stub exits through trapreturn, so
 * pending ASTs are still serviced while the "interrupt" itself is
 * silently dropped.
 */
IDTVEC(intrspurious)
IDTVEC(trap0f)
	/*
	 * The Pentium Pro local APIC may erroneously call this vector for a
	 * default IR7.  Just ignore it.
	 *
	 * (The local APIC does this when CPL is raised while it's on the
	 * way to delivering an interrupt.. presumably enough has been set
	 * up that it's inconvenient to abort delivery completely..)
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	STI(%eax)
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx	# SPL for trapreturn's exit check
#endif
	jmp	_C_LABEL(trapreturn)
IDTVEC_END(trap0f)
IDTVEC_END(intrspurious)

/*
 * FPU error (#MF, vector 16).  With an npx device, dispatch through
 * npxintr() like an interrupt so the error condition can be cleared;
 * otherwise deliver a plain T_ARITHTRAP.
 */
IDTVEC(trap10)
#if NNPX > 0
	/*
	 * Handle like an interrupt so that we can call npxintr to clear the
	 * error.  It would be better to handle npx interrupts as traps but
	 * this is difficult for nested interrupts.
	 */
	pushl	$0			# dummy error code
	pushl	$T_ASTFLT
	INTRENTRY
	movl	CPUVAR(ILEVEL),%ebx
	pushl	%ebx			# third arg: SPL at entry
	pushl	%esp			# second arg: trap frame
	pushl	$0			# dummy arg
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	call	_C_LABEL(npxintr)
	addl	$12,%esp		# pop the three npxintr() args
	jmp	_C_LABEL(trapreturn)
#else
	sti
	ZTRAP(T_ARITHTRAP)
#endif
IDTVEC_END(trap10)
IDTVEC(trap11)
	TRAP(T_ALIGNFLT)		/* #AC: alignment check (error code pushed) */
IDTVEC_END(trap11)
/*
 * Vectors 18 (#MC, machine check) and 19 (#XM, SIMD FP) are only
 * dispatched natively; under Xen their entry points fall through to
 * the reserved-trap stub below.  Vectors 20-31 are reserved and all
 * share the single T_RESERVED stub; the IDTVEC_END markers are then
 * emitted in reverse order so each label brackets its range.
 */
#ifdef XEN
IDTVEC(trap12)
IDTVEC(trap13)
#else
IDTVEC(trap12)
	ZTRAP(T_MCA)
IDTVEC(trap13)
	ZTRAP(T_XMM)
#endif
IDTVEC(trap14)
IDTVEC(trap15)
IDTVEC(trap16)
IDTVEC(trap17)
IDTVEC(trap18)
IDTVEC(trap19)
IDTVEC(trap1a)
IDTVEC(trap1b)
IDTVEC(trap1c)
IDTVEC(trap1d)
IDTVEC(trap1e)
IDTVEC(trap1f)
	/* 20 - 31 reserved for future exp */
	ZTRAP(T_RESERVED)
IDTVEC_END(trap1f)
IDTVEC_END(trap1e)
IDTVEC_END(trap1d)
IDTVEC_END(trap1c)
IDTVEC_END(trap1b)
IDTVEC_END(trap1a)
IDTVEC_END(trap19)
IDTVEC_END(trap18)
IDTVEC_END(trap17)
IDTVEC_END(trap16)
IDTVEC_END(trap15)
IDTVEC_END(trap14)
/*
 * NOTE(review): both arms of this conditional are identical, and
 * IDTVEC_END(trap11) below repeats the marker already emitted right
 * after the trap11 stub above -- both look like artifacts of the
 * file split; verify against IDTVEC_END's definition.
 */
#ifndef XEN
IDTVEC_END(trap13)
IDTVEC_END(trap12)
#else
IDTVEC_END(trap13)
IDTVEC_END(trap12)
#endif
IDTVEC_END(trap11)

/*
 * Table of the 32 exception stub addresses above, indexed by vector
 * number (presumably consumed when the IDT is populated at startup --
 * see the machine-dependent init code).
 */
IDTVEC(exceptions)
	.long	_C_LABEL(Xtrap00), _C_LABEL(Xtrap01)
	.long	_C_LABEL(Xtrap02), _C_LABEL(Xtrap03)
	.long	_C_LABEL(Xtrap04), _C_LABEL(Xtrap05)
	.long	_C_LABEL(Xtrap06), _C_LABEL(Xtrap07)
	.long	_C_LABEL(Xtrap08), _C_LABEL(Xtrap09)
	.long	_C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b)
	.long	_C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d)
	.long	_C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f)
	.long	_C_LABEL(Xtrap10), _C_LABEL(Xtrap11)
	.long	_C_LABEL(Xtrap12), _C_LABEL(Xtrap13)
	.long	_C_LABEL(Xtrap14), _C_LABEL(Xtrap15)
	.long	_C_LABEL(Xtrap16), _C_LABEL(Xtrap17)
	.long	_C_LABEL(Xtrap18), _C_LABEL(Xtrap19)
	.long	_C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b)
	.long	_C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d)
	.long	_C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f)
IDTVEC_END(exceptions)

 
/*
 * Double-fault handler entered through a task gate: recover a pointer
 * to the faulting context's TSS (via the back-link of our own TSS --
 * str gives our TSS selector, GET_TSS turns a selector into a base)
 * and hand it to trap_tss() together with T_DOUBLEFLT.
 * NOTE(review): 12 bytes are popped after pushing only 8 -- this
 * looks like it also accounts for the error code the CPU pushed on
 * this task's stack; confirm against trap_tss()'s prototype.
 */
IDTVEC(tss_trap08)
1:
	str	%ax			# ax = selector of the current TSS
	GET_TSS
	movzwl	(%eax),%eax		# first word: back-link selector
	GET_TSS				# eax = faulting task's TSS
	pushl	$T_DOUBLEFLT
	pushl	%eax
	call	_C_LABEL(trap_tss)
	addl	$12,%esp
	iret				# task-switch back to the faulting task
	jmp	1b			# re-enter if the task is resumed again
IDTVEC_END(tss_trap08)

/*
 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the
 * segment registers or during the iret itself).
 * The address of the (possibly reconstructed) user trap frame is
 * passed as an argument.
 * Typically the code will have raised a SIGSEGV which will be actioned
 * by the code below.
 */
	.type	_C_LABEL(trap_return_fault_return), @function
LABEL(trap_return_fault_return)
	mov	4(%esp),%esp	/* frame for user return */
	jmp	_C_LABEL(trapreturn)	/* deliver pending ASTs, then exit */
END(trap_return_fault_return)

/* LINTSTUB: Ignore */
/*
 * Common body for the TRAP()/ZTRAP() stubs above.  On entry the trap
 * number and error code are already on the stack; build the full trap
 * frame, call trap(), then on the way out to user mode loop servicing
 * ASTs (and, on Xen, pending events) with interrupts disabled so that
 * none can slip in between the final check and the frame pop.
 * %ebx carries the entry SPL for the DIAGNOSTIC exit check.
 */
NENTRY(alltraps)
	INTRENTRY
	STI(%eax)
calltrap:
#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx	# remember SPL for the exit check
#endif /* DIAGNOSTIC */
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp			# arg: trap frame
	call	_C_LABEL(trap)
	addl	$4,%esp
_C_LABEL(trapreturn):	.globl	trapreturn
	testb	$CHK_UPL,TF_CS(%esp)	# returning to user mode?
	jnz	.Lalltraps_checkast
#ifdef VM86
	testl	$PSL_VM,TF_EFLAGS(%esp)	# vm86 mode counts as user too
	jz	6f
#else
	jmp	6f			# kernel-to-kernel: no AST check
#endif
.Lalltraps_checkast:
	/* Check for ASTs on exit to user mode. */
	CLI(%eax)
	CHECK_ASTPENDING(%eax)
	jz	3f
5:	CLEAR_ASTPENDING(%eax)
	STI(%eax)
	movl	$T_ASTFLT,TF_TRAPNO(%esp)	# deliver the AST via trap()
	addl	$1,CPUVAR(NTRAP)	# statistical info
	adcl	$0,CPUVAR(NTRAP)+4
	pushl	%esp
	call	_C_LABEL(trap)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
3:	CHECK_DEFERRED_SWITCH
	jnz	9f			# deferred pmap switch pending
#ifdef XEN
	/*
	 * Drain any events that arrived while interrupts were blocked:
	 * dispatch each unmasked pending source through its resume
	 * entry, then come back via trapreturn.
	 */
	STIC(%eax)
	jz      6f
	call    _C_LABEL(stipending)
	testl   %eax,%eax
	jz      6f
	/* process pending interrupts */
	CLI(%eax)
	movl    CPUVAR(ILEVEL), %ebx
	movl    $.Lalltraps_resume, %esi # address to resume loop at
.Lalltraps_resume:
	movl    %ebx,%eax               # get cpl
	movl    CPUVAR(IUNMASK)(,%eax,4),%eax
	andl    CPUVAR(IPENDING),%eax   # any non-masked bits left?
	jz	7f
	bsrl    %eax,%eax               # highest-priority pending source
	btrl    %eax,CPUVAR(IPENDING)   # claim it
	movl    CPUVAR(ISOURCES)(,%eax,4),%eax
	jmp     *IS_RESUME(%eax)
7:      movl    %ebx, CPUVAR(ILEVEL) #restore cpl
	jmp     _C_LABEL(trapreturn)
#endif /* XEN */
#ifndef DIAGNOSTIC
6:	INTRFASTEXIT
#else
6:	cmpl	CPUVAR(ILEVEL),%ebx	# SPL must match what we came in at
	jne	3f
	INTRFASTEXIT
3:	STI(%eax)
	pushl	$4f			# panic: trap handler leaked an SPL
	call	_C_LABEL(panic)
	addl	$4,%esp
	pushl	%ebx
	call	_C_LABEL(spllower)
	addl	$4,%esp
	jmp	.Lalltraps_checkast	/* re-check ASTs */
4:	.asciz	"SPL NOT LOWERED ON TRAP EXIT\n"
#endif /* DIAGNOSTIC */
9:	STI(%eax)
	call	_C_LABEL(pmap_load)	# perform the deferred pmap switch
	jmp	.Lalltraps_checkast	/* re-check ASTs */
END(alltraps)

File Added: src/sys/arch/i386/i386/Attic/i386_trap_ipkdb.S
/*	$NetBSD: i386_trap_ipkdb.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $	*/

/*
 * Copyright 2002 (c) Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1998, 2007, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if 0
#include <machine/asm.h>
__KERNEL_RCSID(0, "$NetBSD: i386_trap_ipkdb.S,v 1.1 2013/06/25 00:27:22 uebayasi Exp $");
#endif

#ifdef IPKDB
/* LINTSTUB: Ignore */
/*
 * Breakpoint/trace-trap entry when IPKDB is configured: offer the
 * trap to the kernel debugger first via ipkdb_trap_glue(); a zero
 * return means it declined, so continue into the normal trap path.
 */
NENTRY(bpttraps)
	INTRENTRY
	call	_C_LABEL(ipkdb_trap_glue)
	testl	%eax,%eax
	jz	calltrap		# not for the debugger
	INTRFASTEXIT

/*
 * ipkdbsetup: prepare for a debugger memory access.  Pops its own
 * return address into %ecx so that the saved state (CR0, old IDT
 * descriptors for vectors 13/#GP and 14/#PF) can be stacked beneath
 * it; disables CR0 write protection and points both descriptors'
 * offset fields at the local `fault' handler.  Balanced by
 * ipkdbrestore below.
 */
ipkdbsetup:
	popl	%ecx			# our return address

	/* Disable write protection: */
	movl	%cr0,%eax
	pushl	%eax			# save original CR0
	andl	$~CR0_WP,%eax
	movl	%eax,%cr0

	/* Substitute Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	pushl	13*8(%edx)		# save old #GP descriptor
	pushl	13*8+4(%edx)
	pushl	14*8(%edx)		# save old #PF descriptor
	pushl	14*8+4(%edx)
	movl	$fault,%eax
	movw	%ax,13*8(%edx)		# low 16 bits of handler offset
	movw	%ax,14*8(%edx)
	shrl	$16,%eax
	movw	%ax,13*8+6(%edx)	# high 16 bits of handler offset
	movw	%ax,14*8+6(%edx)

	pushl	%ecx			# put return address back
	ret

/*
 * ipkdbrestore: undo ipkdbsetup -- pop the saved IDT descriptors and
 * CR0 value back into place, keeping the return address in %ecx
 * across the stack juggling.
 */
ipkdbrestore:
	popl	%ecx			# our return address

	/* Restore Protection & Page Fault handlers: */
	movl	_C_LABEL(idt),%edx
	popl	14*8+4(%edx)
	popl	14*8(%edx)
	popl	13*8+4(%edx)
	popl	13*8(%edx)

	/* Restore write protection: */
	popl	%edx
	movl	%edx,%cr0

	pushl	%ecx			# put return address back
	ret
END(bpttraps)
#endif /* IPKDB */

#ifdef IPKDB
/* LINTSTUB: Func: int ipkdbfbyte(u_char *c) */
/*
 * Fetch one byte for the debugger with fault protection: ipkdbsetup
 * redirects #GP/#PF to `fault' below, so an inaccessible address
 * makes us resume at faultexit with %eax == -1 instead of crashing.
 * Returns the zero-extended byte on success.
 */
NENTRY(ipkdbfbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx		# edx = source address
	movzbl	(%edx),%eax		# protected read
faultexit:				# `fault' reroutes here, eax = -1
	call	ipkdbrestore
	popl	%ebp
	ret
END(ipkdbfbyte)

/* LINTSTUB: Func: int ipkdbsbyte(u_char *c, int i) */
/*
 * Store one byte for the debugger under the same fault protection;
 * a faulting store also unwinds through faultexit with -1.
 */
NENTRY(ipkdbsbyte)
	pushl	%ebp
	movl	%esp,%ebp
	call	ipkdbsetup
	movl	8(%ebp),%edx		# edx = destination address
	movl	12(%ebp),%eax		# eax = value
	movb	%al,(%edx)		# protected write
	call	ipkdbrestore
	popl	%ebp
	ret

/*
 * Substituted #GP/#PF handler: discard the error code, rewrite the
 * saved return EIP so the iret lands on faultexit, and flag the
 * failure with -1 in %eax.
 */
fault:
	popl	%eax		/* error code */
	movl	$faultexit,%eax
	movl	%eax,(%esp)	/* redirect saved EIP to faultexit */
	movl	$-1,%eax
	iret
END(ipkdbsbyte)
#endif	/* IPKDB */

#ifdef XEN


cvs diff -r1.61 -r1.62 src/sys/arch/i386/i386/vector.S (expand / switch to unified diff)

--- src/sys/arch/i386/i386/vector.S 2013/06/22 08:48:48 1.61
+++ src/sys/arch/i386/i386/vector.S 2013/06/25 00:27:22 1.62
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vector.S,v 1.61 2013/06/22 08:48:48 uebayasi Exp $ */ 1/* $NetBSD: vector.S,v 1.62 2013/06/25 00:27:22 uebayasi Exp $ */
2 2
3/* 3/*
4 * Copyright 2002 (c) Wasabi Systems, Inc. 4 * Copyright 2002 (c) Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Frank van der Linden for Wasabi Systems, Inc. 7 * Written by Frank van der Linden for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE. 64 * POSSIBILITY OF SUCH DAMAGE.
65 */ 65 */
66 66
67#include <machine/asm.h> 67#include <machine/asm.h>
68__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.61 2013/06/22 08:48:48 uebayasi Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: vector.S,v 1.62 2013/06/25 00:27:22 uebayasi Exp $");
69 69
70#include "opt_ddb.h" 70#include "opt_ddb.h"
71#include "opt_multiprocessor.h" 71#include "opt_multiprocessor.h"
72#include "opt_ipkdb.h" 72#include "opt_ipkdb.h"
73#include "opt_vm86.h" 73#include "opt_vm86.h"
74#include "opt_xen.h" 74#include "opt_xen.h"
75#include "opt_dtrace.h" 75#include "opt_dtrace.h"
76 76
77#include <machine/i8259.h> 77#include <machine/i8259.h>
78#include <machine/i82093reg.h> 78#include <machine/i82093reg.h>
79#include <machine/i82489reg.h> 79#include <machine/i82489reg.h>
80#include <machine/frameasm.h> 80#include <machine/frameasm.h>
81#include <machine/segments.h> 81#include <machine/segments.h>
@@ -723,477 +723,27 @@ LABEL(xenev_stubs) @@ -723,477 +723,27 @@ LABEL(xenev_stubs)
723 .long _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23) 723 .long _C_LABEL(Xrecurse_xenev23), _C_LABEL(Xresume_xenev23)
724 .long _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24) 724 .long _C_LABEL(Xrecurse_xenev24), _C_LABEL(Xresume_xenev24)
725 .long _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25) 725 .long _C_LABEL(Xrecurse_xenev25), _C_LABEL(Xresume_xenev25)
726 .long _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26) 726 .long _C_LABEL(Xrecurse_xenev26), _C_LABEL(Xresume_xenev26)
727 .long _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27) 727 .long _C_LABEL(Xrecurse_xenev27), _C_LABEL(Xresume_xenev27)
728 .long _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28) 728 .long _C_LABEL(Xrecurse_xenev28), _C_LABEL(Xresume_xenev28)
729 .long _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29) 729 .long _C_LABEL(Xrecurse_xenev29), _C_LABEL(Xresume_xenev29)
730 .long _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30) 730 .long _C_LABEL(Xrecurse_xenev30), _C_LABEL(Xresume_xenev30)
731 .long _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31) 731 .long _C_LABEL(Xrecurse_xenev31), _C_LABEL(Xresume_xenev31)
732END(xenev_stubs) 732END(xenev_stubs)
733 733
734#endif /* XEN */ 734#endif /* XEN */
735 735
736/* 736#include "i386_trap.S"
737 * Trap and fault vector routines 
738 * 
739 * On exit from the kernel to user mode, we always need to check for ASTs. In 
740 * addition, we need to do this atomically; otherwise an interrupt may occur 
741 * which causes an AST, but it won't get processed until the next kernel entry 
742 * (possibly the next clock tick). Thus, we disable interrupt before checking, 
743 * and only enable them again on the final `iret' or before calling the AST 
744 * handler. 
745 */  
746 
747#define TRAP(a) pushl $(a) ; jmp _C_LABEL(alltraps) 
748#define ZTRAP(a) pushl $0 ; TRAP(a) 
749 
750#ifdef IPKDB 
751#define BPTTRAP(a) pushl $0; pushl $(a); jmp _C_LABEL(bpttraps) 
752#else 
753#define BPTTRAP(a) ZTRAP(a) 
754#endif 
755 
756 
757 .text 
758IDTVEC(trap00) 
759 ZTRAP(T_DIVIDE) 
760IDTVEC_END(trap00) 
761IDTVEC(trap01) 
762 BPTTRAP(T_TRCTRAP) 
763IDTVEC_END(trap01) 
764IDTVEC(trap02) 
765 pushl $0 
766 pushl $(T_NMI) 
767 INTRENTRY 
768 jmp _C_LABEL(calltrap) 
769IDTVEC_END(trap02) 
770IDTVEC(trap03) 
771 BPTTRAP(T_BPTFLT) 
772IDTVEC_END(trap03) 
773IDTVEC(trap04) 
774 ZTRAP(T_OFLOW) 
775IDTVEC_END(trap04) 
776IDTVEC(trap05) 
777 ZTRAP(T_BOUND) 
778IDTVEC_END(trap05) 
779/* 
780 * Privileged instruction fault. 
781 */ 
782#ifdef KDTRACE_HOOKS 
783 SUPERALIGN_TEXT 
784IDTVEC(trap06) 
785 /* Check if there is no DTrace hook registered. */ 
786 cmpl $0,dtrace_invop_jump_addr 
787 je norm_ill 
788 
789 /* Check if this is a user fault. */ 
790 /* XXX this was 0x0020 in FreeBSD */ 
791 cmpl $GSEL(GCODE_SEL, SEL_KPL), 4(%esp) /* Check code segment. */ 
792 
793 /* If so, just handle it as a normal trap. */ 
794 jne norm_ill 
795  
796 /* 
797 * This is a kernel instruction fault that might have been caused 
798 * by a DTrace provider. 
799 */ 
800 pushal /* Push all registers onto the stack. */ 
801 
802 /* 
803 * Set our jump address for the jump back in the event that 
804 * the exception wasn't caused by DTrace at all. 
805 */ 
806 movl $norm_ill, dtrace_invop_calltrap_addr 
807 
808 /* Jump to the code hooked in by DTrace. */ 
809 jmpl *dtrace_invop_jump_addr 
810 
811 /* 
812 * Process the instruction fault in the normal way. 
813 */ 
814norm_ill: 
815 ZTRAP(T_PRIVINFLT) 
816IDTVEC_END(trap06) 
817#else 
818IDTVEC(trap06) 
819 ZTRAP(T_PRIVINFLT) 
820IDTVEC_END(trap06) 
821#endif 
822IDTVEC(trap07) 
823#if NNPX > 0 
824 pushl $0 # dummy error code 
825 pushl $T_DNA 
826 INTRENTRY 
827#ifdef DIAGNOSTIC 
828 movl CPUVAR(ILEVEL),%ebx 
829#endif 
830 pushl CPUVAR(SELF) 
831 call *_C_LABEL(npxdna_func) 
832 addl $4,%esp 
833 testl %eax,%eax 
834 jz calltrap 
835 jmp _C_LABEL(trapreturn) 
836#else 
837#ifndef XEN 
838 sti 
839#endif 
840 ZTRAP(T_DNA) 
841#endif 
842IDTVEC_END(trap07) 
843IDTVEC(trap08) 
844 TRAP(T_DOUBLEFLT) 
845IDTVEC_END(trap08) 
846IDTVEC(trap09) 
847 ZTRAP(T_FPOPFLT) 
848IDTVEC_END(trap09) 
849IDTVEC(trap0a) 
850 TRAP(T_TSSFLT) 
851IDTVEC_END(trap0a) 
852IDTVEC(trap0b) 
853 TRAP(T_SEGNPFLT) 
854IDTVEC_END(trap0b) 
855IDTVEC(trap0c) 
856 TRAP(T_STKFLT) 
857IDTVEC_END(trap0c) 
858IDTVEC(trap0d) 
859 TRAP(T_PROTFLT) 
860IDTVEC_END(trap0d) 
861IDTVEC(trap0e) 
862#ifndef XEN 
863 pushl $T_PAGEFLT 
864 INTRENTRY 
865 STI(%eax) 
866 testb $PGEX_U,TF_ERR(%esp) 
867 jnz calltrap 
868 movl %cr2,%eax 
869 subl _C_LABEL(pentium_idt),%eax 
870 cmpl $(6*8),%eax 
871 jne calltrap 
872 movb $T_PRIVINFLT,TF_TRAPNO(%esp) 
873 jmp calltrap 
874#else /* !XEN */ 
875 TRAP(T_PAGEFLT) 
876#endif /* !XEN */ 
877IDTVEC_END(trap0e) 
878 
879IDTVEC(intrspurious) 
880IDTVEC(trap0f) 
881 /* 
882 * The Pentium Pro local APIC may erroneously call this vector for a 
883 * default IR7. Just ignore it. 
884 * 
885 * (The local APIC does this when CPL is raised while it's on the  
886 * way to delivering an interrupt.. presumably enough has been set  
887 * up that it's inconvenient to abort delivery completely..) 
888 */ 
889 pushl $0 # dummy error code 
890 pushl $T_ASTFLT 
891 INTRENTRY 
892 STI(%eax) 
893#ifdef DIAGNOSTIC 
894 movl CPUVAR(ILEVEL),%ebx 
895#endif 
896 jmp _C_LABEL(trapreturn) 
897IDTVEC_END(trap0f) 
898IDTVEC_END(intrspurious) 
899 
900IDTVEC(trap10) 
901#if NNPX > 0 
902 /* 
903 * Handle like an interrupt so that we can call npxintr to clear the 
904 * error. It would be better to handle npx interrupts as traps but 
905 * this is difficult for nested interrupts. 
906 */ 
907 pushl $0 # dummy error code 
908 pushl $T_ASTFLT 
909 INTRENTRY 
910 movl CPUVAR(ILEVEL),%ebx 
911 pushl %ebx 
912 pushl %esp 
913 pushl $0 # dummy arg 
914 addl $1,CPUVAR(NTRAP) # statistical info 
915 adcl $0,CPUVAR(NTRAP)+4 
916 call _C_LABEL(npxintr) 
917 addl $12,%esp 
918 jmp _C_LABEL(trapreturn) 
919#else 
920 sti 
921 ZTRAP(T_ARITHTRAP) 
922#endif 
923IDTVEC_END(trap10) 
924IDTVEC(trap11) 
925 TRAP(T_ALIGNFLT) 
926IDTVEC_END(trap11) 
927#ifdef XEN 
928IDTVEC(trap12) 
929IDTVEC(trap13) 
930#else 
931IDTVEC(trap12) 
932 ZTRAP(T_MCA) 
933IDTVEC(trap13) 
934 ZTRAP(T_XMM) 
935#endif 
936IDTVEC(trap14) 
937IDTVEC(trap15) 
938IDTVEC(trap16) 
939IDTVEC(trap17) 
940IDTVEC(trap18) 
941IDTVEC(trap19) 
942IDTVEC(trap1a) 
943IDTVEC(trap1b) 
944IDTVEC(trap1c) 
945IDTVEC(trap1d) 
946IDTVEC(trap1e) 
947IDTVEC(trap1f) 
948 /* 20 - 31 reserved for future exp */ 
949 ZTRAP(T_RESERVED) 
950IDTVEC_END(trap1f) 
951IDTVEC_END(trap1e) 
952IDTVEC_END(trap1d) 
953IDTVEC_END(trap1c) 
954IDTVEC_END(trap1b) 
955IDTVEC_END(trap1a) 
956IDTVEC_END(trap19) 
957IDTVEC_END(trap18) 
958IDTVEC_END(trap17) 
959IDTVEC_END(trap16) 
960IDTVEC_END(trap15) 
961IDTVEC_END(trap14) 
962#ifndef XEN 
963IDTVEC_END(trap13) 
964IDTVEC_END(trap12) 
965#else 
966IDTVEC_END(trap13) 
967IDTVEC_END(trap12) 
968#endif 
969IDTVEC_END(trap11) 
970 
971IDTVEC(exceptions) 
972 .long _C_LABEL(Xtrap00), _C_LABEL(Xtrap01) 
973 .long _C_LABEL(Xtrap02), _C_LABEL(Xtrap03) 
974 .long _C_LABEL(Xtrap04), _C_LABEL(Xtrap05) 
975 .long _C_LABEL(Xtrap06), _C_LABEL(Xtrap07) 
976 .long _C_LABEL(Xtrap08), _C_LABEL(Xtrap09) 
977 .long _C_LABEL(Xtrap0a), _C_LABEL(Xtrap0b) 
978 .long _C_LABEL(Xtrap0c), _C_LABEL(Xtrap0d) 
979 .long _C_LABEL(Xtrap0e), _C_LABEL(Xtrap0f) 
980 .long _C_LABEL(Xtrap10), _C_LABEL(Xtrap11) 
981 .long _C_LABEL(Xtrap12), _C_LABEL(Xtrap13) 
982 .long _C_LABEL(Xtrap14), _C_LABEL(Xtrap15) 
983 .long _C_LABEL(Xtrap16), _C_LABEL(Xtrap17) 
984 .long _C_LABEL(Xtrap18), _C_LABEL(Xtrap19) 
985 .long _C_LABEL(Xtrap1a), _C_LABEL(Xtrap1b) 
986 .long _C_LABEL(Xtrap1c), _C_LABEL(Xtrap1d) 
987 .long _C_LABEL(Xtrap1e), _C_LABEL(Xtrap1f) 
988IDTVEC_END(exceptions) 
989 
990  
991IDTVEC(tss_trap08) 
9921: 
993 str %ax 
994 GET_TSS 
995 movzwl (%eax),%eax 
996 GET_TSS 
997 pushl $T_DOUBLEFLT 
998 pushl %eax 
999 call _C_LABEL(trap_tss) 
1000 addl $12,%esp 
1001 iret 
1002 jmp 1b 
1003IDTVEC_END(tss_trap08) 
1004 
1005/* 
1006 * trap() calls here when it detects a fault in INTRFASTEXIT (loading the 
1007 * segment registers or during the iret itself). 
1008 * The address of the (possibly reconstructed) user trap frame is 
1009 * passed as an argument. 
1010 * Typically the code will have raised a SIGSEGV which will be actioned 
1011 * by the code below. 
1012 */ 
1013 .type _C_LABEL(trap_return_fault_return), @function 
1014LABEL(trap_return_fault_return) 
1015 mov 4(%esp),%esp /* frame for user return */ 
1016 jmp _C_LABEL(trapreturn) 
1017END(trap_return_fault_return) 
1018 
1019/* LINTSTUB: Ignore */ 
1020NENTRY(alltraps) 
1021 INTRENTRY 
1022 STI(%eax) 
1023calltrap: 
1024#ifdef DIAGNOSTIC 
1025 movl CPUVAR(ILEVEL),%ebx 
1026#endif /* DIAGNOSTIC */ 
1027 addl $1,CPUVAR(NTRAP) # statistical info 
1028 adcl $0,CPUVAR(NTRAP)+4 
1029 pushl %esp 
1030 call _C_LABEL(trap) 
1031 addl $4,%esp 
1032_C_LABEL(trapreturn): .globl trapreturn 
1033 testb $CHK_UPL,TF_CS(%esp) 
1034 jnz .Lalltraps_checkast 
1035#ifdef VM86 
1036 testl $PSL_VM,TF_EFLAGS(%esp) 
1037 jz 6f 
1038#else 
1039 jmp 6f 
1040#endif 
1041.Lalltraps_checkast: 
1042 /* Check for ASTs on exit to user mode. */ 
1043 CLI(%eax) 
1044 CHECK_ASTPENDING(%eax) 
1045 jz 3f 
10465: CLEAR_ASTPENDING(%eax) 
1047 STI(%eax) 
1048 movl $T_ASTFLT,TF_TRAPNO(%esp) 
1049 addl $1,CPUVAR(NTRAP) # statistical info 
1050 adcl $0,CPUVAR(NTRAP)+4 
1051 pushl %esp 
1052 call _C_LABEL(trap) 
1053 addl $4,%esp 
1054 jmp .Lalltraps_checkast /* re-check ASTs */ 
10553: CHECK_DEFERRED_SWITCH 
1056 jnz 9f 
1057#ifdef XEN 
1058 STIC(%eax) 
1059 jz 6f 
1060 call _C_LABEL(stipending) 
1061 testl %eax,%eax 
1062 jz 6f 
1063 /* process pending interrupts */ 
1064 CLI(%eax) 
1065 movl CPUVAR(ILEVEL), %ebx 
1066 movl $.Lalltraps_resume, %esi # address to resume loop at 
1067.Lalltraps_resume: 
1068 movl %ebx,%eax # get cpl 
1069 movl CPUVAR(IUNMASK)(,%eax,4),%eax 
1070 andl CPUVAR(IPENDING),%eax # any non-masked bits left? 
1071 jz 7f 
1072 bsrl %eax,%eax 
1073 btrl %eax,CPUVAR(IPENDING) 
1074 movl CPUVAR(ISOURCES)(,%eax,4),%eax 
1075 jmp *IS_RESUME(%eax) 
10767: movl %ebx, CPUVAR(ILEVEL) #restore cpl 
1077 jmp _C_LABEL(trapreturn) 
1078#endif /* XEN */ 
1079#ifndef DIAGNOSTIC 
10806: INTRFASTEXIT 
1081#else 
10826: cmpl CPUVAR(ILEVEL),%ebx 
1083 jne 3f 
1084 INTRFASTEXIT 
10853: STI(%eax) 
1086 pushl $4f 
1087 call _C_LABEL(panic) 
1088 addl $4,%esp 
1089 pushl %ebx 
1090 call _C_LABEL(spllower) 
1091 addl $4,%esp 
1092 jmp .Lalltraps_checkast /* re-check ASTs */ 
10934: .asciz "SPL NOT LOWERED ON TRAP EXIT\n" 
1094#endif /* DIAGNOSTIC */ 
10959: STI(%eax) 
1096 call _C_LABEL(pmap_load) 
1097 jmp .Lalltraps_checkast /* re-check ASTs */ 
1098END(alltraps) 
1099 
1100#ifdef IPKDB 
1101/* LINTSTUB: Ignore */ 
1102NENTRY(bpttraps) 
1103 INTRENTRY 
1104 call _C_LABEL(ipkdb_trap_glue) 
1105 testl %eax,%eax 
1106 jz calltrap 
1107 INTRFASTEXIT 
1108 
1109ipkdbsetup: 
1110 popl %ecx 
1111 
1112 /* Disable write protection: */ 
1113 movl %cr0,%eax 
1114 pushl %eax 
1115 andl $~CR0_WP,%eax 
1116 movl %eax,%cr0 
1117 
1118 /* Substitute Protection & Page Fault handlers: */ 
1119 movl _C_LABEL(idt),%edx 
1120 pushl 13*8(%edx) 
1121 pushl 13*8+4(%edx) 
1122 pushl 14*8(%edx) 
1123 pushl 14*8+4(%edx) 
1124 movl $fault,%eax 
1125 movw %ax,13*8(%edx) 
1126 movw %ax,14*8(%edx) 
1127 shrl $16,%eax 
1128 movw %ax,13*8+6(%edx) 
1129 movw %ax,14*8+6(%edx) 
1130 
1131 pushl %ecx 
1132 ret 
1133 
1134ipkdbrestore: 
1135 popl %ecx 
1136 
1137 /* Restore Protection & Page Fault handlers: */ 
1138 movl _C_LABEL(idt),%edx 
1139 popl 14*8+4(%edx) 
1140 popl 14*8(%edx) 
1141 popl 13*8+4(%edx) 
1142 popl 13*8(%edx) 
1143 
1144 /* Restore write protection: */ 
1145 popl %edx 
1146 movl %edx,%cr0 
1147 
1148 pushl %ecx 
1149 ret 
1150END(bpttraps) 
1151#endif /* IPKDB */ 
1152 
1153#ifdef IPKDB 
1154/* LINTSTUB: Func: int ipkdbfbyte(u_char *c) */ 
1155NENTRY(ipkdbfbyte) 
1156 pushl %ebp 
1157 movl %esp,%ebp 
1158 call ipkdbsetup 
1159 movl 8(%ebp),%edx 
1160 movzbl (%edx),%eax 
1161faultexit: 
1162 call ipkdbrestore 
1163 popl %ebp 
1164 ret 
1165END(ipkdbfbyte) 
1166 
1167/* LINTSTUB: Func: int ipkdbsbyte(u_char *c, int i) */ 
1168NENTRY(ipkdbsbyte) 
1169 pushl %ebp 
1170 movl %esp,%ebp 
1171 call ipkdbsetup 
1172 movl 8(%ebp),%edx 
1173 movl 12(%ebp),%eax 
1174 movb %al,(%edx) 
1175 call ipkdbrestore 
1176 popl %ebp 
1177 ret 
1178 
1179fault: 
1180 popl %eax /* error code */ 
1181 movl $faultexit,%eax 
1182 movl %eax,(%esp) 
1183 movl $-1,%eax 
1184 iret 
1185END(ipkdbsbyte) 
1186#endif /* IPKDB */ 
1187 737
1188#ifdef XEN 738#ifdef XEN
1189 739
1190/* 740/*
1191 * A note on the "critical region" in our callback handler. 741 * A note on the "critical region" in our callback handler.
1192 * We want to avoid stacking callback handlers due to events occurring 742 * We want to avoid stacking callback handlers due to events occurring
1193 * during handling of the last event. To do this, we keep events disabled 743 * during handling of the last event. To do this, we keep events disabled
1194 * until weve done all processing. HOWEVER, we must enable events before 744 * until weve done all processing. HOWEVER, we must enable events before
1195 * popping the stack frame (cant be done atomically) and so it would still 745 * popping the stack frame (cant be done atomically) and so it would still
1196 * be possible to get enough handler activations to overflow the stack. 746 * be possible to get enough handler activations to overflow the stack.
1197 * Although unlikely, bugs of that kind are hard to track down, so wed 747 * Although unlikely, bugs of that kind are hard to track down, so wed
1198 * like to avoid the possibility. 748 * like to avoid the possibility.
1199 * So, on entry to the handler we detect whether we interrupted an 749 * So, on entry to the handler we detect whether we interrupted an