Tue Nov 28 08:43:49 2017 UTC
style


(maxv)
diff -r1.30 -r1.31 src/sys/arch/amd64/amd64/spl.S

cvs diff -r1.30 -r1.31 src/sys/arch/amd64/amd64/spl.S
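
The change below is purely stylistic: operands lose the space after the
comma, assembler-style "#" comments become C-style "/* */" comments, and
the Xsoftintr body gains blank lines and short section comments. One
representative before/after pair, lifted from the Xpreemptrecurse hunk:

	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)	# old style
	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)	/* new style */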
--- src/sys/arch/amd64/amd64/spl.S 2015/11/22 13:41:24 1.30
+++ src/sys/arch/amd64/amd64/spl.S 2017/11/28 08:43:49 1.31
@@ -1,4 +1,4 @@
-/*	$NetBSD: spl.S,v 1.30 2015/11/22 13:41:24 maxv Exp $	*/
+/*	$NetBSD: spl.S,v 1.31 2017/11/28 08:43:49 maxv Exp $	*/
 
 /*
  * Copyright (c) 2003 Wasabi Systems, Inc.
@@ -90,28 +90,39 @@
  *	%r13		address to return to
  */
 IDTVEC(softintr)
-	pushq	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
+	/* set up struct switchframe */
+	pushq	$_C_LABEL(softintr_ret)
 	pushq	%rbx
 	pushq	%r12
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+
 	movl	$IPL_HIGH,CPUVAR(ILEVEL)
 	movq	CPUVAR(CURLWP),%r15
 	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
 	movq	L_PCB(%rdi),%rdx
 	movq	L_PCB(%r15),%rcx
 	movq	%rdi,CPUVAR(CURLWP)
+
+	/* save old context */
 	movq	%rsp,PCB_RSP(%rcx)
 	movq	%rbp,PCB_RBP(%rcx)
-	movq	PCB_RSP0(%rdx),%rsp	/* onto new stack */
+
+	/* switch to the new stack */
+	movq	PCB_RSP0(%rdx),%rsp
+
+	/* dispatch */
 	sti
 	movq	%r15,%rdi		/* interrupted LWP */
 	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
 	call	_C_LABEL(softint_dispatch)/* run handlers */
 	cli
+
+	/* restore old context */
 	movq	L_PCB(%r15),%rcx
 	movq	PCB_RSP(%rcx),%rsp
+
 	xchgq	%r15,CPUVAR(CURLWP)	/* must be globally visible */
 	popq	%r15			/* unwind switchframe */
 	addq	$(5 * 8),%rsp
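
The six pushes at the top of this hunk build the switchframe that the
handler is later resumed through; the layout below is derived from the
push order shown above, as a reader's sketch rather than a quote from
frame.h:

	/*
	 * Stack after the IDTVEC(softintr) prologue (sketch):
	 *   0(%rsp)  %r15		pushed last
	 *   8(%rsp)  %r14
	 *  16(%rsp)  %r13
	 *  24(%rsp)  %r12
	 *  32(%rsp)  %rbx
	 *  40(%rsp)  $softintr_ret	pushed first; resumed via cpu_switchto
	 *
	 * The closing popq %r15 / addq $(5 * 8),%rsp pair unwinds exactly
	 * these six quadwords.
	 */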
@@ -128,7 +139,7 @@
  */
 NENTRY(softintr_ret)
 	incl	CPUVAR(MTX_COUNT)	/* re-adjust after mi_switch */
-	movl	$0, L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
+	movl	$0,L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
 	cli
 	jmp	*%r13			/* back to Xspllower/Xdoreti */
 END(softintr_ret)
@@ -150,9 +161,9 @@
  * Handles preemption interrupts via Xspllower().
  */
 IDTVEC(preemptrecurse)
-	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
+	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
-	xorq	%rdi, %rdi
+	xorq	%rdi,%rdi
 	call	_C_LABEL(kpreempt)
 	cli
 	jmp	*%r13			/* back to Xspllower */
@@ -164,16 +175,16 @@
  * Handles preemption interrupts via Xdoreti().
  */
 IDTVEC(preemptresume)
-	movl	$IPL_PREEMPT, CPUVAR(ILEVEL)
+	movl	$IPL_PREEMPT,CPUVAR(ILEVEL)
 	sti
-	testq	$SEL_RPL, TF_CS(%rsp)
+	testq	$SEL_RPL,TF_CS(%rsp)
 	jnz	1f
-	movq	TF_RIP(%rsp), %rdi
-	call	_C_LABEL(kpreempt)		# from kernel
+	movq	TF_RIP(%rsp),%rdi
+	call	_C_LABEL(kpreempt)	/* from kernel */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 1:
-	call	_C_LABEL(preempt)		# from user
+	call	_C_LABEL(preempt)	/* from user */
 	cli
 	jmp	*%r13			/* back to Xdoreti */
 END(Xpreemptresume)
@@ -197,14 +208,14 @@
  * are disabled via eflags/IE.
  */
 ENTRY(spllower)
-	cmpl	CPUVAR(ILEVEL), %edi
+	cmpl	CPUVAR(ILEVEL),%edi
 	jae	1f
-	movl	CPUVAR(IUNMASK)(,%rdi,4), %edx
+	movl	CPUVAR(IUNMASK)(,%rdi,4),%edx
 	pushf
 	cli
-	testl	CPUVAR(IPENDING), %edx
+	testl	CPUVAR(IPENDING),%edx
 	jnz	2f
-	movl	%edi, CPUVAR(ILEVEL)
+	movl	%edi,CPUVAR(ILEVEL)
 	popf
 1:
 	ret
@@ -224,7 +235,7 @@
  *
  * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
  *
- * edx : eax = old level / old ipending 
+ * edx : eax = old level / old ipending
  * ecx : ebx = new level / old ipending
  */
 ENTRY(cx8_spllower)
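
The pairing documented above is the cmpxchg8b contract itself: edx:eax is
compared against the 64-bit memory operand, and ecx:ebx replaces it only
on a match; on a mismatch, edx:eax is reloaded from memory. A minimal
illustrative loop, assuming a hypothetical state word at (%rsi) with the
level in the high half and ipending in the low half (a sketch, not the
committed cx8_spllower body):

	1:	movl	4(%rsi),%edx	/* old level (high word) */
		movl	(%rsi),%eax	/* old ipending (low word) */
		movl	%edi,%ecx	/* new level */
		movl	%eax,%ebx	/* ipending carried over unchanged */
		lock
		cmpxchg8b (%rsi)	/* commit iff word still matches edx:eax */
		jnz	1b		/* lost a race; reload and retry */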
@@ -260,7 +271,7 @@
 
 /*
  * void Xspllower(int s);
- * 
+ *
  * Process pending interrupts.
  *
  * Important registers:
@@ -283,11 +294,11 @@
 	pushq	%r13
 	pushq	%r12
 	movl	%edi,%ebx
-	leaq	1f(%rip),%r13		# address to resume loop at
-1:	movl	%ebx,%eax		# get cpl
+	leaq	1f(%rip),%r13		/* address to resume loop at */
+1:	movl	%ebx,%eax		/* get cpl */
 	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
 	CLI(si)
-	andl	CPUVAR(IPENDING),%eax		# any non-masked bits left?
+	andl	CPUVAR(IPENDING),%eax	/* any non-masked bits left? */
 	jz	2f
 	bsrl	%eax,%eax
 	btrl	%eax,CPUVAR(IPENDING)
@@ -304,7 +315,7 @@
 
 /*
  * void Xdoreti(void);
- * 
+ *
  * Handle return from interrupt after device handler finishes.
  *
  * Important registers:
@@ -312,7 +323,7 @@
  *   r13 - address to resume loop at
  */
 IDTVEC(doreti)
-	popq	%rbx			# get previous priority
+	popq	%rbx			/* get previous priority */
 	decl	CPUVAR(IDEPTH)
 	leaq	1f(%rip),%r13
 1:	movl	%ebx,%eax
@@ -320,9 +331,9 @@
 	CLI(si)
 	andl	CPUVAR(IPENDING),%eax
 	jz	2f
-	bsrl	%eax,%eax		# slow, but not worth optimizing
+	bsrl	%eax,%eax		/* slow, but not worth optimizing */
 	btrl	%eax,CPUVAR(IPENDING)
-	movq	CPUVAR(ISOURCES)(,%rax, 8),%rax
+	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
 	jmp	*IS_RESUME(%rax)
 2:	/* Check for ASTs on exit to user mode. */
 	movl	%ebx,CPUVAR(ILEVEL)
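
The same mask-and-scan pattern appears in the Xspllower hunk above; read
top to bottom, the loop in this last hunk does the following (a paraphrase
of the context lines, nothing new):

	/*
	 *   eax = IUNMASK[cpl] & IPENDING   pending and not masked at cpl
	 *   if (eax == 0) goto 2f           nothing left to run
	 *   bsrl: highest set bit in eax    highest-priority pending source
	 *   btrl: clear that bit            claim it in IPENDING
	 *   jmp *IS_RESUME(ISOURCES[bit])   run it; it returns via %r13 to 1:
	 */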