Sat Jan 11 17:05:17 2014 UTC ()
fix a comment (cpu_switch() -> cpu_switchto()) (Richard Hansen)


(christos)
diff -r1.108 -r1.109 src/sys/arch/i386/i386/locore.S

cvs diff -r1.108 -r1.109 src/sys/arch/i386/i386/locore.S (switch to unified diff)

--- src/sys/arch/i386/i386/locore.S 2013/12/01 01:05:16 1.108
+++ src/sys/arch/i386/i386/locore.S 2014/01/11 17:05:17 1.109
@@ -1,1375 +1,1376 @@ @@ -1,1375 +1,1376 @@
1/* $NetBSD: locore.S,v 1.108 2013/12/01 01:05:16 christos Exp $ */ 1/* $NetBSD: locore.S,v 1.109 2014/01/11 17:05:17 christos Exp $ */
2 2
3/* 3/*
4 * Copyright-o-rama! 4 * Copyright-o-rama!
5 */ 5 */
6 6
7/* 7/*
8 * Copyright (c) 2006 Manuel Bouyer. 8 * Copyright (c) 2006 Manuel Bouyer.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * 29 *
30 */ 30 */
31 31
32/* 32/*
33 * Copyright (c) 2001 Wasabi Systems, Inc. 33 * Copyright (c) 2001 Wasabi Systems, Inc.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Written by Frank van der Linden for Wasabi Systems, Inc. 36 * Written by Frank van der Linden for Wasabi Systems, Inc.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions 39 * modification, are permitted provided that the following conditions
40 * are met: 40 * are met:
41 * 1. Redistributions of source code must retain the above copyright 41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer. 42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright 43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the 44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution. 45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software 46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement: 47 * must display the following acknowledgement:
48 * This product includes software developed for the NetBSD Project by 48 * This product includes software developed for the NetBSD Project by
49 * Wasabi Systems, Inc. 49 * Wasabi Systems, Inc.
50 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 50 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
51 * or promote products derived from this software without specific prior 51 * or promote products derived from this software without specific prior
52 * written permission. 52 * written permission.
53 * 53 *
54 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 54 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 56 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 57 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 58 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 59 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 60 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 61 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 62 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 63 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64 * POSSIBILITY OF SUCH DAMAGE. 64 * POSSIBILITY OF SUCH DAMAGE.
65 */ 65 */
66 66
67 67
68/*- 68/*-
69 * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009 The NetBSD Foundation, Inc. 69 * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009 The NetBSD Foundation, Inc.
70 * All rights reserved. 70 * All rights reserved.
71 * 71 *
72 * This code is derived from software contributed to The NetBSD Foundation 72 * This code is derived from software contributed to The NetBSD Foundation
73 * by Charles M. Hannum, and by Andrew Doran. 73 * by Charles M. Hannum, and by Andrew Doran.
74 * 74 *
75 * Redistribution and use in source and binary forms, with or without 75 * Redistribution and use in source and binary forms, with or without
76 * modification, are permitted provided that the following conditions 76 * modification, are permitted provided that the following conditions
77 * are met: 77 * are met:
78 * 1. Redistributions of source code must retain the above copyright 78 * 1. Redistributions of source code must retain the above copyright
79 * notice, this list of conditions and the following disclaimer. 79 * notice, this list of conditions and the following disclaimer.
80 * 2. Redistributions in binary form must reproduce the above copyright 80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the 81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution. 82 * documentation and/or other materials provided with the distribution.
83 * 83 *
84 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 84 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
85 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 85 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
86 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 86 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
87 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 87 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
88 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 88 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
94 * POSSIBILITY OF SUCH DAMAGE. 94 * POSSIBILITY OF SUCH DAMAGE.
95 */ 95 */
96 96
97/*- 97/*-
98 * Copyright (c) 1990 The Regents of the University of California. 98 * Copyright (c) 1990 The Regents of the University of California.
99 * All rights reserved. 99 * All rights reserved.
100 * 100 *
101 * This code is derived from software contributed to Berkeley by 101 * This code is derived from software contributed to Berkeley by
102 * William Jolitz. 102 * William Jolitz.
103 * 103 *
104 * Redistribution and use in source and binary forms, with or without 104 * Redistribution and use in source and binary forms, with or without
105 * modification, are permitted provided that the following conditions 105 * modification, are permitted provided that the following conditions
106 * are met: 106 * are met:
107 * 1. Redistributions of source code must retain the above copyright 107 * 1. Redistributions of source code must retain the above copyright
108 * notice, this list of conditions and the following disclaimer. 108 * notice, this list of conditions and the following disclaimer.
109 * 2. Redistributions in binary form must reproduce the above copyright 109 * 2. Redistributions in binary form must reproduce the above copyright
110 * notice, this list of conditions and the following disclaimer in the 110 * notice, this list of conditions and the following disclaimer in the
111 * documentation and/or other materials provided with the distribution. 111 * documentation and/or other materials provided with the distribution.
112 * 3. Neither the name of the University nor the names of its contributors 112 * 3. Neither the name of the University nor the names of its contributors
113 * may be used to endorse or promote products derived from this software 113 * may be used to endorse or promote products derived from this software
114 * without specific prior written permission. 114 * without specific prior written permission.
115 * 115 *
116 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 116 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
117 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 117 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
118 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 118 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
119 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 119 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
120 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 120 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
121 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 121 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
122 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 122 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
123 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 123 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
124 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 124 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
125 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 125 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
126 * SUCH DAMAGE. 126 * SUCH DAMAGE.
127 * 127 *
128 * @(#)locore.s 7.3 (Berkeley) 5/13/91 128 * @(#)locore.s 7.3 (Berkeley) 5/13/91
129 */ 129 */
130 130
131#include <machine/asm.h> 131#include <machine/asm.h>
132__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.108 2013/12/01 01:05:16 christos Exp $"); 132__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.109 2014/01/11 17:05:17 christos Exp $");
133 133
134#include "opt_compat_oldboot.h" 134#include "opt_compat_oldboot.h"
135#include "opt_ddb.h" 135#include "opt_ddb.h"
136#include "opt_modular.h" 136#include "opt_modular.h"
137#include "opt_multiboot.h" 137#include "opt_multiboot.h"
138#include "opt_realmem.h" 138#include "opt_realmem.h"
139#include "opt_vm86.h" 139#include "opt_vm86.h"
140#include "opt_xen.h" 140#include "opt_xen.h"
141 141
142#include "npx.h" 142#include "npx.h"
143#include "assym.h" 143#include "assym.h"
144#include "lapic.h" 144#include "lapic.h"
145#include "ioapic.h" 145#include "ioapic.h"
146#include "ksyms.h" 146#include "ksyms.h"
147 147
148#include <sys/errno.h> 148#include <sys/errno.h>
149#include <sys/syscall.h> 149#include <sys/syscall.h>
150 150
151#include <machine/cputypes.h> 151#include <machine/cputypes.h>
152#include <machine/segments.h> 152#include <machine/segments.h>
153#include <machine/specialreg.h> 153#include <machine/specialreg.h>
154#include <machine/trap.h> 154#include <machine/trap.h>
155#include <machine/i82489reg.h> 155#include <machine/i82489reg.h>
156#include <machine/frameasm.h> 156#include <machine/frameasm.h>
157#include <machine/i82489reg.h> 157#include <machine/i82489reg.h>
158#ifndef XEN 158#ifndef XEN
159#include <machine/multiboot.h> 159#include <machine/multiboot.h>
160#endif 160#endif
161 161
162/* XXX temporary kluge; these should not be here */ 162/* XXX temporary kluge; these should not be here */
163/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ 163/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
164#include <dev/isa/isareg.h> 164#include <dev/isa/isareg.h>
165 165
166#ifndef XEN 166#ifndef XEN
167#define _RELOC(x) ((x) - KERNBASE) 167#define _RELOC(x) ((x) - KERNBASE)
168#else 168#else
169#define _RELOC(x) ((x)) 169#define _RELOC(x) ((x))
170#endif /* XEN */ 170#endif /* XEN */
171#define RELOC(x) _RELOC(_C_LABEL(x)) 171#define RELOC(x) _RELOC(_C_LABEL(x))
172 172
173#ifdef XEN 173#ifdef XEN
174/* 174/*
175 * Xen guest identifier and loader selection 175 * Xen guest identifier and loader selection
176 */ 176 */
177.section __xen_guest 177.section __xen_guest
178 .ascii "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0" 178 .ascii "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0"
179 .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */ 179 .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */
180 .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */ 180 .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */
181 .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */ 181 .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */
182 .ascii ",HYPERCALL_PAGE=0x00000101" 182 .ascii ",HYPERCALL_PAGE=0x00000101"
183 /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */ 183 /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */
184#ifdef PAE 184#ifdef PAE
185 .ascii ",PAE=yes[extended-cr3]" 185 .ascii ",PAE=yes[extended-cr3]"
186#endif 186#endif
187 .ascii ",LOADER=generic" 187 .ascii ",LOADER=generic"
188#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE) 188#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
189 .ascii ",BSD_SYMTAB=yes" 189 .ascii ",BSD_SYMTAB=yes"
190#endif 190#endif
191 .byte 0 191 .byte 0
192#endif /* XEN */ 192#endif /* XEN */
193 193
194/* 194/*
195 * Initialization 195 * Initialization
196 */ 196 */
197 .data 197 .data
198 198
199 .globl _C_LABEL(cputype) 199 .globl _C_LABEL(cputype)
200 .globl _C_LABEL(cpuid_level) 200 .globl _C_LABEL(cpuid_level)
201 .globl _C_LABEL(esym) 201 .globl _C_LABEL(esym)
202 .globl _C_LABEL(eblob) 202 .globl _C_LABEL(eblob)
203 .globl _C_LABEL(atdevbase) 203 .globl _C_LABEL(atdevbase)
204 .globl _C_LABEL(lwp0uarea) 204 .globl _C_LABEL(lwp0uarea)
205 .globl _C_LABEL(PDPpaddr) 205 .globl _C_LABEL(PDPpaddr)
206 .globl _C_LABEL(gdt) 206 .globl _C_LABEL(gdt)
207 .globl _C_LABEL(idt) 207 .globl _C_LABEL(idt)
208 .globl _C_LABEL(lapic_tpr) 208 .globl _C_LABEL(lapic_tpr)
209 209
210#if NLAPIC > 0 210#if NLAPIC > 0
211#ifdef __ELF__ 211#ifdef __ELF__
212 .align PAGE_SIZE 212 .align PAGE_SIZE
213#else 213#else
214 .align 12 214 .align 12
215#endif 215#endif
216 .globl _C_LABEL(local_apic), _C_LABEL(lapic_id) 216 .globl _C_LABEL(local_apic), _C_LABEL(lapic_id)
217 .type _C_LABEL(local_apic), @object 217 .type _C_LABEL(local_apic), @object
218LABEL(local_apic) 218LABEL(local_apic)
219 .space LAPIC_ID 219 .space LAPIC_ID
220END(local_apic) 220END(local_apic)
221 .type _C_LABEL(lapic_id), @object 221 .type _C_LABEL(lapic_id), @object
222LABEL(lapic_id) 222LABEL(lapic_id)
223 .long 0x00000000 223 .long 0x00000000
224 .space LAPIC_TPRI-(LAPIC_ID+4) 224 .space LAPIC_TPRI-(LAPIC_ID+4)
225END(lapic_id) 225END(lapic_id)
226 .type _C_LABEL(lapic_tpr), @object 226 .type _C_LABEL(lapic_tpr), @object
227LABEL(lapic_tpr) 227LABEL(lapic_tpr)
228 .space LAPIC_PPRI-LAPIC_TPRI 228 .space LAPIC_PPRI-LAPIC_TPRI
229END(lapic_tpr) 229END(lapic_tpr)
230 .type _C_LABEL(lapic_ppr), @object 230 .type _C_LABEL(lapic_ppr), @object
231_C_LABEL(lapic_ppr): 231_C_LABEL(lapic_ppr):
232 .space LAPIC_ISR-LAPIC_PPRI 232 .space LAPIC_ISR-LAPIC_PPRI
233END(lapic_ppr) 233END(lapic_ppr)
234 .type _C_LABEL(lapic_isr), @object 234 .type _C_LABEL(lapic_isr), @object
235_C_LABEL(lapic_isr): 235_C_LABEL(lapic_isr):
236 .space PAGE_SIZE-LAPIC_ISR 236 .space PAGE_SIZE-LAPIC_ISR
237END(lapic_isr) 237END(lapic_isr)
238#else 238#else
239 .type _C_LABEL(lapic_tpr), @object 239 .type _C_LABEL(lapic_tpr), @object
240LABEL(lapic_tpr) 240LABEL(lapic_tpr)
241 .long 0 241 .long 0
242END(lapic_tpr) 242END(lapic_tpr)
243#endif 243#endif
244 .type _C_LABEL(cputype), @object 244 .type _C_LABEL(cputype), @object
245LABEL(cputype) .long 0 # are we 80486, Pentium, or.. 245LABEL(cputype) .long 0 # are we 80486, Pentium, or..
246END(cputype) 246END(cputype)
247 .type _C_LABEL(cpuid_level), @object 247 .type _C_LABEL(cpuid_level), @object
248LABEL(cpuid_level) .long 0 248LABEL(cpuid_level) .long 0
249END(cpuid_level) 249END(cpuid_level)
250 .type _C_LABEL(atdevbase), @object 250 .type _C_LABEL(atdevbase), @object
251LABEL(atdevbase) .long 0 # location of start of iomem in virtual 251LABEL(atdevbase) .long 0 # location of start of iomem in virtual
252END(atdevbase) 252END(atdevbase)
253 .type _C_LABEL(lwp0uarea), @object 253 .type _C_LABEL(lwp0uarea), @object
254LABEL(lwp0uarea) .long 0 254LABEL(lwp0uarea) .long 0
255END(lwp0uarea) 255END(lwp0uarea)
256 .type _C_LABEL(PDPpaddr), @object 256 .type _C_LABEL(PDPpaddr), @object
257LABEL(PDPpaddr) .long 0 # paddr of PDP, for libkvm 257LABEL(PDPpaddr) .long 0 # paddr of PDP, for libkvm
258END(PDPpaddr) 258END(PDPpaddr)
259 .type _C_LABEL(tablesize), @object 259 .type _C_LABEL(tablesize), @object
260_C_LABEL(tablesize): .long 0 260_C_LABEL(tablesize): .long 0
261END(tablesize) 261END(tablesize)
262 .size tmpstk, tmpstk - . 262 .size tmpstk, tmpstk - .
263 .space 512 263 .space 512
264tmpstk: 264tmpstk:
265#ifdef XEN 265#ifdef XEN
266 .align PAGE_SIZE, 0x0 # Align on page boundary 266 .align PAGE_SIZE, 0x0 # Align on page boundary
267LABEL(tmpgdt) 267LABEL(tmpgdt)
268 .space PAGE_SIZE # Xen expects a page 268 .space PAGE_SIZE # Xen expects a page
269END(tmpgdt) 269END(tmpgdt)
270#endif /* XEN */ 270#endif /* XEN */
271 271
272 .text 272 .text
273 .globl _C_LABEL(kernel_text) 273 .globl _C_LABEL(kernel_text)
274 .set _C_LABEL(kernel_text),KERNTEXTOFF 274 .set _C_LABEL(kernel_text),KERNTEXTOFF
275 275
276ENTRY(start) 276ENTRY(start)
277#ifndef XEN 277#ifndef XEN
278 movw $0x1234,0x472 # warm boot 278 movw $0x1234,0x472 # warm boot
279#if defined(MULTIBOOT) 279#if defined(MULTIBOOT)
280 jmp 1f 280 jmp 1f
281 281
282 .align 4 282 .align 4
283 .globl Multiboot_Header 283 .globl Multiboot_Header
284_C_LABEL(Multiboot_Header): 284_C_LABEL(Multiboot_Header):
285#define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_HEADER_WANT_MEMORY) 285#define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_HEADER_WANT_MEMORY)
286 .long MULTIBOOT_HEADER_MAGIC 286 .long MULTIBOOT_HEADER_MAGIC
287 .long MULTIBOOT_HEADER_FLAGS 287 .long MULTIBOOT_HEADER_FLAGS
288 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS) 288 .long -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
289 289
2901: 2901:
291 /* Check if we are being executed by a Multiboot-compliant boot 291 /* Check if we are being executed by a Multiboot-compliant boot
292 * loader. */ 292 * loader. */
293 cmpl $MULTIBOOT_INFO_MAGIC,%eax 293 cmpl $MULTIBOOT_INFO_MAGIC,%eax
294 jne 1f 294 jne 1f
295 295
296 /* 296 /*
297 * Indeed, a multiboot-compliant boot loader executed us. We copy 297 * Indeed, a multiboot-compliant boot loader executed us. We copy
298 * the received Multiboot information structure into kernel's data 298 * the received Multiboot information structure into kernel's data
299 * space to process it later -- after we are relocated. It will 299 * space to process it later -- after we are relocated. It will
300 * be safer to run complex C code than doing it at this point. 300 * be safer to run complex C code than doing it at this point.
301 */ 301 */
302 pushl %ebx # Address of Multiboot information 302 pushl %ebx # Address of Multiboot information
303 call _C_LABEL(multiboot_pre_reloc) 303 call _C_LABEL(multiboot_pre_reloc)
304 addl $4,%esp 304 addl $4,%esp
305 jmp 2f 305 jmp 2f
306#endif 306#endif
307 307
3081: 3081:
309 /* 309 /*
310 * At this point, we know that a NetBSD-specific boot loader 310 * At this point, we know that a NetBSD-specific boot loader
311 * booted this kernel. The stack carries the following parameters: 311 * booted this kernel. The stack carries the following parameters:
312 * (boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem), 312 * (boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem),
313 * 4 bytes each. 313 * 4 bytes each.
314 */ 314 */
315 addl $4,%esp # Discard return address to boot loader 315 addl $4,%esp # Discard return address to boot loader
316 call _C_LABEL(native_loader) 316 call _C_LABEL(native_loader)
317 addl $24,%esp 317 addl $24,%esp
318 318
3192: 3192:
320 /* First, reset the PSL. */ 320 /* First, reset the PSL. */
321 pushl $PSL_MBO 321 pushl $PSL_MBO
322 popfl 322 popfl
323 323
324 /* Clear segment registers; always null in proc0. */ 324 /* Clear segment registers; always null in proc0. */
325 xorl %eax,%eax 325 xorl %eax,%eax
326 movw %ax,%fs 326 movw %ax,%fs
327 movw %ax,%gs 327 movw %ax,%gs
328 decl %eax 328 decl %eax
329 movl %eax,RELOC(cpuid_level) 329 movl %eax,RELOC(cpuid_level)
330 330
331 /* Find out our CPU type. */ 331 /* Find out our CPU type. */
332 332
333try386: /* Try to toggle alignment check flag; does not exist on 386. */ 333try386: /* Try to toggle alignment check flag; does not exist on 386. */
334 pushfl 334 pushfl
335 popl %eax 335 popl %eax
336 movl %eax,%ecx 336 movl %eax,%ecx
337 orl $PSL_AC,%eax 337 orl $PSL_AC,%eax
338 pushl %eax 338 pushl %eax
339 popfl 339 popfl
340 pushfl 340 pushfl
341 popl %eax 341 popl %eax
342 xorl %ecx,%eax 342 xorl %ecx,%eax
343 andl $PSL_AC,%eax 343 andl $PSL_AC,%eax
344 pushl %ecx 344 pushl %ecx
345 popfl 345 popfl
346 346
347 testl %eax,%eax 347 testl %eax,%eax
348 jnz try486 348 jnz try486
349 349
350 /* 350 /*
351 * Try the test of a NexGen CPU -- ZF will not change on a DIV 351 * Try the test of a NexGen CPU -- ZF will not change on a DIV
352 * instruction on a NexGen, it will on an i386. Documented in 352 * instruction on a NexGen, it will on an i386. Documented in
353 * Nx586 Processor Recognition Application Note, NexGen, Inc. 353 * Nx586 Processor Recognition Application Note, NexGen, Inc.
354 */ 354 */
355 movl $0x5555,%eax 355 movl $0x5555,%eax
356 xorl %edx,%edx 356 xorl %edx,%edx
357 movl $2,%ecx 357 movl $2,%ecx
358 divl %ecx 358 divl %ecx
359 jnz is386 359 jnz is386
360 360
361isnx586: 361isnx586:
362 /* 362 /*
363 * Don't try cpuid, as Nx586s reportedly don't support the 363 * Don't try cpuid, as Nx586s reportedly don't support the
364 * PSL_ID bit. 364 * PSL_ID bit.
365 */ 365 */
366 movl $CPU_NX586,RELOC(cputype) 366 movl $CPU_NX586,RELOC(cputype)
367 jmp 2f 367 jmp 2f
368 368
369is386: 369is386:
370 movl $CPU_386,RELOC(cputype) 370 movl $CPU_386,RELOC(cputype)
371 jmp 2f 371 jmp 2f
372 372
373try486: /* Try to toggle identification flag; does not exist on early 486s. */ 373try486: /* Try to toggle identification flag; does not exist on early 486s. */
374 pushfl 374 pushfl
375 popl %eax 375 popl %eax
376 movl %eax,%ecx 376 movl %eax,%ecx
377 xorl $PSL_ID,%eax 377 xorl $PSL_ID,%eax
378 pushl %eax 378 pushl %eax
379 popfl 379 popfl
380 pushfl 380 pushfl
381 popl %eax 381 popl %eax
382 xorl %ecx,%eax 382 xorl %ecx,%eax
383 andl $PSL_ID,%eax 383 andl $PSL_ID,%eax
384 pushl %ecx 384 pushl %ecx
385 popfl 385 popfl
386 386
387 testl %eax,%eax 387 testl %eax,%eax
388 jnz try586 388 jnz try586
389is486: movl $CPU_486,RELOC(cputype) 389is486: movl $CPU_486,RELOC(cputype)
390 /* 390 /*
391 * Check Cyrix CPU 391 * Check Cyrix CPU
392 * Cyrix CPUs do not change the undefined flags following 392 * Cyrix CPUs do not change the undefined flags following
393 * execution of the divide instruction which divides 5 by 2. 393 * execution of the divide instruction which divides 5 by 2.
394 * 394 *
395 * Note: CPUID is enabled on M2, so it passes another way. 395 * Note: CPUID is enabled on M2, so it passes another way.
396 */ 396 */
397 pushfl 397 pushfl
398 movl $0x5555, %eax 398 movl $0x5555, %eax
399 xorl %edx, %edx 399 xorl %edx, %edx
400 movl $2, %ecx 400 movl $2, %ecx
401 clc 401 clc
402 divl %ecx 402 divl %ecx
403 jnc trycyrix486 403 jnc trycyrix486
404 popfl 404 popfl
405 jmp 2f 405 jmp 2f
406trycyrix486: 406trycyrix486:
407 movl $CPU_6x86,RELOC(cputype) # set CPU type 407 movl $CPU_6x86,RELOC(cputype) # set CPU type
408 /* 408 /*
409 * Check for Cyrix 486 CPU by seeing if the flags change during a 409 * Check for Cyrix 486 CPU by seeing if the flags change during a
410 * divide. This is documented in the Cx486SLC/e SMM Programmer's 410 * divide. This is documented in the Cx486SLC/e SMM Programmer's
411 * Guide. 411 * Guide.
412 */ 412 */
413 xorl %edx,%edx 413 xorl %edx,%edx
414 cmpl %edx,%edx # set flags to known state 414 cmpl %edx,%edx # set flags to known state
415 pushfl 415 pushfl
416 popl %ecx # store flags in ecx 416 popl %ecx # store flags in ecx
417 movl $-1,%eax 417 movl $-1,%eax
418 movl $4,%ebx 418 movl $4,%ebx
419 divl %ebx # do a long division 419 divl %ebx # do a long division
420 pushfl 420 pushfl
421 popl %eax 421 popl %eax
422 xorl %ecx,%eax # are the flags different? 422 xorl %ecx,%eax # are the flags different?
423 testl $0x8d5,%eax # only check C|PF|AF|Z|N|V 423 testl $0x8d5,%eax # only check C|PF|AF|Z|N|V
424 jne 2f # yes; must be Cyrix 6x86 CPU 424 jne 2f # yes; must be Cyrix 6x86 CPU
425 movl $CPU_486DLC,RELOC(cputype) # set CPU type 425 movl $CPU_486DLC,RELOC(cputype) # set CPU type
426 426
427#ifndef CYRIX_CACHE_WORKS 427#ifndef CYRIX_CACHE_WORKS
428 /* Disable caching of the ISA hole only. */ 428 /* Disable caching of the ISA hole only. */
429 invd 429 invd
430 movb $CCR0,%al # Configuration Register index (CCR0) 430 movb $CCR0,%al # Configuration Register index (CCR0)
431 outb %al,$0x22 431 outb %al,$0x22
432 inb $0x23,%al 432 inb $0x23,%al
433 orb $(CCR0_NC1|CCR0_BARB),%al 433 orb $(CCR0_NC1|CCR0_BARB),%al
434 movb %al,%ah 434 movb %al,%ah
435 movb $CCR0,%al 435 movb $CCR0,%al
436 outb %al,$0x22 436 outb %al,$0x22
437 movb %ah,%al 437 movb %ah,%al
438 outb %al,$0x23 438 outb %al,$0x23
439 invd 439 invd
440#else /* CYRIX_CACHE_WORKS */ 440#else /* CYRIX_CACHE_WORKS */
441 /* Set cache parameters */ 441 /* Set cache parameters */
442 invd # Start with guaranteed clean cache 442 invd # Start with guaranteed clean cache
443 movb $CCR0,%al # Configuration Register index (CCR0) 443 movb $CCR0,%al # Configuration Register index (CCR0)
444 outb %al,$0x22 444 outb %al,$0x22
445 inb $0x23,%al 445 inb $0x23,%al
446 andb $~CCR0_NC0,%al 446 andb $~CCR0_NC0,%al
447#ifndef CYRIX_CACHE_REALLY_WORKS 447#ifndef CYRIX_CACHE_REALLY_WORKS
448 orb $(CCR0_NC1|CCR0_BARB),%al 448 orb $(CCR0_NC1|CCR0_BARB),%al
449#else 449#else
450 orb $CCR0_NC1,%al 450 orb $CCR0_NC1,%al
451#endif 451#endif
452 movb %al,%ah 452 movb %al,%ah
453 movb $CCR0,%al 453 movb $CCR0,%al
454 outb %al,$0x22 454 outb %al,$0x22
455 movb %ah,%al 455 movb %ah,%al
456 outb %al,$0x23 456 outb %al,$0x23
457 /* clear non-cacheable region 1 */ 457 /* clear non-cacheable region 1 */
458 movb $(NCR1+2),%al 458 movb $(NCR1+2),%al
459 outb %al,$0x22 459 outb %al,$0x22
460 movb $NCR_SIZE_0K,%al 460 movb $NCR_SIZE_0K,%al
461 outb %al,$0x23 461 outb %al,$0x23
462 /* clear non-cacheable region 2 */ 462 /* clear non-cacheable region 2 */
463 movb $(NCR2+2),%al 463 movb $(NCR2+2),%al
464 outb %al,$0x22 464 outb %al,$0x22
465 movb $NCR_SIZE_0K,%al 465 movb $NCR_SIZE_0K,%al
466 outb %al,$0x23 466 outb %al,$0x23
467 /* clear non-cacheable region 3 */ 467 /* clear non-cacheable region 3 */
468 movb $(NCR3+2),%al 468 movb $(NCR3+2),%al
469 outb %al,$0x22 469 outb %al,$0x22
470 movb $NCR_SIZE_0K,%al 470 movb $NCR_SIZE_0K,%al
471 outb %al,$0x23 471 outb %al,$0x23
472 /* clear non-cacheable region 4 */ 472 /* clear non-cacheable region 4 */
473 movb $(NCR4+2),%al 473 movb $(NCR4+2),%al
474 outb %al,$0x22 474 outb %al,$0x22
475 movb $NCR_SIZE_0K,%al 475 movb $NCR_SIZE_0K,%al
476 outb %al,$0x23 476 outb %al,$0x23
477 /* enable caching in CR0 */ 477 /* enable caching in CR0 */
478 movl %cr0,%eax 478 movl %cr0,%eax
479 andl $~(CR0_CD|CR0_NW),%eax 479 andl $~(CR0_CD|CR0_NW),%eax
480 movl %eax,%cr0 480 movl %eax,%cr0
481 invd 481 invd
482#endif /* CYRIX_CACHE_WORKS */ 482#endif /* CYRIX_CACHE_WORKS */
483 483
484 jmp 2f 484 jmp 2f
485 485
486try586: /* Use the `cpuid' instruction. */ 486try586: /* Use the `cpuid' instruction. */
487 xorl %eax,%eax 487 xorl %eax,%eax
488 cpuid 488 cpuid
489 movl %eax,RELOC(cpuid_level) 489 movl %eax,RELOC(cpuid_level)
490 490
4912: 4912:
492 /* 492 /*
493 * Finished with old stack; load new %esp now instead of later so we 493 * Finished with old stack; load new %esp now instead of later so we
494 * can trace this code without having to worry about the trace trap 494 * can trace this code without having to worry about the trace trap
495 * clobbering the memory test or the zeroing of the bss+bootstrap page 495 * clobbering the memory test or the zeroing of the bss+bootstrap page
496 * tables. 496 * tables.
497 * 497 *
498 * The boot program should check: 498 * The boot program should check:
499 * text+data <= &stack_variable - more_space_for_stack 499 * text+data <= &stack_variable - more_space_for_stack
500 * text+data+bss+pad+space_for_page_tables <= end_of_memory 500 * text+data+bss+pad+space_for_page_tables <= end_of_memory
501 * Oops, the gdt is in the carcass of the boot program so clearing 501 * Oops, the gdt is in the carcass of the boot program so clearing
502 * the rest of memory is still not possible. 502 * the rest of memory is still not possible.
503 */ 503 */
504 movl $_RELOC(tmpstk),%esp # bootstrap stack end location 504 movl $_RELOC(tmpstk),%esp # bootstrap stack end location
505 505
506/* 506/*
507 * Virtual address space of kernel, without PAE. The page dir is 1 page long. 507 * Virtual address space of kernel, without PAE. The page dir is 1 page long.
508 * 508 *
509 * text | data | bss | [syms] | [blobs] | page dir | proc0 kstack | L1 ptp 509 * text | data | bss | [syms] | [blobs] | page dir | proc0 kstack | L1 ptp
510 * 0 1 2 3 510 * 0 1 2 3
511 * 511 *
512 * Virtual address space of kernel, with PAE. We need 4 pages for the page dir 512 * Virtual address space of kernel, with PAE. We need 4 pages for the page dir
513 * and 1 page for the L3. 513 * and 1 page for the L3.
514 * text | data | bss | [syms] | [blobs] | L3 | page dir | proc0 kstack | L1 ptp 514 * text | data | bss | [syms] | [blobs] | L3 | page dir | proc0 kstack | L1 ptp
515 * 0 1 5 6 7 515 * 0 1 5 6 7
516 */ 516 */
517#ifndef PAE 517#ifndef PAE
518#define PROC0_PDIR_OFF 0 518#define PROC0_PDIR_OFF 0
519#else 519#else
520#define PROC0_L3_OFF 0 520#define PROC0_L3_OFF 0
521#define PROC0_PDIR_OFF 1 * PAGE_SIZE 521#define PROC0_PDIR_OFF 1 * PAGE_SIZE
522#endif 522#endif
523 523
524#define PROC0_STK_OFF (PROC0_PDIR_OFF + PDP_SIZE * PAGE_SIZE) 524#define PROC0_STK_OFF (PROC0_PDIR_OFF + PDP_SIZE * PAGE_SIZE)
525#define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE) 525#define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE)
526 526
527/* 527/*
528 * fillkpt - Fill in a kernel page table 528 * fillkpt - Fill in a kernel page table
529 * eax = pte (page frame | control | status) 529 * eax = pte (page frame | control | status)
530 * ebx = page table address 530 * ebx = page table address
531 * ecx = number of pages to map 531 * ecx = number of pages to map
532 * 532 *
533 * For PAE, each entry is 8 bytes long: we must set the 4 upper bytes to 0. 533 * For PAE, each entry is 8 bytes long: we must set the 4 upper bytes to 0.
534 * This is done by the first instruction of fillkpt. In the non-PAE case, this 534 * This is done by the first instruction of fillkpt. In the non-PAE case, this
535 * instruction just clears the page table entry. 535 * instruction just clears the page table entry.
536 */ 536 */
537 537
538#define fillkpt \ 538#define fillkpt \
5391: movl $0,(PDE_SIZE-4)(%ebx) ; /* clear bits */ \ 5391: movl $0,(PDE_SIZE-4)(%ebx) ; /* clear bits */ \
540 movl %eax,(%ebx) ; /* store phys addr */ \ 540 movl %eax,(%ebx) ; /* store phys addr */ \
541 addl $PDE_SIZE,%ebx ; /* next pte/pde */ \ 541 addl $PDE_SIZE,%ebx ; /* next pte/pde */ \
542 addl $PAGE_SIZE,%eax ; /* next phys page */ \ 542 addl $PAGE_SIZE,%eax ; /* next phys page */ \
543 loop 1b ; 543 loop 1b ;
544 544
545 /* Find end of kernel image. */ 545 /* Find end of kernel image. */
546 movl $RELOC(end),%edi 546 movl $RELOC(end),%edi
547 547
548#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE) 548#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
549 /* Save the symbols (if loaded). */ 549 /* Save the symbols (if loaded). */
550 movl RELOC(esym),%eax 550 movl RELOC(esym),%eax
551 testl %eax,%eax 551 testl %eax,%eax
552 jz 1f 552 jz 1f
553 subl $KERNBASE,%eax 553 subl $KERNBASE,%eax
554 movl %eax,%edi 554 movl %eax,%edi
5551: 5551:
556#endif 556#endif
557 557
558 /* Skip over any modules/blobs. */ 558 /* Skip over any modules/blobs. */
559 movl RELOC(eblob),%eax 559 movl RELOC(eblob),%eax
560 testl %eax,%eax 560 testl %eax,%eax
561 jz 1f 561 jz 1f
562 subl $KERNBASE,%eax 562 subl $KERNBASE,%eax
563 movl %eax,%edi 563 movl %eax,%edi
5641: 5641:
565 /* Compute sizes */ 565 /* Compute sizes */
566 movl %edi,%esi 566 movl %edi,%esi
567 addl $PGOFSET,%esi # page align up 567 addl $PGOFSET,%esi # page align up
568 andl $~PGOFSET,%esi 568 andl $~PGOFSET,%esi
569 569
570 /* nkptp[1] = (esi + ~L2_FRAME) >> L2_SHIFT + 1; */ 570 /* nkptp[1] = (esi + ~L2_FRAME) >> L2_SHIFT + 1; */
571 movl %esi,%eax 571 movl %esi,%eax
572 addl $~L2_FRAME,%eax 572 addl $~L2_FRAME,%eax
573 shrl $L2_SHIFT,%eax 573 shrl $L2_SHIFT,%eax
574 incl %eax /* one more ptp for VAs stolen by bootstrap */ 574 incl %eax /* one more ptp for VAs stolen by bootstrap */
5751: movl %eax,RELOC(nkptp)+1*4 5751: movl %eax,RELOC(nkptp)+1*4
576 576
577 /* tablesize = (PDP_SIZE + UPAGES + nkptp) << PGSHIFT; */ 577 /* tablesize = (PDP_SIZE + UPAGES + nkptp) << PGSHIFT; */
578 addl $(PDP_SIZE+UPAGES),%eax 578 addl $(PDP_SIZE+UPAGES),%eax
579#ifdef PAE 579#ifdef PAE
580 incl %eax /* one more page for the L3 PD */ 580 incl %eax /* one more page for the L3 PD */
581 shll $PGSHIFT+1,%eax /* PTP tables are twice larger with PAE */ 581 shll $PGSHIFT+1,%eax /* PTP tables are twice larger with PAE */
582#else 582#else
583 shll $PGSHIFT,%eax 583 shll $PGSHIFT,%eax
584#endif 584#endif
585 movl %eax,RELOC(tablesize) 585 movl %eax,RELOC(tablesize)
586 586
587 /* ensure that nkptp covers bootstrap tables */ 587 /* ensure that nkptp covers bootstrap tables */
588 addl %esi,%eax 588 addl %esi,%eax
589 addl $~L2_FRAME,%eax 589 addl $~L2_FRAME,%eax
590 shrl $L2_SHIFT,%eax 590 shrl $L2_SHIFT,%eax
591 incl %eax 591 incl %eax
592 cmpl %eax,RELOC(nkptp)+1*4 592 cmpl %eax,RELOC(nkptp)+1*4
593 jnz 1b 593 jnz 1b
594 594
595 /* Clear tables */ 595 /* Clear tables */
596 movl %esi,%edi 596 movl %esi,%edi
597 xorl %eax,%eax 597 xorl %eax,%eax
598 cld 598 cld
599 movl RELOC(tablesize),%ecx 599 movl RELOC(tablesize),%ecx
600 shrl $2,%ecx 600 shrl $2,%ecx
601 rep 601 rep
602 stosl 602 stosl
603 603
604 leal (PROC0_PTP1_OFF)(%esi), %ebx 604 leal (PROC0_PTP1_OFF)(%esi), %ebx
605 605
606/* 606/*
607 * Build initial page tables. 607 * Build initial page tables.
608 */ 608 */
609 /* 609 /*
610 * Compute &__data_start - KERNBASE. This can't be > 4G, 610 * Compute &__data_start - KERNBASE. This can't be > 4G,
611 * or we can't deal with it anyway, since we can't load it in 611 * or we can't deal with it anyway, since we can't load it in
612 * 32 bit mode. So use the bottom 32 bits. 612 * 32 bit mode. So use the bottom 32 bits.
613 */ 613 */
614 movl $RELOC(__data_start),%edx 614 movl $RELOC(__data_start),%edx
615 andl $~PGOFSET,%edx 615 andl $~PGOFSET,%edx
616 616
617 /* 617 /*
618 * Skip the first MB. 618 * Skip the first MB.
619 */ 619 */
620 movl $_RELOC(KERNTEXTOFF),%eax 620 movl $_RELOC(KERNTEXTOFF),%eax
621 movl %eax,%ecx 621 movl %eax,%ecx
622 shrl $(PGSHIFT-2),%ecx /* ((n >> PGSHIFT) << 2) for # pdes */ 622 shrl $(PGSHIFT-2),%ecx /* ((n >> PGSHIFT) << 2) for # pdes */
623#ifdef PAE 623#ifdef PAE
624 shll $1,%ecx /* pdes are twice larger with PAE */ 624 shll $1,%ecx /* pdes are twice larger with PAE */
625#endif 625#endif
626 addl %ecx,%ebx 626 addl %ecx,%ebx
627 627
628 /* Map the kernel text read-only. */ 628 /* Map the kernel text read-only. */
629 movl %edx,%ecx 629 movl %edx,%ecx
630 subl %eax,%ecx 630 subl %eax,%ecx
631 shrl $PGSHIFT,%ecx 631 shrl $PGSHIFT,%ecx
632 orl $(PG_V|PG_KR),%eax 632 orl $(PG_V|PG_KR),%eax
633 fillkpt 633 fillkpt
634 634
635 /* Map the data, BSS, and bootstrap tables read-write. */ 635 /* Map the data, BSS, and bootstrap tables read-write. */
636 leal (PG_V|PG_KW)(%edx),%eax 636 leal (PG_V|PG_KW)(%edx),%eax
637 movl RELOC(tablesize),%ecx 637 movl RELOC(tablesize),%ecx
638 addl %esi,%ecx # end of tables 638 addl %esi,%ecx # end of tables
639 subl %edx,%ecx # subtract end of text 639 subl %edx,%ecx # subtract end of text
640 shrl $PGSHIFT,%ecx 640 shrl $PGSHIFT,%ecx
641 fillkpt 641 fillkpt
642 642
643 /* Map ISA I/O mem (later atdevbase) */ 643 /* Map ISA I/O mem (later atdevbase) */
644 movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set 644 movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set
645 movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s, 645 movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s,
646 fillkpt 646 fillkpt
647 647
648/* 648/*
649 * Construct a page table directory. 649 * Construct a page table directory.
650 */ 650 */
651 /* Set up top level entries for identity mapping */ 651 /* Set up top level entries for identity mapping */
652 leal (PROC0_PDIR_OFF)(%esi),%ebx 652 leal (PROC0_PDIR_OFF)(%esi),%ebx
653 leal (PROC0_PTP1_OFF)(%esi),%eax 653 leal (PROC0_PTP1_OFF)(%esi),%eax
654 orl $(PG_V|PG_KW), %eax 654 orl $(PG_V|PG_KW), %eax
655 movl RELOC(nkptp)+1*4,%ecx 655 movl RELOC(nkptp)+1*4,%ecx
656 fillkpt 656 fillkpt
657 657
658 /* Set up top level entries for actual kernel mapping */ 658 /* Set up top level entries for actual kernel mapping */
659 leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*PDE_SIZE)(%esi),%ebx 659 leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*PDE_SIZE)(%esi),%ebx
660 leal (PROC0_PTP1_OFF)(%esi),%eax 660 leal (PROC0_PTP1_OFF)(%esi),%eax
661 orl $(PG_V|PG_KW), %eax 661 orl $(PG_V|PG_KW), %eax
662 movl RELOC(nkptp)+1*4,%ecx 662 movl RELOC(nkptp)+1*4,%ecx
663 fillkpt 663 fillkpt
664 664
665 /* Install a PDE recursively mapping page directory as a page table! */ 665 /* Install a PDE recursively mapping page directory as a page table! */
666 leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*PDE_SIZE)(%esi),%ebx 666 leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*PDE_SIZE)(%esi),%ebx
667 leal (PROC0_PDIR_OFF)(%esi),%eax 667 leal (PROC0_PDIR_OFF)(%esi),%eax
668 orl $(PG_V|PG_KW),%eax 668 orl $(PG_V|PG_KW),%eax
669 movl $PDP_SIZE,%ecx 669 movl $PDP_SIZE,%ecx
670 fillkpt 670 fillkpt
671 671
672#ifdef PAE 672#ifdef PAE
673 /* Fill in proc0 L3 page with entries pointing to the page dirs */ 673 /* Fill in proc0 L3 page with entries pointing to the page dirs */
674 leal (PROC0_L3_OFF)(%esi),%ebx 674 leal (PROC0_L3_OFF)(%esi),%ebx
675 leal (PROC0_PDIR_OFF)(%esi),%eax 675 leal (PROC0_PDIR_OFF)(%esi),%eax
676 orl $(PG_V),%eax 676 orl $(PG_V),%eax
677 movl $PDP_SIZE,%ecx 677 movl $PDP_SIZE,%ecx
678 fillkpt 678 fillkpt
679 679
680 /* Enable PAE mode */ 680 /* Enable PAE mode */
681 movl %cr4,%eax 681 movl %cr4,%eax
682 orl $CR4_PAE,%eax 682 orl $CR4_PAE,%eax
683 movl %eax,%cr4 683 movl %eax,%cr4
684#endif 684#endif
685 685
686 /* Save phys. addr of PDP, for libkvm. */ 686 /* Save phys. addr of PDP, for libkvm. */
687 leal (PROC0_PDIR_OFF)(%esi),%eax 687 leal (PROC0_PDIR_OFF)(%esi),%eax
688 movl %eax,RELOC(PDPpaddr) 688 movl %eax,RELOC(PDPpaddr)
689 689
690 /* 690 /*
691 * Startup checklist: 691 * Startup checklist:
692 * 1. Load %cr3 with pointer to PDIR (or L3 PD page for PAE). 692 * 1. Load %cr3 with pointer to PDIR (or L3 PD page for PAE).
693 */ 693 */
694 movl %esi,%eax # phys address of ptd in proc 0 694 movl %esi,%eax # phys address of ptd in proc 0
695 movl %eax,%cr3 # load ptd addr into mmu 695 movl %eax,%cr3 # load ptd addr into mmu
696 696
697 /* 697 /*
698 * 2. Enable paging and the rest of it. 698 * 2. Enable paging and the rest of it.
699 */ 699 */
700 movl %cr0,%eax # get control word 700 movl %cr0,%eax # get control word
701 # enable paging & NPX emulation 701 # enable paging & NPX emulation
702 orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP|CR0_WP|CR0_AM),%eax 702 orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP|CR0_WP|CR0_AM),%eax
703 movl %eax,%cr0 # and page NOW! 703 movl %eax,%cr0 # and page NOW!
704 704
705 pushl $begin # jump to high mem 705 pushl $begin # jump to high mem
706 ret 706 ret
707 707
708begin: 708begin:
709 /* 709 /*
710 * We have arrived. 710 * We have arrived.
711 * There's no need anymore for the identity mapping in low 711 * There's no need anymore for the identity mapping in low
712 * memory, remove it. 712 * memory, remove it.
713 */ 713 */
714 movl _C_LABEL(nkptp)+1*4,%ecx 714 movl _C_LABEL(nkptp)+1*4,%ecx
715 leal (PROC0_PDIR_OFF)(%esi),%ebx # old, phys address of PDIR 715 leal (PROC0_PDIR_OFF)(%esi),%ebx # old, phys address of PDIR
716 addl $(KERNBASE), %ebx # new, virtual address of PDIR 716 addl $(KERNBASE), %ebx # new, virtual address of PDIR
7171: movl $0,(PDE_SIZE-4)(%ebx) # Upper bits (for PAE) 7171: movl $0,(PDE_SIZE-4)(%ebx) # Upper bits (for PAE)
718 movl $0,(%ebx) 718 movl $0,(%ebx)
719 addl $PDE_SIZE,%ebx 719 addl $PDE_SIZE,%ebx
720 loop 1b 720 loop 1b
721 721
722 /* Relocate atdevbase. */ 722 /* Relocate atdevbase. */
723 movl $KERNBASE,%edx 723 movl $KERNBASE,%edx
724 addl _C_LABEL(tablesize),%edx 724 addl _C_LABEL(tablesize),%edx
725 addl %esi,%edx 725 addl %esi,%edx
726 movl %edx,_C_LABEL(atdevbase) 726 movl %edx,_C_LABEL(atdevbase)
727 727
728 /* Set up bootstrap stack. */ 728 /* Set up bootstrap stack. */
729 leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax 729 leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax
730 movl %eax,_C_LABEL(lwp0uarea) 730 movl %eax,_C_LABEL(lwp0uarea)
731 leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp 731 leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp
732 movl %esi,(KSTACK_SIZE+PCB_CR3)(%eax) # pcb->pcb_cr3 732 movl %esi,(KSTACK_SIZE+PCB_CR3)(%eax) # pcb->pcb_cr3
733 xorl %ebp,%ebp # mark end of frames 733 xorl %ebp,%ebp # mark end of frames
734 734
735#if defined(MULTIBOOT) 735#if defined(MULTIBOOT)
736 /* It is now safe to parse the Multiboot information structure 736 /* It is now safe to parse the Multiboot information structure
737 * we saved before from C code. Note that we cannot delay its 737 * we saved before from C code. Note that we cannot delay its
738 * parsing any more because initgdt (called below) needs to make 738 * parsing any more because initgdt (called below) needs to make
739 * use of this information. */ 739 * use of this information. */
740 call _C_LABEL(multiboot_post_reloc) 740 call _C_LABEL(multiboot_post_reloc)
741#endif 741#endif
742 742
743 subl $NGDT*8, %esp # space for temporary gdt 743 subl $NGDT*8, %esp # space for temporary gdt
744 pushl %esp 744 pushl %esp
745 call _C_LABEL(initgdt) 745 call _C_LABEL(initgdt)
746 addl $4,%esp 746 addl $4,%esp
747 747
748 movl _C_LABEL(tablesize),%eax 748 movl _C_LABEL(tablesize),%eax
749 addl %esi,%eax # skip past stack and page tables 749 addl %esi,%eax # skip past stack and page tables
750 750
751#ifdef PAE 751#ifdef PAE
752 pushl $0 # init386() expects a 64 bits paddr_t with PAE 752 pushl $0 # init386() expects a 64 bits paddr_t with PAE
753#endif 753#endif
754 pushl %eax 754 pushl %eax
755 call _C_LABEL(init386) # wire 386 chip for unix operation 755 call _C_LABEL(init386) # wire 386 chip for unix operation
756 addl $PDE_SIZE,%esp # pop paddr_t 756 addl $PDE_SIZE,%esp # pop paddr_t
757 addl $NGDT*8,%esp # pop temporary gdt 757 addl $NGDT*8,%esp # pop temporary gdt
758 758
759#ifdef SAFARI_FIFO_HACK 759#ifdef SAFARI_FIFO_HACK
760 movb $5,%al 760 movb $5,%al
761 movw $0x37b,%dx 761 movw $0x37b,%dx
762 outb %al,%dx 762 outb %al,%dx
763 movw $0x37f,%dx 763 movw $0x37f,%dx
764 inb %dx,%al 764 inb %dx,%al
765 movb %al,%cl 765 movb %al,%cl
766 766
767 orb $1,%cl 767 orb $1,%cl
768 768
769 movb $5,%al 769 movb $5,%al
770 movw $0x37b,%dx 770 movw $0x37b,%dx
771 outb %al,%dx 771 outb %al,%dx
772 movw $0x37f,%dx 772 movw $0x37f,%dx
773 movb %cl,%al 773 movb %cl,%al
774 outb %al,%dx 774 outb %al,%dx
775#endif /* SAFARI_FIFO_HACK */ 775#endif /* SAFARI_FIFO_HACK */
776 776
777 call _C_LABEL(main) 777 call _C_LABEL(main)
778#else /* XEN */ 778#else /* XEN */
779 /* First, reset the PSL. */ 779 /* First, reset the PSL. */
780 pushl $PSL_MBO 780 pushl $PSL_MBO
781 popfl 781 popfl
782 782
783 cld 783 cld
784 movl %esp, %ebx # save start of available space 784 movl %esp, %ebx # save start of available space
785 movl $_RELOC(tmpstk),%esp # bootstrap stack end location 785 movl $_RELOC(tmpstk),%esp # bootstrap stack end location
786 786
787 /* Clear BSS. */ 787 /* Clear BSS. */
788 xorl %eax,%eax 788 xorl %eax,%eax
789 movl $RELOC(__bss_start),%edi 789 movl $RELOC(__bss_start),%edi
790 movl $RELOC(_end),%ecx 790 movl $RELOC(_end),%ecx
791 subl %edi,%ecx 791 subl %edi,%ecx
792 rep stosb 792 rep stosb
793 793
794 /* Copy the necessary stuff from start_info structure. */ 794 /* Copy the necessary stuff from start_info structure. */
795 /* We need to copy shared_info early, so that sti/cli work */ 795 /* We need to copy shared_info early, so that sti/cli work */
796 movl $RELOC(start_info_union),%edi 796 movl $RELOC(start_info_union),%edi
797 movl $128,%ecx 797 movl $128,%ecx
798 rep movsl 798 rep movsl
799 799
800 /* Clear segment registers; always null in proc0. */ 800 /* Clear segment registers; always null in proc0. */
801 xorl %eax,%eax 801 xorl %eax,%eax
802 movw %ax,%fs 802 movw %ax,%fs
803 movw %ax,%gs 803 movw %ax,%gs
804 decl %eax 804 decl %eax
805 movl %eax,RELOC(cpuid_level) 805 movl %eax,RELOC(cpuid_level)
806 806
807 xorl %eax,%eax 807 xorl %eax,%eax
808 cpuid 808 cpuid
809 movl %eax,RELOC(cpuid_level) 809 movl %eax,RELOC(cpuid_level)
810 810
811 /* 811 /*
812 * Use a temp page. We'll re- add it to uvm(9) once we're 812 * Use a temp page. We'll re- add it to uvm(9) once we're
813 * done using it. 813 * done using it.
814 */ 814 */
815 movl $RELOC(tmpgdt), %eax 815 movl $RELOC(tmpgdt), %eax
816 pushl %eax # start of temporary gdt 816 pushl %eax # start of temporary gdt
817 call _C_LABEL(initgdt) 817 call _C_LABEL(initgdt)
818 addl $4,%esp 818 addl $4,%esp
819 819
820 call xen_pmap_bootstrap 820 call xen_pmap_bootstrap
821 821
822 /* 822 /*
823 * First avail returned by xen_pmap_bootstrap in %eax 823 * First avail returned by xen_pmap_bootstrap in %eax
824 */ 824 */
825 movl %eax, %esi; 825 movl %eax, %esi;
826 movl %esi, _C_LABEL(lwp0uarea) 826 movl %esi, _C_LABEL(lwp0uarea)
827 827
828 /* Set up bootstrap stack. */ 828 /* Set up bootstrap stack. */
829 leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp 829 leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp
830 xorl %ebp,%ebp # mark end of frames 830 xorl %ebp,%ebp # mark end of frames
831 831
832 addl $USPACE, %esi 832 addl $USPACE, %esi
833 subl $KERNBASE, %esi #init386 want a physical address 833 subl $KERNBASE, %esi #init386 want a physical address
834 834
835#ifdef PAE 835#ifdef PAE
836 pushl $0 # init386() expects a 64 bits paddr_t with PAE 836 pushl $0 # init386() expects a 64 bits paddr_t with PAE
837#endif 837#endif
838 pushl %esi 838 pushl %esi
839 call _C_LABEL(init386) # wire 386 chip for unix operation 839 call _C_LABEL(init386) # wire 386 chip for unix operation
840 addl $PDE_SIZE,%esp # pop paddr_t 840 addl $PDE_SIZE,%esp # pop paddr_t
841 call _C_LABEL(main) 841 call _C_LABEL(main)
842#endif /* XEN */ 842#endif /* XEN */
843END(start) 843END(start)
844 844
845#if defined(XEN) 845#if defined(XEN)
846/* space for the hypercall call page */ 846/* space for the hypercall call page */
847#define HYPERCALL_PAGE_OFFSET 0x1000 847#define HYPERCALL_PAGE_OFFSET 0x1000
848.org HYPERCALL_PAGE_OFFSET 848.org HYPERCALL_PAGE_OFFSET
849ENTRY(hypercall_page) 849ENTRY(hypercall_page)
850.skip 0x1000 850.skip 0x1000
851END(hypercall_page) 851END(hypercall_page)
852 852
853/* 853/*
854 * void lgdt_finish(void); 854 * void lgdt_finish(void);
855 * Finish load a new GDT pointer (do any necessary cleanup). 855 * Finish load a new GDT pointer (do any necessary cleanup).
856 * XXX It's somewhat questionable whether reloading all the segment registers 856 * XXX It's somewhat questionable whether reloading all the segment registers
857 * is necessary, since the actual descriptor data is not changed except by 857 * is necessary, since the actual descriptor data is not changed except by
858 * process creation and exit, both of which clean up via task switches. OTOH, 858 * process creation and exit, both of which clean up via task switches. OTOH,
859 * this only happens at run time when the GDT is resized. 859 * this only happens at run time when the GDT is resized.
860 */ 860 */
861/* LINTSTUB: Func: void lgdt_finish(void) */ 861/* LINTSTUB: Func: void lgdt_finish(void) */
862NENTRY(lgdt_finish) 862NENTRY(lgdt_finish)
863 movl $GSEL(GDATA_SEL, SEL_KPL),%eax 863 movl $GSEL(GDATA_SEL, SEL_KPL),%eax
864 movw %ax,%ds 864 movw %ax,%ds
865 movw %ax,%es 865 movw %ax,%es
866 movw %ax,%gs 866 movw %ax,%gs
867 movw %ax,%ss 867 movw %ax,%ss
868 movl $GSEL(GCPU_SEL, SEL_KPL),%eax 868 movl $GSEL(GCPU_SEL, SEL_KPL),%eax
869 movw %ax,%fs 869 movw %ax,%fs
870 /* Reload code selector by doing intersegment return. */ 870 /* Reload code selector by doing intersegment return. */
871 popl %eax 871 popl %eax
872 pushl $GSEL(GCODE_SEL, SEL_KPL) 872 pushl $GSEL(GCODE_SEL, SEL_KPL)
873 pushl %eax 873 pushl %eax
874 lret 874 lret
875END(lgdt_finish) 875END(lgdt_finish)
876 876
877#endif /* XEN */ 877#endif /* XEN */
878 878
879/* 879/*
880 * void lwp_trampoline(void); 880 * void lwp_trampoline(void);
881 * 881 *
882 * This is a trampoline function pushed onto the stack of a newly created 882 * This is a trampoline function pushed onto the stack of a newly created
883 * process in order to do some additional setup. The trampoline is entered by 883 * process in order to do some additional setup. The trampoline is entered by
884 * cpu_switch()ing to the process, so we abuse the callee-saved registers used 884 * cpu_switchto()ing to the process, so we abuse the callee-saved
885 * by cpu_switch() to store the information about the stub to call. 885 * registers used by cpu_switchto() to store the information about the
 886 * stub to call.
886 * NOTE: This function does not have a normal calling sequence! 887 * NOTE: This function does not have a normal calling sequence!
887 */ 888 */
888NENTRY(lwp_trampoline) 889NENTRY(lwp_trampoline)
889 movl %ebp,%edi /* for .Lsyscall_checkast */ 890 movl %ebp,%edi /* for .Lsyscall_checkast */
890 xorl %ebp,%ebp 891 xorl %ebp,%ebp
891 pushl %edi 892 pushl %edi
892 pushl %eax 893 pushl %eax
893 call _C_LABEL(lwp_startup) 894 call _C_LABEL(lwp_startup)
894 addl $8,%esp 895 addl $8,%esp
895 pushl %ebx 896 pushl %ebx
896 call *%esi 897 call *%esi
897 addl $4,%esp 898 addl $4,%esp
898 jmp .Lsyscall_checkast 899 jmp .Lsyscall_checkast
899 /* NOTREACHED */ 900 /* NOTREACHED */
900END(lwp_trampoline) 901END(lwp_trampoline)
901 902
902/* 903/*
903 * sigcode() 904 * sigcode()
904 * 905 *
905 * Signal trampoline; copied to top of user stack. Used only for 906 * Signal trampoline; copied to top of user stack. Used only for
906 * compatibility with old releases of NetBSD. 907 * compatibility with old releases of NetBSD.
907 */ 908 */
908NENTRY(sigcode) 909NENTRY(sigcode)
909 /* 910 /*
910 * Handler has returned here as if we called it. The sigcontext 911 * Handler has returned here as if we called it. The sigcontext
911 * is on the stack after the 3 args "we" pushed. 912 * is on the stack after the 3 args "we" pushed.
912 */ 913 */
913 leal 12(%esp),%eax # get pointer to sigcontext 914 leal 12(%esp),%eax # get pointer to sigcontext
914 movl %eax,4(%esp) # put it in the argument slot 915 movl %eax,4(%esp) # put it in the argument slot
915 # fake return address already there 916 # fake return address already there
916 movl $SYS_compat_16___sigreturn14,%eax 917 movl $SYS_compat_16___sigreturn14,%eax
917 int $0x80 # enter kernel with args on stack 918 int $0x80 # enter kernel with args on stack
918 movl $SYS_exit,%eax 919 movl $SYS_exit,%eax
919 int $0x80 # exit if sigreturn fails 920 int $0x80 # exit if sigreturn fails
920 .globl _C_LABEL(esigcode) 921 .globl _C_LABEL(esigcode)
921_C_LABEL(esigcode): 922_C_LABEL(esigcode):
922END(sigcode) 923END(sigcode)
923 924
924/* 925/*
925 * int setjmp(label_t *) 926 * int setjmp(label_t *)
926 * 927 *
927 * Used primarily by DDB. 928 * Used primarily by DDB.
928 */ 929 */
929ENTRY(setjmp) 930ENTRY(setjmp)
930 movl 4(%esp),%eax 931 movl 4(%esp),%eax
931 movl %ebx,(%eax) # save ebx 932 movl %ebx,(%eax) # save ebx
932 movl %esp,4(%eax) # save esp 933 movl %esp,4(%eax) # save esp
933 movl %ebp,8(%eax) # save ebp 934 movl %ebp,8(%eax) # save ebp
934 movl %esi,12(%eax) # save esi 935 movl %esi,12(%eax) # save esi
935 movl %edi,16(%eax) # save edi 936 movl %edi,16(%eax) # save edi
936 movl (%esp),%edx # get rta 937 movl (%esp),%edx # get rta
937 movl %edx,20(%eax) # save eip 938 movl %edx,20(%eax) # save eip
938 xorl %eax,%eax # return 0 939 xorl %eax,%eax # return 0
939 ret 940 ret
940END(setjmp) 941END(setjmp)
941 942
942/* 943/*
943 * int longjmp(label_t *) 944 * int longjmp(label_t *)
944 * 945 *
945 * Used primarily by DDB. 946 * Used primarily by DDB.
946 */ 947 */
947ENTRY(longjmp) 948ENTRY(longjmp)
948 movl 4(%esp),%eax 949 movl 4(%esp),%eax
949 movl (%eax),%ebx # restore ebx 950 movl (%eax),%ebx # restore ebx
950 movl 4(%eax),%esp # restore esp 951 movl 4(%eax),%esp # restore esp
951 movl 8(%eax),%ebp # restore ebp 952 movl 8(%eax),%ebp # restore ebp
952 movl 12(%eax),%esi # restore esi 953 movl 12(%eax),%esi # restore esi
953 movl 16(%eax),%edi # restore edi 954 movl 16(%eax),%edi # restore edi
954 movl 20(%eax),%edx # get rta 955 movl 20(%eax),%edx # get rta
955 movl %edx,(%esp) # put in return frame 956 movl %edx,(%esp) # put in return frame
956 movl $1,%eax # return 1 957 movl $1,%eax # return 1
957 ret 958 ret
958END(longjmp) 959END(longjmp)
959 960
960/* 961/*
961 * void dumpsys(void) 962 * void dumpsys(void)
962 * 963 *
963 * Mimic cpu_switchto() for postmortem debugging. 964 * Mimic cpu_switchto() for postmortem debugging.
964 */ 965 */
965ENTRY(dumpsys) 966ENTRY(dumpsys)
966 pushl %ebx # set up fake switchframe 967 pushl %ebx # set up fake switchframe
967 pushl %esi # and save context 968 pushl %esi # and save context
968 pushl %edi  969 pushl %edi
969 movl %esp,_C_LABEL(dumppcb)+PCB_ESP 970 movl %esp,_C_LABEL(dumppcb)+PCB_ESP
970 movl %ebp,_C_LABEL(dumppcb)+PCB_EBP 971 movl %ebp,_C_LABEL(dumppcb)+PCB_EBP
971 call _C_LABEL(dodumpsys) # dump! 972 call _C_LABEL(dodumpsys) # dump!
972 addl $(3*4), %esp # unwind switchframe 973 addl $(3*4), %esp # unwind switchframe
973 ret 974 ret
974END(dumpsys) 975END(dumpsys)
975 976
976/* 977/*
977 * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp, 978 * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp,
978 * bool returning) 979 * bool returning)
979 * 980 *
980 * 1. if (oldlwp != NULL), save its context. 981 * 1. if (oldlwp != NULL), save its context.
981 * 2. then, restore context of newlwp. 982 * 2. then, restore context of newlwp.
982 * 983 *
983 * Note that the stack frame layout is known to "struct switchframe" in 984 * Note that the stack frame layout is known to "struct switchframe" in
984 * <machine/frame.h> and to the code in cpu_lwp_fork() which initializes 985 * <machine/frame.h> and to the code in cpu_lwp_fork() which initializes
985 * it for a new lwp. 986 * it for a new lwp.
986 */ 987 */
987ENTRY(cpu_switchto) 988ENTRY(cpu_switchto)
988 pushl %ebx 989 pushl %ebx
989 pushl %esi 990 pushl %esi
990 pushl %edi 991 pushl %edi
991 992
992#if defined(DIAGNOSTIC) && !defined(XEN) 993#if defined(DIAGNOSTIC) && !defined(XEN)
993 cmpl $IPL_SCHED,CPUVAR(ILEVEL) 994 cmpl $IPL_SCHED,CPUVAR(ILEVEL)
994 jbe 0f 995 jbe 0f
995 pushl CPUVAR(ILEVEL) 996 pushl CPUVAR(ILEVEL)
996 pushl $.Lstr 997 pushl $.Lstr
997 call _C_LABEL(panic) 998 call _C_LABEL(panic)
998 addl $8,%esp 999 addl $8,%esp
999.Lstr: .string "cpu_switchto: switching above IPL_SCHED (%d)\0" 1000.Lstr: .string "cpu_switchto: switching above IPL_SCHED (%d)\0"
10000: 10010:
1001#endif 1002#endif
1002 1003
1003 movl 16(%esp),%esi # oldlwp 1004 movl 16(%esp),%esi # oldlwp
1004 movl 20(%esp),%edi # newlwp 1005 movl 20(%esp),%edi # newlwp
1005 movl 24(%esp),%edx # returning 1006 movl 24(%esp),%edx # returning
1006 testl %esi,%esi 1007 testl %esi,%esi
1007 jz 1f 1008 jz 1f
1008 1009
1009 /* Save old context. */ 1010 /* Save old context. */
1010 movl L_PCB(%esi),%eax 1011 movl L_PCB(%esi),%eax
1011 movl %esp,PCB_ESP(%eax) 1012 movl %esp,PCB_ESP(%eax)
1012 movl %ebp,PCB_EBP(%eax) 1013 movl %ebp,PCB_EBP(%eax)
1013 1014
1014 /* Switch to newlwp's stack. */ 1015 /* Switch to newlwp's stack. */
10151: movl L_PCB(%edi),%ebx 10161: movl L_PCB(%edi),%ebx
1016 movl PCB_EBP(%ebx),%ebp 1017 movl PCB_EBP(%ebx),%ebp
1017 movl PCB_ESP(%ebx),%esp 1018 movl PCB_ESP(%ebx),%esp
1018 1019
1019 /* 1020 /*
1020 * Set curlwp. This must be globally visible in order to permit 1021 * Set curlwp. This must be globally visible in order to permit
1021 * non-interlocked mutex release. 1022 * non-interlocked mutex release.
1022 */ 1023 */
1023 movl %edi,%ecx 1024 movl %edi,%ecx
1024 xchgl %ecx,CPUVAR(CURLWP) 1025 xchgl %ecx,CPUVAR(CURLWP)
1025 1026
1026 /* Skip the rest if returning to a pinned LWP. */ 1027 /* Skip the rest if returning to a pinned LWP. */
1027 testl %edx,%edx 1028 testl %edx,%edx
1028 jnz 4f 1029 jnz 4f
1029 1030
1030#ifdef XEN 1031#ifdef XEN
1031 pushl %edi 1032 pushl %edi
1032 call _C_LABEL(i386_switch_context) 1033 call _C_LABEL(i386_switch_context)
1033 addl $4,%esp 1034 addl $4,%esp
1034#else /* !XEN */ 1035#else /* !XEN */
1035 /* Switch ring0 esp */ 1036 /* Switch ring0 esp */
1036 movl PCB_ESP0(%ebx),%eax 1037 movl PCB_ESP0(%ebx),%eax
1037 movl %eax,CPUVAR(ESP0) 1038 movl %eax,CPUVAR(ESP0)
1038#endif /* !XEN */ 1039#endif /* !XEN */
1039 1040
1040 /* Don't bother with the rest if switching to a system process. */ 1041 /* Don't bother with the rest if switching to a system process. */
1041 testl $LW_SYSTEM,L_FLAG(%edi) 1042 testl $LW_SYSTEM,L_FLAG(%edi)
1042 jnz 4f 1043 jnz 4f
1043 1044
1044#ifndef XEN 1045#ifndef XEN
1045 /* Restore thread-private %fs/%gs descriptors. */ 1046 /* Restore thread-private %fs/%gs descriptors. */
1046 movl CPUVAR(GDT),%ecx 1047 movl CPUVAR(GDT),%ecx
1047 movl PCB_FSD(%ebx), %eax 1048 movl PCB_FSD(%ebx), %eax
1048 movl PCB_FSD+4(%ebx), %edx 1049 movl PCB_FSD+4(%ebx), %edx
1049 movl %eax, (GUFS_SEL*8)(%ecx) 1050 movl %eax, (GUFS_SEL*8)(%ecx)
1050 movl %edx, (GUFS_SEL*8+4)(%ecx) 1051 movl %edx, (GUFS_SEL*8+4)(%ecx)
1051 movl PCB_GSD(%ebx), %eax 1052 movl PCB_GSD(%ebx), %eax
1052 movl PCB_GSD+4(%ebx), %edx 1053 movl PCB_GSD+4(%ebx), %edx
1053 movl %eax, (GUGS_SEL*8)(%ecx) 1054 movl %eax, (GUGS_SEL*8)(%ecx)
1054 movl %edx, (GUGS_SEL*8+4)(%ecx) 1055 movl %edx, (GUGS_SEL*8+4)(%ecx)
1055#endif /* !XEN */ 1056#endif /* !XEN */
1056 1057
1057 /* Switch I/O bitmap */ 1058 /* Switch I/O bitmap */
1058 movl PCB_IOMAP(%ebx),%eax 1059 movl PCB_IOMAP(%ebx),%eax
1059 orl %eax,%eax 1060 orl %eax,%eax
1060 jnz .Lcopy_iobitmap 1061 jnz .Lcopy_iobitmap
1061 movl $(IOMAP_INVALOFF << 16),CPUVAR(IOBASE) 1062 movl $(IOMAP_INVALOFF << 16),CPUVAR(IOBASE)
1062.Liobitmap_done: 1063.Liobitmap_done:
1063 1064
1064 /* Is this process using RAS (restartable atomic sequences)? */ 1065 /* Is this process using RAS (restartable atomic sequences)? */
1065 movl L_PROC(%edi),%eax 1066 movl L_PROC(%edi),%eax
1066 cmpl $0,P_RASLIST(%eax) 1067 cmpl $0,P_RASLIST(%eax)
1067 jne 5f 1068 jne 5f
1068 1069
1069 /* 1070 /*
1070 * Restore cr0 (including FPU state). Raise the IPL to IPL_HIGH. 1071 * Restore cr0 (including FPU state). Raise the IPL to IPL_HIGH.
1071 * FPU IPIs can alter the LWP's saved cr0. Dropping the priority 1072 * FPU IPIs can alter the LWP's saved cr0. Dropping the priority
1072 * is deferred until mi_switch(), when cpu_switchto() returns. 1073 * is deferred until mi_switch(), when cpu_switchto() returns.
1073 */ 1074 */
10742: 10752:
1075#ifdef XEN 1076#ifdef XEN
1076 pushl %edi 1077 pushl %edi
1077 call _C_LABEL(i386_tls_switch) 1078 call _C_LABEL(i386_tls_switch)
1078 addl $4,%esp 1079 addl $4,%esp
1079#else /* !XEN */ 1080#else /* !XEN */
1080 movl $IPL_HIGH,CPUVAR(ILEVEL) 1081 movl $IPL_HIGH,CPUVAR(ILEVEL)
1081 movl PCB_CR0(%ebx),%ecx /* has CR0_TS clear */ 1082 movl PCB_CR0(%ebx),%ecx /* has CR0_TS clear */
1082 movl %cr0,%edx 1083 movl %cr0,%edx
1083 1084
1084 /* 1085 /*
1085 * If our floating point registers are on a different CPU, 1086 * If our floating point registers are on a different CPU,
1086 * set CR0_TS so we'll trap rather than reuse bogus state. 1087 * set CR0_TS so we'll trap rather than reuse bogus state.
1087 */ 1088 */
1088 cmpl CPUVAR(FPCURLWP),%edi 1089 cmpl CPUVAR(FPCURLWP),%edi
1089 je 3f 1090 je 3f
1090 orl $CR0_TS,%ecx 1091 orl $CR0_TS,%ecx
1091 1092
1092 /* Reloading CR0 is very expensive - avoid if possible. */ 1093 /* Reloading CR0 is very expensive - avoid if possible. */
10933: cmpl %edx,%ecx 10943: cmpl %edx,%ecx
1094 je 4f 1095 je 4f
1095 movl %ecx,%cr0 1096 movl %ecx,%cr0
1096#endif /* !XEN */ 1097#endif /* !XEN */
1097 1098
1098 /* Return to the new LWP, returning 'oldlwp' in %eax. */ 1099 /* Return to the new LWP, returning 'oldlwp' in %eax. */
10994: movl %esi,%eax 11004: movl %esi,%eax
1100 popl %edi 1101 popl %edi
1101 popl %esi 1102 popl %esi
1102 popl %ebx 1103 popl %ebx
1103 ret 1104 ret
1104 1105
1105 /* Check for restartable atomic sequences (RAS). */ 1106 /* Check for restartable atomic sequences (RAS). */
11065: movl L_MD_REGS(%edi),%ecx 11075: movl L_MD_REGS(%edi),%ecx
1107 pushl TF_EIP(%ecx) 1108 pushl TF_EIP(%ecx)
1108 pushl %eax 1109 pushl %eax
1109 call _C_LABEL(ras_lookup) 1110 call _C_LABEL(ras_lookup)
1110 addl $8,%esp 1111 addl $8,%esp
1111 cmpl $-1,%eax 1112 cmpl $-1,%eax
1112 je 2b 1113 je 2b
1113 movl L_MD_REGS(%edi),%ecx 1114 movl L_MD_REGS(%edi),%ecx
1114 movl %eax,TF_EIP(%ecx) 1115 movl %eax,TF_EIP(%ecx)
1115 jmp 2b 1116 jmp 2b
1116 1117
1117.Lcopy_iobitmap: 1118.Lcopy_iobitmap:
1118 /* Copy I/O bitmap. */ 1119 /* Copy I/O bitmap. */
1119 incl _C_LABEL(pmap_iobmp_evcnt)+EV_COUNT 1120 incl _C_LABEL(pmap_iobmp_evcnt)+EV_COUNT
1120 movl $(IOMAPSIZE/4),%ecx 1121 movl $(IOMAPSIZE/4),%ecx
1121 pushl %esi 1122 pushl %esi
1122 pushl %edi 1123 pushl %edi
1123 movl %eax,%esi /* pcb_iomap */ 1124 movl %eax,%esi /* pcb_iomap */
1124 movl CPUVAR(SELF),%edi 1125 movl CPUVAR(SELF),%edi
1125 leal CPU_INFO_IOMAP(%edi),%edi 1126 leal CPU_INFO_IOMAP(%edi),%edi
1126 rep 1127 rep
1127 movsl 1128 movsl
1128 popl %edi 1129 popl %edi
1129 popl %esi 1130 popl %esi
1130 movl $((CPU_INFO_IOMAP - CPU_INFO_TSS) << 16),CPUVAR(IOBASE) 1131 movl $((CPU_INFO_IOMAP - CPU_INFO_TSS) << 16),CPUVAR(IOBASE)
1131 jmp .Liobitmap_done 1132 jmp .Liobitmap_done
1132END(cpu_switchto) 1133END(cpu_switchto)
1133 1134
1134/* 1135/*
1135 * void savectx(struct pcb *pcb); 1136 * void savectx(struct pcb *pcb);
1136 * 1137 *
1137 * Update pcb, saving current processor state. 1138 * Update pcb, saving current processor state.
1138 */ 1139 */
1139ENTRY(savectx) 1140ENTRY(savectx)
1140	movl	4(%esp),%edx		# edx = pcb 1141	movl	4(%esp),%edx		# edx = pcb
	# Only the stack and frame pointers are captured here; the other
	# callee-saved registers are presumably preserved on the saved stack
	# by the caller's own prologue -- TODO confirm against callers.
1141	movl	%esp,PCB_ESP(%edx) 1142	movl	%esp,PCB_ESP(%edx)
1142	movl	%ebp,PCB_EBP(%edx) 1143	movl	%ebp,PCB_EBP(%edx)
1143	ret 1144	ret
1144END(savectx) 1145END(savectx)
1145 1146
1146/* 1147/*
1147 * osyscall() 1148 * osyscall()
1148 * 1149 *
1149 * Old call gate entry for syscall 1150 * Old call gate entry for syscall
1150 */ 1151 */
1151IDTVEC(osyscall) 1152IDTVEC(osyscall)
1152#ifndef XEN 1153#ifndef XEN
1153	/* XXX we are in trouble! interrupts be off here. */ 1154	/* XXX we are in trouble! interrupts be off here. */
1154	cli			# must be first instruction 1155	cli			# must be first instruction
1155#endif 1156#endif
	# Capture the current eflags into the frame slot at 8(%esp), then set
	# PSL_I there so interrupts come back on when we iret to user mode.
1156	pushfl			# set eflags in trap frame 1157	pushfl			# set eflags in trap frame
1157	popl	8(%esp) 1158	popl	8(%esp)
1158	orl	$PSL_I,8(%esp)	# re-enable ints on return to user 1159	orl	$PSL_I,8(%esp)	# re-enable ints on return to user
	# Call-gate entry is a 7-byte instruction (vs. 2 for int $0x80 below);
	# the size is pushed so a restarted syscall can back up %eip correctly.
1159	pushl	$7		# size of instruction for restart 1160	pushl	$7		# size of instruction for restart
1160	jmp	syscall1 1161	jmp	syscall1
1161IDTVEC_END(osyscall) 1162IDTVEC_END(osyscall)
1162 1163
1163/* 1164/*
1164 * syscall() 1165 * syscall()
1165 * 1166 *
1166 * Trap gate entry for syscall 1167 * Trap gate entry for syscall
1167 */ 1168 */
1168IDTVEC(syscall) 1169IDTVEC(syscall)
1169	pushl	$2		# size of instruction for restart 1170	pushl	$2		# size of instruction for restart
1170syscall1: 1171syscall1:
1171	pushl	$T_ASTFLT	# trap # for doing ASTs 1172	pushl	$T_ASTFLT	# trap # for doing ASTs
1172	INTRENTRY 1173	INTRENTRY
1173	STI(%eax) 1174	STI(%eax)
1174#ifdef DIAGNOSTIC 1175#ifdef DIAGNOSTIC
	# DIAGNOSTIC: a syscall must enter with the CPU's interrupt level at
	# zero; otherwise panic with the "SPL NOT ZERO" message at label 5.
1175	movl	CPUVAR(ILEVEL),%ebx 1176	movl	CPUVAR(ILEVEL),%ebx
1176	testl	%ebx,%ebx 1177	testl	%ebx,%ebx
1177	jz	1f 1178	jz	1f
1178	pushl	$5f 1179	pushl	$5f
1179	call	_C_LABEL(panic) 1180	call	_C_LABEL(panic)
1180	addl	$4,%esp 1181	addl	$4,%esp
1181#ifdef DDB 1182#ifdef DDB
1182	int	$3 1183	int	$3
1183#endif 1184#endif
11841: 11851:
1185#endif /* DIAGNOSTIC */ 1186#endif /* DIAGNOSTIC */
	# 64-bit per-CPU syscall counter: add to the low word, carry into the
	# high word.
1186	addl	$1,CPUVAR(NSYSCALL)	# count it atomically 1187	addl	$1,CPUVAR(NSYSCALL)	# count it atomically
1187	adcl	$0,CPUVAR(NSYSCALL)+4	# count it atomically 1188	adcl	$0,CPUVAR(NSYSCALL)+4	# count it atomically
	# %edi = curlwp for the rest of this path; dispatch through the
	# process's MD syscall handler with the trap frame as argument.
1188	movl	CPUVAR(CURLWP),%edi 1189	movl	CPUVAR(CURLWP),%edi
1189	movl	L_PROC(%edi),%edx 1190	movl	L_PROC(%edi),%edx
1190	movl	%esp,L_MD_REGS(%edi)	# save pointer to frame 1191	movl	%esp,L_MD_REGS(%edi)	# save pointer to frame
1191	pushl	%esp 1192	pushl	%esp
1192	call	*P_MD_SYSCALL(%edx)	# get pointer to syscall() function 1193	call	*P_MD_SYSCALL(%edx)	# get pointer to syscall() function
1193	addl	$4,%esp 1194	addl	$4,%esp
1194.Lsyscall_checkast: 1195.Lsyscall_checkast:
1195	/* Check for ASTs on exit to user mode. */ 1196	/* Check for ASTs on exit to user mode. */
1196	CLI(%eax) 1197	CLI(%eax)
	# With interrupts off, test both an LWP AST and a deferred pmap load
	# in one branch; either sends us to label 9 below.
1197	movl	L_MD_ASTPENDING(%edi), %eax 1198	movl	L_MD_ASTPENDING(%edi), %eax
1198	orl	CPUVAR(WANT_PMAPLOAD), %eax 1199	orl	CPUVAR(WANT_PMAPLOAD), %eax
1199	jnz	9f 1200	jnz	9f
1200#ifdef XEN 1201#ifdef XEN
1201	STIC(%eax) 1202	STIC(%eax)
1202	jz	14f 1203	jz	14f
1203	call	_C_LABEL(stipending) 1204	call	_C_LABEL(stipending)
1204	testl	%eax,%eax 1205	testl	%eax,%eax
1205	jz	14f 1206	jz	14f
1206	/* process pending interrupts */ 1207	/* process pending interrupts */
1207	CLI(%eax) 1208	CLI(%eax)
1208	movl	CPUVAR(ILEVEL), %ebx 1209	movl	CPUVAR(ILEVEL), %ebx
1209	movl	$.Lsyscall_resume, %esi # address to resume loop at 1210	movl	$.Lsyscall_resume, %esi # address to resume loop at
1210.Lsyscall_resume: 1211.Lsyscall_resume:
	# Pick the highest-priority pending interrupt not masked at the
	# current cpl (bsrl finds the highest set bit), clear its pending
	# bit, and jump to that source's resume handler.
1211	movl	%ebx,%eax		# get cpl 1212	movl	%ebx,%eax		# get cpl
1212	movl	CPUVAR(IUNMASK)(,%eax,4),%eax 1213	movl	CPUVAR(IUNMASK)(,%eax,4),%eax
1213	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left? 1214	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left?
1214	jz	17f 1215	jz	17f
1215	bsrl	%eax,%eax 1216	bsrl	%eax,%eax
1216	btrl	%eax,CPUVAR(IPENDING) 1217	btrl	%eax,CPUVAR(IPENDING)
1217	movl	CPUVAR(ISOURCES)(,%eax,4),%eax 1218	movl	CPUVAR(ISOURCES)(,%eax,4),%eax
1218	jmp	*IS_RESUME(%eax) 1219	jmp	*IS_RESUME(%eax)
121917:	movl	%ebx, CPUVAR(ILEVEL) #restore cpl 122017:	movl	%ebx, CPUVAR(ILEVEL) #restore cpl
1220	jmp	.Lsyscall_checkast 1221	jmp	.Lsyscall_checkast
122114: 122214:
1222#endif /* XEN */ 1223#endif /* XEN */
1223#ifndef DIAGNOSTIC 1224#ifndef DIAGNOSTIC
1224	INTRFASTEXIT 1225	INTRFASTEXIT
1225#else /* DIAGNOSTIC */ 1226#else /* DIAGNOSTIC */
	# DIAGNOSTIC: we must leave at IPL_NONE; panic (message at label 4),
	# then force the spl down and retry the exit path.
1226	cmpl	$IPL_NONE,CPUVAR(ILEVEL) 1227	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
1227	jne	3f 1228	jne	3f
1228	INTRFASTEXIT 1229	INTRFASTEXIT
12293:	STI(%eax) 12303:	STI(%eax)
1230	pushl	$4f 1231	pushl	$4f
1231	call	_C_LABEL(panic) 1232	call	_C_LABEL(panic)
1232	addl	$4,%esp 1233	addl	$4,%esp
1233	pushl	$IPL_NONE 1234	pushl	$IPL_NONE
1234	call	_C_LABEL(spllower) 1235	call	_C_LABEL(spllower)
1235	addl	$4,%esp 1236	addl	$4,%esp
1236	jmp	.Lsyscall_checkast 1237	jmp	.Lsyscall_checkast
12374:	.asciz	"SPL NOT LOWERED ON SYSCALL EXIT\n" 12384:	.asciz	"SPL NOT LOWERED ON SYSCALL EXIT\n"
12385:	.asciz	"SPL NOT ZERO ON SYSCALL ENTRY\n" 12395:	.asciz	"SPL NOT ZERO ON SYSCALL ENTRY\n"
1239#endif /* DIAGNOSTIC */ 1240#endif /* DIAGNOSTIC */
	# AST and/or deferred pmap load pending: service the pmap load first
	# (if requested), then deliver the AST via trap(), and in both cases
	# loop back to re-check before finally returning to user mode.
12409: 12419:
1241	cmpl	$0, CPUVAR(WANT_PMAPLOAD) 1242	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
1242	jz	10f 1243	jz	10f
1243	STI(%eax) 1244	STI(%eax)
1244	call	_C_LABEL(pmap_load) 1245	call	_C_LABEL(pmap_load)
1245	jmp	.Lsyscall_checkast	/* re-check ASTs */ 1246	jmp	.Lsyscall_checkast	/* re-check ASTs */
124610: 124710:
1247	/* Always returning to user mode here. */ 1248	/* Always returning to user mode here. */
1248	movl	$0, L_MD_ASTPENDING(%edi) 1249	movl	$0, L_MD_ASTPENDING(%edi)
1249	STI(%eax) 1250	STI(%eax)
1250	/* Pushed T_ASTFLT into tf_trapno on entry. */ 1251	/* Pushed T_ASTFLT into tf_trapno on entry. */
1251	pushl	%esp 1252	pushl	%esp
1252	call	_C_LABEL(trap) 1253	call	_C_LABEL(trap)
1253	addl	$4,%esp 1254	addl	$4,%esp
1254	jmp	.Lsyscall_checkast	/* re-check ASTs */ 1255	jmp	.Lsyscall_checkast	/* re-check ASTs */
1255IDTVEC_END(syscall) 1256IDTVEC_END(syscall)
1256 1257
	# SVR4-compatibility fast-trap entry.  The handler vector is called
	# under a reader hold of svr4_fasttrap_lock, then the usual
	# AST / deferred-switch checks run before returning to user mode.
1257IDTVEC(svr4_fasttrap) 1258IDTVEC(svr4_fasttrap)
1258	pushl	$2		# size of instruction for restart 1259	pushl	$2		# size of instruction for restart
1259	pushl	$T_ASTFLT	# trap # for doing ASTs 1260	pushl	$T_ASTFLT	# trap # for doing ASTs
1260	INTRENTRY 1261	INTRENTRY
1261	STI(%eax) 1262	STI(%eax)
1262	pushl	$RW_READER 1263	pushl	$RW_READER
1263	pushl	$_C_LABEL(svr4_fasttrap_lock) 1264	pushl	$_C_LABEL(svr4_fasttrap_lock)
1264	call	_C_LABEL(rw_enter) 1265	call	_C_LABEL(rw_enter)
1265	addl	$8,%esp 1266	addl	$8,%esp
1266	call	*_C_LABEL(svr4_fasttrap_vec) 1267	call	*_C_LABEL(svr4_fasttrap_vec)
1267	pushl	$_C_LABEL(svr4_fasttrap_lock) 1268	pushl	$_C_LABEL(svr4_fasttrap_lock)
1268	call	_C_LABEL(rw_exit) 1269	call	_C_LABEL(rw_exit)
1269	addl	$4,%esp 1270	addl	$4,%esp
12702:	/* Check for ASTs on exit to user mode. */ 12712:	/* Check for ASTs on exit to user mode. */
1271	cli 1272	cli
1272	CHECK_ASTPENDING(%eax)  1273	CHECK_ASTPENDING(%eax)
1273	je	1f 1274	je	1f
1274	/* Always returning to user mode here. */ 1275	/* Always returning to user mode here. */
1275	CLEAR_ASTPENDING(%eax) 1276	CLEAR_ASTPENDING(%eax)
1276	sti 1277	sti
1277	/* Pushed T_ASTFLT into tf_trapno on entry. */ 1278	/* Pushed T_ASTFLT into tf_trapno on entry. */
1278	pushl	%esp 1279	pushl	%esp
1279	call	_C_LABEL(trap) 1280	call	_C_LABEL(trap)
1280	addl	$4,%esp 1281	addl	$4,%esp
1281	jmp	2b 1282	jmp	2b
	# No AST: if a pmap switch was deferred, load it (with interrupts
	# briefly enabled) and re-check; otherwise return to user mode.
12821:	CHECK_DEFERRED_SWITCH 12831:	CHECK_DEFERRED_SWITCH
1283	jnz	9f 1284	jnz	9f
1284	INTRFASTEXIT 1285	INTRFASTEXIT
12859:	sti 12869:	sti
1286	call	_C_LABEL(pmap_load) 1287	call	_C_LABEL(pmap_load)
1287	cli 1288	cli
1288	jmp	2b 1289	jmp	2b
1289 1290
1290#if NNPX > 0 1291#if NNPX > 0
1291/* 1292/*
1292 * Special interrupt handlers. Someday intr0-intr15 will be used to count 1293 * Special interrupt handlers. Someday intr0-intr15 will be used to count
1293 * interrupts. We'll still need a special exception 16 handler. The busy 1294 * interrupts. We'll still need a special exception 16 handler. The busy
1294 * latch stuff in probintr() can be moved to npxprobe(). 1295 * latch stuff in probintr() can be moved to npxprobe().
1295 */ 1296 */
1296 1297
1297/* 1298/*
1298 * void probeintr(void) 1299 * void probeintr(void)
1299 */ 1300 */
	# Interrupt handler used while probing for the NPX (x87 FPU): count
	# the interrupt, EOI both 8259 PICs, and clear the FPU BUSY# latch.
	# The ss prefix forces a stack-segment-relative access since this may
	# run before normal segment state is established -- TODO confirm.
1300NENTRY(probeintr) 1301NENTRY(probeintr)
1301	ss 1302	ss
1302	incl	_C_LABEL(npx_intrs_while_probing) 1303	incl	_C_LABEL(npx_intrs_while_probing)
1303	pushl	%eax 1304	pushl	%eax
1304	movb	$0x20,%al	# EOI (asm in strings loses cpp features) 1305	movb	$0x20,%al	# EOI (asm in strings loses cpp features)
1305	outb	%al,$0xa0	# IO_ICU2 1306	outb	%al,$0xa0	# IO_ICU2
1306	outb	%al,$0x20	# IO_ICU1 1307	outb	%al,$0x20	# IO_ICU1
1307	movb	$0,%al 1308	movb	$0,%al
1308	outb	%al,$0xf0	# clear BUSY# latch 1309	outb	%al,$0xf0	# clear BUSY# latch
1309	popl	%eax 1310	popl	%eax
1310	iret 1311	iret
1311END(probeintr) 1312END(probeintr)
1312 1313
1313/* 1314/*
1314 * void probetrap(void) 1315 * void probetrap(void)
1315 */ 1316 */
	# Trap handler used while probing for the NPX: count the trap and
	# clear any pending x87 exception state with fnclex.
1316NENTRY(probetrap) 1317NENTRY(probetrap)
1317	ss 1318	ss
1318	incl	_C_LABEL(npx_traps_while_probing) 1319	incl	_C_LABEL(npx_traps_while_probing)
1319	fnclex 1320	fnclex
1320	iret 1321	iret
1321END(probetrap) 1322END(probetrap)
1322 1323
1323/* 1324/*
1324 * int npx586bug1(int a, int b) 1325 * int npx586bug1(int a, int b)
1325 */ 1326 */
	# Computes x - (x/y)*y in x87 floating point and returns it as an
	# int; a correct FPU yields 0.  The name and the "586" suggest this
	# detects the Pentium FDIV erratum -- see npxprobe/NPX code.
1326NENTRY(npx586bug1) 1327NENTRY(npx586bug1)
1327	fildl	4(%esp)		# x 1328	fildl	4(%esp)		# x
1328	fildl	8(%esp)		# y 1329	fildl	8(%esp)		# y
1329	fld	%st(1) 1330	fld	%st(1)
1330	fdiv	%st(1),%st	# x/y 1331	fdiv	%st(1),%st	# x/y
1331	fmulp	%st,%st(1)	# (x/y)*y 1332	fmulp	%st,%st(1)	# (x/y)*y
1332	fsubrp	%st,%st(1)	# x-(x/y)*y 1333	fsubrp	%st,%st(1)	# x-(x/y)*y
1333	pushl	$0 1334	pushl	$0
1334	fistpl	(%esp) 1335	fistpl	(%esp)
1335	popl	%eax 1336	popl	%eax
1336	ret 1337	ret
1337END(npx586bug1) 1338END(npx586bug1)
1338#endif /* NNPX > 0 */ 1339#endif /* NNPX > 0 */
1339 1340
1340/* 1341/*
1341 * void sse2_idlezero_page(void *pg) 1342 * void sse2_idlezero_page(void *pg)
1342 * 1343 *
1343 * Zero a page without polluting the cache.  Preemption must be 1344 * Zero a page without polluting the cache.  Preemption must be
1344 * disabled by the caller.  Abort if a preemption is pending. 1345 * disabled by the caller.  Abort if a preemption is pending.
1345 */ 1346 */
	# Returns 1 (%eax) if the whole page was zeroed, 0 if it aborted
	# early because RESCHED_KPREEMPT became pending.  Uses movnti
	# (non-temporal stores) in 32-byte chunks, with an sfence before
	# returning on either path to order the weakly-ordered stores.
1346ENTRY(sse2_idlezero_page) 1347ENTRY(sse2_idlezero_page)
1347	pushl	%ebp 1348	pushl	%ebp
1348	movl	%esp,%ebp 1349	movl	%esp,%ebp
1349	movl	8(%esp), %edx 1350	movl	8(%esp), %edx
1350	movl	$(PAGE_SIZE/32), %ecx 1351	movl	$(PAGE_SIZE/32), %ecx
1351	xorl	%eax, %eax 1352	xorl	%eax, %eax
1352	.align	16 1353	.align	16
13531: 13541:
	# Check for a pending kernel preemption once per 32-byte chunk.
1354	testl	$RESCHED_KPREEMPT, CPUVAR(RESCHED) 1355	testl	$RESCHED_KPREEMPT, CPUVAR(RESCHED)
1355	jnz	2f 1356	jnz	2f
1356	movnti	%eax, 0(%edx) 1357	movnti	%eax, 0(%edx)
1357	movnti	%eax, 4(%edx) 1358	movnti	%eax, 4(%edx)
1358	movnti	%eax, 8(%edx) 1359	movnti	%eax, 8(%edx)
1359	movnti	%eax, 12(%edx) 1360	movnti	%eax, 12(%edx)
1360	movnti	%eax, 16(%edx) 1361	movnti	%eax, 16(%edx)
1361	movnti	%eax, 20(%edx) 1362	movnti	%eax, 20(%edx)
1362	movnti	%eax, 24(%edx) 1363	movnti	%eax, 24(%edx)
1363	movnti	%eax, 28(%edx) 1364	movnti	%eax, 28(%edx)
1364	addl	$32, %edx 1365	addl	$32, %edx
1365	decl	%ecx 1366	decl	%ecx
1366	jnz	1b 1367	jnz	1b
1367	sfence 1368	sfence
1368	incl	%eax 1369	incl	%eax
	# NOTE(review): "pop" here vs "popl" on the abort path below --
	# equivalent encodings (operand size inferred); cosmetic only.
1369	pop	%ebp 1370	pop	%ebp
1370	ret 1371	ret
13712: 13722:
1372	sfence 1373	sfence
1373	popl	%ebp 1374	popl	%ebp
1374	ret 1375	ret
1375END(sse2_idlezero_page) 1376END(sse2_idlezero_page)