Tue Jun 7 14:53:03 2011 UTC ()
Don't call psignal() without holding proc_lock. This was the cause of
the reboot reported in PR port-xen/45028.
Now that Xen2 is gone, handle FPU context switches the same way as
amd64. This makes all tests in /usr/tests/lib/libc/ieeefp pass.


(bouyer)
diff -r1.702 -r1.703 src/sys/arch/i386/i386/machdep.c
diff -r1.139 -r1.140 src/sys/arch/i386/isa/npx.c
diff -r1.11 -r1.12 src/sys/arch/xen/include/i386/hypercalls.h

cvs diff -r1.702 -r1.703 src/sys/arch/i386/i386/machdep.c (switch to unified diff)

--- src/sys/arch/i386/i386/machdep.c 2011/04/26 15:51:23 1.702
+++ src/sys/arch/i386/i386/machdep.c 2011/06/07 14:53:03 1.703
@@ -1,1554 +1,1554 @@ @@ -1,1554 +1,1554 @@
1/* $NetBSD: machdep.c,v 1.702 2011/04/26 15:51:23 joerg Exp $ */ 1/* $NetBSD: machdep.c,v 1.703 2011/06/07 14:53:03 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009 4 * Copyright (c) 1996, 1997, 1998, 2000, 2004, 2006, 2008, 2009
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace 9 * by Charles M. Hannum, by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility NASA Ames Research Center, by Julio M. Merino Vidal, 10 * Simulation Facility NASA Ames Research Center, by Julio M. Merino Vidal,
11 * and by Andrew Doran. 11 * and by Andrew Doran.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
15 * are met: 15 * are met:
16 * 1. Redistributions of source code must retain the above copyright 16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer. 17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright 18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the 19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution. 20 * documentation and/or other materials provided with the distribution.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE. 32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35/*- 35/*-
36 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 36 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
37 * All rights reserved. 37 * All rights reserved.
38 * 38 *
39 * This code is derived from software contributed to Berkeley by 39 * This code is derived from software contributed to Berkeley by
40 * William Jolitz. 40 * William Jolitz.
41 * 41 *
42 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions 43 * modification, are permitted provided that the following conditions
44 * are met: 44 * are met:
45 * 1. Redistributions of source code must retain the above copyright 45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer. 46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright 47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the 48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution. 49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors 50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software 51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission. 52 * without specific prior written permission.
53 * 53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE. 64 * SUCH DAMAGE.
65 * 65 *
66 * @(#)machdep.c 7.4 (Berkeley) 6/3/91 66 * @(#)machdep.c 7.4 (Berkeley) 6/3/91
67 */ 67 */
68 68
69#include <sys/cdefs.h> 69#include <sys/cdefs.h>
70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.702 2011/04/26 15:51:23 joerg Exp $"); 70__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.703 2011/06/07 14:53:03 bouyer Exp $");
71 71
72#include "opt_beep.h" 72#include "opt_beep.h"
73#include "opt_compat_ibcs2.h" 73#include "opt_compat_ibcs2.h"
74#include "opt_compat_netbsd.h" 74#include "opt_compat_netbsd.h"
75#include "opt_compat_svr4.h" 75#include "opt_compat_svr4.h"
76#include "opt_cpureset_delay.h" 76#include "opt_cpureset_delay.h"
77#include "opt_ddb.h" 77#include "opt_ddb.h"
78#include "opt_ipkdb.h" 78#include "opt_ipkdb.h"
79#include "opt_kgdb.h" 79#include "opt_kgdb.h"
80#include "opt_mtrr.h" 80#include "opt_mtrr.h"
81#include "opt_modular.h" 81#include "opt_modular.h"
82#include "opt_multiboot.h" 82#include "opt_multiboot.h"
83#include "opt_multiprocessor.h" 83#include "opt_multiprocessor.h"
84#include "opt_physmem.h" 84#include "opt_physmem.h"
85#include "opt_realmem.h" 85#include "opt_realmem.h"
86#include "opt_user_ldt.h" 86#include "opt_user_ldt.h"
87#include "opt_vm86.h" 87#include "opt_vm86.h"
88#include "opt_xbox.h" 88#include "opt_xbox.h"
89#include "opt_xen.h" 89#include "opt_xen.h"
90#include "isa.h" 90#include "isa.h"
91#include "pci.h" 91#include "pci.h"
92 92
93 93
94#include <sys/param.h> 94#include <sys/param.h>
95#include <sys/systm.h> 95#include <sys/systm.h>
96#include <sys/signal.h> 96#include <sys/signal.h>
97#include <sys/signalvar.h> 97#include <sys/signalvar.h>
98#include <sys/kernel.h> 98#include <sys/kernel.h>
99#include <sys/cpu.h> 99#include <sys/cpu.h>
100#include <sys/exec.h> 100#include <sys/exec.h>
101#include <sys/reboot.h> 101#include <sys/reboot.h>
102#include <sys/conf.h> 102#include <sys/conf.h>
103#include <sys/malloc.h> 103#include <sys/malloc.h>
104#include <sys/mbuf.h> 104#include <sys/mbuf.h>
105#include <sys/msgbuf.h> 105#include <sys/msgbuf.h>
106#include <sys/mount.h> 106#include <sys/mount.h>
107#include <sys/syscallargs.h> 107#include <sys/syscallargs.h>
108#include <sys/core.h> 108#include <sys/core.h>
109#include <sys/kcore.h> 109#include <sys/kcore.h>
110#include <sys/ucontext.h> 110#include <sys/ucontext.h>
111#include <sys/ras.h> 111#include <sys/ras.h>
112#include <sys/sa.h> 112#include <sys/sa.h>
113#include <sys/savar.h> 113#include <sys/savar.h>
114#include <sys/ksyms.h> 114#include <sys/ksyms.h>
115#include <sys/device.h> 115#include <sys/device.h>
116 116
117#ifdef IPKDB 117#ifdef IPKDB
118#include <ipkdb/ipkdb.h> 118#include <ipkdb/ipkdb.h>
119#endif 119#endif
120 120
121#ifdef KGDB 121#ifdef KGDB
122#include <sys/kgdb.h> 122#include <sys/kgdb.h>
123#endif 123#endif
124 124
125#include <dev/cons.h> 125#include <dev/cons.h>
126 126
127#include <uvm/uvm.h> 127#include <uvm/uvm.h>
128#include <uvm/uvm_page.h> 128#include <uvm/uvm_page.h>
129 129
130#include <sys/sysctl.h> 130#include <sys/sysctl.h>
131 131
132#include <machine/cpu.h> 132#include <machine/cpu.h>
133#include <machine/cpufunc.h> 133#include <machine/cpufunc.h>
134#include <machine/cpuvar.h> 134#include <machine/cpuvar.h>
135#include <machine/gdt.h> 135#include <machine/gdt.h>
136#include <machine/intr.h> 136#include <machine/intr.h>
137#include <machine/kcore.h> 137#include <machine/kcore.h>
138#include <machine/pio.h> 138#include <machine/pio.h>
139#include <machine/psl.h> 139#include <machine/psl.h>
140#include <machine/reg.h> 140#include <machine/reg.h>
141#include <machine/specialreg.h> 141#include <machine/specialreg.h>
142#include <machine/bootinfo.h> 142#include <machine/bootinfo.h>
143#include <machine/mtrr.h> 143#include <machine/mtrr.h>
144#include <x86/x86/tsc.h> 144#include <x86/x86/tsc.h>
145 145
146#include <x86/machdep.h> 146#include <x86/machdep.h>
147 147
148#include <machine/multiboot.h> 148#include <machine/multiboot.h>
149#ifdef XEN 149#ifdef XEN
150#include <xen/evtchn.h> 150#include <xen/evtchn.h>
151#include <xen/xen.h> 151#include <xen/xen.h>
152#include <xen/hypervisor.h> 152#include <xen/hypervisor.h>
153 153
154/* #define XENDEBUG */ 154/* #define XENDEBUG */
155/* #define XENDEBUG_LOW */ 155/* #define XENDEBUG_LOW */
156 156
157#ifdef XENDEBUG 157#ifdef XENDEBUG
158#define XENPRINTF(x) printf x 158#define XENPRINTF(x) printf x
159#define XENPRINTK(x) printk x 159#define XENPRINTK(x) printk x
160#else 160#else
161#define XENPRINTF(x) 161#define XENPRINTF(x)
162#define XENPRINTK(x) 162#define XENPRINTK(x)
163#endif 163#endif
164#define PRINTK(x) printf x 164#define PRINTK(x) printf x
165#endif /* XEN */ 165#endif /* XEN */
166 166
167#include <dev/isa/isareg.h> 167#include <dev/isa/isareg.h>
168#include <machine/isa_machdep.h> 168#include <machine/isa_machdep.h>
169#include <dev/ic/i8042reg.h> 169#include <dev/ic/i8042reg.h>
170 170
171#ifdef DDB 171#ifdef DDB
172#include <machine/db_machdep.h> 172#include <machine/db_machdep.h>
173#include <ddb/db_extern.h> 173#include <ddb/db_extern.h>
174#endif 174#endif
175 175
176#ifdef VM86 176#ifdef VM86
177#include <machine/vm86.h> 177#include <machine/vm86.h>
178#endif 178#endif
179 179
180#ifdef XBOX 180#ifdef XBOX
181#include <machine/xbox.h> 181#include <machine/xbox.h>
182 182
183int arch_i386_is_xbox = 0; 183int arch_i386_is_xbox = 0;
184uint32_t arch_i386_xbox_memsize = 0; 184uint32_t arch_i386_xbox_memsize = 0;
185#endif 185#endif
186 186
187#include "acpica.h" 187#include "acpica.h"
188#include "apmbios.h" 188#include "apmbios.h"
189#include "bioscall.h" 189#include "bioscall.h"
190 190
191#if NBIOSCALL > 0 191#if NBIOSCALL > 0
192#include <machine/bioscall.h> 192#include <machine/bioscall.h>
193#endif 193#endif
194 194
195#if NACPICA > 0 195#if NACPICA > 0
196#include <dev/acpi/acpivar.h> 196#include <dev/acpi/acpivar.h>
197#define ACPI_MACHDEP_PRIVATE 197#define ACPI_MACHDEP_PRIVATE
198#include <machine/acpi_machdep.h> 198#include <machine/acpi_machdep.h>
199#endif 199#endif
200 200
201#if NAPMBIOS > 0 201#if NAPMBIOS > 0
202#include <machine/apmvar.h> 202#include <machine/apmvar.h>
203#endif 203#endif
204 204
205#include "isa.h" 205#include "isa.h"
206#include "isadma.h" 206#include "isadma.h"
207#include "npx.h" 207#include "npx.h"
208#include "ksyms.h" 208#include "ksyms.h"
209 209
210#include "cardbus.h" 210#include "cardbus.h"
211#if NCARDBUS > 0 211#if NCARDBUS > 0
212/* For rbus_min_start hint. */ 212/* For rbus_min_start hint. */
213#include <machine/bus.h> 213#include <machine/bus.h>
214#include <dev/cardbus/rbus.h> 214#include <dev/cardbus/rbus.h>
215#include <machine/rbus_machdep.h> 215#include <machine/rbus_machdep.h>
216#endif 216#endif
217 217
218#include "mca.h" 218#include "mca.h"
219#if NMCA > 0 219#if NMCA > 0
220#include <machine/mca_machdep.h> /* for mca_busprobe() */ 220#include <machine/mca_machdep.h> /* for mca_busprobe() */
221#endif 221#endif
222 222
223#ifdef MULTIPROCESSOR /* XXX */ 223#ifdef MULTIPROCESSOR /* XXX */
224#include <machine/mpbiosvar.h> /* XXX */ 224#include <machine/mpbiosvar.h> /* XXX */
225#endif /* XXX */ 225#endif /* XXX */
226 226
227/* the following is used externally (sysctl_hw) */ 227/* the following is used externally (sysctl_hw) */
228char machine[] = "i386"; /* CPU "architecture" */ 228char machine[] = "i386"; /* CPU "architecture" */
229char machine_arch[] = "i386"; /* machine == machine_arch */ 229char machine_arch[] = "i386"; /* machine == machine_arch */
230 230
231extern struct bi_devmatch *x86_alldisks; 231extern struct bi_devmatch *x86_alldisks;
232extern int x86_ndisks; 232extern int x86_ndisks;
233 233
234#ifdef CPURESET_DELAY 234#ifdef CPURESET_DELAY
235int cpureset_delay = CPURESET_DELAY; 235int cpureset_delay = CPURESET_DELAY;
236#else 236#else
237int cpureset_delay = 2000; /* default to 2s */ 237int cpureset_delay = 2000; /* default to 2s */
238#endif 238#endif
239 239
240#ifdef MTRR 240#ifdef MTRR
241struct mtrr_funcs *mtrr_funcs; 241struct mtrr_funcs *mtrr_funcs;
242#endif 242#endif
243 243
244int physmem; 244int physmem;
245 245
246int cpu_class; 246int cpu_class;
247int i386_fpu_present; 247int i386_fpu_present;
248int i386_fpu_exception; 248int i386_fpu_exception;
249int i386_fpu_fdivbug; 249int i386_fpu_fdivbug;
250 250
251int i386_use_fxsave; 251int i386_use_fxsave;
252int i386_use_pae = 0; 252int i386_use_pae = 0;
253int i386_has_sse; 253int i386_has_sse;
254int i386_has_sse2; 254int i386_has_sse2;
255 255
256vaddr_t msgbuf_vaddr; 256vaddr_t msgbuf_vaddr;
257struct { 257struct {
258 paddr_t paddr; 258 paddr_t paddr;
259 psize_t sz; 259 psize_t sz;
260} msgbuf_p_seg[VM_PHYSSEG_MAX]; 260} msgbuf_p_seg[VM_PHYSSEG_MAX];
261unsigned int msgbuf_p_cnt = 0; 261unsigned int msgbuf_p_cnt = 0;
262 262
263vaddr_t idt_vaddr; 263vaddr_t idt_vaddr;
264paddr_t idt_paddr; 264paddr_t idt_paddr;
265vaddr_t pentium_idt_vaddr; 265vaddr_t pentium_idt_vaddr;
266 266
267struct vm_map *phys_map = NULL; 267struct vm_map *phys_map = NULL;
268 268
269extern paddr_t avail_start, avail_end; 269extern paddr_t avail_start, avail_end;
270#ifdef XEN 270#ifdef XEN
271extern paddr_t pmap_pa_start, pmap_pa_end; 271extern paddr_t pmap_pa_start, pmap_pa_end;
272void hypervisor_callback(void); 272void hypervisor_callback(void);
273void failsafe_callback(void); 273void failsafe_callback(void);
274#endif 274#endif
275 275
276#ifdef XEN 276#ifdef XEN
277void (*delay_func)(unsigned int) = xen_delay; 277void (*delay_func)(unsigned int) = xen_delay;
278void (*initclock_func)(void) = xen_initclocks; 278void (*initclock_func)(void) = xen_initclocks;
279#else 279#else
280void (*delay_func)(unsigned int) = i8254_delay; 280void (*delay_func)(unsigned int) = i8254_delay;
281void (*initclock_func)(void) = i8254_initclocks; 281void (*initclock_func)(void) = i8254_initclocks;
282#endif 282#endif
283 283
284 284
285/* 285/*
286 * Size of memory segments, before any memory is stolen. 286 * Size of memory segments, before any memory is stolen.
287 */ 287 */
288phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; 288phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
289int mem_cluster_cnt = 0; 289int mem_cluster_cnt = 0;
290 290
291void init386(paddr_t); 291void init386(paddr_t);
292void initgdt(union descriptor *); 292void initgdt(union descriptor *);
293 293
294extern int time_adjusted; 294extern int time_adjusted;
295 295
296int *esym; 296int *esym;
297int *eblob; 297int *eblob;
298extern int boothowto; 298extern int boothowto;
299 299
300#ifndef XEN 300#ifndef XEN
301 301
302/* Base memory reported by BIOS. */ 302/* Base memory reported by BIOS. */
303#ifndef REALBASEMEM 303#ifndef REALBASEMEM
304int biosbasemem = 0; 304int biosbasemem = 0;
305#else 305#else
306int biosbasemem = REALBASEMEM; 306int biosbasemem = REALBASEMEM;
307#endif 307#endif
308 308
309/* Extended memory reported by BIOS. */ 309/* Extended memory reported by BIOS. */
310#ifndef REALEXTMEM 310#ifndef REALEXTMEM
311int biosextmem = 0; 311int biosextmem = 0;
312#else 312#else
313int biosextmem = REALEXTMEM; 313int biosextmem = REALEXTMEM;
314#endif 314#endif
315 315
316/* Set if any boot-loader set biosbasemem/biosextmem. */ 316/* Set if any boot-loader set biosbasemem/biosextmem. */
317int biosmem_implicit; 317int biosmem_implicit;
318 318
319/* Representation of the bootinfo structure constructed by a NetBSD native 319/* Representation of the bootinfo structure constructed by a NetBSD native
320 * boot loader. Only be used by native_loader(). */ 320 * boot loader. Only be used by native_loader(). */
321struct bootinfo_source { 321struct bootinfo_source {
322 uint32_t bs_naddrs; 322 uint32_t bs_naddrs;
323 void *bs_addrs[1]; /* Actually longer. */ 323 void *bs_addrs[1]; /* Actually longer. */
324}; 324};
325 325
326/* Only called by locore.h; no need to be in a header file. */ 326/* Only called by locore.h; no need to be in a header file. */
327void native_loader(int, int, struct bootinfo_source *, paddr_t, int, int); 327void native_loader(int, int, struct bootinfo_source *, paddr_t, int, int);
328 328
329/* 329/*
330 * Called as one of the very first things during system startup (just after 330 * Called as one of the very first things during system startup (just after
331 * the boot loader gave control to the kernel image), this routine is in 331 * the boot loader gave control to the kernel image), this routine is in
332 * charge of retrieving the parameters passed in by the boot loader and 332 * charge of retrieving the parameters passed in by the boot loader and
333 * storing them in the appropriate kernel variables. 333 * storing them in the appropriate kernel variables.
334 * 334 *
335 * WARNING: Because the kernel has not yet relocated itself to KERNBASE, 335 * WARNING: Because the kernel has not yet relocated itself to KERNBASE,
336 * special care has to be taken when accessing memory because absolute 336 * special care has to be taken when accessing memory because absolute
337 * addresses (referring to kernel symbols) do not work. So: 337 * addresses (referring to kernel symbols) do not work. So:
338 * 338 *
339 * 1) Avoid jumps to absolute addresses (such as gotos and switches). 339 * 1) Avoid jumps to absolute addresses (such as gotos and switches).
340 * 2) To access global variables use their physical address, which 340 * 2) To access global variables use their physical address, which
341 * can be obtained using the RELOC macro. 341 * can be obtained using the RELOC macro.
342 */ 342 */
343void 343void
344native_loader(int bl_boothowto, int bl_bootdev, 344native_loader(int bl_boothowto, int bl_bootdev,
345 struct bootinfo_source *bl_bootinfo, paddr_t bl_esym, 345 struct bootinfo_source *bl_bootinfo, paddr_t bl_esym,
346 int bl_biosextmem, int bl_biosbasemem) 346 int bl_biosextmem, int bl_biosbasemem)
347{ 347{
348#define RELOC(type, x) ((type)((vaddr_t)(x) - KERNBASE)) 348#define RELOC(type, x) ((type)((vaddr_t)(x) - KERNBASE))
349 349
350 *RELOC(int *, &boothowto) = bl_boothowto; 350 *RELOC(int *, &boothowto) = bl_boothowto;
351 351
352#ifdef COMPAT_OLDBOOT 352#ifdef COMPAT_OLDBOOT
353 /* 353 /*
354 * Pre-1.3 boot loaders gave the boot device as a parameter 354 * Pre-1.3 boot loaders gave the boot device as a parameter
355 * (instead of a bootinfo entry). 355 * (instead of a bootinfo entry).
356 */ 356 */
357 *RELOC(int *, &bootdev) = bl_bootdev; 357 *RELOC(int *, &bootdev) = bl_bootdev;
358#endif 358#endif
359 359
360 /* 360 /*
361 * The boot loader provides a physical, non-relocated address 361 * The boot loader provides a physical, non-relocated address
362 * for the symbols table's end. We need to convert it to a 362 * for the symbols table's end. We need to convert it to a
363 * virtual address. 363 * virtual address.
364 */ 364 */
365 if (bl_esym != 0) 365 if (bl_esym != 0)
366 *RELOC(int **, &esym) = (int *)((vaddr_t)bl_esym + KERNBASE); 366 *RELOC(int **, &esym) = (int *)((vaddr_t)bl_esym + KERNBASE);
367 else 367 else
368 *RELOC(int **, &esym) = 0; 368 *RELOC(int **, &esym) = 0;
369 369
370 /* 370 /*
371 * Copy bootinfo entries (if any) from the boot loader's 371 * Copy bootinfo entries (if any) from the boot loader's
372 * representation to the kernel's bootinfo space. 372 * representation to the kernel's bootinfo space.
373 */ 373 */
374 if (bl_bootinfo != NULL) { 374 if (bl_bootinfo != NULL) {
375 size_t i; 375 size_t i;
376 uint8_t *data; 376 uint8_t *data;
377 struct bootinfo *bidest; 377 struct bootinfo *bidest;
378 struct btinfo_modulelist *bi; 378 struct btinfo_modulelist *bi;
379 379
380 bidest = RELOC(struct bootinfo *, &bootinfo); 380 bidest = RELOC(struct bootinfo *, &bootinfo);
381 381
382 data = &bidest->bi_data[0]; 382 data = &bidest->bi_data[0];
383 383
384 for (i = 0; i < bl_bootinfo->bs_naddrs; i++) { 384 for (i = 0; i < bl_bootinfo->bs_naddrs; i++) {
385 struct btinfo_common *bc; 385 struct btinfo_common *bc;
386 386
387 bc = bl_bootinfo->bs_addrs[i]; 387 bc = bl_bootinfo->bs_addrs[i];
388 388
389 if ((data + bc->len) > 389 if ((data + bc->len) >
390 (&bidest->bi_data[0] + BOOTINFO_MAXSIZE)) 390 (&bidest->bi_data[0] + BOOTINFO_MAXSIZE))
391 break; 391 break;
392 392
393 memcpy(data, bc, bc->len); 393 memcpy(data, bc, bc->len);
394 /* 394 /*
395 * If any modules were loaded, record where they 395 * If any modules were loaded, record where they
396 * end. We'll need to skip over them. 396 * end. We'll need to skip over them.
397 */ 397 */
398 bi = (struct btinfo_modulelist *)data; 398 bi = (struct btinfo_modulelist *)data;
399 if (bi->common.type == BTINFO_MODULELIST) { 399 if (bi->common.type == BTINFO_MODULELIST) {
400 *RELOC(int **, &eblob) = 400 *RELOC(int **, &eblob) =
401 (int *)(bi->endpa + KERNBASE); 401 (int *)(bi->endpa + KERNBASE);
402 } 402 }
403 data += bc->len; 403 data += bc->len;
404 } 404 }
405 bidest->bi_nentries = i; 405 bidest->bi_nentries = i;
406 } 406 }
407 407
408 /* 408 /*
409 * Configure biosbasemem and biosextmem only if they were not 409 * Configure biosbasemem and biosextmem only if they were not
410 * explicitly given during the kernel's build. 410 * explicitly given during the kernel's build.
411 */ 411 */
412 if (*RELOC(int *, &biosbasemem) == 0) { 412 if (*RELOC(int *, &biosbasemem) == 0) {
413 *RELOC(int *, &biosbasemem) = bl_biosbasemem; 413 *RELOC(int *, &biosbasemem) = bl_biosbasemem;
414 *RELOC(int *, &biosmem_implicit) = 1; 414 *RELOC(int *, &biosmem_implicit) = 1;
415 } 415 }
416 if (*RELOC(int *, &biosextmem) == 0) { 416 if (*RELOC(int *, &biosextmem) == 0) {
417 *RELOC(int *, &biosextmem) = bl_biosextmem; 417 *RELOC(int *, &biosextmem) = bl_biosextmem;
418 *RELOC(int *, &biosmem_implicit) = 1; 418 *RELOC(int *, &biosmem_implicit) = 1;
419 } 419 }
420#undef RELOC 420#undef RELOC
421} 421}
422 422
423#endif /* XEN */ 423#endif /* XEN */
424 424
425/* 425/*
426 * Machine-dependent startup code 426 * Machine-dependent startup code
427 */ 427 */
428void 428void
429cpu_startup(void) 429cpu_startup(void)
430{ 430{
431 int x, y; 431 int x, y;
432 vaddr_t minaddr, maxaddr; 432 vaddr_t minaddr, maxaddr;
433 psize_t sz; 433 psize_t sz;
434 434
435 /* 435 /*
436 * For console drivers that require uvm and pmap to be initialized, 436 * For console drivers that require uvm and pmap to be initialized,
437 * we'll give them one more chance here... 437 * we'll give them one more chance here...
438 */ 438 */
439 consinit(); 439 consinit();
440 440
441#ifdef XBOX 441#ifdef XBOX
442 xbox_startup(); 442 xbox_startup();
443#endif 443#endif
444 444
445 /* 445 /*
446 * Initialize error message buffer (et end of core). 446 * Initialize error message buffer (et end of core).
447 */ 447 */
448 if (msgbuf_p_cnt == 0) 448 if (msgbuf_p_cnt == 0)
449 panic("msgbuf paddr map has not been set up"); 449 panic("msgbuf paddr map has not been set up");
450 for (x = 0, sz = 0; x < msgbuf_p_cnt; sz += msgbuf_p_seg[x++].sz) 450 for (x = 0, sz = 0; x < msgbuf_p_cnt; sz += msgbuf_p_seg[x++].sz)
451 continue; 451 continue;
452 msgbuf_vaddr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY); 452 msgbuf_vaddr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY);
453 if (msgbuf_vaddr == 0) 453 if (msgbuf_vaddr == 0)
454 panic("failed to valloc msgbuf_vaddr"); 454 panic("failed to valloc msgbuf_vaddr");
455 455
456 /* msgbuf_paddr was init'd in pmap */ 456 /* msgbuf_paddr was init'd in pmap */
457 for (y = 0, sz = 0; y < msgbuf_p_cnt; y++) { 457 for (y = 0, sz = 0; y < msgbuf_p_cnt; y++) {
458 for (x = 0; x < btoc(msgbuf_p_seg[y].sz); x++, sz += PAGE_SIZE) 458 for (x = 0; x < btoc(msgbuf_p_seg[y].sz); x++, sz += PAGE_SIZE)
459 pmap_kenter_pa((vaddr_t)msgbuf_vaddr + sz, 459 pmap_kenter_pa((vaddr_t)msgbuf_vaddr + sz,
460 msgbuf_p_seg[y].paddr + x * PAGE_SIZE, 460 msgbuf_p_seg[y].paddr + x * PAGE_SIZE,
461 VM_PROT_READ|VM_PROT_WRITE, 0); 461 VM_PROT_READ|VM_PROT_WRITE, 0);
462 } 462 }
463 pmap_update(pmap_kernel()); 463 pmap_update(pmap_kernel());
464 464
465 initmsgbuf((void *)msgbuf_vaddr, sz); 465 initmsgbuf((void *)msgbuf_vaddr, sz);
466 466
467#ifdef MULTIBOOT 467#ifdef MULTIBOOT
468 multiboot_print_info(); 468 multiboot_print_info();
469#endif 469#endif
470 470
471#ifdef TRAPLOG 471#ifdef TRAPLOG
472 /* 472 /*
473 * Enable recording of branch from/to in MSR's 473 * Enable recording of branch from/to in MSR's
474 */ 474 */
475 wrmsr(MSR_DEBUGCTLMSR, 0x1); 475 wrmsr(MSR_DEBUGCTLMSR, 0x1);
476#endif 476#endif
477 477
478#if NCARDBUS > 0 478#if NCARDBUS > 0
479 /* Tell RBUS how much RAM we have, so it can use heuristics. */ 479 /* Tell RBUS how much RAM we have, so it can use heuristics. */
480 rbus_min_start_hint(ctob((psize_t)physmem)); 480 rbus_min_start_hint(ctob((psize_t)physmem));
481#endif 481#endif
482 482
483 minaddr = 0; 483 minaddr = 0;
484 484
485 /* 485 /*
486 * Allocate a submap for physio 486 * Allocate a submap for physio
487 */ 487 */
488 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 488 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
489 VM_PHYS_SIZE, 0, false, NULL); 489 VM_PHYS_SIZE, 0, false, NULL);
490 490
491 /* Say hello. */ 491 /* Say hello. */
492 banner(); 492 banner();
493 493
494 /* Safe for i/o port / memory space allocation to use malloc now. */ 494 /* Safe for i/o port / memory space allocation to use malloc now. */
495#if NISA > 0 || NPCI > 0 495#if NISA > 0 || NPCI > 0
496 x86_bus_space_mallocok(); 496 x86_bus_space_mallocok();
497#endif 497#endif
498 498
499 gdt_init(); 499 gdt_init();
500 i386_proc0_tss_ldt_init(); 500 i386_proc0_tss_ldt_init();
501 501
502#ifndef XEN 502#ifndef XEN
503 cpu_init_tss(&cpu_info_primary); 503 cpu_init_tss(&cpu_info_primary);
504 ltr(cpu_info_primary.ci_tss_sel); 504 ltr(cpu_info_primary.ci_tss_sel);
505#endif 505#endif
506 506
507 x86_startup(); 507 x86_startup();
508} 508}
509 509
510/* 510/*
511 * Set up proc0's TSS and LDT. 511 * Set up proc0's TSS and LDT.
512 */ 512 */
513void 513void
514i386_proc0_tss_ldt_init(void) 514i386_proc0_tss_ldt_init(void)
515{ 515{
516 struct lwp *l = &lwp0; 516 struct lwp *l = &lwp0;
517 struct pcb *pcb = lwp_getpcb(l); 517 struct pcb *pcb = lwp_getpcb(l);
518 518
519 pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL); 519 pmap_kernel()->pm_ldt_sel = GSEL(GLDT_SEL, SEL_KPL);
520 pcb->pcb_cr0 = rcr0() & ~CR0_TS; 520 pcb->pcb_cr0 = rcr0() & ~CR0_TS;
521 pcb->pcb_esp0 = uvm_lwp_getuarea(l) + KSTACK_SIZE - 16; 521 pcb->pcb_esp0 = uvm_lwp_getuarea(l) + KSTACK_SIZE - 16;
522 pcb->pcb_iopl = SEL_KPL; 522 pcb->pcb_iopl = SEL_KPL;
523 l->l_md.md_regs = (struct trapframe *)pcb->pcb_esp0 - 1; 523 l->l_md.md_regs = (struct trapframe *)pcb->pcb_esp0 - 1;
524 memcpy(&pcb->pcb_fsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_fsd)); 524 memcpy(&pcb->pcb_fsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_fsd));
525 memcpy(&pcb->pcb_gsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_gsd)); 525 memcpy(&pcb->pcb_gsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_gsd));
526 526
527#ifndef XEN 527#ifndef XEN
528 lldt(pmap_kernel()->pm_ldt_sel); 528 lldt(pmap_kernel()->pm_ldt_sel);
529#else 529#else
530 HYPERVISOR_fpu_taskswitch(); 530 HYPERVISOR_fpu_taskswitch(1);
531 XENPRINTF(("lwp tss sp %p ss %04x/%04x\n", 531 XENPRINTF(("lwp tss sp %p ss %04x/%04x\n",
532 (void *)pcb->pcb_esp0, 532 (void *)pcb->pcb_esp0,
533 GSEL(GDATA_SEL, SEL_KPL), 533 GSEL(GDATA_SEL, SEL_KPL),
534 IDXSEL(GSEL(GDATA_SEL, SEL_KPL)))); 534 IDXSEL(GSEL(GDATA_SEL, SEL_KPL))));
535 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), pcb->pcb_esp0); 535 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), pcb->pcb_esp0);
536#endif 536#endif
537} 537}
538 538
#ifdef XEN
/*
 * Switch context:
 * - honor CR0_TS in saved CR0 and request DNA exception on FPU use
 * - switch stack pointer for user->kernel transition
 */
void
i386_switch_context(lwp_t *l)
{
	struct cpu_info *ci;
	struct pcb *pcb;
	struct physdev_op physop;

	pcb = lwp_getpcb(l);
	ci = curcpu();
	if (ci->ci_fpused) {
		/*
		 * The outgoing lwp used the FPU: ask the hypervisor to
		 * set TS again so the incoming lwp's first FPU access
		 * traps (DNA) and its state can be loaded lazily.
		 */
		HYPERVISOR_fpu_taskswitch(1);
		ci->ci_fpused = 0;
	}

	/* Tell Xen where the kernel stack for this lwp lives. */
	HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), pcb->pcb_esp0);

	/* Propagate the lwp's I/O privilege level via the physdev op. */
	physop.cmd = PHYSDEVOP_SET_IOPL;
	physop.u.set_iopl.iopl = pcb->pcb_iopl;
	HYPERVISOR_physdev_op(&physop);
}
#endif /* XEN */
566 566
#ifndef XEN
/*
 * Set up TSS and I/O bitmap.
 */
void
cpu_init_tss(struct cpu_info *ci)
{
	struct i386tss *tss = &ci->ci_tss;

	/* I/O bitmap offset past the TSS limit: no bitmap installed. */
	tss->tss_iobase = IOMAP_INVALOFF << 16;
	/* Kernel stack segment used on ring transitions. */
	tss->tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
	/* Record the page directory in use at init time. */
	tss->tss_cr3 = rcr3();
	/* Install the TSS in the GDT and remember its selector. */
	ci->ci_tss_sel = tss_alloc(tss);
}
#endif /* XEN */
583 583
584/* 584/*
585 * sysctl helper routine for machdep.booted_kernel 585 * sysctl helper routine for machdep.booted_kernel
586 */ 586 */
587static int 587static int
588sysctl_machdep_booted_kernel(SYSCTLFN_ARGS) 588sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
589{ 589{
590 struct btinfo_bootpath *bibp; 590 struct btinfo_bootpath *bibp;
591 struct sysctlnode node; 591 struct sysctlnode node;
592 592
593 bibp = lookup_bootinfo(BTINFO_BOOTPATH); 593 bibp = lookup_bootinfo(BTINFO_BOOTPATH);
594 if(!bibp) 594 if(!bibp)
595 return(ENOENT); /* ??? */ 595 return(ENOENT); /* ??? */
596 596
597 node = *rnode; 597 node = *rnode;
598 node.sysctl_data = bibp->bootpath; 598 node.sysctl_data = bibp->bootpath;
599 node.sysctl_size = sizeof(bibp->bootpath); 599 node.sysctl_size = sizeof(bibp->bootpath);
600 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 600 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
601} 601}
602 602
603/* 603/*
604 * sysctl helper routine for machdep.diskinfo 604 * sysctl helper routine for machdep.diskinfo
605 */ 605 */
606static int 606static int
607sysctl_machdep_diskinfo(SYSCTLFN_ARGS) 607sysctl_machdep_diskinfo(SYSCTLFN_ARGS)
608{ 608{
609 struct sysctlnode node; 609 struct sysctlnode node;
610 610
611 node = *rnode; 611 node = *rnode;
612 if (x86_alldisks == NULL) 612 if (x86_alldisks == NULL)
613 return(EOPNOTSUPP); 613 return(EOPNOTSUPP);
614 node.sysctl_data = x86_alldisks; 614 node.sysctl_data = x86_alldisks;
615 node.sysctl_size = sizeof(struct disklist) + 615 node.sysctl_size = sizeof(struct disklist) +
616 (x86_ndisks - 1) * sizeof(struct nativedisk_info); 616 (x86_ndisks - 1) * sizeof(struct nativedisk_info);
617 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 617 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
618} 618}
619 619
/*
 * machine dependent system variables.
 * Registers the CTL_MACHDEP sysctl subtree: console, BIOS memory
 * sizes (native only), boot path, disk info, FPU/SSE capability
 * flags, CPU brand, dump and TSC tunables, and PAE status.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{
	extern uint64_t tsc_freq;

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#ifndef XEN
	/* BIOS-reported memory sizes only exist on native hardware. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "biosbasemem", NULL,
		       NULL, 0, &biosbasemem, 0,
		       CTL_MACHDEP, CPU_BIOSBASEMEM, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "biosextmem", NULL,
		       NULL, 0, &biosextmem, 0,
		       CTL_MACHDEP, CPU_BIOSEXTMEM, CTL_EOL);
#endif /* XEN */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "diskinfo", NULL,
		       sysctl_machdep_diskinfo, 0, NULL, 0,
		       CTL_MACHDEP, CPU_DISKINFO, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL, 0, &i386_fpu_present, 0,
		       CTL_MACHDEP, CPU_FPU_PRESENT, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "osfxsr", NULL,
		       NULL, 0, &i386_use_fxsave, 0,
		       CTL_MACHDEP, CPU_OSFXSR, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "sse", NULL,
		       NULL, 0, &i386_has_sse, 0,
		       CTL_MACHDEP, CPU_SSE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "sse2", NULL,
		       NULL, 0, &i386_has_sse2, 0,
		       CTL_MACHDEP, CPU_SSE2, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "cpu_brand", NULL,
		       NULL, 0, &cpu_brand_string, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	/* Writable: operator may toggle sparse crash dumps at run time. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "sparse_dump", NULL,
		       NULL, 0, &sparse_dump, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_QUAD, "tsc_freq", NULL,
		       NULL, 0, &tsc_freq, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_INT, "pae",
		       SYSCTL_DESCR("Whether the kernel uses PAE"),
		       NULL, 0, &i386_use_pae, 0,
		       CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
702 702
703void * 703void *
704getframe(struct lwp *l, int sig, int *onstack) 704getframe(struct lwp *l, int sig, int *onstack)
705{ 705{
706 struct proc *p = l->l_proc; 706 struct proc *p = l->l_proc;
707 struct trapframe *tf = l->l_md.md_regs; 707 struct trapframe *tf = l->l_md.md_regs;
708 708
709 /* Do we need to jump onto the signal stack? */ 709 /* Do we need to jump onto the signal stack? */
710 *onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 710 *onstack = (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0
711 && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0; 711 && (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
712 if (*onstack) 712 if (*onstack)
713 return (char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size; 713 return (char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size;
714#ifdef VM86 714#ifdef VM86
715 if (tf->tf_eflags & PSL_VM) 715 if (tf->tf_eflags & PSL_VM)
716 return (void *)(tf->tf_esp + (tf->tf_ss << 4)); 716 return (void *)(tf->tf_esp + (tf->tf_ss << 4));
717 else 717 else
718#endif 718#endif
719 return (void *)tf->tf_esp; 719 return (void *)tf->tf_esp;
720} 720}
721 721
/*
 * Build context to run handler in.  We invoke the handler
 * directly, only returning via the trampoline.  Note the
 * trampoline version numbers are coordinated with machine-
 * dependent code in libc.
 */
void
buildcontext(struct lwp *l, int sel, void *catcher, void *fp)
{
	struct trapframe *tf = l->l_md.md_regs;

	/* Reset all segment registers to the standard user selectors. */
#ifndef XEN
	tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
#else
	tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
#endif
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eip = (int)catcher;	/* resume execution in the handler */
	tf->tf_cs = GSEL(sel, SEL_UPL);
	tf->tf_eflags &= ~PSL_CLEARSIG;
	tf->tf_esp = (int)fp;		/* signal frame becomes the stack top */
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);

	/* Ensure FP state is reset, if FP is used. */
	l->l_md.md_flags &= ~MDL_USEDFPU;
}
751 751
/*
 * Deliver a signal with siginfo: build the sigframe on the user stack
 * and redirect the lwp to the handler via the libc trampoline.
 * Called with the process lock held; the lock is dropped around the
 * copyout since it may fault.
 */
void
sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pmap *pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	/* Pick the code selector matching the highest executable address. */
	int sel = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
	    GUCODEBIG_SEL : GUCODE_SEL;
	struct sigacts *ps = p->p_sigacts;
	int onstack, error;
	int sig = ksi->ksi_signo;
	struct sigframe_siginfo *fp = getframe(l, sig, &onstack), frame;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct trapframe *tf = l->l_md.md_regs;

	KASSERT(mutex_owned(p->p_lock));

	fp--;	/* make room for the frame below the current stack top */

	/* "Return address" is the signal trampoline in libc. */
	frame.sf_ra = (int)ps->sa_sigdesc[sig].sd_tramp;
	frame.sf_signum = sig;
	frame.sf_sip = &fp->sf_si;
	frame.sf_ucp = &fp->sf_uc;
	frame.sf_si._info = ksi->ksi_info;
	frame.sf_uc.uc_flags = _UC_SIGMASK|_UC_VM;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_link = l->l_ctxlink;
	frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
	    ? _UC_SETSTACK : _UC_CLRSTACK;
	memset(&frame.sf_uc.uc_stack, 0, sizeof(frame.sf_uc.uc_stack));

	if (tf->tf_eflags & PSL_VM)
		(*p->p_emul->e_syscall_intern)(p);
	sendsig_reset(l, sig);

	/* Drop p_lock across the (possibly faulting) copyout. */
	mutex_exit(p->p_lock);
	cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
	error = copyout(&frame, fp, sizeof(frame));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	buildcontext(l, sel, catcher, fp);

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
}
807 807
/*
 * Arrange for the lwp to run a scheduler-activations upcall: copy the
 * saframe to the user stack and point the trapframe at the upcall
 * handler with fresh user segment registers.
 */
void
cpu_upcall(struct lwp *l, int type, int nevents, int ninterrupted, void *sas,
    void *ap, void *sp, sa_upcall_t upcall)
{
	struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
	struct saframe *sf, frame;
	struct trapframe *tf;

	tf = l->l_md.md_regs;

	/* Finally, copy out the rest of the frame. */
	frame.sa_type = type;
	frame.sa_sas = sas;
	frame.sa_events = nevents;
	frame.sa_interrupted = ninterrupted;
	frame.sa_arg = ap;
	frame.sa_ra = 0;	/* upcalls never return via this address */

	sf = (struct saframe *)sp - 1;
	if (copyout(&frame, sf, sizeof(frame)) != 0) {
		/* Copying onto the stack didn't work. Die. */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_eip = (int) upcall;
	tf->tf_esp = (int) sf;
	tf->tf_ebp = 0;	/* indicate call-frame-top to debuggers */
	tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
	/* Code selector depends on the highest executable address. */
	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
	    GSEL(GUCODEBIG_SEL, SEL_UPL) : GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_eflags &= ~(PSL_T|PSL_VM|PSL_AC);
}
845 845
846static void 846static void
847maybe_dump(int howto) 847maybe_dump(int howto)
848{ 848{
849 int s; 849 int s;
850 850
851 /* Disable interrupts. */ 851 /* Disable interrupts. */
852 s = splhigh(); 852 s = splhigh();
853 853
854 /* Do a dump if requested. */ 854 /* Do a dump if requested. */
855 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) 855 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
856 dumpsys(); 856 dumpsys();
857 857
858 splx(s); 858 splx(s);
859} 859}
860 860
/*
 * Machine-dependent reboot/halt/powerdown entry point.  Order matters:
 * dump (if asked), sync and unmount filesystems (unless panicking),
 * detach devices, then halt, power off or reset according to 'howto'.
 */
void
cpu_reboot(int howto, char *bootstr)
{
	static bool syncdone = false;	/* guard against recursive sync */
	struct lwp *l;
	int s;

	s = IPL_NONE;
	l = (curlwp == NULL) ? &lwp0 : curlwp;

	/* Too early in boot for any clean shutdown work: just halt. */
	if (cold) {
		howto |= RB_HALT;
		goto haltsys;
	}

	boothowto = howto;

	/* XXX used to dump after vfs_shutdown() and before
	 * detaching devices / shutdown hooks / pmf_system_shutdown().
	 */
	maybe_dump(howto);

	/*
	 * If we've panic'd, don't make the situation potentially
	 * worse by syncing or unmounting the file systems.
	 */
	if ((howto & RB_NOSYNC) == 0 && panicstr == NULL) {
		if (!syncdone) {
			syncdone = true;
			/* XXX used to force unmount as well, here */
			vfs_sync_all(l);
			/*
			 * If we've been adjusting the clock, the todr
			 * will be out of synch; adjust it now.
			 *
			 * XXX used to do this after unmounting all
			 * filesystems with vfs_shutdown().
			 */
			if (time_adjusted != 0)
				resettodr();
		}

		/* Retry until unmounts and device detach stop progressing. */
		while (vfs_unmountall1(l, false, false) ||
		       config_detach_all(boothowto) ||
		       vfs_unmount_forceone(l))
			;	/* do nothing */
	} else
		suspendsched();

	pmf_system_shutdown(boothowto);

	s = splhigh();
haltsys:

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
#ifdef XEN
		/* Under Xen the hypervisor handles the poweroff. */
		HYPERVISOR_shutdown();
		for (;;);
#endif
#ifdef XBOX
		if (arch_i386_is_xbox) {
			xbox_poweroff();
			for (;;);
		}
#endif
#if NACPICA > 0
		if (s != IPL_NONE)
			splx(s);

		acpi_enter_sleep_state(ACPI_STATE_S5);
#endif
#if NAPMBIOS > 0 && !defined(APM_NO_POWEROFF)
		/* turn off, if we can.  But try to turn disk off and
		 * wait a bit first--some disk drives are slow to clean up
		 * and users have reported disk corruption.
		 */
		delay(500000);
		apm_set_powstate(NULL, APM_DEV_DISK(APM_DEV_ALLUNITS),
		    APM_SYS_OFF);
		delay(500000);
		apm_set_powstate(NULL, APM_DEV_ALLDEVS, APM_SYS_OFF);
		printf("WARNING: APM powerdown failed!\n");
		/*
		 * RB_POWERDOWN implies RB_HALT... fall into it...
		 */
#endif
	}

#ifdef MULTIPROCESSOR
	/* Stop the other CPUs before halting/resetting this one. */
	x86_broadcast_ipi(X86_IPI_HALT);
#endif

	if (howto & RB_HALT) {
#if NACPICA > 0
		acpi_disable();
#endif

		printf("\n");
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");

#ifdef BEEP_ONHALT
		{
			int c;
			for (c = BEEP_ONHALT_COUNT; c > 0; c--) {
				sysbeep(BEEP_ONHALT_PITCH,
				    BEEP_ONHALT_PERIOD * hz / 1000);
				delay(BEEP_ONHALT_PERIOD * 1000);
				sysbeep(0, BEEP_ONHALT_PERIOD * hz / 1000);
				delay(BEEP_ONHALT_PERIOD * 1000);
			}
		}
#endif

		cnpollc(1);	/* for proper keyboard command handling */
		if (cngetc() == 0) {
			/* no console attached, so just hlt */
			for(;;) {
				x86_hlt();
			}
		}
		cnpollc(0);
	}

	printf("rebooting...\n");
	if (cpureset_delay > 0)
		delay(cpureset_delay * 1000);
	cpu_reset();
	for(;;) ;
	/*NOTREACHED*/
}
992 992
/*
 * Clear registers on exec
 * Resets the lwp's FPU state and trapframe so the new image starts
 * with a clean machine context at pack->ep_entry.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct pmap *pmap = vm_map_pmap(&l->l_proc->p_vmspace->vm_map);
	struct pcb *pcb = lwp_getpcb(l);
	struct trapframe *tf;

#if NNPX > 0
	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL) {
		npxsave_lwp(l, false);
	}
#endif

#ifdef USER_LDT
	/* Drop any per-process LDT inherited across the exec. */
	pmap_ldt_cleanup(l);
#endif

	l->l_md.md_flags &= ~MDL_USEDFPU;
	/* Install the default FPU control word (and MXCSR with fxsave). */
	if (i386_use_fxsave) {
		pcb->pcb_savefpu.sv_xmm.sv_env.en_cw = __NetBSD_NPXCW__;
		pcb->pcb_savefpu.sv_xmm.sv_env.en_mxcsr = __INITIAL_MXCSR__;
	} else
		pcb->pcb_savefpu.sv_87.sv_env.en_cw = __NetBSD_NPXCW__;
	memcpy(&pcb->pcb_fsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_fsd));
	memcpy(&pcb->pcb_gsd, &gdt[GUDATA_SEL], sizeof(pcb->pcb_gsd));

	tf = l->l_md.md_regs;
#ifndef XEN
	tf->tf_gs = GSEL(GUGS_SEL, SEL_UPL);
	tf->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
#else
	tf->tf_gs = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_fs = LSEL(LUDATA_SEL, SEL_UPL);
#endif
	tf->tf_es = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_ds = LSEL(LUDATA_SEL, SEL_UPL);
	tf->tf_edi = 0;
	tf->tf_esi = 0;
	tf->tf_ebp = 0;
	tf->tf_ebx = l->l_proc->p_psstrp;	/* ps_strings for csu code */
	tf->tf_edx = 0;
	tf->tf_ecx = 0;
	tf->tf_eax = 0;
	tf->tf_eip = pack->ep_entry;	/* start at the image entry point */
	/* Code selector depends on the highest executable address. */
	tf->tf_cs = pmap->pm_hiexec > I386_MAX_EXE_ADDR ?
	    LSEL(LUCODEBIG_SEL, SEL_UPL) : LSEL(LUCODE_SEL, SEL_UPL);
	tf->tf_eflags = PSL_USERSET;
	tf->tf_esp = stack;
	tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
}
1047 1047
/*
 * Initialize segments and descriptor tables
 */

/* Global and (default) local descriptor tables. */
union descriptor *gdt, *ldt;
/* Alias mapping of the IDT; loaded via lidt in cpu_init_idt(). */
union descriptor *pentium_idt;
extern vaddr_t lwp0uarea;	/* lwp0's u-area, set up early in boot */
1055 1055
1056void 1056void
1057setgate(struct gate_descriptor *gd, void *func, int args, int type, int dpl, 1057setgate(struct gate_descriptor *gd, void *func, int args, int type, int dpl,
1058 int sel) 1058 int sel)
1059{ 1059{
1060 1060
1061 gd->gd_looffset = (int)func; 1061 gd->gd_looffset = (int)func;
1062 gd->gd_selector = sel; 1062 gd->gd_selector = sel;
1063 gd->gd_stkcpy = args; 1063 gd->gd_stkcpy = args;
1064 gd->gd_xx = 0; 1064 gd->gd_xx = 0;
1065 gd->gd_type = type; 1065 gd->gd_type = type;
1066 gd->gd_dpl = dpl; 1066 gd->gd_dpl = dpl;
1067 gd->gd_p = 1; 1067 gd->gd_p = 1;
1068 gd->gd_hioffset = (int)func >> 16; 1068 gd->gd_hioffset = (int)func >> 16;
1069} 1069}
1070 1070
1071void 1071void
1072unsetgate(struct gate_descriptor *gd) 1072unsetgate(struct gate_descriptor *gd)
1073{ 1073{
1074 gd->gd_p = 0; 1074 gd->gd_p = 0;
1075 gd->gd_hioffset = 0; 1075 gd->gd_hioffset = 0;
1076 gd->gd_looffset = 0; 1076 gd->gd_looffset = 0;
1077 gd->gd_selector = 0; 1077 gd->gd_selector = 0;
1078 gd->gd_xx = 0; 1078 gd->gd_xx = 0;
1079 gd->gd_stkcpy = 0; 1079 gd->gd_stkcpy = 0;
1080 gd->gd_type = 0; 1080 gd->gd_type = 0;
1081 gd->gd_dpl = 0; 1081 gd->gd_dpl = 0;
1082} 1082}
1083 1083
1084 1084
1085void 1085void
1086setregion(struct region_descriptor *rd, void *base, size_t limit) 1086setregion(struct region_descriptor *rd, void *base, size_t limit)
1087{ 1087{
1088 1088
1089 rd->rd_limit = (int)limit; 1089 rd->rd_limit = (int)limit;
1090 rd->rd_base = (int)base; 1090 rd->rd_base = (int)base;
1091} 1091}
1092 1092
1093void 1093void
1094setsegment(struct segment_descriptor *sd, const void *base, size_t limit, 1094setsegment(struct segment_descriptor *sd, const void *base, size_t limit,
1095 int type, int dpl, int def32, int gran) 1095 int type, int dpl, int def32, int gran)
1096{ 1096{
1097 1097
1098 sd->sd_lolimit = (int)limit; 1098 sd->sd_lolimit = (int)limit;
1099 sd->sd_lobase = (int)base; 1099 sd->sd_lobase = (int)base;
1100 sd->sd_type = type; 1100 sd->sd_type = type;
1101 sd->sd_dpl = dpl; 1101 sd->sd_dpl = dpl;
1102 sd->sd_p = 1; 1102 sd->sd_p = 1;
1103 sd->sd_hilimit = (int)limit >> 16; 1103 sd->sd_hilimit = (int)limit >> 16;
1104 sd->sd_xx = 0; 1104 sd->sd_xx = 0;
1105 sd->sd_def32 = def32; 1105 sd->sd_def32 = def32;
1106 sd->sd_gran = gran; 1106 sd->sd_gran = gran;
1107 sd->sd_hibase = (int)base >> 24; 1107 sd->sd_hibase = (int)base >> 24;
1108} 1108}
1109 1109
/* Name of the assembly entry point for interrupt vector 'name'. */
#define IDTVEC(name) __CONCAT(X, name)
typedef void (vector)(void);
extern vector IDTVEC(syscall);
extern vector IDTVEC(osyscall);
extern vector *IDTVEC(exceptions)[];
extern vector IDTVEC(svr4_fasttrap);
/* Default SVR4 fast-trap handler: does nothing until one is installed. */
void (*svr4_fasttrap_vec)(void) = (void (*)(void))nullop;
krwlock_t svr4_fasttrap_lock;
#ifdef XEN
/* IDT expressed as a trap_info_t table handed to the hypervisor. */
#define MAX_XEN_IDT 128
trap_info_t xen_idt[MAX_XEN_IDT];
int xen_idt_idx;	/* number of entries filled in xen_idt */
#endif
1123 1123
1124#ifndef XEN 1124#ifndef XEN
1125void cpu_init_idt(void) 1125void cpu_init_idt(void)
1126{ 1126{
1127 struct region_descriptor region; 1127 struct region_descriptor region;
1128 setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1); 1128 setregion(&region, pentium_idt, NIDT * sizeof(idt[0]) - 1);
1129 lidt(&region); 1129 lidt(&region);
1130} 1130}
1131#endif /* !XEN */ 1131#endif /* !XEN */
1132 1132
1133void 1133void
1134initgdt(union descriptor *tgdt) 1134initgdt(union descriptor *tgdt)
1135{ 1135{
1136#ifdef XEN 1136#ifdef XEN
1137 u_long frames[16]; 1137 u_long frames[16];
1138#else 1138#else
1139 struct region_descriptor region; 1139 struct region_descriptor region;
1140 gdt = tgdt; 1140 gdt = tgdt;
1141 memset(gdt, 0, NGDT*sizeof(*gdt)); 1141 memset(gdt, 0, NGDT*sizeof(*gdt));
1142#endif /* XEN */ 1142#endif /* XEN */
1143 /* make gdt gates and memory segments */ 1143 /* make gdt gates and memory segments */
1144 setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1); 1144 setsegment(&gdt[GCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 1, 1);
1145 setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1); 1145 setsegment(&gdt[GDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 1);
1146 setsegment(&gdt[GUCODE_SEL].sd, 0, x86_btop(I386_MAX_EXE_ADDR) - 1, 1146 setsegment(&gdt[GUCODE_SEL].sd, 0, x86_btop(I386_MAX_EXE_ADDR) - 1,
1147 SDT_MEMERA, SEL_UPL, 1, 1); 1147 SDT_MEMERA, SEL_UPL, 1, 1);
1148 setsegment(&gdt[GUCODEBIG_SEL].sd, 0, 0xfffff, 1148 setsegment(&gdt[GUCODEBIG_SEL].sd, 0, 0xfffff,
1149 SDT_MEMERA, SEL_UPL, 1, 1); 1149 SDT_MEMERA, SEL_UPL, 1, 1);
1150 setsegment(&gdt[GUDATA_SEL].sd, 0, 0xfffff, 1150 setsegment(&gdt[GUDATA_SEL].sd, 0, 0xfffff,
1151 SDT_MEMRWA, SEL_UPL, 1, 1); 1151 SDT_MEMRWA, SEL_UPL, 1, 1);
1152#if NBIOSCALL > 0 1152#if NBIOSCALL > 0
1153 /* bios trampoline GDT entries */ 1153 /* bios trampoline GDT entries */
1154 setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 0, 1154 setsegment(&gdt[GBIOSCODE_SEL].sd, 0, 0xfffff, SDT_MEMERA, SEL_KPL, 0,
1155 0); 1155 0);
1156 setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 0, 1156 setsegment(&gdt[GBIOSDATA_SEL].sd, 0, 0xfffff, SDT_MEMRWA, SEL_KPL, 0,
1157 0); 1157 0);
1158#endif 1158#endif
1159 setsegment(&gdt[GCPU_SEL].sd, &cpu_info_primary, 0xfffff, 1159 setsegment(&gdt[GCPU_SEL].sd, &cpu_info_primary, 0xfffff,
1160 SDT_MEMRWA, SEL_KPL, 1, 1); 1160 SDT_MEMRWA, SEL_KPL, 1, 1);
1161 1161
1162#ifndef XEN 1162#ifndef XEN
1163 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1); 1163 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
1164 lgdt(&region); 1164 lgdt(&region);
1165#else /* !XEN */ 1165#else /* !XEN */
1166 frames[0] = xpmap_ptom((uint32_t)gdt - KERNBASE) >> PAGE_SHIFT; 1166 frames[0] = xpmap_ptom((uint32_t)gdt - KERNBASE) >> PAGE_SHIFT;
1167 pmap_kenter_pa((vaddr_t)gdt, (uint32_t)gdt - KERNBASE, VM_PROT_READ, 0); 1167 pmap_kenter_pa((vaddr_t)gdt, (uint32_t)gdt - KERNBASE, VM_PROT_READ, 0);
1168 XENPRINTK(("loading gdt %lx, %d entries\n", frames[0] << PAGE_SHIFT, 1168 XENPRINTK(("loading gdt %lx, %d entries\n", frames[0] << PAGE_SHIFT,
1169 NGDT)); 1169 NGDT));
1170 if (HYPERVISOR_set_gdt(frames, NGDT /* XXX is it right ? */)) 1170 if (HYPERVISOR_set_gdt(frames, NGDT /* XXX is it right ? */))
1171 panic("HYPERVISOR_set_gdt failed!\n"); 1171 panic("HYPERVISOR_set_gdt failed!\n");
1172 lgdt_finish(); 1172 lgdt_finish();
1173 1173
1174#endif /* !XEN */ 1174#endif /* !XEN */
1175} 1175}
1176 1176
1177static void 1177static void
1178init386_msgbuf(void) 1178init386_msgbuf(void)
1179{ 1179{
1180 /* Message buffer is located at end of core. */ 1180 /* Message buffer is located at end of core. */
1181 struct vm_physseg *vps; 1181 struct vm_physseg *vps;
1182 psize_t sz = round_page(MSGBUFSIZE); 1182 psize_t sz = round_page(MSGBUFSIZE);
1183 psize_t reqsz = sz; 1183 psize_t reqsz = sz;
1184 unsigned int x; 1184 unsigned int x;
1185 1185
1186 search_again: 1186 search_again:
1187 vps = NULL; 1187 vps = NULL;
1188 for (x = 0; x < vm_nphysseg; ++x) { 1188 for (x = 0; x < vm_nphysseg; ++x) {
1189 vps = VM_PHYSMEM_PTR(x); 1189 vps = VM_PHYSMEM_PTR(x);
1190 if (ctob(vps->avail_end) == avail_end) { 1190 if (ctob(vps->avail_end) == avail_end) {
1191 break; 1191 break;
1192 } 1192 }
1193 } 1193 }
1194 if (x == vm_nphysseg) 1194 if (x == vm_nphysseg)
1195 panic("init386: can't find end of memory"); 1195 panic("init386: can't find end of memory");
1196 1196
1197 /* Shrink so it'll fit in the last segment. */ 1197 /* Shrink so it'll fit in the last segment. */
1198 if (vps->avail_end - vps->avail_start < atop(sz)) 1198 if (vps->avail_end - vps->avail_start < atop(sz))
1199 sz = ctob(vps->avail_end - vps->avail_start); 1199 sz = ctob(vps->avail_end - vps->avail_start);
1200 1200
1201 vps->avail_end -= atop(sz); 1201 vps->avail_end -= atop(sz);
1202 vps->end -= atop(sz); 1202 vps->end -= atop(sz);
1203 msgbuf_p_seg[msgbuf_p_cnt].sz = sz; 1203 msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
1204 msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end); 1204 msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
1205 1205
1206 /* Remove the last segment if it now has no pages. */ 1206 /* Remove the last segment if it now has no pages. */
1207 if (vps->start == vps->end) { 1207 if (vps->start == vps->end) {
1208 for (--vm_nphysseg; x < vm_nphysseg; x++) 1208 for (--vm_nphysseg; x < vm_nphysseg; x++)
1209 VM_PHYSMEM_PTR_SWAP(x, x + 1); 1209 VM_PHYSMEM_PTR_SWAP(x, x + 1);
1210 } 1210 }
1211 1211
1212 /* Now find where the new avail_end is. */ 1212 /* Now find where the new avail_end is. */
1213 for (avail_end = 0, x = 0; x < vm_nphysseg; x++) 1213 for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
1214 if (VM_PHYSMEM_PTR(x)->avail_end > avail_end) 1214 if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
1215 avail_end = VM_PHYSMEM_PTR(x)->avail_end; 1215 avail_end = VM_PHYSMEM_PTR(x)->avail_end;
1216 avail_end = ctob(avail_end); 1216 avail_end = ctob(avail_end);
1217 1217
1218 if (sz == reqsz) 1218 if (sz == reqsz)
1219 return; 1219 return;
1220 1220
1221 reqsz -= sz; 1221 reqsz -= sz;
1222 if (msgbuf_p_cnt == VM_PHYSSEG_MAX) { 1222 if (msgbuf_p_cnt == VM_PHYSSEG_MAX) {
1223 /* No more segments available, bail out. */ 1223 /* No more segments available, bail out. */
1224 printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n", 1224 printf("WARNING: MSGBUFSIZE (%zu) too large, using %zu.\n",
1225 (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz)); 1225 (size_t)MSGBUFSIZE, (size_t)(MSGBUFSIZE - reqsz));
1226 return; 1226 return;
1227 } 1227 }
1228 1228
1229 sz = reqsz; 1229 sz = reqsz;
1230 goto search_again; 1230 goto search_again;
1231} 1231}
1232 1232
1233#ifndef XEN 1233#ifndef XEN
1234static void 1234static void
1235init386_pte0(void) 1235init386_pte0(void)
1236{ 1236{
1237 paddr_t paddr; 1237 paddr_t paddr;
1238 vaddr_t vaddr; 1238 vaddr_t vaddr;
1239 1239
1240 paddr = 4 * PAGE_SIZE; 1240 paddr = 4 * PAGE_SIZE;
1241 vaddr = (vaddr_t)vtopte(0); 1241 vaddr = (vaddr_t)vtopte(0);
1242 pmap_kenter_pa(vaddr, paddr, VM_PROT_ALL, 0); 1242 pmap_kenter_pa(vaddr, paddr, VM_PROT_ALL, 0);
1243 pmap_update(pmap_kernel()); 1243 pmap_update(pmap_kernel());
1244 /* make sure it is clean before using */ 1244 /* make sure it is clean before using */
1245 memset((void *)vaddr, 0, PAGE_SIZE); 1245 memset((void *)vaddr, 0, PAGE_SIZE);
1246} 1246}
1247#endif /* !XEN */ 1247#endif /* !XEN */
1248 1248
1249static void 1249static void
1250init386_ksyms(void) 1250init386_ksyms(void)
1251{ 1251{
1252#if NKSYMS || defined(DDB) || defined(MODULAR) 1252#if NKSYMS || defined(DDB) || defined(MODULAR)
1253 extern int end; 1253 extern int end;
1254 struct btinfo_symtab *symtab; 1254 struct btinfo_symtab *symtab;
1255 1255
1256#ifdef DDB 1256#ifdef DDB
1257 db_machine_init(); 1257 db_machine_init();
1258#endif 1258#endif
1259 1259
1260#if defined(MULTIBOOT) 1260#if defined(MULTIBOOT)
1261 if (multiboot_ksyms_addsyms_elf()) 1261 if (multiboot_ksyms_addsyms_elf())
1262 return; 1262 return;
1263#endif 1263#endif
1264 1264
1265 if ((symtab = lookup_bootinfo(BTINFO_SYMTAB)) == NULL) { 1265 if ((symtab = lookup_bootinfo(BTINFO_SYMTAB)) == NULL) {
1266 ksyms_addsyms_elf(*(int *)&end, ((int *)&end) + 1, esym); 1266 ksyms_addsyms_elf(*(int *)&end, ((int *)&end) + 1, esym);
1267 return; 1267 return;
1268 } 1268 }
1269 1269
1270 symtab->ssym += KERNBASE; 1270 symtab->ssym += KERNBASE;
1271 symtab->esym += KERNBASE; 1271 symtab->esym += KERNBASE;
1272 ksyms_addsyms_elf(symtab->nsym, (int *)symtab->ssym, (int *)symtab->esym); 1272 ksyms_addsyms_elf(symtab->nsym, (int *)symtab->ssym, (int *)symtab->esym);
1273#endif 1273#endif
1274} 1274}
1275 1275
1276void 1276void
1277init386(paddr_t first_avail) 1277init386(paddr_t first_avail)
1278{ 1278{
1279 extern void consinit(void); 1279 extern void consinit(void);
1280 struct pcb *pcb; 1280 struct pcb *pcb;
1281 int x; 1281 int x;
1282#ifndef XEN 1282#ifndef XEN
1283 union descriptor *tgdt; 1283 union descriptor *tgdt;
1284 extern struct extent *iomem_ex; 1284 extern struct extent *iomem_ex;
1285 struct region_descriptor region; 1285 struct region_descriptor region;
1286 struct btinfo_memmap *bim; 1286 struct btinfo_memmap *bim;
1287#endif 1287#endif
1288#if NBIOSCALL > 0 1288#if NBIOSCALL > 0
1289 extern int biostramp_image_size; 1289 extern int biostramp_image_size;
1290 extern u_char biostramp_image[]; 1290 extern u_char biostramp_image[];
1291#endif 1291#endif
1292 1292
1293#ifdef XEN 1293#ifdef XEN
1294 XENPRINTK(("HYPERVISOR_shared_info %p (%x)\n", HYPERVISOR_shared_info, 1294 XENPRINTK(("HYPERVISOR_shared_info %p (%x)\n", HYPERVISOR_shared_info,
1295 xen_start_info.shared_info)); 1295 xen_start_info.shared_info));
1296 KASSERT(HYPERVISOR_shared_info != NULL); 1296 KASSERT(HYPERVISOR_shared_info != NULL);
1297 cpu_info_primary.ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[0]; 1297 cpu_info_primary.ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[0];
1298#endif 1298#endif
1299 cpu_probe(&cpu_info_primary); 1299 cpu_probe(&cpu_info_primary);
1300 1300
1301 uvm_lwp_setuarea(&lwp0, lwp0uarea); 1301 uvm_lwp_setuarea(&lwp0, lwp0uarea);
1302 pcb = lwp_getpcb(&lwp0); 1302 pcb = lwp_getpcb(&lwp0);
1303 1303
1304 cpu_feature[0] &= ~CPUID_FEAT_BLACKLIST; 1304 cpu_feature[0] &= ~CPUID_FEAT_BLACKLIST;
1305 1305
1306 cpu_init_msrs(&cpu_info_primary, true); 1306 cpu_init_msrs(&cpu_info_primary, true);
1307 1307
1308#ifdef PAE 1308#ifdef PAE
1309 i386_use_pae = 1; 1309 i386_use_pae = 1;
1310#endif 1310#endif
1311 1311
1312#ifdef XEN 1312#ifdef XEN
1313 pcb->pcb_cr3 = PDPpaddr; 1313 pcb->pcb_cr3 = PDPpaddr;
1314 __PRINTK(("pcb_cr3 0x%lx cr3 0x%lx\n", 1314 __PRINTK(("pcb_cr3 0x%lx cr3 0x%lx\n",
1315 PDPpaddr, xpmap_ptom(PDPpaddr))); 1315 PDPpaddr, xpmap_ptom(PDPpaddr)));
1316 XENPRINTK(("lwp0uarea %p first_avail %p\n", 1316 XENPRINTK(("lwp0uarea %p first_avail %p\n",
1317 lwp0uarea, (void *)(long)first_avail)); 1317 lwp0uarea, (void *)(long)first_avail));
1318 XENPRINTK(("ptdpaddr %p atdevbase %p\n", (void *)PDPpaddr, 1318 XENPRINTK(("ptdpaddr %p atdevbase %p\n", (void *)PDPpaddr,
1319 (void *)atdevbase)); 1319 (void *)atdevbase));
1320#endif 1320#endif
1321 1321
1322#if defined(PAE) && !defined(XEN) 1322#if defined(PAE) && !defined(XEN)
1323 /* 1323 /*
1324 * Save VA and PA of L3 PD of boot processor (for Xen, this is done 1324 * Save VA and PA of L3 PD of boot processor (for Xen, this is done
1325 * in xen_pmap_bootstrap()) 1325 * in xen_pmap_bootstrap())
1326 */ 1326 */
1327 cpu_info_primary.ci_pae_l3_pdirpa = rcr3(); 1327 cpu_info_primary.ci_pae_l3_pdirpa = rcr3();
1328 cpu_info_primary.ci_pae_l3_pdir = (pd_entry_t *)(rcr3() + KERNBASE); 1328 cpu_info_primary.ci_pae_l3_pdir = (pd_entry_t *)(rcr3() + KERNBASE);
1329#endif /* PAE && !XEN */ 1329#endif /* PAE && !XEN */
1330 1330
1331#ifdef XBOX 1331#ifdef XBOX
1332 /* 1332 /*
1333 * From Rink Springer @ FreeBSD: 1333 * From Rink Springer @ FreeBSD:
1334 * 1334 *
1335 * The following code queries the PCI ID of 0:0:0. For the XBOX, 1335 * The following code queries the PCI ID of 0:0:0. For the XBOX,
1336 * This should be 0x10de / 0x02a5. 1336 * This should be 0x10de / 0x02a5.
1337 * 1337 *
1338 * This is exactly what Linux does. 1338 * This is exactly what Linux does.
1339 */ 1339 */
1340 outl(0xcf8, 0x80000000); 1340 outl(0xcf8, 0x80000000);
1341 if (inl(0xcfc) == 0x02a510de) { 1341 if (inl(0xcfc) == 0x02a510de) {
1342 arch_i386_is_xbox = 1; 1342 arch_i386_is_xbox = 1;
1343 xbox_lcd_init(); 1343 xbox_lcd_init();
1344 xbox_lcd_writetext("NetBSD/i386 "); 1344 xbox_lcd_writetext("NetBSD/i386 ");
1345 1345
1346 /* 1346 /*
1347 * We are an XBOX, but we may have either 64MB or 128MB of 1347 * We are an XBOX, but we may have either 64MB or 128MB of
1348 * memory. The PCI host bridge should be programmed for this, 1348 * memory. The PCI host bridge should be programmed for this,
1349 * so we just query it.  1349 * so we just query it.
1350 */ 1350 */
1351 outl(0xcf8, 0x80000084); 1351 outl(0xcf8, 0x80000084);
1352 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64; 1352 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
1353 } 1353 }
1354#endif /* XBOX */ 1354#endif /* XBOX */
1355 1355
1356#if NISA > 0 || NPCI > 0 1356#if NISA > 0 || NPCI > 0
1357 x86_bus_space_init(); 1357 x86_bus_space_init();
1358#endif 1358#endif
1359#ifdef XEN 1359#ifdef XEN
1360 xen_parse_cmdline(XEN_PARSE_BOOTFLAGS, NULL); 1360 xen_parse_cmdline(XEN_PARSE_BOOTFLAGS, NULL);
1361#endif 1361#endif
1362 1362
1363 /* 1363 /*
1364 * Initialize PAGE_SIZE-dependent variables. 1364 * Initialize PAGE_SIZE-dependent variables.
1365 */ 1365 */
1366 uvm_setpagesize(); 1366 uvm_setpagesize();
1367 1367
1368 /* 1368 /*
1369 * Saving SSE registers won't work if the save area isn't 1369 * Saving SSE registers won't work if the save area isn't
1370 * 16-byte aligned. 1370 * 16-byte aligned.
1371 */ 1371 */
1372 KASSERT((offsetof(struct pcb, pcb_savefpu) & 0xf) == 0); 1372 KASSERT((offsetof(struct pcb, pcb_savefpu) & 0xf) == 0);
1373 1373
1374 /* 1374 /*
1375 * Start with 2 color bins -- this is just a guess to get us 1375 * Start with 2 color bins -- this is just a guess to get us
1376 * started. We'll recolor when we determine the largest cache 1376 * started. We'll recolor when we determine the largest cache
1377 * sizes on the system. 1377 * sizes on the system.
1378 */ 1378 */
1379 uvmexp.ncolors = 2; 1379 uvmexp.ncolors = 2;
1380 1380
1381#ifndef XEN 1381#ifndef XEN
1382 /* 1382 /*
1383 * Low memory reservations: 1383 * Low memory reservations:
1384 * Page 0: BIOS data 1384 * Page 0: BIOS data
1385 * Page 1: BIOS callback 1385 * Page 1: BIOS callback
1386 * Page 2: MP bootstrap 1386 * Page 2: MP bootstrap
1387 * Page 3: ACPI wakeup code 1387 * Page 3: ACPI wakeup code
1388 * Page 4: Temporary page table for 0MB-4MB 1388 * Page 4: Temporary page table for 0MB-4MB
1389 * Page 5: Temporary page directory 1389 * Page 5: Temporary page directory
1390 */ 1390 */
1391 avail_start = 6 * PAGE_SIZE; 1391 avail_start = 6 * PAGE_SIZE;
1392#else /* !XEN */ 1392#else /* !XEN */
1393 /* steal one page for gdt */ 1393 /* steal one page for gdt */
1394 gdt = (void *)((u_long)first_avail + KERNBASE); 1394 gdt = (void *)((u_long)first_avail + KERNBASE);
1395 first_avail += PAGE_SIZE; 1395 first_avail += PAGE_SIZE;
1396 /* Make sure the end of the space used by the kernel is rounded. */ 1396 /* Make sure the end of the space used by the kernel is rounded. */
1397 first_avail = round_page(first_avail); 1397 first_avail = round_page(first_avail);
1398 avail_start = first_avail; 1398 avail_start = first_avail;
1399 avail_end = ctob(xen_start_info.nr_pages) + XPMAP_OFFSET; 1399 avail_end = ctob(xen_start_info.nr_pages) + XPMAP_OFFSET;
1400 pmap_pa_start = (KERNTEXTOFF - KERNBASE); 1400 pmap_pa_start = (KERNTEXTOFF - KERNBASE);
1401 pmap_pa_end = avail_end; 1401 pmap_pa_end = avail_end;
1402 mem_clusters[0].start = avail_start; 1402 mem_clusters[0].start = avail_start;
1403 mem_clusters[0].size = avail_end - avail_start; 1403 mem_clusters[0].size = avail_end - avail_start;
1404 mem_cluster_cnt++; 1404 mem_cluster_cnt++;
1405 physmem += xen_start_info.nr_pages; 1405 physmem += xen_start_info.nr_pages;
1406 uvmexp.wired += atop(avail_start); 1406 uvmexp.wired += atop(avail_start);
1407 /* 1407 /*
1408 * initgdt() has to be done before consinit(), so that %fs is properly 1408 * initgdt() has to be done before consinit(), so that %fs is properly
1409 * initialised. initgdt() uses pmap_kenter_pa so it can't be called 1409 * initialised. initgdt() uses pmap_kenter_pa so it can't be called
1410 * before the above variables are set. 1410 * before the above variables are set.
1411 */ 1411 */
1412 initgdt(NULL); 1412 initgdt(NULL);
1413#endif /* XEN */ 1413#endif /* XEN */
1414 consinit(); /* XXX SHOULD NOT BE DONE HERE */ 1414 consinit(); /* XXX SHOULD NOT BE DONE HERE */
1415 1415
1416#ifdef DEBUG_MEMLOAD 1416#ifdef DEBUG_MEMLOAD
1417 printf("mem_cluster_count: %d\n", mem_cluster_cnt); 1417 printf("mem_cluster_count: %d\n", mem_cluster_cnt);
1418#endif 1418#endif
1419 1419
1420 /* 1420 /*
1421 * Call pmap initialization to make new kernel address space. 1421 * Call pmap initialization to make new kernel address space.
1422 * We must do this before loading pages into the VM system. 1422 * We must do this before loading pages into the VM system.
1423 */ 1423 */
1424 pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE); 1424 pmap_bootstrap((vaddr_t)atdevbase + IOM_SIZE);
1425 1425
1426#ifndef XEN 1426#ifndef XEN
1427 /* 1427 /*
1428 * Check to see if we have a memory map from the BIOS (passed 1428 * Check to see if we have a memory map from the BIOS (passed
1429 * to us by the boot program. 1429 * to us by the boot program.
1430 */ 1430 */
1431 bim = lookup_bootinfo(BTINFO_MEMMAP); 1431 bim = lookup_bootinfo(BTINFO_MEMMAP);
1432 if ((biosmem_implicit || (biosbasemem == 0 && biosextmem == 0)) && 1432 if ((biosmem_implicit || (biosbasemem == 0 && biosextmem == 0)) &&
1433 bim != NULL && bim->num > 0) 1433 bim != NULL && bim->num > 0)
1434 initx86_parse_memmap(bim, iomem_ex); 1434 initx86_parse_memmap(bim, iomem_ex);
1435 1435
1436 /* 1436 /*
1437 * If the loop above didn't find any valid segment, fall back to 1437 * If the loop above didn't find any valid segment, fall back to
1438 * former code. 1438 * former code.
1439 */ 1439 */
1440 if (mem_cluster_cnt == 0) 1440 if (mem_cluster_cnt == 0)
1441 initx86_fake_memmap(iomem_ex); 1441 initx86_fake_memmap(iomem_ex);
1442 1442
1443 initx86_load_memmap(first_avail); 1443 initx86_load_memmap(first_avail);
1444 1444
1445#else /* !XEN */ 1445#else /* !XEN */
1446 XENPRINTK(("load the memory cluster %p(%d) - %p(%ld)\n", 1446 XENPRINTK(("load the memory cluster %p(%d) - %p(%ld)\n",
1447 (void *)(long)avail_start, (int)atop(avail_start), 1447 (void *)(long)avail_start, (int)atop(avail_start),
1448 (void *)(long)avail_end, (int)atop(avail_end))); 1448 (void *)(long)avail_end, (int)atop(avail_end)));
1449 uvm_page_physload(atop(avail_start), atop(avail_end), 1449 uvm_page_physload(atop(avail_start), atop(avail_end),
1450 atop(avail_start), atop(avail_end), 1450 atop(avail_start), atop(avail_end),
1451 VM_FREELIST_DEFAULT); 1451 VM_FREELIST_DEFAULT);
1452#endif /* !XEN */ 1452#endif /* !XEN */
1453 1453
1454 init386_msgbuf(); 1454 init386_msgbuf();
1455 1455
1456#ifndef XEN 1456#ifndef XEN
1457 /* 1457 /*
1458 * XXX Remove this 1458 * XXX Remove this
1459 * 1459 *
1460 * Setup a temporary Page Table Entry to allow identity mappings of 1460 * Setup a temporary Page Table Entry to allow identity mappings of
1461 * the real mode address. This is required by: 1461 * the real mode address. This is required by:
1462 * - bioscall 1462 * - bioscall
1463 * - MP bootstrap 1463 * - MP bootstrap
1464 * - ACPI wakecode 1464 * - ACPI wakecode
1465 */ 1465 */
1466 init386_pte0(); 1466 init386_pte0();
1467 1467
1468#if NBIOSCALL > 0 1468#if NBIOSCALL > 0
1469 KASSERT(biostramp_image_size <= PAGE_SIZE); 1469 KASSERT(biostramp_image_size <= PAGE_SIZE);
1470 pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */ 1470 pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */
1471 (paddr_t)BIOSTRAMP_BASE, /* physical */ 1471 (paddr_t)BIOSTRAMP_BASE, /* physical */
1472 VM_PROT_ALL, 0); /* protection */ 1472 VM_PROT_ALL, 0); /* protection */
1473 pmap_update(pmap_kernel()); 1473 pmap_update(pmap_kernel());
1474 memcpy((void *)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size); 1474 memcpy((void *)BIOSTRAMP_BASE, biostramp_image, biostramp_image_size);
1475 1475
1476 /* Needed early, for bioscall() and kvm86_call() */ 1476 /* Needed early, for bioscall() and kvm86_call() */
1477 cpu_info_primary.ci_pmap = pmap_kernel(); 1477 cpu_info_primary.ci_pmap = pmap_kernel();
1478#endif 1478#endif
1479#endif /* !XEN */ 1479#endif /* !XEN */
1480 1480
1481 pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 1481 pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
1482 pmap_update(pmap_kernel()); 1482 pmap_update(pmap_kernel());
1483 memset((void *)idt_vaddr, 0, PAGE_SIZE); 1483 memset((void *)idt_vaddr, 0, PAGE_SIZE);
1484 1484
1485 1485
1486#ifndef XEN 1486#ifndef XEN
1487 idt_init(); 1487 idt_init();
1488 1488
1489 idt = (struct gate_descriptor *)idt_vaddr; 1489 idt = (struct gate_descriptor *)idt_vaddr;
1490 pmap_kenter_pa(pentium_idt_vaddr, idt_paddr, VM_PROT_READ, 0); 1490 pmap_kenter_pa(pentium_idt_vaddr, idt_paddr, VM_PROT_READ, 0);
1491 pmap_update(pmap_kernel()); 1491 pmap_update(pmap_kernel());
1492 pentium_idt = (union descriptor *)pentium_idt_vaddr; 1492 pentium_idt = (union descriptor *)pentium_idt_vaddr;
1493 1493
1494 tgdt = gdt; 1494 tgdt = gdt;
1495 gdt = (union descriptor *) 1495 gdt = (union descriptor *)
1496 ((char *)idt + NIDT * sizeof (struct gate_descriptor)); 1496 ((char *)idt + NIDT * sizeof (struct gate_descriptor));
1497 ldt = gdt + NGDT; 1497 ldt = gdt + NGDT;
1498 1498
1499 memcpy(gdt, tgdt, NGDT*sizeof(*gdt)); 1499 memcpy(gdt, tgdt, NGDT*sizeof(*gdt));
1500 1500
1501 setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1, 1501 setsegment(&gdt[GLDT_SEL].sd, ldt, NLDT * sizeof(ldt[0]) - 1,
1502 SDT_SYSLDT, SEL_KPL, 0, 0); 1502 SDT_SYSLDT, SEL_KPL, 0, 0);
1503#else 1503#else
1504 HYPERVISOR_set_callbacks( 1504 HYPERVISOR_set_callbacks(
1505 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)hypervisor_callback, 1505 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)hypervisor_callback,
1506 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback); 1506 GSEL(GCODE_SEL, SEL_KPL), (unsigned long)failsafe_callback);
1507 1507
1508 ldt = (union descriptor *)idt_vaddr; 1508 ldt = (union descriptor *)idt_vaddr;
1509#endif /* XEN */ 1509#endif /* XEN */
1510 1510
1511 /* make ldt gates and memory segments */ 1511 /* make ldt gates and memory segments */
1512 setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1, 1512 setgate(&ldt[LSYS5CALLS_SEL].gd, &IDTVEC(osyscall), 1,
1513 SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1513 SDT_SYS386CGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1514 1514
1515 ldt[LUCODE_SEL] = gdt[GUCODE_SEL]; 1515 ldt[LUCODE_SEL] = gdt[GUCODE_SEL];
1516 ldt[LUCODEBIG_SEL] = gdt[GUCODEBIG_SEL]; 1516 ldt[LUCODEBIG_SEL] = gdt[GUCODEBIG_SEL];
1517 ldt[LUDATA_SEL] = gdt[GUDATA_SEL]; 1517 ldt[LUDATA_SEL] = gdt[GUDATA_SEL];
1518 ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 1518 ldt[LSOL26CALLS_SEL] = ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
1519 1519
1520#ifndef XEN 1520#ifndef XEN
1521 /* exceptions */ 1521 /* exceptions */
1522 for (x = 0; x < 32; x++) { 1522 for (x = 0; x < 32; x++) {
1523 idt_vec_reserve(x); 1523 idt_vec_reserve(x);
1524 setgate(&idt[x], IDTVEC(exceptions)[x], 0, SDT_SYS386IGT, 1524 setgate(&idt[x], IDTVEC(exceptions)[x], 0, SDT_SYS386IGT,
1525 (x == 3 || x == 4) ? SEL_UPL : SEL_KPL, 1525 (x == 3 || x == 4) ? SEL_UPL : SEL_KPL,
1526 GSEL(GCODE_SEL, SEL_KPL)); 1526 GSEL(GCODE_SEL, SEL_KPL));
1527 } 1527 }
1528 1528
1529 /* new-style interrupt gate for syscalls */ 1529 /* new-style interrupt gate for syscalls */
1530 idt_vec_reserve(128); 1530 idt_vec_reserve(128);
1531 setgate(&idt[128], &IDTVEC(syscall), 0, SDT_SYS386IGT, SEL_UPL, 1531 setgate(&idt[128], &IDTVEC(syscall), 0, SDT_SYS386IGT, SEL_UPL,
1532 GSEL(GCODE_SEL, SEL_KPL)); 1532 GSEL(GCODE_SEL, SEL_KPL));
1533 idt_vec_reserve(0xd2); 1533 idt_vec_reserve(0xd2);
1534 setgate(&idt[0xd2], &IDTVEC(svr4_fasttrap), 0, SDT_SYS386IGT, 1534 setgate(&idt[0xd2], &IDTVEC(svr4_fasttrap), 0, SDT_SYS386IGT,
1535 SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1535 SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
1536 1536
1537 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1); 1537 setregion(&region, gdt, NGDT * sizeof(gdt[0]) - 1);
1538 lgdt(&region); 1538 lgdt(&region);
1539 1539
1540 cpu_init_idt(); 1540 cpu_init_idt();
1541#else /* !XEN */ 1541#else /* !XEN */
1542 memset(xen_idt, 0, sizeof(trap_info_t) * MAX_XEN_IDT); 1542 memset(xen_idt, 0, sizeof(trap_info_t) * MAX_XEN_IDT);
1543 xen_idt_idx = 0; 1543 xen_idt_idx = 0;
1544 for (x = 0; x < 32; x++) { 1544 for (x = 0; x < 32; x++) {
1545 KASSERT(xen_idt_idx < MAX_XEN_IDT); 1545 KASSERT(xen_idt_idx < MAX_XEN_IDT);
1546 xen_idt[xen_idt_idx].vector = x; 1546 xen_idt[xen_idt_idx].vector = x;
1547 1547
1548 switch (x) { 1548 switch (x) {
1549 case 2: /* NMI */ 1549 case 2: /* NMI */
1550 case 18: /* MCA */ 1550 case 18: /* MCA */
1551 TI_SET_IF(&(xen_idt[xen_idt_idx]), 2); 1551 TI_SET_IF(&(xen_idt[xen_idt_idx]), 2);
1552 break; 1552 break;
1553 case 3: 1553 case 3:
1554 case 4: 1554 case 4:

cvs diff -r1.139 -r1.140 src/sys/arch/i386/isa/Attic/npx.c (switch to unified diff)

--- src/sys/arch/i386/isa/Attic/npx.c 2010/12/20 00:25:35 1.139
+++ src/sys/arch/i386/isa/Attic/npx.c 2011/06/07 14:53:03 1.140
@@ -1,968 +1,968 @@ @@ -1,968 +1,968 @@
1/* $NetBSD: npx.c,v 1.139 2010/12/20 00:25:35 matt Exp $ */ 1/* $NetBSD: npx.c,v 1.140 2011/06/07 14:53:03 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software developed for The NetBSD Foundation 7 * This code is derived from software developed for The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/*- 32/*-
33 * Copyright (c) 1991 The Regents of the University of California. 33 * Copyright (c) 1991 The Regents of the University of California.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions 37 * modification, are permitted provided that the following conditions
38 * are met: 38 * are met:
39 * 1. Redistributions of source code must retain the above copyright 39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer. 40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright 41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the 42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution. 43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors 44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software 45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission. 46 * without specific prior written permission.
47 * 47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE. 58 * SUCH DAMAGE.
59 * 59 *
60 * @(#)npx.c 7.2 (Berkeley) 5/12/91 60 * @(#)npx.c 7.2 (Berkeley) 5/12/91
61 */ 61 */
62 62
63/*- 63/*-
64 * Copyright (c) 1994, 1995, 1998 Charles M. Hannum. All rights reserved. 64 * Copyright (c) 1994, 1995, 1998 Charles M. Hannum. All rights reserved.
65 * Copyright (c) 1990 William Jolitz. 65 * Copyright (c) 1990 William Jolitz.
66 * 66 *
67 * Redistribution and use in source and binary forms, with or without 67 * Redistribution and use in source and binary forms, with or without
68 * modification, are permitted provided that the following conditions 68 * modification, are permitted provided that the following conditions
69 * are met: 69 * are met:
70 * 1. Redistributions of source code must retain the above copyright 70 * 1. Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer. 71 * notice, this list of conditions and the following disclaimer.
72 * 2. Redistributions in binary form must reproduce the above copyright 72 * 2. Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in the 73 * notice, this list of conditions and the following disclaimer in the
74 * documentation and/or other materials provided with the distribution. 74 * documentation and/or other materials provided with the distribution.
75 * 3. All advertising materials mentioning features or use of this software 75 * 3. All advertising materials mentioning features or use of this software
76 * must display the following acknowledgement: 76 * must display the following acknowledgement:
77 * This product includes software developed by the University of 77 * This product includes software developed by the University of
78 * California, Berkeley and its contributors. 78 * California, Berkeley and its contributors.
79 * 4. Neither the name of the University nor the names of its contributors 79 * 4. Neither the name of the University nor the names of its contributors
80 * may be used to endorse or promote products derived from this software 80 * may be used to endorse or promote products derived from this software
81 * without specific prior written permission. 81 * without specific prior written permission.
82 * 82 *
83 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 83 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
84 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 84 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
85 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 85 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
86 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 86 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
87 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 87 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
88 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 88 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
89 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 89 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
90 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 90 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
91 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 91 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
92 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 92 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
93 * SUCH DAMAGE. 93 * SUCH DAMAGE.
94 * 94 *
95 * @(#)npx.c 7.2 (Berkeley) 5/12/91 95 * @(#)npx.c 7.2 (Berkeley) 5/12/91
96 */ 96 */
97 97
98#include <sys/cdefs.h> 98#include <sys/cdefs.h>
99__KERNEL_RCSID(0, "$NetBSD: npx.c,v 1.139 2010/12/20 00:25:35 matt Exp $"); 99__KERNEL_RCSID(0, "$NetBSD: npx.c,v 1.140 2011/06/07 14:53:03 bouyer Exp $");
100 100
101#if 0 101#if 0
102#define IPRINTF(x) printf x 102#define IPRINTF(x) printf x
103#else 103#else
104#define IPRINTF(x) 104#define IPRINTF(x)
105#endif 105#endif
106 106
107#include "opt_multiprocessor.h" 107#include "opt_multiprocessor.h"
108#include "opt_xen.h" 108#include "opt_xen.h"
109 109
110#include <sys/param.h> 110#include <sys/param.h>
111#include <sys/systm.h> 111#include <sys/systm.h>
112#include <sys/conf.h> 112#include <sys/conf.h>
113#include <sys/file.h> 113#include <sys/file.h>
114#include <sys/proc.h> 114#include <sys/proc.h>
115#include <sys/ioctl.h> 115#include <sys/ioctl.h>
116#include <sys/device.h> 116#include <sys/device.h>
117#include <sys/vmmeter.h> 117#include <sys/vmmeter.h>
118#include <sys/kernel.h> 118#include <sys/kernel.h>
119#include <sys/bus.h> 119#include <sys/bus.h>
120#include <sys/cpu.h> 120#include <sys/cpu.h>
121#include <sys/intr.h> 121#include <sys/intr.h>
122 122
123#include <uvm/uvm_extern.h> 123#include <uvm/uvm_extern.h>
124 124
125#include <machine/cpufunc.h> 125#include <machine/cpufunc.h>
126#include <machine/cpuvar.h> 126#include <machine/cpuvar.h>
127#include <machine/pcb.h> 127#include <machine/pcb.h>
128#include <machine/trap.h> 128#include <machine/trap.h>
129#include <machine/specialreg.h> 129#include <machine/specialreg.h>
130#include <machine/pio.h> 130#include <machine/pio.h>
131#include <machine/i8259.h> 131#include <machine/i8259.h>
132 132
133#include <dev/isa/isareg.h> 133#include <dev/isa/isareg.h>
134#include <dev/isa/isavar.h> 134#include <dev/isa/isavar.h>
135 135
136#include <i386/isa/npxvar.h> 136#include <i386/isa/npxvar.h>
137 137
138/* 138/*
139 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. 139 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
140 * 140 *
141 * We do lazy initialization and switching using the TS bit in cr0 and the 141 * We do lazy initialization and switching using the TS bit in cr0 and the
142 * MDL_USEDFPU bit in mdlwp. 142 * MDL_USEDFPU bit in mdlwp.
143 * 143 *
144 * DNA exceptions are handled like this: 144 * DNA exceptions are handled like this:
145 * 145 *
146 * 1) If there is no NPX, return and go to the emulator. 146 * 1) If there is no NPX, return and go to the emulator.
147 * 2) If someone else has used the NPX, save its state into that process's PCB. 147 * 2) If someone else has used the NPX, save its state into that process's PCB.
148 * 3a) If MDL_USEDFPU is not set, set it and initialize the NPX. 148 * 3a) If MDL_USEDFPU is not set, set it and initialize the NPX.
149 * 3b) Otherwise, reload the process's previous NPX state. 149 * 3b) Otherwise, reload the process's previous NPX state.
150 * 150 *
151 * When a process is created or exec()s, its saved cr0 image has the TS bit 151 * When a process is created or exec()s, its saved cr0 image has the TS bit
152 * set and the MDL_USEDFPU bit clear. The MDL_USEDFPU bit is set when the 152 * set and the MDL_USEDFPU bit clear. The MDL_USEDFPU bit is set when the
153 * process first gets a DNA and the NPX is initialized. The TS bit is turned 153 * process first gets a DNA and the NPX is initialized. The TS bit is turned
154 * off when the NPX is used, and turned on again later when the process's NPX 154 * off when the NPX is used, and turned on again later when the process's NPX
155 * state is saved. 155 * state is saved.
156 */ 156 */
157 157
158static int x86fpflags_to_ksiginfo(uint32_t flags); 158static int x86fpflags_to_ksiginfo(uint32_t flags);
159static int npxdna(struct cpu_info *); 159static int npxdna(struct cpu_info *);
160 160
161#ifdef XEN 161#ifdef XEN
162#define clts() 162#define clts() HYPERVISOR_fpu_taskswitch(0)
163#define stts() 163#define stts() HYPERVISOR_fpu_taskswitch(1)
164#endif 164#endif
165 165
166static enum npx_type npx_type; 166static enum npx_type npx_type;
167volatile u_int npx_intrs_while_probing; 167volatile u_int npx_intrs_while_probing;
168volatile u_int npx_traps_while_probing; 168volatile u_int npx_traps_while_probing;
169 169
170extern int i386_fpu_present; 170extern int i386_fpu_present;
171extern int i386_fpu_exception; 171extern int i386_fpu_exception;
172extern int i386_fpu_fdivbug; 172extern int i386_fpu_fdivbug;
173 173
174struct npx_softc *npx_softc; 174struct npx_softc *npx_softc;
175 175
176static inline void 176static inline void
177fpu_save(union savefpu *addr) 177fpu_save(union savefpu *addr)
178{ 178{
179 if (i386_use_fxsave) 179 if (i386_use_fxsave)
180 { 180 {
181 fxsave(&addr->sv_xmm); 181 fxsave(&addr->sv_xmm);
182 182
183 /* FXSAVE doesn't FNINIT like FNSAVE does -- so do it here. */ 183 /* FXSAVE doesn't FNINIT like FNSAVE does -- so do it here. */
184 fninit(); 184 fninit();
185 } else 185 } else
186 fnsave(&addr->sv_87); 186 fnsave(&addr->sv_87);
187} 187}
188 188
189static int 189static int
190npxdna_empty(struct cpu_info *ci) 190npxdna_empty(struct cpu_info *ci)
191{ 191{
192 192
193#ifndef XEN 
194 panic("npxdna vector not initialized"); 193 panic("npxdna vector not initialized");
195#endif 
196 return 0; 194 return 0;
197} 195}
198 196
199 197
200int (*npxdna_func)(struct cpu_info *) = npxdna_empty; 198int (*npxdna_func)(struct cpu_info *) = npxdna_empty;
201 199
202#ifndef XEN 200#ifndef XEN
203/* 201/*
204 * This calls i8259_* directly, but currently we can count on systems 202 * This calls i8259_* directly, but currently we can count on systems
205 * having a i8259 compatible setup all the time. Maybe have to change 203 * having a i8259 compatible setup all the time. Maybe have to change
206 * that in the future. 204 * that in the future.
207 */ 205 */
208enum npx_type 206enum npx_type
209npxprobe1(bus_space_tag_t iot, bus_space_handle_t ioh, int irq) 207npxprobe1(bus_space_tag_t iot, bus_space_handle_t ioh, int irq)
210{ 208{
211 struct gate_descriptor save_idt_npxintr; 209 struct gate_descriptor save_idt_npxintr;
212 struct gate_descriptor save_idt_npxtrap; 210 struct gate_descriptor save_idt_npxtrap;
213 enum npx_type rv = NPX_NONE; 211 enum npx_type rv = NPX_NONE;
214 u_long save_eflags; 212 u_long save_eflags;
215 int control; 213 int control;
216 int status; 214 int status;
217 unsigned irqmask; 215 unsigned irqmask;
218 216
219 if (cpu_feature[0] & CPUID_FPU) { 217 if (cpu_feature[0] & CPUID_FPU) {
220 i386_fpu_exception = 1; 218 i386_fpu_exception = 1;
221 return NPX_CPUID; 219 return NPX_CPUID;
222 } 220 }
223 save_eflags = x86_read_psl(); 221 save_eflags = x86_read_psl();
224 x86_disable_intr(); 222 x86_disable_intr();
225 save_idt_npxintr = idt[NRSVIDT + irq]; 223 save_idt_npxintr = idt[NRSVIDT + irq];
226 save_idt_npxtrap = idt[16]; 224 save_idt_npxtrap = idt[16];
227 setgate(&idt[NRSVIDT + irq], probeintr, 0, SDT_SYS386IGT, SEL_KPL, 225 setgate(&idt[NRSVIDT + irq], probeintr, 0, SDT_SYS386IGT, SEL_KPL,
228 GSEL(GCODE_SEL, SEL_KPL)); 226 GSEL(GCODE_SEL, SEL_KPL));
229 setgate(&idt[16], probetrap, 0, SDT_SYS386TGT, SEL_KPL, 227 setgate(&idt[16], probetrap, 0, SDT_SYS386TGT, SEL_KPL,
230 GSEL(GCODE_SEL, SEL_KPL)); 228 GSEL(GCODE_SEL, SEL_KPL));
231 229
232 irqmask = i8259_setmask(~((1 << IRQ_SLAVE) | (1 << irq))); 230 irqmask = i8259_setmask(~((1 << IRQ_SLAVE) | (1 << irq)));
233 231
234 /* 232 /*
235 * Partially reset the coprocessor, if any. Some BIOS's don't reset 233 * Partially reset the coprocessor, if any. Some BIOS's don't reset
236 * it after a warm boot. 234 * it after a warm boot.
237 */ 235 */
238 /* full reset on some systems, NOP on others */ 236 /* full reset on some systems, NOP on others */
239 bus_space_write_1(iot, ioh, 1, 0); 237 bus_space_write_1(iot, ioh, 1, 0);
240 delay(1000); 238 delay(1000);
241 /* clear BUSY# latch */ 239 /* clear BUSY# latch */
242 bus_space_write_1(iot, ioh, 0, 0); 240 bus_space_write_1(iot, ioh, 0, 0);
243 241
244 /* 242 /*
245 * We set CR0 in locore to trap all ESC and WAIT instructions. 243 * We set CR0 in locore to trap all ESC and WAIT instructions.
246 * We have to turn off the CR0_EM bit temporarily while probing. 244 * We have to turn off the CR0_EM bit temporarily while probing.
247 */ 245 */
248 lcr0(rcr0() & ~(CR0_EM|CR0_TS)); 246 lcr0(rcr0() & ~(CR0_EM|CR0_TS));
249 x86_enable_intr(); 247 x86_enable_intr();
250 248
251 /* 249 /*
252 * Finish resetting the coprocessor, if any. If there is an error 250 * Finish resetting the coprocessor, if any. If there is an error
253 * pending, then we may get a bogus IRQ13, but probeintr() will handle 251 * pending, then we may get a bogus IRQ13, but probeintr() will handle
254 * it OK. Bogus halts have never been observed, but we enabled 252 * it OK. Bogus halts have never been observed, but we enabled
255 * IRQ13 and cleared the BUSY# latch early to handle them anyway. 253 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
256 */ 254 */
257 fninit(); 255 fninit();
258 delay(1000); /* wait for any IRQ13 (fwait might hang) */ 256 delay(1000); /* wait for any IRQ13 (fwait might hang) */
259 257
260 /* 258 /*
261 * Check for a status of mostly zero. 259 * Check for a status of mostly zero.
262 */ 260 */
263 status = 0x5a5a; 261 status = 0x5a5a;
264 fnstsw(&status); 262 fnstsw(&status);
265 if ((status & 0xb8ff) == 0) { 263 if ((status & 0xb8ff) == 0) {
266 /* 264 /*
267 * Good, now check for a proper control word. 265 * Good, now check for a proper control word.
268 */ 266 */
269 control = 0x5a5a; 267 control = 0x5a5a;
270 fnstcw(&control); 268 fnstcw(&control);
271 if ((control & 0x1f3f) == 0x033f) { 269 if ((control & 0x1f3f) == 0x033f) {
272 /* 270 /*
273 * We have an npx, now divide by 0 to see if exception 271 * We have an npx, now divide by 0 to see if exception
274 * 16 works. 272 * 16 works.
275 */ 273 */
276 control &= ~(1 << 2); /* enable divide by 0 trap */ 274 control &= ~(1 << 2); /* enable divide by 0 trap */
277 fldcw(&control); 275 fldcw(&control);
278 npx_traps_while_probing = npx_intrs_while_probing = 0; 276 npx_traps_while_probing = npx_intrs_while_probing = 0;
279 fp_divide_by_0(); 277 fp_divide_by_0();
280 if (npx_traps_while_probing != 0) { 278 if (npx_traps_while_probing != 0) {
281 /* 279 /*
282 * Good, exception 16 works. 280 * Good, exception 16 works.
283 */ 281 */
284 rv = NPX_EXCEPTION; 282 rv = NPX_EXCEPTION;
285 i386_fpu_exception = 1; 283 i386_fpu_exception = 1;
286 } else if (npx_intrs_while_probing != 0) { 284 } else if (npx_intrs_while_probing != 0) {
287 /* 285 /*
288 * Bad, we are stuck with IRQ13. 286 * Bad, we are stuck with IRQ13.
289 */ 287 */
290 rv = NPX_INTERRUPT; 288 rv = NPX_INTERRUPT;
291 } else { 289 } else {
292 /* 290 /*
293 * Worse, even IRQ13 is broken. Use emulator. 291 * Worse, even IRQ13 is broken. Use emulator.
294 */ 292 */
295 rv = NPX_BROKEN; 293 rv = NPX_BROKEN;
296 } 294 }
297 } 295 }
298 } 296 }
299 297
300 x86_disable_intr(); 298 x86_disable_intr();
301 lcr0(rcr0() | (CR0_EM|CR0_TS)); 299 lcr0(rcr0() | (CR0_EM|CR0_TS));
302 300
303 irqmask = i8259_setmask(irqmask); 301 irqmask = i8259_setmask(irqmask);
304 302
305 idt[NRSVIDT + irq] = save_idt_npxintr; 303 idt[NRSVIDT + irq] = save_idt_npxintr;
306 304
307 idt[16] = save_idt_npxtrap; 305 idt[16] = save_idt_npxtrap;
308 x86_write_psl(save_eflags); 306 x86_write_psl(save_eflags);
309 307
310 return (rv); 308 return (rv);
311} 309}
312 310
313void npxinit(struct cpu_info *ci) 311void npxinit(struct cpu_info *ci)
314{ 312{
315 lcr0(rcr0() & ~(CR0_EM|CR0_TS)); 313 lcr0(rcr0() & ~(CR0_EM|CR0_TS));
316 fninit(); 314 fninit();
317 if (npx586bug1(4195835, 3145727) != 0) { 315 if (npx586bug1(4195835, 3145727) != 0) {
318 i386_fpu_fdivbug = 1; 316 i386_fpu_fdivbug = 1;
319 aprint_normal_dev(ci->ci_dev, 317 aprint_normal_dev(ci->ci_dev,
320 "WARNING: Pentium FDIV bug detected!\n"); 318 "WARNING: Pentium FDIV bug detected!\n");
321 } 319 }
322 lcr0(rcr0() | (CR0_TS)); 320 lcr0(rcr0() | (CR0_TS));
323} 321}
324#endif 322#endif
325 323
326/* 324/*
327 * Common attach routine. 325 * Common attach routine.
328 */ 326 */
329void 327void
330npxattach(struct npx_softc *sc) 328npxattach(struct npx_softc *sc)
331{ 329{
332 330
333 npx_softc = sc; 331 npx_softc = sc;
334 npx_type = sc->sc_type; 332 npx_type = sc->sc_type;
335 333
336#ifndef XEN 334#ifndef XEN
337 npxinit(&cpu_info_primary); 335 npxinit(&cpu_info_primary);
338#endif 336#endif
339 i386_fpu_present = 1; 337 i386_fpu_present = 1;
340 npxdna_func = npxdna; 338 npxdna_func = npxdna;
341 339
342 if (!pmf_device_register(sc->sc_dev, NULL, NULL)) 340 if (!pmf_device_register(sc->sc_dev, NULL, NULL))
343 aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n"); 341 aprint_error_dev(sc->sc_dev, "couldn't establish power handler\n");
344} 342}
345 343
346int 344int
347npxdetach(device_t self, int flags) 345npxdetach(device_t self, int flags)
348{ 346{
349 struct npx_softc *sc = device_private(self); 347 struct npx_softc *sc = device_private(self);
350 348
351 if (sc->sc_type == NPX_INTERRUPT) 349 if (sc->sc_type == NPX_INTERRUPT)
352 return EBUSY; 350 return EBUSY;
353 351
354 pmf_device_deregister(self); 352 pmf_device_deregister(self);
355  353
356 return 0; 354 return 0;
357} 355}
358 356
359/* 357/*
360 * Record the FPU state and reinitialize it all except for the control word. 358 * Record the FPU state and reinitialize it all except for the control word.
361 * Then generate a SIGFPE. 359 * Then generate a SIGFPE.
362 * 360 *
363 * Reinitializing the state allows naive SIGFPE handlers to longjmp without 361 * Reinitializing the state allows naive SIGFPE handlers to longjmp without
364 * doing any fixups. 362 * doing any fixups.
365 * 363 *
366 * XXX there is currently no way to pass the full error state to signal 364 * XXX there is currently no way to pass the full error state to signal
367 * handlers, and if this is a nested interrupt there is no way to pass even 365 * handlers, and if this is a nested interrupt there is no way to pass even
368 * a status code! So there is no way to have a non-naive SIGFPE handler. At 366 * a status code! So there is no way to have a non-naive SIGFPE handler. At
369 * best a handler could do an fninit followed by an fldcw of a static value. 367 * best a handler could do an fninit followed by an fldcw of a static value.
370 * fnclex would be of little use because it would leave junk on the FPU stack. 368 * fnclex would be of little use because it would leave junk on the FPU stack.
371 * Returning from the handler would be even less safe than usual because 369 * Returning from the handler would be even less safe than usual because
372 * IRQ13 exception handling makes exceptions even less precise than usual. 370 * IRQ13 exception handling makes exceptions even less precise than usual.
373 */ 371 */
374int 372int
375npxintr(void *arg, struct intrframe *frame) 373npxintr(void *arg, struct intrframe *frame)
376{ 374{
377 struct cpu_info *ci = curcpu(); 375 struct cpu_info *ci = curcpu();
378 struct lwp *l = ci->ci_fpcurlwp; 376 struct lwp *l = ci->ci_fpcurlwp;
379 union savefpu *addr; 377 union savefpu *addr;
380 struct npx_softc *sc; 378 struct npx_softc *sc;
381 struct pcb *pcb; 379 struct pcb *pcb;
382 ksiginfo_t ksi; 380 ksiginfo_t ksi;
383 381
384 sc = npx_softc; 382 sc = npx_softc;
385 383
386 kpreempt_disable(); 384 kpreempt_disable();
387#ifndef XEN 385#ifndef XEN
388 KASSERT((x86_read_psl() & PSL_I) == 0); 386 KASSERT((x86_read_psl() & PSL_I) == 0);
389 x86_enable_intr(); 387 x86_enable_intr();
390#endif 388#endif
391 389
392 curcpu()->ci_data.cpu_ntrap++; 390 curcpu()->ci_data.cpu_ntrap++;
393 IPRINTF(("%s: fp intr\n", device_xname(ci->ci_dev))); 391 IPRINTF(("%s: fp intr\n", device_xname(ci->ci_dev)));
394 392
395#ifndef XEN 393#ifndef XEN
396 /* 394 /*
397 * Clear the interrupt latch. 395 * Clear the interrupt latch.
398 */ 396 */
399 if (sc->sc_type == NPX_INTERRUPT) 397 if (sc->sc_type == NPX_INTERRUPT)
400 bus_space_write_1(sc->sc_iot, sc->sc_ioh, 0, 0); 398 bus_space_write_1(sc->sc_iot, sc->sc_ioh, 0, 0);
401#endif 399#endif
402 400
403 /* 401 /*
404 * If we're saving, ignore the interrupt. The FPU will generate 402 * If we're saving, ignore the interrupt. The FPU will generate
405 * another one when we restore the state later. 403 * another one when we restore the state later.
406 */ 404 */
407 if (ci->ci_fpsaving) { 405 if (ci->ci_fpsaving) {
408 kpreempt_enable(); 406 kpreempt_enable();
409 return (1); 407 return (1);
410 } 408 }
411 409
412 if (l == NULL || npx_type == NPX_NONE) { 410 if (l == NULL || npx_type == NPX_NONE) {
413 printf("npxintr: l = %p, curproc = %p, npx_type = %d\n", 411 printf("npxintr: l = %p, curproc = %p, npx_type = %d\n",
414 l, curproc, npx_type); 412 l, curproc, npx_type);
415 printf("npxintr: came from nowhere"); 413 printf("npxintr: came from nowhere");
416 kpreempt_enable(); 414 kpreempt_enable();
417 return 1; 415 return 1;
418 } 416 }
419 417
420 /* 418 /*
421 * At this point, fpcurlwp should be curlwp. If it wasn't, the TS 419 * At this point, fpcurlwp should be curlwp. If it wasn't, the TS
422 * bit should be set, and we should have gotten a DNA exception. 420 * bit should be set, and we should have gotten a DNA exception.
423 */ 421 */
424 KASSERT(l == curlwp); 422 KASSERT(l == curlwp);
425 pcb = lwp_getpcb(l); 423 pcb = lwp_getpcb(l);
426 424
427 /* 425 /*
428 * Find the address of fpcurproc's saved FPU state. (Given the 426 * Find the address of fpcurproc's saved FPU state. (Given the
429 * invariant above, this is always the one in curpcb.) 427 * invariant above, this is always the one in curpcb.)
430 */ 428 */
431 addr = &pcb->pcb_savefpu; 429 addr = &pcb->pcb_savefpu;
432 430
433 /* 431 /*
434 * Save state. This does an implied fninit. It had better not halt 432 * Save state. This does an implied fninit. It had better not halt
435 * the CPU or we'll hang. 433 * the CPU or we'll hang.
436 */ 434 */
437 fpu_save(addr); 435 fpu_save(addr);
438 fwait(); 436 fwait();
439 if (i386_use_fxsave) { 437 if (i386_use_fxsave) {
440 fldcw(&addr->sv_xmm.sv_env.en_cw); 438 fldcw(&addr->sv_xmm.sv_env.en_cw);
441 /* 439 /*
442 * FNINIT doesn't affect MXCSR or the XMM registers; 440 * FNINIT doesn't affect MXCSR or the XMM registers;
443 * no need to re-load MXCSR here. 441 * no need to re-load MXCSR here.
444 */ 442 */
445 } else 443 } else
446 fldcw(&addr->sv_87.sv_env.en_cw); 444 fldcw(&addr->sv_87.sv_env.en_cw);
447 fwait(); 445 fwait();
448 /* 446 /*
449 * Remember the exception status word and tag word. The current 447 * Remember the exception status word and tag word. The current
450 * (almost fninit'ed) fpu state is in the fpu and the exception 448 * (almost fninit'ed) fpu state is in the fpu and the exception
451 * state just saved will soon be junk. However, the implied fninit 449 * state just saved will soon be junk. However, the implied fninit
452 * doesn't change the error pointers or register contents, and we 450 * doesn't change the error pointers or register contents, and we
453 * preserved the control word and will copy the status and tag 451 * preserved the control word and will copy the status and tag
454 * words, so the complete exception state can be recovered. 452 * words, so the complete exception state can be recovered.
455 */ 453 */
456 if (i386_use_fxsave) { 454 if (i386_use_fxsave) {
457 addr->sv_xmm.sv_ex_sw = addr->sv_xmm.sv_env.en_sw; 455 addr->sv_xmm.sv_ex_sw = addr->sv_xmm.sv_env.en_sw;
458 addr->sv_xmm.sv_ex_tw = addr->sv_xmm.sv_env.en_tw; 456 addr->sv_xmm.sv_ex_tw = addr->sv_xmm.sv_env.en_tw;
459 } else { 457 } else {
460 addr->sv_87.sv_ex_sw = addr->sv_87.sv_env.en_sw; 458 addr->sv_87.sv_ex_sw = addr->sv_87.sv_env.en_sw;
461 addr->sv_87.sv_ex_tw = addr->sv_87.sv_env.en_tw; 459 addr->sv_87.sv_ex_tw = addr->sv_87.sv_env.en_tw;
462 } 460 }
463 /* 461 /*
464 * Pass exception to process. 462 * Pass exception to process.
465 */ 463 */
466 if (USERMODE(frame->if_cs, frame->if_eflags)) { 464 if (USERMODE(frame->if_cs, frame->if_eflags)) {
467 /* 465 /*
468 * Interrupt is essentially a trap, so we can afford to call 466 * Interrupt is essentially a trap, so we can afford to call
469 * the SIGFPE handler (if any) as soon as the interrupt 467 * the SIGFPE handler (if any) as soon as the interrupt
470 * returns. 468 * returns.
471 * 469 *
472 * XXX little or nothing is gained from this, and plenty is 470 * XXX little or nothing is gained from this, and plenty is
473 * lost - the interrupt frame has to contain the trap frame 471 * lost - the interrupt frame has to contain the trap frame
474 * (this is otherwise only necessary for the rescheduling trap 472 * (this is otherwise only necessary for the rescheduling trap
475 * in doreti, and the frame for that could easily be set up 473 * in doreti, and the frame for that could easily be set up
476 * just before it is used). 474 * just before it is used).
477 */ 475 */
478 l->l_md.md_regs = (struct trapframe *)&frame->if_gs; 476 l->l_md.md_regs = (struct trapframe *)&frame->if_gs;
479 477
480 KSI_INIT_TRAP(&ksi); 478 KSI_INIT_TRAP(&ksi);
481 ksi.ksi_signo = SIGFPE; 479 ksi.ksi_signo = SIGFPE;
482 ksi.ksi_addr = (void *)frame->if_eip; 480 ksi.ksi_addr = (void *)frame->if_eip;
483 481
484 /* 482 /*
485 * Encode the appropriate code for detailed information on 483 * Encode the appropriate code for detailed information on
486 * this exception. 484 * this exception.
487 */ 485 */
488 486
489 if (i386_use_fxsave) { 487 if (i386_use_fxsave) {
490 ksi.ksi_code = 488 ksi.ksi_code =
491 x86fpflags_to_ksiginfo(addr->sv_xmm.sv_ex_sw); 489 x86fpflags_to_ksiginfo(addr->sv_xmm.sv_ex_sw);
492 ksi.ksi_trap = (int)addr->sv_xmm.sv_ex_sw; 490 ksi.ksi_trap = (int)addr->sv_xmm.sv_ex_sw;
493 } else { 491 } else {
494 ksi.ksi_code = 492 ksi.ksi_code =
495 x86fpflags_to_ksiginfo(addr->sv_87.sv_ex_sw); 493 x86fpflags_to_ksiginfo(addr->sv_87.sv_ex_sw);
496 ksi.ksi_trap = (int)addr->sv_87.sv_ex_sw; 494 ksi.ksi_trap = (int)addr->sv_87.sv_ex_sw;
497 } 495 }
498 496
499 trapsignal(l, &ksi); 497 trapsignal(l, &ksi);
500 } else { 498 } else {
501 /* 499 /*
502 * This is a nested interrupt. This should only happen when 500 * This is a nested interrupt. This should only happen when
503 * an IRQ13 occurs at the same time as a higher-priority 501 * an IRQ13 occurs at the same time as a higher-priority
504 * interrupt. 502 * interrupt.
505 * 503 *
506 * XXX 504 * XXX
507 * Currently, we treat this like an asynchronous interrupt, but 505 * Currently, we treat this like an asynchronous interrupt, but
508 * this has disadvantages. 506 * this has disadvantages.
509 */ 507 */
 508 mutex_enter(proc_lock);
510 psignal(l->l_proc, SIGFPE); 509 psignal(l->l_proc, SIGFPE);
 510 mutex_exit(proc_lock);
511 } 511 }
512 512
513 kpreempt_enable(); 513 kpreempt_enable();
514 return (1); 514 return (1);
515} 515}
516 516
517/* map x86 fp flags to ksiginfo fp codes */ 517/* map x86 fp flags to ksiginfo fp codes */
518/* see table 8-4 of the IA-32 Intel Architecture */ 518/* see table 8-4 of the IA-32 Intel Architecture */
519/* Software Developer's Manual, Volume 1 */ 519/* Software Developer's Manual, Volume 1 */
520/* XXX punting on the stack fault with FLTINV */ 520/* XXX punting on the stack fault with FLTINV */
521static int 521static int
522x86fpflags_to_ksiginfo(uint32_t flags) 522x86fpflags_to_ksiginfo(uint32_t flags)
523{ 523{
524 int i; 524 int i;
525 static int x86fp_ksiginfo_table[] = { 525 static int x86fp_ksiginfo_table[] = {
526 FPE_FLTINV, /* bit 0 - invalid operation */ 526 FPE_FLTINV, /* bit 0 - invalid operation */
527 FPE_FLTRES, /* bit 1 - denormal operand */ 527 FPE_FLTRES, /* bit 1 - denormal operand */
528 FPE_FLTDIV, /* bit 2 - divide by zero */ 528 FPE_FLTDIV, /* bit 2 - divide by zero */
529 FPE_FLTOVF, /* bit 3 - fp overflow */ 529 FPE_FLTOVF, /* bit 3 - fp overflow */
530 FPE_FLTUND, /* bit 4 - fp underflow */  530 FPE_FLTUND, /* bit 4 - fp underflow */
531 FPE_FLTRES, /* bit 5 - fp precision */ 531 FPE_FLTRES, /* bit 5 - fp precision */
532 FPE_FLTINV, /* bit 6 - stack fault */ 532 FPE_FLTINV, /* bit 6 - stack fault */
533 }; 533 };
534  534
535 for(i=0;i < sizeof(x86fp_ksiginfo_table)/sizeof(int); i++) { 535 for(i=0;i < sizeof(x86fp_ksiginfo_table)/sizeof(int); i++) {
536 if (flags & (1 << i)) 536 if (flags & (1 << i))
537 return(x86fp_ksiginfo_table[i]); 537 return(x86fp_ksiginfo_table[i]);
538 } 538 }
539 /* punt if flags not set */ 539 /* punt if flags not set */
540 return(0); 540 return(0);
541} 541}
542 542
/*
 * Implement device not available (DNA) exception
 *
 * If we were the last lwp to use the FPU, we can simply return.
 * Otherwise, we save the previous state, if necessary, and restore
 * our last saved state.
 *
 * Always returns 1 to indicate the trap was handled.
 */
static int
npxdna(struct cpu_info *ci)
{
	struct lwp *l, *fl;
	struct pcb *pcb;
	int s;

	if (ci->ci_fpsaving) {
		/* Recursive trap. */
		return 1;
	}

	/* Lock out IPIs and disable preemption. */
	s = splhigh();
#ifndef XEN
	x86_enable_intr();
#endif
	/* Save state on current CPU. */
	l = ci->ci_curlwp;
	pcb = lwp_getpcb(l);

	fl = ci->ci_fpcurlwp;
	if (fl != NULL) {
		/*
		 * It seems we can get here on Xen even if we didn't
		 * switch lwp. In this case do nothing
		 */
		if (fl == l) {
			KASSERT(pcb->pcb_fpcpu == ci);
			ci->ci_fpused = 1;
			/* Clear TS so FPU instructions no longer trap. */
			clts();
			splx(s);
			return 1;
		}
		KASSERT(fl != l);
		/* Push the previous owner's state out to its pcb. */
		npxsave_cpu(true);
		KASSERT(ci->ci_fpcurlwp == NULL);
	}

	/* Save our state if on a remote CPU. */
	if (pcb->pcb_fpcpu != NULL) {
		/* Explicitly disable preemption before dropping spl. */
		KPREEMPT_DISABLE(l);
		splx(s);
		npxsave_lwp(l, true);
		KASSERT(pcb->pcb_fpcpu == NULL);
		s = splhigh();
		KPREEMPT_ENABLE(l);
	}

	/*
	 * Restore state on this CPU, or initialize. Ensure that
	 * the entire update is atomic with respect to FPU-sync IPIs.
	 */
	clts();
	ci->ci_fpcurlwp = l;
	pcb->pcb_fpcpu = ci;
	ci->ci_fpused = 1;

	if ((l->l_md.md_flags & MDL_USEDFPU) == 0) {
		/* First FPU use by this lwp: start from a fresh state. */
		fninit();
		if (i386_use_fxsave) {
			fldcw(&pcb->pcb_savefpu.
			    sv_xmm.sv_env.en_cw);
		} else {
			fldcw(&pcb->pcb_savefpu.
			    sv_87.sv_env.en_cw);
		}
		l->l_md.md_flags |= MDL_USEDFPU;
	} else if (i386_use_fxsave) {
		/*
		 * AMD FPU's do not restore FIP, FDP, and FOP on fxrstor,
		 * leaking other process's execution history. Clear them
		 * manually.
		 */
		static const double zero = 0.0;
		int status;
		/*
		 * Clear the ES bit in the x87 status word if it is currently
		 * set, in order to avoid causing a fault in the upcoming load.
		 */
		fnstsw(&status);
		if (status & 0x80)
			fnclex();
		/*
		 * Load the dummy variable into the x87 stack. This mangles
		 * the x87 stack, but we don't care since we're about to call
		 * fxrstor() anyway.
		 */
		fldummy(&zero);
		fxrstor(&pcb->pcb_savefpu.sv_xmm);
	} else {
		frstor(&pcb->pcb_savefpu.sv_87);
	}

	KASSERT(ci == curcpu());
	splx(s);
	return 1;
}
649 649
/*
 * Save current CPU's FPU state. Must be called at IPL_HIGH.
 *
 * If 'save' is false the hardware state is simply discarded.  Either
 * way the CPU is left with TS set (so the next FPU instruction traps
 * to npxdna()) and no longer owns any lwp's FPU context.
 */
void
npxsave_cpu(bool save)
{
	struct cpu_info *ci;
	struct lwp *l;
	struct pcb *pcb;

	KASSERT(curcpu()->ci_ilevel == IPL_HIGH);

	ci = curcpu();
	l = ci->ci_fpcurlwp;
	if (l == NULL)
		return;		/* no FPU context resident on this CPU */

	pcb = lwp_getpcb(l);

	if (save) {
		/*
		 * Set ci->ci_fpsaving, so that any pending exception will
		 * be thrown away. It will be caught again if/when the
		 * FPU state is restored.
		 */
		KASSERT(ci->ci_fpsaving == 0);
		clts();
		ci->ci_fpsaving = 1;
		if (i386_use_fxsave) {
			fxsave(&pcb->pcb_savefpu.sv_xmm);
		} else {
			fnsave(&pcb->pcb_savefpu.sv_87);
		}
		ci->ci_fpsaving = 0;
	}

	/* Set TS and mark this CPU as owning no FPU context. */
	stts();
	pcb->pcb_fpcpu = NULL;
	ci->ci_fpcurlwp = NULL;
	ci->ci_fpused = 1;
}
691 691
/*
 * Save l's FPU state, which may be on this processor or another processor.
 * It may take some time, so we avoid disabling preemption where possible.
 * Caller must know that the target LWP is stopped, otherwise this routine
 * may race against it.
 *
 * If 'save' is false the state is discarded and MDL_USEDFPU is cleared,
 * so the lwp restarts with a freshly initialized FPU on next use.
 */
void
npxsave_lwp(struct lwp *l, bool save)
{
	struct cpu_info *oci;
	struct pcb *pcb;
	int s, spins, ticks;

	spins = 0;
	ticks = hardclock_ticks;
	for (;;) {
		s = splhigh();
		pcb = lwp_getpcb(l);
		oci = pcb->pcb_fpcpu;
		if (oci == NULL) {
			/* Not resident on any CPU: nothing to do. */
			splx(s);
			break;
		}
		if (oci == curcpu()) {
			/* Resident here: save directly. */
			KASSERT(oci->ci_fpcurlwp == l);
			npxsave_cpu(save);
			splx(s);
			break;
		}
		/*
		 * Resident on a remote CPU: ask it to push the state
		 * out via IPI, then spin until it has.  The hardclock
		 * tick check bounds each wait; a new tick restarts the
		 * loop (and re-sends the IPI).
		 */
		splx(s);
		x86_send_ipi(oci, X86_IPI_SYNCH_FPU);
		while (pcb->pcb_fpcpu == oci &&
		    ticks == hardclock_ticks) {
			x86_pause();
			spins++;
		}
		if (spins > 100000000) {
			panic("npxsave_lwp: did not");
		}
	}

	if (!save) {
		/* Ensure we restart with a clean slate. */
		l->l_md.md_flags &= ~MDL_USEDFPU;
	}
}
738 738
/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set. While this is a sign
 * of bad coding, we have no choice than to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries. Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code. The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked. 2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore. 3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
 * 2  QNaN operand (not an exception, irrelevant here)
 * 3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 * 4  Denormal operand (FP_X_DNML)
 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 * 6  Inexact result (FP_X_IMP)
 */
static const uint8_t fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};
909 909
/*
 * Fetch the saved FPU control/status word from a pcb, regardless of
 * whether the state was saved with fxsave (sv_xmm) or fnsave (sv_87).
 */
#define GET_FPU_CW(pcb) \
    (i386_use_fxsave ? \
	pcb->pcb_savefpu.sv_xmm.sv_env.en_cw : \
	pcb->pcb_savefpu.sv_87.sv_env.en_cw)
#define GET_FPU_SW(pcb) \
    (i386_use_fxsave ? \
	pcb->pcb_savefpu.sv_xmm.sv_env.en_sw : \
	pcb->pcb_savefpu.sv_87.sv_env.en_sw)
918 918
/*
 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
 *
 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
 * depend on longjmp() restoring a usable state.  Restoring the state
 * or examining it might fail if we didn't clear exceptions.
 *
 * The error code chosen will be one of the FPE_... macros. It will be
 * sent as the second argument to old BSD-style signal handlers and as
 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
 *
 * XXX the FP state is not preserved across signal handlers. So signal
 * handlers cannot afford to do FP unless they preserve the state or
 * longjmp() out. Both preserving the state and longjmp()ing may be
 * destroyed by IRQ13 bugs. Clearing FP exceptions is not an acceptable
 * solution for signals other than SIGFPE.
 */
int
npxtrap(struct lwp *l)
{
	u_short control, status;
	struct cpu_info *ci = curcpu();
	struct lwp *fl = ci->ci_fpcurlwp;

	if (!i386_fpu_present) {
		/* FP trap without an FPU: something is badly wrong. */
		printf("%s: fpcurthread = %p, curthread = %p, npx_type = %d\n",
		    __func__, fl, l, npx_type);
		panic("npxtrap from nowhere");
	}
	kpreempt_disable();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory. Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (fl != l) {
		/* State was spilled to the pcb; read it from there. */
		struct pcb *pcb = lwp_getpcb(l);
		control = GET_FPU_CW(pcb);
		status = GET_FPU_SW(pcb);
	} else {
		/* State is live in the FPU; read it from the hardware. */
		fnstcw(&control);
		fnstsw(&status);
	}

	/* Clear pending exceptions so the FPU is usable afterwards. */
	if (fl == l)
		fnclex();
	kpreempt_enable();
	/*
	 * Index fpetable with the unmasked exception bits plus the
	 * stack-fault bit (0x40), which cannot be masked.
	 */
	return fpetable[status & ((~control & 0x3f) | 0x40)];
}

cvs diff -r1.11 -r1.12 src/sys/arch/xen/include/i386/Attic/hypercalls.h (switch to unified diff)

--- src/sys/arch/xen/include/i386/Attic/hypercalls.h 2011/03/30 22:57:24 1.11
+++ src/sys/arch/xen/include/i386/Attic/hypercalls.h 2011/06/07 14:53:03 1.12
@@ -1,549 +1,535 @@ @@ -1,549 +1,535 @@
1/* $NetBSD: hypercalls.h,v 1.11 2011/03/30 22:57:24 jym Exp $ */ 1/* $NetBSD: hypercalls.h,v 1.12 2011/06/07 14:53:03 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28/* 28/*
29 *  29 *
30 * Communication to/from hypervisor. 30 * Communication to/from hypervisor.
31 *  31 *
32 * Copyright (c) 2002-2004, K A Fraser 32 * Copyright (c) 2002-2004, K A Fraser
33 *  33 *
34 * Permission is hereby granted, free of charge, to any person obtaining a copy 34 * Permission is hereby granted, free of charge, to any person obtaining a copy
35 * of this source file (the "Software"), to deal in the Software without 35 * of this source file (the "Software"), to deal in the Software without
36 * restriction, including without limitation the rights to use, copy, modify, 36 * restriction, including without limitation the rights to use, copy, modify,
37 * merge, publish, distribute, sublicense, and/or sell copies of the Software, 37 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
38 * and to permit persons to whom the Software is furnished to do so, subject to 38 * and to permit persons to whom the Software is furnished to do so, subject to
39 * the following conditions: 39 * the following conditions:
40 *  40 *
41 * The above copyright notice and this permission notice shall be included in 41 * The above copyright notice and this permission notice shall be included in
42 * all copies or substantial portions of the Software. 42 * all copies or substantial portions of the Software.
43 *  43 *
44 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 44 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 45 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
46 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 46 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
47 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 47 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
48 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 48 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
49 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 49 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
50 * IN THE SOFTWARE. 50 * IN THE SOFTWARE.
51 */ 51 */
52 52
53 53
54#ifndef _XENI386_HYPERVISOR_H_ 54#ifndef _XENI386_HYPERVISOR_H_
55#define _XENI386_HYPERVISOR_H_ 55#define _XENI386_HYPERVISOR_H_
56/* 56/*
57 * Assembler stubs for hyper-calls. 57 * Assembler stubs for hyper-calls.
58 */ 58 */
59 59
60#include <machine/pte.h> /* pt_entry_t */ 60#include <machine/pte.h> /* pt_entry_t */
61 61
#if !defined(XEN_COMPAT_030001)
/*
 * Hypercall via the hypercall call page: entry for hypercall 'name' is
 * at hypercall_page + name * 32.  _str() stringifies the hypercall
 * number so it can be pasted into the asm template.
 */
#define __str(x) #x
#define _str(x) __str(x)
#define _hypercall(name, input_const, output_const) \
	__asm volatile ( \
		"call hypercall_page + ("_str(name)" * 32)" \
		: output_const \
		: input_const \
		: "memory" )
#else
/*
 * Traditional hypercall via int 0x82: the hypercall number is tied to
 * output operand 0 (%eax) via the "0" (name) input constraint.
 */
#define _hypercall(name, input_const, output_const) \
	__asm volatile ( \
		TRAP_INSTR \
		: output_const \
		: "0" (name), input_const \
		: "memory" )
#endif

/* Pass a parenthesized constraint list through as macro arguments. */
#define _harg(...) __VA_ARGS__
83  83
84 84
/* Install the guest's virtual trap/exception table. */
static __inline int
HYPERVISOR_set_trap_table(trap_info_t *table)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_set_trap_table, _harg("1" (table)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
96 96
/* Point the guest's GDT at 'entries' descriptors in the given frames. */
static __inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_set_gdt, _harg("1" (frame_list), "2" (entries)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
108 108
/* Set the kernel stack (ss:esp) used on entry from guest user mode. */
static __inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_stack_switch, _harg("1" (ss), "2" (esp)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
120 120
/* Register the event-delivery and failsafe callback entry points. */
static __inline int
HYPERVISOR_set_callbacks(
    unsigned long event_selector, unsigned long event_address,
    unsigned long failsafe_selector, unsigned long failsafe_address)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4;	/* clobbered arg registers */

	_hypercall(__HYPERVISOR_set_callbacks,
	    _harg("1" (event_selector),"2" (event_address),
	    "3" (failsafe_selector), "4" (failsafe_address)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)));

	return ret;
}
136 136
#if __XEN_INTERFACE_VERSION__ < 0x00030204
/*
 * Legacy dom0 (privileged) operation; superseded by platform_op in
 * newer interface versions, hence the version guard.
 */
static __inline int
HYPERVISOR_dom0_op(dom0_op_t *dom0_op)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	/* Stamp the interface version the hypervisor will validate. */
	dom0_op->interface_version = DOM0_INTERFACE_VERSION;
	_hypercall(__HYPERVISOR_dom0_op, _harg("1" (dom0_op)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
#endif /* __XEN_INTERFACE_VERSION__ */
151 151
/* Set virtual debug register 'reg' to 'value'. */
static __inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_set_debugreg, _harg("1" (reg), "2" (value)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
163 163
/* Read virtual debug register 'reg'; returns its current value. */
static __inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	unsigned long ret;
	unsigned long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_get_debugreg, _harg("1" (reg)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
175 175
/* Machine-check architecture (MCA) operation. */
static __inline int
HYPERVISOR_machine_check(struct xen_mc *mc)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	/* Stamp the interface version the hypervisor will validate. */
	mc->interface_version = XEN_MCA_INTERFACE_VERSION;
	_hypercall(__HYPERVISOR_mca, _harg("1" (mc)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
188 188
/* HVM-specific operation 'cmd' with command-dependent argument. */
static __inline int
HYPERVISOR_hvm_op(int cmd, void *arg)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_hvm_op, _harg("1" (cmd), "2" (arg)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
200 200
/*
 * Submit a batch of 'count' page-table updates for domain 'domid'.
 * On return *success_count holds how many were applied (if non-NULL).
 */
static __inline int
HYPERVISOR_mmu_update(mmu_update_t *req, int count, int *success_count,
    domid_t domid)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4;	/* clobbered arg registers */

	_hypercall(__HYPERVISOR_mmu_update,
	    _harg("1" (req), "2" (count), "3" (success_count), "4" (domid)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)));

	return ret;
}
214 214
/*
 * Submit a batch of 'count' extended MMU operations (TLB flushes,
 * pin/unpin, etc.) for domain 'domid'.
 */
static __inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count, int *success_count,
    domid_t domid)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4;	/* clobbered arg registers */

	_hypercall(__HYPERVISOR_mmuext_op,
	    _harg("1" (op), "2" (count), "3" (success_count), "4" (domid)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)));

	return ret;
}
228 228
/*
 * Set (set != 0) or clear CR0.TS for this vCPU, controlling lazy FPU
 * context switching.  (The old Xen2-compat variant that always passed
 * 1 was removed along with Xen2 support.)
 */
static __inline int
HYPERVISOR_fpu_taskswitch(int set)
{
	long ret;
	long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_fpu_taskswitch, _harg("1" (set)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
254 240
/*
 * Update the descriptor at machine address 'ma' with the two 32-bit
 * words of a segment descriptor.  The 64-bit address is split into
 * low/high halves for the 32-bit calling convention.
 */
static __inline int
HYPERVISOR_update_descriptor(uint64_t ma, uint32_t word1, uint32_t word2)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4;	/* clobbered arg registers */
	int ma1 = ma & 0xffffffff;
	int ma2 = (ma >> 32) & 0xffffffff;

	_hypercall(__HYPERVISOR_update_descriptor,
	    _harg("1" (ma1), "2" (ma2), "3" (word1), "4" (word2)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)));

	return ret;
}
269 255
/* Memory reservation/exchange operation 'cmd'. */
static __inline int
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_memory_op, _harg("1" (cmd), "2" (arg)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
281 267
/*
 * Update the PTE mapping 'page_nr' to 'new_val' with 'flags'
 * (e.g. TLB flush options).  The PTE is split into 32-bit halves;
 * the high half is only meaningful with PAE.
 */
static __inline int
HYPERVISOR_update_va_mapping(unsigned long page_nr, pt_entry_t new_val,
    unsigned long flags)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4;	/* clobbered arg registers */
	unsigned long pte_low, pte_hi;

	pte_low = new_val & 0xffffffff;
#ifdef PAE
	pte_hi = new_val >> 32;
#else
	pte_hi = 0;
#endif

	_hypercall(__HYPERVISOR_update_va_mapping,
	    _harg("1" (page_nr), "2" (pte_low), "3" (pte_hi), "4" (flags)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)));

#ifdef notdef
	if (__predict_false(ret < 0))
		panic("Failed update VA mapping: %08lx, %08lx, %08lx",
		    page_nr, new_val, flags);
#endif

	return ret;
}
309 295
/* Query hypervisor version/capability information. */
static __inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_xen_version, _harg("1" (cmd), "2" (arg)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
321 307
/* Grant-table operation 'cmd' on 'count' entries at 'uop'. */
static __inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	int ret;
	unsigned long ign1, ign2, ign3;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_grant_table_op,
	    _harg("1" (cmd), "2" (uop), "3" (count)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)));

	return ret;
}
334 320
/*
 * As HYPERVISOR_update_va_mapping(), but applied on behalf of foreign
 * domain 'domid' (privileged domains only).
 */
static __inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long page_nr,
    pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	int ret;
	unsigned long ign1, ign2, ign3, ign4, ign5; /* clobbered arg regs */
	unsigned long pte_low, pte_hi;

	pte_low = new_val & 0xffffffff;
#ifdef PAE
	pte_hi = new_val >> 32;
#else
	pte_hi = 0;
#endif

	_hypercall(__HYPERVISOR_update_va_mapping_otherdomain,
	    _harg("1" (page_nr), "2" (pte_low), "3" (pte_hi), "4" (flags), "5" (domid)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
	    "=D" (ign5)));

	return ret;
}
357 343
/* Per-vCPU operation 'cmd' on virtual CPU 'vcpuid'. */
static __inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	long ret;
	unsigned long ign1, ign2, ign3;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_vcpu_op,
	    _harg("1" (cmd), "2" (vcpuid), "3" (extra_args)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)));

	return ret;
}
370 356
/* Voluntarily yield the CPU to the hypervisor scheduler. */
static __inline long
HYPERVISOR_yield(void)
{
	long ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op, _harg("1" (SCHEDOP_yield), "2" (0)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
382 368
/* Block this vCPU until an event is pending. */
static __inline long
HYPERVISOR_block(void)
{
	long ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op, _harg("1" (SCHEDOP_block), "2" (0)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
394 380
/* Shut the domain down with reason "poweroff". */
static __inline long
HYPERVISOR_shutdown(void)
{
	long ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op,
	    _harg("1" (SCHEDOP_shutdown), "2" (SHUTDOWN_poweroff)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
407 393
/* Shut the domain down with reason "crash" (for post-mortem tools). */
static __inline long
HYPERVISOR_crash(void)
{
	long ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op,
	    _harg("1" (SCHEDOP_shutdown), "2" (SHUTDOWN_crash)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
420 406
/* Shut the domain down with reason "reboot". */
static __inline long
HYPERVISOR_reboot(void)
{
	long ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op,
	    _harg("1" (SCHEDOP_shutdown), "2" (SHUTDOWN_reboot)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
433 419
/*
 * Suspend the domain (for save/migrate); 'srec' identifies the suspend
 * record the hypervisor/tools use to resume.
 */
static __inline long
HYPERVISOR_suspend(unsigned long srec)
{
	long ret;
	unsigned long ign1, ign2, ign3;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_sched_op,
	    _harg("1" (SCHEDOP_shutdown), "2" (SHUTDOWN_suspend), "3" (srec)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)));

	return ret;
}
446 432
/*
 * Arm the one-shot vCPU timer to fire at absolute time 'timeout'
 * (64-bit value split into two 32-bit registers).
 */
static __inline long
HYPERVISOR_set_timer_op(uint64_t timeout)
{
	long ret;
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_set_timer_op,
	    _harg("1" (timeout_lo), "2" (timeout_hi)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
461 447
/* Privileged platform operation (successor of dom0_op). */
static __inline int
HYPERVISOR_platform_op(struct xen_platform_op *platform_op)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	/* Stamp the interface version the hypervisor will validate. */
	platform_op->interface_version = XENPF_INTERFACE_VERSION;
	_hypercall(__HYPERVISOR_platform_op, _harg("1" (platform_op)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
474 460
/* Execute a batch of 'nr_calls' hypercalls in one trap. */
static __inline int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_multicall,
	    _harg("1" (call_list), "2" (nr_calls)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
487 473
488 474
/* Event-channel operation (bind/unbind/send/etc., selected by *op). */
static __inline int
HYPERVISOR_event_channel_op(void *op)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_event_channel_op, _harg("1" (op)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
500 486
/* Console I/O: read or write 'count' bytes at 'str' per 'cmd'. */
static __inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	int ret;
	unsigned long ign1, ign2, ign3;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_console_io,
	    _harg("1" (cmd), "2" (count), "3" (str)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)));

	return ret;
}
513 499
/* Physical device operation (IRQ/IOAPIC management, dom0 only). */
static __inline int
HYPERVISOR_physdev_op(void *physdev_op)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_physdev_op, _harg("1" (physdev_op)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
525 511
/* Enable/disable ('cmd') a VM assistance feature of the given 'type'. */
static __inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	int ret;
	unsigned long ign1, ign2;	/* clobbered argument registers */

	_hypercall(__HYPERVISOR_vm_assist, _harg("1" (cmd), "2" (type)),
	    _harg("=a" (ret), "=b" (ign1), "=c" (ign2)));

	return ret;
}
537 523
/* Hypervisor sysctl (privileged management/statistics interface). */
static __inline int
HYPERVISOR_sysctl(void *sysctl)
{
	int ret;
	unsigned long ign1;	/* clobbered argument register */

	_hypercall(__HYPERVISOR_sysctl, _harg("1" (sysctl)),
	    _harg("=a" (ret), "=b" (ign1)));

	return ret;
}
549#endif /* _XENI386_HYPERVISOR_H_ */ 535#endif /* _XENI386_HYPERVISOR_H_ */