Fri Dec 6 08:35:21 2019 UTC
Fix a bunch of unimportant "Local variable hides global variable" warnings
from the LGTM bot.


(maxv)
diff -r1.341 -r1.342 src/sys/arch/amd64/amd64/machdep.c
diff -r1.2 -r1.3 src/sys/compat/common/compat_sysctl_09_43.c
diff -r1.41 -r1.42 src/sys/kern/tty_subr.c
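
The warning fires when a function-local identifier reuses the name of a
file-scope symbol, so the local silently hides the global within its scope.
Below is a minimal, hypothetical sketch of the pattern and the rename-style
fix (the identifiers are illustrative only, not taken from the tree); the
real change of this kind is visible in dump_seg_iter() further down, where
the locals start/end become sp_start/sp_end:

	typedef unsigned long paddr_t;

	paddr_t end;			/* file-scope symbol, e.g. end of managed RAM */

	unsigned long
	count_pages(paddr_t start, paddr_t size)
	{
		paddr_t end = start + size;	/* local "end" hides the global: LGTM warns */

		return (end - start) / 4096;
	}

	unsigned long
	count_pages_fixed(paddr_t start, paddr_t size)
	{
		paddr_t seg_end = start + size;	/* renamed local: no shadowing, same code */

		return (seg_end - start) / 4096;
	}

Such renames are purely mechanical: the generated code is identical, only the
ambiguity for readers and static analyzers goes away.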

cvs diff -r1.341 -r1.342 src/sys/arch/amd64/amd64/machdep.c

--- src/sys/arch/amd64/amd64/machdep.c 2019/11/14 17:09:23 1.341
+++ src/sys/arch/amd64/amd64/machdep.c 2019/12/06 08:35:21 1.342
@@ -1,1932 +1,1932 @@ @@ -1,1932 +1,1932 @@
1/* $NetBSD: machdep.c,v 1.341 2019/11/14 17:09:23 maxv Exp $ */ 1/* $NetBSD: machdep.c,v 1.342 2019/12/06 08:35:21 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011 4 * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace 9 * by Charles M. Hannum and by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center. 10 * Simulation Facility, NASA Ames Research Center.
11 * 11 *
12 * This code is derived from software contributed to The NetBSD Foundation 12 * This code is derived from software contributed to The NetBSD Foundation
13 * by Coyote Point Systems, Inc. which was written under contract to Coyote 13 * by Coyote Point Systems, Inc. which was written under contract to Coyote
14 * Point by Jed Davis and Devon O'Dell. 14 * Point by Jed Davis and Devon O'Dell.
15 * 15 *
16 * Redistribution and use in source and binary forms, with or without 16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions 17 * modification, are permitted provided that the following conditions
18 * are met: 18 * are met:
19 * 1. Redistributions of source code must retain the above copyright 19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer. 20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright 21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the 22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution. 23 * documentation and/or other materials provided with the distribution.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr> 39 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
40 * 40 *
41 * Permission to use, copy, modify, and distribute this software for any 41 * Permission to use, copy, modify, and distribute this software for any
42 * purpose with or without fee is hereby granted, provided that the above 42 * purpose with or without fee is hereby granted, provided that the above
43 * copyright notice and this permission notice appear in all copies. 43 * copyright notice and this permission notice appear in all copies.
44 * 44 *
45 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
46 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
47 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
48 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
49 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
50 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
51 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
52 */ 52 */
53 53
54/* 54/*
55 * Copyright (c) 2007 Manuel Bouyer. 55 * Copyright (c) 2007 Manuel Bouyer.
56 * 56 *
57 * Redistribution and use in source and binary forms, with or without 57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions 58 * modification, are permitted provided that the following conditions
59 * are met: 59 * are met:
60 * 1. Redistributions of source code must retain the above copyright 60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer. 61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright 62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the 63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution. 64 * documentation and/or other materials provided with the distribution.
65 * 65 *
66 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 66 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
67 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 67 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
68 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 68 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
69 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 69 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
70 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 70 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
71 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 71 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 72 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 73 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 74 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
75 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 75 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
76 */ 76 */
77 77
78/* 78/*
79 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 79 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
80 * All rights reserved. 80 * All rights reserved.
81 * 81 *
82 * This code is derived from software contributed to Berkeley by 82 * This code is derived from software contributed to Berkeley by
83 * William Jolitz. 83 * William Jolitz.
84 * 84 *
85 * Redistribution and use in source and binary forms, with or without 85 * Redistribution and use in source and binary forms, with or without
86 * modification, are permitted provided that the following conditions 86 * modification, are permitted provided that the following conditions
87 * are met: 87 * are met:
88 * 1. Redistributions of source code must retain the above copyright 88 * 1. Redistributions of source code must retain the above copyright
89 * notice, this list of conditions and the following disclaimer. 89 * notice, this list of conditions and the following disclaimer.
90 * 2. Redistributions in binary form must reproduce the above copyright 90 * 2. Redistributions in binary form must reproduce the above copyright
91 * notice, this list of conditions and the following disclaimer in the 91 * notice, this list of conditions and the following disclaimer in the
92 * documentation and/or other materials provided with the distribution. 92 * documentation and/or other materials provided with the distribution.
93 * 3. Neither the name of the University nor the names of its contributors 93 * 3. Neither the name of the University nor the names of its contributors
94 * may be used to endorse or promote products derived from this software 94 * may be used to endorse or promote products derived from this software
95 * without specific prior written permission. 95 * without specific prior written permission.
96 * 96 *
97 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 97 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
98 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 98 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
99 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 99 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
100 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 100 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
101 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 101 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
102 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 102 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
103 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 103 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
104 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 104 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
105 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 105 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
106 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 106 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
107 * SUCH DAMAGE. 107 * SUCH DAMAGE.
108 * 108 *
109 * @(#)machdep.c 7.4 (Berkeley) 6/3/91 109 * @(#)machdep.c 7.4 (Berkeley) 6/3/91
110 */ 110 */
111 111
112#include <sys/cdefs.h> 112#include <sys/cdefs.h>
113__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.341 2019/11/14 17:09:23 maxv Exp $"); 113__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.342 2019/12/06 08:35:21 maxv Exp $");
114 114
115#include "opt_modular.h" 115#include "opt_modular.h"
116#include "opt_user_ldt.h" 116#include "opt_user_ldt.h"
117#include "opt_ddb.h" 117#include "opt_ddb.h"
118#include "opt_kgdb.h" 118#include "opt_kgdb.h"
119#include "opt_cpureset_delay.h" 119#include "opt_cpureset_delay.h"
120#include "opt_mtrr.h" 120#include "opt_mtrr.h"
121#include "opt_realmem.h" 121#include "opt_realmem.h"
122#include "opt_xen.h" 122#include "opt_xen.h"
123#include "opt_svs.h" 123#include "opt_svs.h"
124#include "opt_kaslr.h" 124#include "opt_kaslr.h"
125#ifndef XENPV 125#ifndef XENPV
126#include "opt_physmem.h" 126#include "opt_physmem.h"
127#endif 127#endif
128#include "isa.h" 128#include "isa.h"
129#include "pci.h" 129#include "pci.h"
130 130
131#include <sys/param.h> 131#include <sys/param.h>
132#include <sys/systm.h> 132#include <sys/systm.h>
133#include <sys/signal.h> 133#include <sys/signal.h>
134#include <sys/signalvar.h> 134#include <sys/signalvar.h>
135#include <sys/kernel.h> 135#include <sys/kernel.h>
136#include <sys/cpu.h> 136#include <sys/cpu.h>
137#include <sys/exec.h> 137#include <sys/exec.h>
138#include <sys/exec_aout.h> /* for MID_* */ 138#include <sys/exec_aout.h> /* for MID_* */
139#include <sys/reboot.h> 139#include <sys/reboot.h>
140#include <sys/conf.h> 140#include <sys/conf.h>
141#include <sys/msgbuf.h> 141#include <sys/msgbuf.h>
142#include <sys/mount.h> 142#include <sys/mount.h>
143#include <sys/core.h> 143#include <sys/core.h>
144#include <sys/kcore.h> 144#include <sys/kcore.h>
145#include <sys/ucontext.h> 145#include <sys/ucontext.h>
146#include <machine/kcore.h> 146#include <machine/kcore.h>
147#include <sys/ras.h> 147#include <sys/ras.h>
148#include <sys/syscallargs.h> 148#include <sys/syscallargs.h>
149#include <sys/ksyms.h> 149#include <sys/ksyms.h>
150#include <sys/device.h> 150#include <sys/device.h>
151#include <sys/lwp.h> 151#include <sys/lwp.h>
152#include <sys/proc.h> 152#include <sys/proc.h>
153#include <sys/asan.h> 153#include <sys/asan.h>
154#include <sys/csan.h> 154#include <sys/csan.h>
155#include <sys/msan.h> 155#include <sys/msan.h>
156 156
157#ifdef KGDB 157#ifdef KGDB
158#include <sys/kgdb.h> 158#include <sys/kgdb.h>
159#endif 159#endif
160 160
161#include <dev/cons.h> 161#include <dev/cons.h>
162#include <dev/mm.h> 162#include <dev/mm.h>
163 163
164#include <uvm/uvm.h> 164#include <uvm/uvm.h>
165#include <uvm/uvm_page.h> 165#include <uvm/uvm_page.h>
166 166
167#include <sys/sysctl.h> 167#include <sys/sysctl.h>
168 168
169#include <machine/cpu.h> 169#include <machine/cpu.h>
170#include <machine/cpufunc.h> 170#include <machine/cpufunc.h>
171#include <machine/gdt.h> 171#include <machine/gdt.h>
172#include <machine/intr.h> 172#include <machine/intr.h>
173#include <machine/pio.h> 173#include <machine/pio.h>
174#include <machine/psl.h> 174#include <machine/psl.h>
175#include <machine/reg.h> 175#include <machine/reg.h>
176#include <machine/specialreg.h> 176#include <machine/specialreg.h>
177#include <machine/bootinfo.h> 177#include <machine/bootinfo.h>
178#include <x86/fpu.h> 178#include <x86/fpu.h>
179#include <x86/dbregs.h> 179#include <x86/dbregs.h>
180#include <machine/mtrr.h> 180#include <machine/mtrr.h>
181#include <machine/mpbiosvar.h> 181#include <machine/mpbiosvar.h>
182 182
183#include <x86/cputypes.h> 183#include <x86/cputypes.h>
184#include <x86/cpuvar.h> 184#include <x86/cpuvar.h>
185#include <x86/machdep.h> 185#include <x86/machdep.h>
186 186
187#include <x86/x86/tsc.h> 187#include <x86/x86/tsc.h>
188 188
189#include <dev/isa/isareg.h> 189#include <dev/isa/isareg.h>
190#include <machine/isa_machdep.h> 190#include <machine/isa_machdep.h>
191#include <dev/ic/i8042reg.h> 191#include <dev/ic/i8042reg.h>
192 192
193#ifdef XEN 193#ifdef XEN
194#include <xen/xen.h> 194#include <xen/xen.h>
195#include <xen/hypervisor.h> 195#include <xen/hypervisor.h>
196#include <xen/evtchn.h> 196#include <xen/evtchn.h>
197#include <xen/include/public/version.h> 197#include <xen/include/public/version.h>
198#include <xen/include/public/vcpu.h> 198#include <xen/include/public/vcpu.h>
199#endif /* XEN */ 199#endif /* XEN */
200 200
201#ifdef DDB 201#ifdef DDB
202#include <machine/db_machdep.h> 202#include <machine/db_machdep.h>
203#include <ddb/db_extern.h> 203#include <ddb/db_extern.h>
204#include <ddb/db_output.h> 204#include <ddb/db_output.h>
205#include <ddb/db_interface.h> 205#include <ddb/db_interface.h>
206#endif 206#endif
207 207
208#include "acpica.h" 208#include "acpica.h"
209 209
210#if NACPICA > 0 210#if NACPICA > 0
211#include <dev/acpi/acpivar.h> 211#include <dev/acpi/acpivar.h>
212#define ACPI_MACHDEP_PRIVATE 212#define ACPI_MACHDEP_PRIVATE
213#include <machine/acpi_machdep.h> 213#include <machine/acpi_machdep.h>
214#else 214#else
215#include <machine/i82489var.h> 215#include <machine/i82489var.h>
216#endif 216#endif
217 217
218#include "isa.h" 218#include "isa.h"
219#include "isadma.h" 219#include "isadma.h"
220#include "ksyms.h" 220#include "ksyms.h"
221 221
222/* the following is used externally (sysctl_hw) */ 222/* the following is used externally (sysctl_hw) */
223char machine[] = "amd64"; /* CPU "architecture" */ 223char machine[] = "amd64"; /* CPU "architecture" */
224char machine_arch[] = "x86_64"; /* machine == machine_arch */ 224char machine_arch[] = "x86_64"; /* machine == machine_arch */
225 225
226#ifdef CPURESET_DELAY 226#ifdef CPURESET_DELAY
227int cpureset_delay = CPURESET_DELAY; 227int cpureset_delay = CPURESET_DELAY;
228#else 228#else
229int cpureset_delay = 2000; /* default to 2s */ 229int cpureset_delay = 2000; /* default to 2s */
230#endif 230#endif
231 231
232int cpu_class = CPUCLASS_686; 232int cpu_class = CPUCLASS_686;
233 233
234#ifdef MTRR 234#ifdef MTRR
235struct mtrr_funcs *mtrr_funcs; 235struct mtrr_funcs *mtrr_funcs;
236#endif 236#endif
237 237
238int cpu_class; 238int cpu_class;
239int use_pae; 239int use_pae;
240 240
241#ifndef NO_SPARSE_DUMP 241#ifndef NO_SPARSE_DUMP
242int sparse_dump = 1; 242int sparse_dump = 1;
243 243
244paddr_t max_paddr = 0; 244paddr_t max_paddr = 0;
245unsigned char *sparse_dump_physmap; 245unsigned char *sparse_dump_physmap;
246#endif 246#endif
247 247
248char *dump_headerbuf, *dump_headerbuf_ptr; 248char *dump_headerbuf, *dump_headerbuf_ptr;
249#define dump_headerbuf_size PAGE_SIZE 249#define dump_headerbuf_size PAGE_SIZE
250#define dump_headerbuf_end (dump_headerbuf + dump_headerbuf_size) 250#define dump_headerbuf_end (dump_headerbuf + dump_headerbuf_size)
251#define dump_headerbuf_avail (dump_headerbuf_end - dump_headerbuf_ptr) 251#define dump_headerbuf_avail (dump_headerbuf_end - dump_headerbuf_ptr)
252daddr_t dump_header_blkno; 252daddr_t dump_header_blkno;
253 253
254size_t dump_nmemsegs; 254size_t dump_nmemsegs;
255size_t dump_npages; 255size_t dump_npages;
256size_t dump_header_size; 256size_t dump_header_size;
257size_t dump_totalbytesleft; 257size_t dump_totalbytesleft;
258 258
259vaddr_t idt_vaddr; 259vaddr_t idt_vaddr;
260paddr_t idt_paddr; 260paddr_t idt_paddr;
261vaddr_t gdt_vaddr; 261vaddr_t gdt_vaddr;
262paddr_t gdt_paddr; 262paddr_t gdt_paddr;
263vaddr_t ldt_vaddr; 263vaddr_t ldt_vaddr;
264paddr_t ldt_paddr; 264paddr_t ldt_paddr;
265 265
266static struct vm_map module_map_store; 266static struct vm_map module_map_store;
267extern struct vm_map *module_map; 267extern struct vm_map *module_map;
268extern struct bootspace bootspace; 268extern struct bootspace bootspace;
269extern struct slotspace slotspace; 269extern struct slotspace slotspace;
270 270
271vaddr_t vm_min_kernel_address __read_mostly = VM_MIN_KERNEL_ADDRESS_DEFAULT; 271vaddr_t vm_min_kernel_address __read_mostly = VM_MIN_KERNEL_ADDRESS_DEFAULT;
272vaddr_t vm_max_kernel_address __read_mostly = VM_MAX_KERNEL_ADDRESS_DEFAULT; 272vaddr_t vm_max_kernel_address __read_mostly = VM_MAX_KERNEL_ADDRESS_DEFAULT;
273pd_entry_t *pte_base __read_mostly; 273pd_entry_t *pte_base __read_mostly;
274 274
275struct vm_map *phys_map = NULL; 275struct vm_map *phys_map = NULL;
276 276
277extern paddr_t lowmem_rsvd; 277extern paddr_t lowmem_rsvd;
278extern paddr_t avail_start, avail_end; 278extern paddr_t avail_start, avail_end;
279#ifdef XENPV 279#ifdef XENPV
280extern paddr_t pmap_pa_start, pmap_pa_end; 280extern paddr_t pmap_pa_start, pmap_pa_end;
281#endif 281#endif
282 282
283#ifndef XENPV 283#ifndef XENPV
284void (*delay_func)(unsigned int) = i8254_delay; 284void (*delay_func)(unsigned int) = i8254_delay;
285void (*initclock_func)(void) = i8254_initclocks; 285void (*initclock_func)(void) = i8254_initclocks;
286#else /* XENPV */ 286#else /* XENPV */
287void (*delay_func)(unsigned int) = xen_delay; 287void (*delay_func)(unsigned int) = xen_delay;
288void (*initclock_func)(void) = xen_initclocks; 288void (*initclock_func)(void) = xen_initclocks;
289#endif 289#endif
290 290
291struct nmistore { 291struct nmistore {
292 uint64_t cr3; 292 uint64_t cr3;
293 uint64_t scratch; 293 uint64_t scratch;
294} __packed; 294} __packed;
295 295
296/* 296/*
297 * Size of memory segments, before any memory is stolen. 297 * Size of memory segments, before any memory is stolen.
298 */ 298 */
299phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; 299phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
300int mem_cluster_cnt; 300int mem_cluster_cnt;
301 301
302int cpu_dump(void); 302int cpu_dump(void);
303int cpu_dumpsize(void); 303int cpu_dumpsize(void);
304u_long cpu_dump_mempagecnt(void); 304u_long cpu_dump_mempagecnt(void);
305void dodumpsys(void); 305void dodumpsys(void);
306void dumpsys(void); 306void dumpsys(void);
307 307
308static void x86_64_proc0_pcb_ldt_init(void); 308static void x86_64_proc0_pcb_ldt_init(void);
309 309
310extern int time_adjusted; /* XXX no common header */ 310extern int time_adjusted; /* XXX no common header */
311 311
312void dump_misc_init(void); 312void dump_misc_init(void);
313void dump_seg_prep(void); 313void dump_seg_prep(void);
314int dump_seg_iter(int (*)(paddr_t, paddr_t)); 314int dump_seg_iter(int (*)(paddr_t, paddr_t));
315 315
316#ifndef NO_SPARSE_DUMP 316#ifndef NO_SPARSE_DUMP
317void sparse_dump_reset(void); 317void sparse_dump_reset(void);
318void sparse_dump_mark(void); 318void sparse_dump_mark(void);
319void cpu_dump_prep_sparse(void); 319void cpu_dump_prep_sparse(void);
320#endif 320#endif
321 321
322void dump_header_start(void); 322void dump_header_start(void);
323int dump_header_flush(void); 323int dump_header_flush(void);
324int dump_header_addbytes(const void*, size_t); 324int dump_header_addbytes(const void*, size_t);
325int dump_header_addseg(paddr_t, paddr_t); 325int dump_header_addseg(paddr_t, paddr_t);
326int dump_header_finish(void); 326int dump_header_finish(void);
327 327
328int dump_seg_count_range(paddr_t, paddr_t); 328int dump_seg_count_range(paddr_t, paddr_t);
329int dumpsys_seg(paddr_t, paddr_t); 329int dumpsys_seg(paddr_t, paddr_t);
330 330
331void init_bootspace(void); 331void init_bootspace(void);
332void init_slotspace(void); 332void init_slotspace(void);
333void init_x86_64(paddr_t); 333void init_x86_64(paddr_t);
334 334
335/* 335/*
336 * Machine-dependent startup code 336 * Machine-dependent startup code
337 */ 337 */
338void 338void
339cpu_startup(void) 339cpu_startup(void)
340{ 340{
341 int x, y; 341 int x, y;
342 vaddr_t minaddr, maxaddr; 342 vaddr_t minaddr, maxaddr;
343 psize_t sz; 343 psize_t sz;
344 344
345 /* 345 /*
346 * For console drivers that require uvm and pmap to be initialized, 346 * For console drivers that require uvm and pmap to be initialized,
347 * we'll give them one more chance here... 347 * we'll give them one more chance here...
348 */ 348 */
349 consinit(); 349 consinit();
350 350
351 /* 351 /*
352 * Initialize error message buffer (et end of core). 352 * Initialize error message buffer (et end of core).
353 */ 353 */
354 if (msgbuf_p_cnt == 0) 354 if (msgbuf_p_cnt == 0)
355 panic("msgbuf paddr map has not been set up"); 355 panic("msgbuf paddr map has not been set up");
356 for (x = 0, sz = 0; x < msgbuf_p_cnt; sz += msgbuf_p_seg[x++].sz) 356 for (x = 0, sz = 0; x < msgbuf_p_cnt; sz += msgbuf_p_seg[x++].sz)
357 continue; 357 continue;
358 358
359 msgbuf_vaddr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY); 359 msgbuf_vaddr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_VAONLY);
360 if (msgbuf_vaddr == 0) 360 if (msgbuf_vaddr == 0)
361 panic("failed to valloc msgbuf_vaddr"); 361 panic("failed to valloc msgbuf_vaddr");
362 362
363 for (y = 0, sz = 0; y < msgbuf_p_cnt; y++) { 363 for (y = 0, sz = 0; y < msgbuf_p_cnt; y++) {
364 for (x = 0; x < btoc(msgbuf_p_seg[y].sz); x++, sz += PAGE_SIZE) 364 for (x = 0; x < btoc(msgbuf_p_seg[y].sz); x++, sz += PAGE_SIZE)
365 pmap_kenter_pa((vaddr_t)msgbuf_vaddr + sz, 365 pmap_kenter_pa((vaddr_t)msgbuf_vaddr + sz,
366 msgbuf_p_seg[y].paddr + x * PAGE_SIZE, 366 msgbuf_p_seg[y].paddr + x * PAGE_SIZE,
367 VM_PROT_READ|VM_PROT_WRITE, 0); 367 VM_PROT_READ|VM_PROT_WRITE, 0);
368 } 368 }
369 369
370 pmap_update(pmap_kernel()); 370 pmap_update(pmap_kernel());
371 371
372 initmsgbuf((void *)msgbuf_vaddr, round_page(sz)); 372 initmsgbuf((void *)msgbuf_vaddr, round_page(sz));
373 373
374 minaddr = 0; 374 minaddr = 0;
375 375
376 /* 376 /*
377 * Allocate a submap for physio. 377 * Allocate a submap for physio.
378 */ 378 */
379 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 379 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
380 VM_PHYS_SIZE, 0, false, NULL); 380 VM_PHYS_SIZE, 0, false, NULL);
381 381
382 /* 382 /*
383 * Create the module map. 383 * Create the module map.
384 * 384 *
385 * The kernel uses RIP-relative addressing with a maximum offset of 385 * The kernel uses RIP-relative addressing with a maximum offset of
386 * 2GB. Because of that, we can't put the kernel modules in kernel_map 386 * 2GB. Because of that, we can't put the kernel modules in kernel_map
387 * (like i386 does), since kernel_map is too far away in memory from 387 * (like i386 does), since kernel_map is too far away in memory from
388 * the kernel sections. So we have to create a special module_map. 388 * the kernel sections. So we have to create a special module_map.
389 * 389 *
390 * The module map is taken as what is left of the bootstrap memory 390 * The module map is taken as what is left of the bootstrap memory
391 * created in locore/prekern. 391 * created in locore/prekern.
392 */ 392 */
393 uvm_map_setup(&module_map_store, bootspace.smodule, 393 uvm_map_setup(&module_map_store, bootspace.smodule,
394 bootspace.emodule, 0); 394 bootspace.emodule, 0);
395 module_map_store.pmap = pmap_kernel(); 395 module_map_store.pmap = pmap_kernel();
396 module_map = &module_map_store; 396 module_map = &module_map_store;
397 397
398 /* Say hello. */ 398 /* Say hello. */
399 banner(); 399 banner();
400 400
401#if NISA > 0 || NPCI > 0 401#if NISA > 0 || NPCI > 0
402 /* Safe for i/o port / memory space allocation to use malloc now. */ 402 /* Safe for i/o port / memory space allocation to use malloc now. */
403 x86_bus_space_mallocok(); 403 x86_bus_space_mallocok();
404#endif 404#endif
405 405
406#ifdef __HAVE_PCPU_AREA 406#ifdef __HAVE_PCPU_AREA
407 cpu_pcpuarea_init(&cpu_info_primary); 407 cpu_pcpuarea_init(&cpu_info_primary);
408#endif 408#endif
409 gdt_init(); 409 gdt_init();
410 x86_64_proc0_pcb_ldt_init(); 410 x86_64_proc0_pcb_ldt_init();
411 411
412 cpu_init_tss(&cpu_info_primary); 412 cpu_init_tss(&cpu_info_primary);
413#if !defined(XENPV) 413#if !defined(XENPV)
414 ltr(cpu_info_primary.ci_tss_sel); 414 ltr(cpu_info_primary.ci_tss_sel);
415#endif 415#endif
416 416
417 x86_startup(); 417 x86_startup();
418} 418}
419 419
420#ifdef XENPV 420#ifdef XENPV
421/* used in assembly */ 421/* used in assembly */
422void hypervisor_callback(void); 422void hypervisor_callback(void);
423void failsafe_callback(void); 423void failsafe_callback(void);
424void x86_64_switch_context(struct pcb *); 424void x86_64_switch_context(struct pcb *);
425void x86_64_tls_switch(struct lwp *); 425void x86_64_tls_switch(struct lwp *);
426 426
427void 427void
428x86_64_switch_context(struct pcb *new) 428x86_64_switch_context(struct pcb *new)
429{ 429{
430 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), new->pcb_rsp0); 430 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), new->pcb_rsp0);
431 struct physdev_op physop; 431 struct physdev_op physop;
432 physop.cmd = PHYSDEVOP_SET_IOPL; 432 physop.cmd = PHYSDEVOP_SET_IOPL;
433 physop.u.set_iopl.iopl = new->pcb_iopl; 433 physop.u.set_iopl.iopl = new->pcb_iopl;
434 HYPERVISOR_physdev_op(&physop); 434 HYPERVISOR_physdev_op(&physop);
435} 435}
436 436
437void 437void
438x86_64_tls_switch(struct lwp *l) 438x86_64_tls_switch(struct lwp *l)
439{ 439{
440 struct cpu_info *ci = curcpu(); 440 struct cpu_info *ci = curcpu();
441 struct pcb *pcb = lwp_getpcb(l); 441 struct pcb *pcb = lwp_getpcb(l);
442 struct trapframe *tf = l->l_md.md_regs; 442 struct trapframe *tf = l->l_md.md_regs;
443 uint64_t zero = 0; 443 uint64_t zero = 0;
444 444
445 /* 445 /*
446 * Raise the IPL to IPL_HIGH. XXX Still needed? 446 * Raise the IPL to IPL_HIGH. XXX Still needed?
447 */ 447 */
448 (void)splhigh(); 448 (void)splhigh();
449 449
450 /* Update segment registers */ 450 /* Update segment registers */
451 if (pcb->pcb_flags & PCB_COMPAT32) { 451 if (pcb->pcb_flags & PCB_COMPAT32) {
452 update_descriptor(&ci->ci_gdt[GUFS_SEL], &pcb->pcb_fs); 452 update_descriptor(&ci->ci_gdt[GUFS_SEL], &pcb->pcb_fs);
453 update_descriptor(&ci->ci_gdt[GUGS_SEL], &pcb->pcb_gs); 453 update_descriptor(&ci->ci_gdt[GUGS_SEL], &pcb->pcb_gs);
454 setds(GSEL(GUDATA32_SEL, SEL_UPL)); 454 setds(GSEL(GUDATA32_SEL, SEL_UPL));
455 setes(GSEL(GUDATA32_SEL, SEL_UPL)); 455 setes(GSEL(GUDATA32_SEL, SEL_UPL));
456 setfs(GSEL(GUDATA32_SEL, SEL_UPL)); 456 setfs(GSEL(GUDATA32_SEL, SEL_UPL));
457 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, tf->tf_gs); 457 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, tf->tf_gs);
458 } else { 458 } else {
459 update_descriptor(&ci->ci_gdt[GUFS_SEL], &zero); 459 update_descriptor(&ci->ci_gdt[GUFS_SEL], &zero);
460 update_descriptor(&ci->ci_gdt[GUGS_SEL], &zero); 460 update_descriptor(&ci->ci_gdt[GUGS_SEL], &zero);
461 setds(GSEL(GUDATA_SEL, SEL_UPL)); 461 setds(GSEL(GUDATA_SEL, SEL_UPL));
462 setes(GSEL(GUDATA_SEL, SEL_UPL)); 462 setes(GSEL(GUDATA_SEL, SEL_UPL));
463 setfs(0); 463 setfs(0);
464 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, 0); 464 HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, 0);
465 HYPERVISOR_set_segment_base(SEGBASE_FS, pcb->pcb_fs); 465 HYPERVISOR_set_segment_base(SEGBASE_FS, pcb->pcb_fs);
466 HYPERVISOR_set_segment_base(SEGBASE_GS_USER, pcb->pcb_gs); 466 HYPERVISOR_set_segment_base(SEGBASE_GS_USER, pcb->pcb_gs);
467 } 467 }
468} 468}
469#endif /* XENPV */ 469#endif /* XENPV */
470 470
471/* 471/*
472 * Set up proc0's PCB and LDT. 472 * Set up proc0's PCB and LDT.
473 */ 473 */
474static void 474static void
475x86_64_proc0_pcb_ldt_init(void) 475x86_64_proc0_pcb_ldt_init(void)
476{ 476{
477 struct lwp *l = &lwp0; 477 struct lwp *l = &lwp0;
478 struct pcb *pcb = lwp_getpcb(l); 478 struct pcb *pcb = lwp_getpcb(l);
479 479
480 pcb->pcb_flags = 0; 480 pcb->pcb_flags = 0;
481 pcb->pcb_fs = 0; 481 pcb->pcb_fs = 0;
482 pcb->pcb_gs = 0; 482 pcb->pcb_gs = 0;
483 pcb->pcb_rsp0 = (uvm_lwp_getuarea(l) + USPACE - 16) & ~0xf; 483 pcb->pcb_rsp0 = (uvm_lwp_getuarea(l) + USPACE - 16) & ~0xf;
484 pcb->pcb_iopl = IOPL_KPL; 484 pcb->pcb_iopl = IOPL_KPL;
485 pcb->pcb_dbregs = NULL; 485 pcb->pcb_dbregs = NULL;
486 pcb->pcb_cr0 = rcr0() & ~CR0_TS; 486 pcb->pcb_cr0 = rcr0() & ~CR0_TS;
487 l->l_md.md_regs = (struct trapframe *)pcb->pcb_rsp0 - 1; 487 l->l_md.md_regs = (struct trapframe *)pcb->pcb_rsp0 - 1;
488 488
489#if !defined(XENPV) 489#if !defined(XENPV)
490 lldt(GSYSSEL(GLDT_SEL, SEL_KPL)); 490 lldt(GSYSSEL(GLDT_SEL, SEL_KPL));
491#else 491#else
492 struct physdev_op physop; 492 struct physdev_op physop;
493 xen_set_ldt((vaddr_t)ldtstore, LDT_SIZE >> 3); 493 xen_set_ldt((vaddr_t)ldtstore, LDT_SIZE >> 3);
494 /* Reset TS bit and set kernel stack for interrupt handlers */ 494 /* Reset TS bit and set kernel stack for interrupt handlers */
495 HYPERVISOR_fpu_taskswitch(1); 495 HYPERVISOR_fpu_taskswitch(1);
496 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), pcb->pcb_rsp0); 496 HYPERVISOR_stack_switch(GSEL(GDATA_SEL, SEL_KPL), pcb->pcb_rsp0);
497 physop.cmd = PHYSDEVOP_SET_IOPL; 497 physop.cmd = PHYSDEVOP_SET_IOPL;
498 physop.u.set_iopl.iopl = pcb->pcb_iopl; 498 physop.u.set_iopl.iopl = pcb->pcb_iopl;
499 HYPERVISOR_physdev_op(&physop); 499 HYPERVISOR_physdev_op(&physop);
500#endif 500#endif
501} 501}
502 502
503/* 503/*
504 * Set up TSS and I/O bitmap. 504 * Set up TSS and I/O bitmap.
505 */ 505 */
506void 506void
507cpu_init_tss(struct cpu_info *ci) 507cpu_init_tss(struct cpu_info *ci)
508{ 508{
509#ifdef __HAVE_PCPU_AREA 509#ifdef __HAVE_PCPU_AREA
510 const cpuid_t cid = cpu_index(ci); 510 const cpuid_t cid = cpu_index(ci);
511#endif 511#endif
512 struct cpu_tss *cputss; 512 struct cpu_tss *cputss;
513 struct nmistore *store; 513 struct nmistore *store;
514 uintptr_t p; 514 uintptr_t p;
515 515
516#ifdef __HAVE_PCPU_AREA 516#ifdef __HAVE_PCPU_AREA
517 cputss = (struct cpu_tss *)&pcpuarea->ent[cid].tss; 517 cputss = (struct cpu_tss *)&pcpuarea->ent[cid].tss;
518#else 518#else
519 cputss = (struct cpu_tss *)uvm_km_alloc(kernel_map, 519 cputss = (struct cpu_tss *)uvm_km_alloc(kernel_map,
520 sizeof(struct cpu_tss), 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 520 sizeof(struct cpu_tss), 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
521#endif 521#endif
522 522
523 cputss->tss.tss_iobase = IOMAP_INVALOFF << 16; 523 cputss->tss.tss_iobase = IOMAP_INVALOFF << 16;
524 524
525 /* DDB stack */ 525 /* DDB stack */
526#ifdef __HAVE_PCPU_AREA 526#ifdef __HAVE_PCPU_AREA
527 p = (vaddr_t)&pcpuarea->ent[cid].ist0; 527 p = (vaddr_t)&pcpuarea->ent[cid].ist0;
528#else 528#else
529 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 529 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
530#endif 530#endif
531 cputss->tss.tss_ist[0] = p + PAGE_SIZE - 16; 531 cputss->tss.tss_ist[0] = p + PAGE_SIZE - 16;
532 532
533 /* double fault */ 533 /* double fault */
534#ifdef __HAVE_PCPU_AREA 534#ifdef __HAVE_PCPU_AREA
535 p = (vaddr_t)&pcpuarea->ent[cid].ist1; 535 p = (vaddr_t)&pcpuarea->ent[cid].ist1;
536#else 536#else
537 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 537 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
538#endif 538#endif
539 cputss->tss.tss_ist[1] = p + PAGE_SIZE - 16; 539 cputss->tss.tss_ist[1] = p + PAGE_SIZE - 16;
540 540
541 /* NMI - store a structure at the top of the stack */ 541 /* NMI - store a structure at the top of the stack */
542#ifdef __HAVE_PCPU_AREA 542#ifdef __HAVE_PCPU_AREA
543 p = (vaddr_t)&pcpuarea->ent[cid].ist2; 543 p = (vaddr_t)&pcpuarea->ent[cid].ist2;
544#else 544#else
545 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 545 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
546#endif 546#endif
547 cputss->tss.tss_ist[2] = p + PAGE_SIZE - sizeof(struct nmistore); 547 cputss->tss.tss_ist[2] = p + PAGE_SIZE - sizeof(struct nmistore);
548 store = (struct nmistore *)(p + PAGE_SIZE - sizeof(struct nmistore)); 548 store = (struct nmistore *)(p + PAGE_SIZE - sizeof(struct nmistore));
549 store->cr3 = pmap_pdirpa(pmap_kernel(), 0); 549 store->cr3 = pmap_pdirpa(pmap_kernel(), 0);
550 550
551 /* DB */ 551 /* DB */
552#ifdef __HAVE_PCPU_AREA 552#ifdef __HAVE_PCPU_AREA
553 p = (vaddr_t)&pcpuarea->ent[cid].ist3; 553 p = (vaddr_t)&pcpuarea->ent[cid].ist3;
554#else 554#else
555 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO); 555 p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
556#endif 556#endif
557 cputss->tss.tss_ist[3] = p + PAGE_SIZE - 16; 557 cputss->tss.tss_ist[3] = p + PAGE_SIZE - 16;
558 558
559 ci->ci_tss = cputss; 559 ci->ci_tss = cputss;
560 ci->ci_tss_sel = tss_alloc(&cputss->tss); 560 ci->ci_tss_sel = tss_alloc(&cputss->tss);
561} 561}
562 562
563void 563void
564buildcontext(struct lwp *l, void *catcher, void *f) 564buildcontext(struct lwp *l, void *catcher, void *f)
565{ 565{
566 struct trapframe *tf = l->l_md.md_regs; 566 struct trapframe *tf = l->l_md.md_regs;
567 567
568 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); 568 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
569 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); 569 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
570 tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL); 570 tf->tf_fs = GSEL(GUDATA_SEL, SEL_UPL);
571 tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL); 571 tf->tf_gs = GSEL(GUDATA_SEL, SEL_UPL);
572 572
573 tf->tf_rip = (uint64_t)catcher; 573 tf->tf_rip = (uint64_t)catcher;
574 tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL); 574 tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
575 tf->tf_rflags &= ~PSL_CLEARSIG; 575 tf->tf_rflags &= ~PSL_CLEARSIG;
576 tf->tf_rsp = (uint64_t)f; 576 tf->tf_rsp = (uint64_t)f;
577 tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL); 577 tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
578 578
579 /* Ensure FP state is sane */ 579 /* Ensure FP state is sane */
580 fpu_sigreset(l); 580 fpu_sigreset(l);
581} 581}
582 582
583void 583void
584sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask) 584sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
585{ 585{
586 586
587 printf("sendsig_sigcontext: illegal\n"); 587 printf("sendsig_sigcontext: illegal\n");
588 sigexit(curlwp, SIGILL); 588 sigexit(curlwp, SIGILL);
589} 589}
590 590
591void 591void
592sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask) 592sendsig_siginfo(const ksiginfo_t *ksi, const sigset_t *mask)
593{ 593{
594 struct lwp *l = curlwp; 594 struct lwp *l = curlwp;
595 struct proc *p = l->l_proc; 595 struct proc *p = l->l_proc;
596 struct sigacts *ps = p->p_sigacts; 596 struct sigacts *ps = p->p_sigacts;
597 int onstack, error; 597 int onstack, error;
598 int sig = ksi->ksi_signo; 598 int sig = ksi->ksi_signo;
599 struct sigframe_siginfo *fp, frame; 599 struct sigframe_siginfo *fp, frame;
600 sig_t catcher = SIGACTION(p, sig).sa_handler; 600 sig_t catcher = SIGACTION(p, sig).sa_handler;
601 struct trapframe *tf = l->l_md.md_regs; 601 struct trapframe *tf = l->l_md.md_regs;
602 char *sp; 602 char *sp;
603 603
604 KASSERT(mutex_owned(p->p_lock)); 604 KASSERT(mutex_owned(p->p_lock));
605 605
606 /* Do we need to jump onto the signal stack? */ 606 /* Do we need to jump onto the signal stack? */
607 onstack = 607 onstack =
608 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 && 608 (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
609 (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0; 609 (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
610 610
611 /* Allocate space for the signal handler context. */ 611 /* Allocate space for the signal handler context. */
612 if (onstack) 612 if (onstack)
613 sp = ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size); 613 sp = ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
614 else 614 else
615 /* AMD64 ABI 128-bytes "red zone". */ 615 /* AMD64 ABI 128-bytes "red zone". */
616 sp = (char *)tf->tf_rsp - 128; 616 sp = (char *)tf->tf_rsp - 128;
617 617
618 sp -= sizeof(struct sigframe_siginfo); 618 sp -= sizeof(struct sigframe_siginfo);
619 /* Round down the stackpointer to a multiple of 16 for the ABI. */ 619 /* Round down the stackpointer to a multiple of 16 for the ABI. */
620 fp = (struct sigframe_siginfo *)(((unsigned long)sp & ~15) - 8); 620 fp = (struct sigframe_siginfo *)(((unsigned long)sp & ~15) - 8);
621 621
622 memset(&frame, 0, sizeof(frame)); 622 memset(&frame, 0, sizeof(frame));
623 frame.sf_ra = (uint64_t)ps->sa_sigdesc[sig].sd_tramp; 623 frame.sf_ra = (uint64_t)ps->sa_sigdesc[sig].sd_tramp;
624 frame.sf_si._info = ksi->ksi_info; 624 frame.sf_si._info = ksi->ksi_info;
625 frame.sf_uc.uc_flags = _UC_SIGMASK; 625 frame.sf_uc.uc_flags = _UC_SIGMASK;
626 frame.sf_uc.uc_sigmask = *mask; 626 frame.sf_uc.uc_sigmask = *mask;
627 frame.sf_uc.uc_link = l->l_ctxlink; 627 frame.sf_uc.uc_link = l->l_ctxlink;
628 frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK) 628 frame.sf_uc.uc_flags |= (l->l_sigstk.ss_flags & SS_ONSTACK)
629 ? _UC_SETSTACK : _UC_CLRSTACK; 629 ? _UC_SETSTACK : _UC_CLRSTACK;
630 sendsig_reset(l, sig); 630 sendsig_reset(l, sig);
631 631
632 mutex_exit(p->p_lock); 632 mutex_exit(p->p_lock);
633 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags); 633 cpu_getmcontext(l, &frame.sf_uc.uc_mcontext, &frame.sf_uc.uc_flags);
634 /* Copyout all the fp regs, the signal handler might expect them. */ 634 /* Copyout all the fp regs, the signal handler might expect them. */
635 error = copyout(&frame, fp, sizeof frame); 635 error = copyout(&frame, fp, sizeof frame);
636 mutex_enter(p->p_lock); 636 mutex_enter(p->p_lock);
637 637
638 if (error != 0) { 638 if (error != 0) {
639 /* 639 /*
640 * Process has trashed its stack; give it an illegal 640 * Process has trashed its stack; give it an illegal
641 * instruction to halt it in its tracks. 641 * instruction to halt it in its tracks.
642 */ 642 */
643 sigexit(l, SIGILL); 643 sigexit(l, SIGILL);
644 /* NOTREACHED */ 644 /* NOTREACHED */
645 } 645 }
646 646
647 buildcontext(l, catcher, fp); 647 buildcontext(l, catcher, fp);
648 648
649 tf->tf_rdi = sig; 649 tf->tf_rdi = sig;
650 tf->tf_rsi = (uint64_t)&fp->sf_si; 650 tf->tf_rsi = (uint64_t)&fp->sf_si;
651 tf->tf_rdx = tf->tf_r15 = (uint64_t)&fp->sf_uc; 651 tf->tf_rdx = tf->tf_r15 = (uint64_t)&fp->sf_uc;
652 652
653 /* Remember that we're now on the signal stack. */ 653 /* Remember that we're now on the signal stack. */
654 if (onstack) 654 if (onstack)
655 l->l_sigstk.ss_flags |= SS_ONSTACK; 655 l->l_sigstk.ss_flags |= SS_ONSTACK;
656 656
657 if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS) { 657 if ((vaddr_t)catcher >= VM_MAXUSER_ADDRESS) {
658 /* 658 /*
659 * process has given an invalid address for the 659 * process has given an invalid address for the
660 * handler. Stop it, but do not do it before so 660 * handler. Stop it, but do not do it before so
661 * we can return the right info to userland (or in core dump) 661 * we can return the right info to userland (or in core dump)
662 */ 662 */
663 sigexit(l, SIGILL); 663 sigexit(l, SIGILL);
664 /* NOTREACHED */ 664 /* NOTREACHED */
665 } 665 }
666} 666}
667 667
668struct pcb dumppcb; 668struct pcb dumppcb;
669 669
670void 670void
671cpu_reboot(int howto, char *bootstr) 671cpu_reboot(int howto, char *bootstr)
672{ 672{
673 static bool syncdone = false; 673 static bool syncdone = false;
674 int s = IPL_NONE; 674 int s = IPL_NONE;
675 __USE(s); /* ugly otherwise */ 675 __USE(s); /* ugly otherwise */
676 676
677 if (cold) { 677 if (cold) {
678 howto |= RB_HALT; 678 howto |= RB_HALT;
679 goto haltsys; 679 goto haltsys;
680 } 680 }
681 681
682 boothowto = howto; 682 boothowto = howto;
683 683
684 /* i386 maybe_dump() */ 684 /* i386 maybe_dump() */
685 685
686 /* 686 /*
687 * If we've panic'd, don't make the situation potentially 687 * If we've panic'd, don't make the situation potentially
688 * worse by syncing or unmounting the file systems. 688 * worse by syncing or unmounting the file systems.
689 */ 689 */
690 if ((howto & RB_NOSYNC) == 0 && panicstr == NULL) { 690 if ((howto & RB_NOSYNC) == 0 && panicstr == NULL) {
691 if (!syncdone) { 691 if (!syncdone) {
692 syncdone = true; 692 syncdone = true;
693 /* XXX used to force unmount as well, here */ 693 /* XXX used to force unmount as well, here */
694 vfs_sync_all(curlwp); 694 vfs_sync_all(curlwp);
695 /* 695 /*
696 * If we've been adjusting the clock, the todr 696 * If we've been adjusting the clock, the todr
697 * will be out of synch; adjust it now. 697 * will be out of synch; adjust it now.
698 * 698 *
699 * XXX used to do this after unmounting all 699 * XXX used to do this after unmounting all
700 * filesystems with vfs_shutdown(). 700 * filesystems with vfs_shutdown().
701 */ 701 */
702 if (time_adjusted != 0) 702 if (time_adjusted != 0)
703 resettodr(); 703 resettodr();
704 } 704 }
705 705
706 while (vfs_unmountall1(curlwp, false, false) || 706 while (vfs_unmountall1(curlwp, false, false) ||
707 config_detach_all(boothowto) || 707 config_detach_all(boothowto) ||
708 vfs_unmount_forceone(curlwp)) 708 vfs_unmount_forceone(curlwp))
709 ; /* do nothing */ 709 ; /* do nothing */
710 } else 710 } else
711 suspendsched(); 711 suspendsched();
712 712
713 pmf_system_shutdown(boothowto); 713 pmf_system_shutdown(boothowto);
714 714
715 /* Disable interrupts. */ 715 /* Disable interrupts. */
716 s = splhigh(); 716 s = splhigh();
717 717
718 /* Do a dump if requested. */ 718 /* Do a dump if requested. */
719 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP) 719 if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
720 dumpsys(); 720 dumpsys();
721 721
722haltsys: 722haltsys:
723 doshutdownhooks(); 723 doshutdownhooks();
724 724
725 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) { 725 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
726#if NACPICA > 0 726#if NACPICA > 0
727 if (s != IPL_NONE) 727 if (s != IPL_NONE)
728 splx(s); 728 splx(s);
729 729
730 acpi_enter_sleep_state(ACPI_STATE_S5); 730 acpi_enter_sleep_state(ACPI_STATE_S5);
731#endif 731#endif
732#ifdef XENPV 732#ifdef XENPV
733 HYPERVISOR_shutdown(); 733 HYPERVISOR_shutdown();
734#endif /* XENPV */ 734#endif /* XENPV */
735 } 735 }
736 736
737 cpu_broadcast_halt(); 737 cpu_broadcast_halt();
738 738
739 if (howto & RB_HALT) { 739 if (howto & RB_HALT) {
740#if NACPICA > 0 740#if NACPICA > 0
741 acpi_disable(); 741 acpi_disable();
742#endif 742#endif
743 743
744 printf("\n"); 744 printf("\n");
745 printf("The operating system has halted.\n"); 745 printf("The operating system has halted.\n");
746 printf("Please press any key to reboot.\n\n"); 746 printf("Please press any key to reboot.\n\n");
747 cnpollc(1); /* for proper keyboard command handling */ 747 cnpollc(1); /* for proper keyboard command handling */
748 if (cngetc() == 0) { 748 if (cngetc() == 0) {
749 /* no console attached, so just hlt */ 749 /* no console attached, so just hlt */
750 printf("No keyboard - cannot reboot after all.\n"); 750 printf("No keyboard - cannot reboot after all.\n");
751 for(;;) { 751 for(;;) {
752 x86_hlt(); 752 x86_hlt();
753 } 753 }
754 } 754 }
755 cnpollc(0); 755 cnpollc(0);
756 } 756 }
757 757
758 printf("rebooting...\n"); 758 printf("rebooting...\n");
759 if (cpureset_delay > 0) 759 if (cpureset_delay > 0)
760 delay(cpureset_delay * 1000); 760 delay(cpureset_delay * 1000);
761 cpu_reset(); 761 cpu_reset();
762 for(;;) ; 762 for(;;) ;
763 /*NOTREACHED*/ 763 /*NOTREACHED*/
764} 764}
765 765
766/* 766/*
767 * XXXfvdl share dumpcode. 767 * XXXfvdl share dumpcode.
768 */ 768 */
769 769
770/* 770/*
771 * Perform assorted dump-related initialization tasks. Assumes that 771 * Perform assorted dump-related initialization tasks. Assumes that
772 * the maximum physical memory address will not increase afterwards. 772 * the maximum physical memory address will not increase afterwards.
773 */ 773 */
774void 774void
775dump_misc_init(void) 775dump_misc_init(void)
776{ 776{
777#ifndef NO_SPARSE_DUMP 777#ifndef NO_SPARSE_DUMP
778 int i; 778 int i;
779#endif 779#endif
780 780
781 if (dump_headerbuf != NULL) 781 if (dump_headerbuf != NULL)
782 return; /* already called */ 782 return; /* already called */
783 783
784#ifndef NO_SPARSE_DUMP 784#ifndef NO_SPARSE_DUMP
785 for (i = 0; i < mem_cluster_cnt; ++i) { 785 for (i = 0; i < mem_cluster_cnt; ++i) {
786 paddr_t top = mem_clusters[i].start + mem_clusters[i].size; 786 paddr_t top = mem_clusters[i].start + mem_clusters[i].size;
787 if (max_paddr < top) 787 if (max_paddr < top)
788 max_paddr = top; 788 max_paddr = top;
789 } 789 }
790#ifdef DEBUG 790#ifdef DEBUG
791 printf("dump_misc_init: max_paddr = 0x%lx\n", 791 printf("dump_misc_init: max_paddr = 0x%lx\n",
792 (unsigned long)max_paddr); 792 (unsigned long)max_paddr);
793#endif 793#endif
794 if (max_paddr == 0) { 794 if (max_paddr == 0) {
795 printf("Your machine does not initialize mem_clusters; " 795 printf("Your machine does not initialize mem_clusters; "
796 "sparse_dumps disabled\n"); 796 "sparse_dumps disabled\n");
797 sparse_dump = 0; 797 sparse_dump = 0;
798 } else { 798 } else {
799 sparse_dump_physmap = (void *)uvm_km_alloc(kernel_map, 799 sparse_dump_physmap = (void *)uvm_km_alloc(kernel_map,
800 roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE), 800 roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE),
801 PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO); 801 PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
802 } 802 }
803#endif 803#endif
804 dump_headerbuf = (void *)uvm_km_alloc(kernel_map, 804 dump_headerbuf = (void *)uvm_km_alloc(kernel_map,
805 dump_headerbuf_size, 805 dump_headerbuf_size,
806 PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO); 806 PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
807 /* XXXjld should check for failure here, disable dumps if so. */ 807 /* XXXjld should check for failure here, disable dumps if so. */
808} 808}
809 809
810#ifndef NO_SPARSE_DUMP 810#ifndef NO_SPARSE_DUMP
811/* 811/*
812 * Clear the set of pages to include in a sparse dump. 812 * Clear the set of pages to include in a sparse dump.
813 */ 813 */
814void 814void
815sparse_dump_reset(void) 815sparse_dump_reset(void)
816{ 816{
817 memset(sparse_dump_physmap, 0, 817 memset(sparse_dump_physmap, 0,
818 roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE)); 818 roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE));
819} 819}
820 820
821/* 821/*
822 * Include or exclude pages in a sparse dump. 822 * Include or exclude pages in a sparse dump.
823 */ 823 */
824void 824void
825sparse_dump_mark(void) 825sparse_dump_mark(void)
826{ 826{
827 paddr_t p, pstart, pend; 827 paddr_t p, pstart, pend;
828 struct vm_page *pg; 828 struct vm_page *pg;
829 int i; 829 int i;
830 uvm_physseg_t upm; 830 uvm_physseg_t upm;
831 831
832 /* 832 /*
833 * Mark all memory pages, then unmark pages that are uninteresting. 833 * Mark all memory pages, then unmark pages that are uninteresting.
 834 * Dereferencing pg->uobject might crash again if another CPU 834 * Dereferencing pg->uobject might crash again if another CPU
835 * frees the object out from under us, but we can't lock anything 835 * frees the object out from under us, but we can't lock anything
836 * so it's a risk we have to take. 836 * so it's a risk we have to take.
837 */ 837 */
838 838
839 for (i = 0; i < mem_cluster_cnt; ++i) { 839 for (i = 0; i < mem_cluster_cnt; ++i) {
840 pstart = mem_clusters[i].start / PAGE_SIZE; 840 pstart = mem_clusters[i].start / PAGE_SIZE;
841 pend = pstart + mem_clusters[i].size / PAGE_SIZE; 841 pend = pstart + mem_clusters[i].size / PAGE_SIZE;
842 842
843 for (p = pstart; p < pend; p++) { 843 for (p = pstart; p < pend; p++) {
844 setbit(sparse_dump_physmap, p); 844 setbit(sparse_dump_physmap, p);
845 } 845 }
846 } 846 }
847 for (upm = uvm_physseg_get_first(); 847 for (upm = uvm_physseg_get_first();
848 uvm_physseg_valid_p(upm); 848 uvm_physseg_valid_p(upm);
849 upm = uvm_physseg_get_next(upm)) { 849 upm = uvm_physseg_get_next(upm)) {
850 paddr_t pfn; 850 paddr_t pfn;
851 851
852 /* 852 /*
853 * We assume that seg->start to seg->end are 853 * We assume that seg->start to seg->end are
854 * uvm_page_physload()ed 854 * uvm_page_physload()ed
855 */ 855 */
856 for (pfn = uvm_physseg_get_start(upm); 856 for (pfn = uvm_physseg_get_start(upm);
857 pfn < uvm_physseg_get_end(upm); 857 pfn < uvm_physseg_get_end(upm);
858 pfn++) { 858 pfn++) {
859 pg = PHYS_TO_VM_PAGE(ptoa(pfn)); 859 pg = PHYS_TO_VM_PAGE(ptoa(pfn));
860 860
861 if (pg->uanon || (pg->pqflags & PQ_FREE) || 861 if (pg->uanon || (pg->pqflags & PQ_FREE) ||
862 (pg->uobject && pg->uobject->pgops)) { 862 (pg->uobject && pg->uobject->pgops)) {
863 p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE; 863 p = VM_PAGE_TO_PHYS(pg) / PAGE_SIZE;
864 clrbit(sparse_dump_physmap, p); 864 clrbit(sparse_dump_physmap, p);
865 } 865 }
866 } 866 }
867 } 867 }
868} 868}
869 869
870/* 870/*
871 * Machine-dependently decides on the contents of a sparse dump, using 871 * Machine-dependently decides on the contents of a sparse dump, using
872 * the above. 872 * the above.
873 */ 873 */
874void 874void
875cpu_dump_prep_sparse(void) 875cpu_dump_prep_sparse(void)
876{ 876{
877 sparse_dump_reset(); 877 sparse_dump_reset();
878 /* XXX could the alternate recursive page table be skipped? */ 878 /* XXX could the alternate recursive page table be skipped? */
879 sparse_dump_mark(); 879 sparse_dump_mark();
880 /* Memory for I/O buffers could be unmarked here, for example. */ 880 /* Memory for I/O buffers could be unmarked here, for example. */
881 /* The kernel text could also be unmarked, but gdb would be upset. */ 881 /* The kernel text could also be unmarked, but gdb would be upset. */
882} 882}
883#endif 883#endif
884 884
885/* 885/*
886 * Abstractly iterate over the collection of memory segments to be 886 * Abstractly iterate over the collection of memory segments to be
887 * dumped; the callback lacks the customary environment-pointer 887 * dumped; the callback lacks the customary environment-pointer
888 * argument because none of the current users really need one. 888 * argument because none of the current users really need one.
889 * 889 *
890 * To be used only after dump_seg_prep is called to set things up. 890 * To be used only after dump_seg_prep is called to set things up.
891 */ 891 */
892int 892int
893dump_seg_iter(int (*callback)(paddr_t, paddr_t)) 893dump_seg_iter(int (*callback)(paddr_t, paddr_t))
894{ 894{
895 int error, i; 895 int error, i;
896 896
897#define CALLBACK(start,size) do { \ 897#define CALLBACK(start,size) do { \
898 error = callback(start,size); \ 898 error = callback(start,size); \
899 if (error) \ 899 if (error) \
900 return error; \ 900 return error; \
901} while(0) 901} while(0)
902 902
903 for (i = 0; i < mem_cluster_cnt; ++i) { 903 for (i = 0; i < mem_cluster_cnt; ++i) {
904#ifndef NO_SPARSE_DUMP 904#ifndef NO_SPARSE_DUMP
905 /* 905 /*
906 * The bitmap is scanned within each memory segment, 906 * The bitmap is scanned within each memory segment,
907 * rather than over its entire domain, in case any 907 * rather than over its entire domain, in case any
908 * pages outside of the memory proper have been mapped 908 * pages outside of the memory proper have been mapped
909 * into kva; they might be devices that wouldn't 909 * into kva; they might be devices that wouldn't
910 * appreciate being arbitrarily read, and including 910 * appreciate being arbitrarily read, and including
911 * them could also break the assumption that a sparse 911 * them could also break the assumption that a sparse
912 * dump will always be smaller than a full one. 912 * dump will always be smaller than a full one.
913 */ 913 */
914 if (sparse_dump && sparse_dump_physmap) { 914 if (sparse_dump && sparse_dump_physmap) {
915 paddr_t p, start, end; 915 paddr_t p, sp_start, sp_end;
916 int lastset; 916 int lastset;
917 917
918 start = mem_clusters[i].start; 918 sp_start = mem_clusters[i].start;
919 end = start + mem_clusters[i].size; 919 sp_end = sp_start + mem_clusters[i].size;
920 start = rounddown(start, PAGE_SIZE); /* unnecessary? */ 920 sp_start = rounddown(sp_start, PAGE_SIZE); /* unnecessary? */
921 lastset = 0; 921 lastset = 0;
922 for (p = start; p < end; p += PAGE_SIZE) { 922 for (p = sp_start; p < sp_end; p += PAGE_SIZE) {
923 int thisset = isset(sparse_dump_physmap, 923 int thisset = isset(sparse_dump_physmap,
924 p/PAGE_SIZE); 924 p/PAGE_SIZE);
925 925
926 if (!lastset && thisset) 926 if (!lastset && thisset)
927 start = p; 927 sp_start = p;
928 if (lastset && !thisset) 928 if (lastset && !thisset)
929 CALLBACK(start, p - start); 929 CALLBACK(sp_start, p - sp_start);
930 lastset = thisset; 930 lastset = thisset;
931 } 931 }
932 if (lastset) 932 if (lastset)
933 CALLBACK(start, p - start); 933 CALLBACK(sp_start, p - sp_start);
934 } else 934 } else
935#endif 935#endif
936 CALLBACK(mem_clusters[i].start, mem_clusters[i].size); 936 CALLBACK(mem_clusters[i].start, mem_clusters[i].size);
937 } 937 }
938 return 0; 938 return 0;
939#undef CALLBACK 939#undef CALLBACK
940} 940}
941 941
942/* 942/*
943 * Prepare for an impending core dump: decide what's being dumped and 943 * Prepare for an impending core dump: decide what's being dumped and
944 * how much space it will take up. 944 * how much space it will take up.
945 */ 945 */
946void 946void
947dump_seg_prep(void) 947dump_seg_prep(void)
948{ 948{
949#ifndef NO_SPARSE_DUMP 949#ifndef NO_SPARSE_DUMP
950 if (sparse_dump && sparse_dump_physmap) 950 if (sparse_dump && sparse_dump_physmap)
951 cpu_dump_prep_sparse(); 951 cpu_dump_prep_sparse();
952#endif 952#endif
953 953
954 dump_nmemsegs = 0; 954 dump_nmemsegs = 0;
955 dump_npages = 0; 955 dump_npages = 0;
956 dump_seg_iter(dump_seg_count_range); 956 dump_seg_iter(dump_seg_count_range);
957 957
958 dump_header_size = ALIGN(sizeof(kcore_seg_t)) + 958 dump_header_size = ALIGN(sizeof(kcore_seg_t)) +
959 ALIGN(sizeof(cpu_kcore_hdr_t)) + 959 ALIGN(sizeof(cpu_kcore_hdr_t)) +
960 ALIGN(dump_nmemsegs * sizeof(phys_ram_seg_t)); 960 ALIGN(dump_nmemsegs * sizeof(phys_ram_seg_t));
961 dump_header_size = roundup(dump_header_size, dbtob(1)); 961 dump_header_size = roundup(dump_header_size, dbtob(1));
962 962
963 /* 963 /*
964 * savecore(8) will read this to decide how many pages to 964 * savecore(8) will read this to decide how many pages to
965 * copy, and cpu_dumpconf has already used the pessimistic 965 * copy, and cpu_dumpconf has already used the pessimistic
966 * value to set dumplo, so it's time to tell the truth. 966 * value to set dumplo, so it's time to tell the truth.
967 */ 967 */
968 dumpsize = dump_npages; /* XXX could these just be one variable? */ 968 dumpsize = dump_npages; /* XXX could these just be one variable? */
969} 969}
970 970
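To make the size computation concrete, assume hypothetical numbers: 8-byte
ALIGN(), dbtob(1) == 512, and 16 bytes each for kcore_seg_t,
cpu_kcore_hdr_t and phys_ram_seg_t. A sparse dump that produced
dump_nmemsegs == 100 ranges then needs 16 + 16 + 1600 = 1632 bytes of
header, which roundup() pads to 2048 bytes (four disk blocks), so the page
data that follows always starts on a block boundary.
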
971int 971int
972dump_seg_count_range(paddr_t start, paddr_t size) 972dump_seg_count_range(paddr_t start, paddr_t size)
973{ 973{
974 ++dump_nmemsegs; 974 ++dump_nmemsegs;
975 dump_npages += size / PAGE_SIZE; 975 dump_npages += size / PAGE_SIZE;
976 return 0; 976 return 0;
977} 977}
978 978
979/* 979/*
980 * A sparse dump's header may be rather large, due to the number of 980 * A sparse dump's header may be rather large, due to the number of
981 * "segments" emitted. These routines manage a simple output buffer, 981 * "segments" emitted. These routines manage a simple output buffer,
982 * so that the header can be written to disk incrementally. 982 * so that the header can be written to disk incrementally.
983 */ 983 */
984void 984void
985dump_header_start(void) 985dump_header_start(void)
986{ 986{
987 dump_headerbuf_ptr = dump_headerbuf; 987 dump_headerbuf_ptr = dump_headerbuf;
988 dump_header_blkno = dumplo; 988 dump_header_blkno = dumplo;
989} 989}
990 990
991int 991int
992dump_header_flush(void) 992dump_header_flush(void)
993{ 993{
994 const struct bdevsw *bdev; 994 const struct bdevsw *bdev;
995 size_t to_write; 995 size_t to_write;
996 int error; 996 int error;
997 997
998 bdev = bdevsw_lookup(dumpdev); 998 bdev = bdevsw_lookup(dumpdev);
999 to_write = roundup(dump_headerbuf_ptr - dump_headerbuf, dbtob(1)); 999 to_write = roundup(dump_headerbuf_ptr - dump_headerbuf, dbtob(1));
1000 error = bdev->d_dump(dumpdev, dump_header_blkno, 1000 error = bdev->d_dump(dumpdev, dump_header_blkno,
1001 dump_headerbuf, to_write); 1001 dump_headerbuf, to_write);
1002 dump_header_blkno += btodb(to_write); 1002 dump_header_blkno += btodb(to_write);
1003 dump_headerbuf_ptr = dump_headerbuf; 1003 dump_headerbuf_ptr = dump_headerbuf;
1004 return error; 1004 return error;
1005} 1005}
1006 1006
1007int 1007int
1008dump_header_addbytes(const void* vptr, size_t n) 1008dump_header_addbytes(const void* vptr, size_t n)
1009{ 1009{
1010 const char* ptr = vptr; 1010 const char* ptr = vptr;
1011 int error; 1011 int error;
1012 1012
1013 while (n > dump_headerbuf_avail) { 1013 while (n > dump_headerbuf_avail) {
1014 memcpy(dump_headerbuf_ptr, ptr, dump_headerbuf_avail); 1014 memcpy(dump_headerbuf_ptr, ptr, dump_headerbuf_avail);
1015 ptr += dump_headerbuf_avail; 1015 ptr += dump_headerbuf_avail;
1016 n -= dump_headerbuf_avail; 1016 n -= dump_headerbuf_avail;
1017 dump_headerbuf_ptr = dump_headerbuf_end; 1017 dump_headerbuf_ptr = dump_headerbuf_end;
1018 error = dump_header_flush(); 1018 error = dump_header_flush();
1019 if (error) 1019 if (error)
1020 return error; 1020 return error;
1021 } 1021 }
1022 memcpy(dump_headerbuf_ptr, ptr, n); 1022 memcpy(dump_headerbuf_ptr, ptr, n);
1023 dump_headerbuf_ptr += n; 1023 dump_headerbuf_ptr += n;
1024 1024
1025 return 0; 1025 return 0;
1026} 1026}
1027 1027
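dump_header_start(), dump_header_flush() and dump_header_addbytes() together
implement a simple bounded output buffer: bytes accumulate until the buffer
fills, at which point it is written to the dump device in whole disk blocks
and reset. A self-contained sketch of the same pattern, using hypothetical
names and a flush that merely reports what it would write:

    #include <stdio.h>
    #include <string.h>

    #define BUFSZ 8                 /* tiny, to force several flushes */

    static char buf[BUFSZ];
    static char *ptr = buf;
    #define AVAIL ((size_t)(buf + BUFSZ - ptr))

    static int
    flush(void)
    {
        printf("flush %zu bytes\n", (size_t)(ptr - buf));
        ptr = buf;                  /* reset for the next chunk */
        return 0;
    }

    static int
    addbytes(const void *vptr, size_t n)
    {
        const char *p = vptr;
        int error;

        /* Fill and flush whole buffers while the input is too large. */
        while (n > AVAIL) {
            size_t chunk = AVAIL;
            memcpy(ptr, p, chunk);
            p += chunk;
            n -= chunk;
            ptr = buf + BUFSZ;      /* mark the buffer full */
            if ((error = flush()) != 0)
                return error;
        }
        memcpy(ptr, p, n);          /* the remainder fits */
        ptr += n;
        return 0;
    }

    int
    main(void)
    {
        addbytes("0123456789abcdefghij", 20);   /* flushes 8 + 8, keeps 4 */
        return flush();                         /* final partial flush */
    }

The kernel version differs only in that the real flush rounds each write up
to a disk-block multiple and advances dump_header_blkno as it goes.
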
1028int 1028int
1029dump_header_addseg(paddr_t start, paddr_t size) 1029dump_header_addseg(paddr_t start, paddr_t size)
1030{ 1030{
1031 phys_ram_seg_t seg = { start, size }; 1031 phys_ram_seg_t seg = { start, size };
1032 1032
1033 return dump_header_addbytes(&seg, sizeof(seg)); 1033 return dump_header_addbytes(&seg, sizeof(seg));
1034} 1034}
1035 1035
1036int 1036int
1037dump_header_finish(void) 1037dump_header_finish(void)
1038{ 1038{
1039 memset(dump_headerbuf_ptr, 0, dump_headerbuf_avail); 1039 memset(dump_headerbuf_ptr, 0, dump_headerbuf_avail);
1040 return dump_header_flush(); 1040 return dump_header_flush();
1041} 1041}
1042 1042
1043 1043
1044/* 1044/*
1045 * These variables are needed by /sbin/savecore 1045 * These variables are needed by /sbin/savecore
1046 */ 1046 */
1047uint32_t dumpmag = 0x8fca0101; /* magic number */ 1047uint32_t dumpmag = 0x8fca0101; /* magic number */
1048int dumpsize = 0; /* pages */ 1048int dumpsize = 0; /* pages */
1049long dumplo = 0; /* blocks */ 1049long dumplo = 0; /* blocks */
1050 1050
1051/* 1051/*
1052 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers 1052 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers
1053 * for a full (non-sparse) dump. 1053 * for a full (non-sparse) dump.
1054 */ 1054 */
1055int 1055int
1056cpu_dumpsize(void) 1056cpu_dumpsize(void)
1057{ 1057{
1058 int size; 1058 int size;
1059 1059
1060 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) + 1060 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1061 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t)); 1061 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1062 if (roundup(size, dbtob(1)) != dbtob(1)) 1062 if (roundup(size, dbtob(1)) != dbtob(1))
1063 return (-1); 1063 return (-1);
1064 1064
1065 return (1); 1065 return (1);
1066} 1066}
1067 1067
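The roundup() test enforces that the full-dump header fits in exactly one
disk block. With hypothetical sizes (dbtob(1) == 512 and 16 bytes per
structure), ALIGN(16) + ALIGN(16) + mem_cluster_cnt * 16 must stay at or
under 512, allowing at most 30 memory clusters; beyond that cpu_dumpsize()
returns -1 and cpu_dumpconf() gives up.
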
1068/* 1068/*
1069 * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped 1069 * cpu_dump_mempagecnt: calculate the size of RAM (in pages) to be dumped
1070 * for a full (non-sparse) dump. 1070 * for a full (non-sparse) dump.
1071 */ 1071 */
1072u_long 1072u_long
1073cpu_dump_mempagecnt(void) 1073cpu_dump_mempagecnt(void)
1074{ 1074{
1075 u_long i, n; 1075 u_long i, n;
1076 1076
1077 n = 0; 1077 n = 0;
1078 for (i = 0; i < mem_cluster_cnt; i++) 1078 for (i = 0; i < mem_cluster_cnt; i++)
1079 n += atop(mem_clusters[i].size); 1079 n += atop(mem_clusters[i].size);
1080 return (n); 1080 return (n);
1081} 1081}
1082 1082
1083/* 1083/*
1084 * cpu_dump: dump the machine-dependent kernel core dump headers. 1084 * cpu_dump: dump the machine-dependent kernel core dump headers.
1085 */ 1085 */
1086int 1086int
1087cpu_dump(void) 1087cpu_dump(void)
1088{ 1088{
1089 kcore_seg_t seg; 1089 kcore_seg_t seg;
1090 cpu_kcore_hdr_t cpuhdr; 1090 cpu_kcore_hdr_t cpuhdr;
1091 const struct bdevsw *bdev; 1091 const struct bdevsw *bdev;
1092 1092
1093 bdev = bdevsw_lookup(dumpdev); 1093 bdev = bdevsw_lookup(dumpdev);
1094 if (bdev == NULL) 1094 if (bdev == NULL)
1095 return (ENXIO); 1095 return (ENXIO);
1096 1096
1097 /* 1097 /*
1098 * Generate a segment header. 1098 * Generate a segment header.
1099 */ 1099 */
1100 CORE_SETMAGIC(seg, KCORE_MAGIC, MID_MACHINE, CORE_CPU); 1100 CORE_SETMAGIC(seg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1101 seg.c_size = dump_header_size - ALIGN(sizeof(seg)); 1101 seg.c_size = dump_header_size - ALIGN(sizeof(seg));
1102 (void)dump_header_addbytes(&seg, ALIGN(sizeof(seg))); 1102 (void)dump_header_addbytes(&seg, ALIGN(sizeof(seg)));
1103 1103
1104 /* 1104 /*
1105 * Add the machine-dependent header info. 1105 * Add the machine-dependent header info.
1106 */ 1106 */
1107 cpuhdr.ptdpaddr = PDPpaddr; 1107 cpuhdr.ptdpaddr = PDPpaddr;
1108 cpuhdr.nmemsegs = dump_nmemsegs; 1108 cpuhdr.nmemsegs = dump_nmemsegs;
1109 (void)dump_header_addbytes(&cpuhdr, ALIGN(sizeof(cpuhdr))); 1109 (void)dump_header_addbytes(&cpuhdr, ALIGN(sizeof(cpuhdr)));
1110 1110
1111 /* 1111 /*
1112 * Write out the memory segment descriptors. 1112 * Write out the memory segment descriptors.
1113 */ 1113 */
1114 return dump_seg_iter(dump_header_addseg); 1114 return dump_seg_iter(dump_header_addseg);
1115} 1115}
1116 1116
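The resulting on-disk header layout is: an ALIGN()ed kcore_seg_t whose
c_size field covers everything after it, an ALIGN()ed cpu_kcore_hdr_t
carrying the physical address of the top-level page table (PDPpaddr) and
the segment count, then one phys_ram_seg_t per dumped range, with
dump_header_finish() zero-padding the tail out to a disk-block boundary
for savecore(8) to parse.
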
1117/* 1117/*
1118 * Doadump comes here after turning off memory management and 1118 * Doadump comes here after turning off memory management and
1119 * getting on the dump stack, either when called above, or by 1119 * getting on the dump stack, either when called above, or by
1120 * the auto-restart code. 1120 * the auto-restart code.
1121 */ 1121 */
1122#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */ 1122#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
1123static vaddr_t dumpspace; 1123static vaddr_t dumpspace;
1124 1124
1125vaddr_t 1125vaddr_t
1126reserve_dumppages(vaddr_t p) 1126reserve_dumppages(vaddr_t p)
1127{ 1127{
1128 1128
1129 dumpspace = p; 1129 dumpspace = p;
1130 return (p + BYTES_PER_DUMP); 1130 return (p + BYTES_PER_DUMP);
1131} 1131}
1132 1132
1133int 1133int
1134dumpsys_seg(paddr_t maddr, paddr_t bytes) 1134dumpsys_seg(paddr_t maddr, paddr_t bytes)
1135{ 1135{
1136 u_long i, m, n; 1136 u_long i, m, n;
1137 daddr_t blkno; 1137 daddr_t blkno;
1138 const struct bdevsw *bdev; 1138 const struct bdevsw *bdev;
1139 int (*dump)(dev_t, daddr_t, void *, size_t); 1139 int (*dump)(dev_t, daddr_t, void *, size_t);
1140 int error; 1140 int error;
1141 1141
1142 if (dumpdev == NODEV) 1142 if (dumpdev == NODEV)
1143 return ENODEV; 1143 return ENODEV;
1144 bdev = bdevsw_lookup(dumpdev); 1144 bdev = bdevsw_lookup(dumpdev);
1145 if (bdev == NULL || bdev->d_psize == NULL) 1145 if (bdev == NULL || bdev->d_psize == NULL)
1146 return ENODEV; 1146 return ENODEV;
1147 1147
1148 dump = bdev->d_dump; 1148 dump = bdev->d_dump;
1149 1149
1150 blkno = dump_header_blkno; 1150 blkno = dump_header_blkno;
1151 for (i = 0; i < bytes; i += n, dump_totalbytesleft -= n) { 1151 for (i = 0; i < bytes; i += n, dump_totalbytesleft -= n) {
1152 /* Print out how many MBs we have left to go. */ 1152 /* Print out how many MBs we have left to go. */
1153 if ((dump_totalbytesleft % (1024*1024)) == 0) 1153 if ((dump_totalbytesleft % (1024*1024)) == 0)
1154 printf_nolog("%lu ", (unsigned long) 1154 printf_nolog("%lu ", (unsigned long)
1155 (dump_totalbytesleft / (1024 * 1024))); 1155 (dump_totalbytesleft / (1024 * 1024)));
1156 1156
1157 /* Limit size for next transfer. */ 1157 /* Limit size for next transfer. */
1158 n = bytes - i; 1158 n = bytes - i;
1159 if (n > BYTES_PER_DUMP) 1159 if (n > BYTES_PER_DUMP)
1160 n = BYTES_PER_DUMP; 1160 n = BYTES_PER_DUMP;
1161 1161
1162 for (m = 0; m < n; m += NBPG) 1162 for (m = 0; m < n; m += NBPG)
1163 pmap_kenter_pa(dumpspace + m, maddr + m, 1163 pmap_kenter_pa(dumpspace + m, maddr + m,
1164 VM_PROT_READ, 0); 1164 VM_PROT_READ, 0);
1165 pmap_update(pmap_kernel()); 1165 pmap_update(pmap_kernel());
1166 1166
1167 error = (*dump)(dumpdev, blkno, (void *)dumpspace, n); 1167 error = (*dump)(dumpdev, blkno, (void *)dumpspace, n);
1168 pmap_kremove_local(dumpspace, n); 1168 pmap_kremove_local(dumpspace, n);
1169 if (error) 1169 if (error)
1170 return error; 1170 return error;
1171 maddr += n; 1171 maddr += n;
1172 blkno += btodb(n); /* XXX? */ 1172 blkno += btodb(n); /* XXX? */
1173 1173
1174#if 0 /* XXX this doesn't work. grr. */ 1174#if 0 /* XXX this doesn't work. grr. */
1175 /* operator aborting dump? */ 1175 /* operator aborting dump? */
1176 if (sget() != NULL) 1176 if (sget() != NULL)
1177 return EINTR; 1177 return EINTR;
1178#endif 1178#endif
1179 } 1179 }
1180 dump_header_blkno = blkno; 1180 dump_header_blkno = blkno;
1181 1181
1182 return 0; 1182 return 0;
1183} 1183}
1184 1184
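Note that dumpsys_seg() never maps a whole segment at once:
reserve_dumppages() set aside a single BYTES_PER_DUMP window at dumpspace,
and each iteration retargets that window over the next physical chunk, so
one page of KVA suffices no matter how much RAM is written. A sketch of the
same bounded-window idiom, with memcpy() standing in for the pmap remap
plus device write (all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    #define WINDOW 4096             /* analogous to BYTES_PER_DUMP */

    static void
    copy_in_chunks(const char *src, size_t bytes)
    {
        char window[WINDOW];
        size_t i, n;

        for (i = 0; i < bytes; i += n) {
            n = bytes - i;
            if (n > WINDOW)
                n = WINDOW;                 /* limit the next transfer */
            memcpy(window, src + i, n);     /* "map" the chunk */
            printf("wrote %zu bytes at offset %zu\n", n, i);
        }
    }

    int
    main(void)
    {
        static char ram[3 * WINDOW + 123];  /* deliberately not a multiple */
        copy_in_chunks(ram, sizeof(ram));
        return 0;
    }
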
1185void 1185void
1186dodumpsys(void) 1186dodumpsys(void)
1187{ 1187{
1188 const struct bdevsw *bdev; 1188 const struct bdevsw *bdev;
1189 int dumpend, psize; 1189 int dumpend, psize;
1190 int error; 1190 int error;
1191 1191
1192 if (dumpdev == NODEV) 1192 if (dumpdev == NODEV)
1193 return; 1193 return;
1194 1194
1195 bdev = bdevsw_lookup(dumpdev); 1195 bdev = bdevsw_lookup(dumpdev);
1196 if (bdev == NULL || bdev->d_psize == NULL) 1196 if (bdev == NULL || bdev->d_psize == NULL)
1197 return; 1197 return;
1198 /* 1198 /*
1199 * For dumps during autoconfiguration, 1199 * For dumps during autoconfiguration,
 1200 * if the dump device has already been configured... 1200 * if the dump device has already been configured...
1201 */ 1201 */
1202 if (dumpsize == 0) 1202 if (dumpsize == 0)
1203 cpu_dumpconf(); 1203 cpu_dumpconf();
1204 1204
1205 printf("\ndumping to dev %llu,%llu (offset=%ld, size=%d):", 1205 printf("\ndumping to dev %llu,%llu (offset=%ld, size=%d):",
1206 (unsigned long long)major(dumpdev), 1206 (unsigned long long)major(dumpdev),
1207 (unsigned long long)minor(dumpdev), dumplo, dumpsize); 1207 (unsigned long long)minor(dumpdev), dumplo, dumpsize);
1208 1208
1209 if (dumplo <= 0 || dumpsize <= 0) { 1209 if (dumplo <= 0 || dumpsize <= 0) {
1210 printf(" not possible\n"); 1210 printf(" not possible\n");
1211 return; 1211 return;
1212 } 1212 }
1213 1213
1214 psize = bdev_size(dumpdev); 1214 psize = bdev_size(dumpdev);
1215 printf("\ndump "); 1215 printf("\ndump ");
1216 if (psize == -1) { 1216 if (psize == -1) {
1217 printf("area unavailable\n"); 1217 printf("area unavailable\n");
1218 return; 1218 return;
1219 } 1219 }
1220 1220
1221#if 0 /* XXX this doesn't work. grr. */ 1221#if 0 /* XXX this doesn't work. grr. */
1222 /* toss any characters present prior to dump */ 1222 /* toss any characters present prior to dump */
1223 while (sget() != NULL); /*syscons and pccons differ */ 1223 while (sget() != NULL); /*syscons and pccons differ */
1224#endif 1224#endif
1225 1225
1226 dump_seg_prep(); 1226 dump_seg_prep();
1227 dumpend = dumplo + btodb(dump_header_size) + ctod(dump_npages); 1227 dumpend = dumplo + btodb(dump_header_size) + ctod(dump_npages);
1228 if (dumpend > psize) { 1228 if (dumpend > psize) {
1229 printf("failed: insufficient space (%d < %d)\n", 1229 printf("failed: insufficient space (%d < %d)\n",
1230 psize, dumpend); 1230 psize, dumpend);
1231 goto failed; 1231 goto failed;
1232 } 1232 }
1233 1233
1234 dump_header_start(); 1234 dump_header_start();
1235 if ((error = cpu_dump()) != 0) 1235 if ((error = cpu_dump()) != 0)
1236 goto err; 1236 goto err;
1237 if ((error = dump_header_finish()) != 0) 1237 if ((error = dump_header_finish()) != 0)
1238 goto err; 1238 goto err;
1239 1239
1240 if (dump_header_blkno != dumplo + btodb(dump_header_size)) { 1240 if (dump_header_blkno != dumplo + btodb(dump_header_size)) {
1241 printf("BAD header size (%ld [written] != %ld [expected])\n", 1241 printf("BAD header size (%ld [written] != %ld [expected])\n",
1242 (long)(dump_header_blkno - dumplo), 1242 (long)(dump_header_blkno - dumplo),
1243 (long)btodb(dump_header_size)); 1243 (long)btodb(dump_header_size));
1244 goto failed; 1244 goto failed;
1245 } 1245 }
1246 1246
1247 dump_totalbytesleft = roundup(ptoa(dump_npages), BYTES_PER_DUMP); 1247 dump_totalbytesleft = roundup(ptoa(dump_npages), BYTES_PER_DUMP);
1248 error = dump_seg_iter(dumpsys_seg); 1248 error = dump_seg_iter(dumpsys_seg);
1249 1249
1250 if (error == 0 && dump_header_blkno != dumpend) { 1250 if (error == 0 && dump_header_blkno != dumpend) {
1251 printf("BAD dump size (%ld [written] != %ld [expected])\n", 1251 printf("BAD dump size (%ld [written] != %ld [expected])\n",
 1252 (long)(dump_header_blkno - dumplo), 1252 (long)(dump_header_blkno - dumplo),
 1253 (long)(dumpend - dumplo)); 1253 (long)(dumpend - dumplo));
1254 goto failed; 1254 goto failed;
1255 } 1255 }
1256 1256
1257err: 1257err:
1258 switch (error) { 1258 switch (error) {
1259 1259
1260 case ENXIO: 1260 case ENXIO:
1261 printf("device bad\n"); 1261 printf("device bad\n");
1262 break; 1262 break;
1263 1263
1264 case EFAULT: 1264 case EFAULT:
1265 printf("device not ready\n"); 1265 printf("device not ready\n");
1266 break; 1266 break;
1267 1267
1268 case EINVAL: 1268 case EINVAL:
1269 printf("area improper\n"); 1269 printf("area improper\n");
1270 break; 1270 break;
1271 1271
1272 case EIO: 1272 case EIO:
1273 printf("i/o error\n"); 1273 printf("i/o error\n");
1274 break; 1274 break;
1275 1275
1276 case EINTR: 1276 case EINTR:
1277 printf("aborted from console\n"); 1277 printf("aborted from console\n");
1278 break; 1278 break;
1279 1279
1280 case 0: 1280 case 0:
1281 printf("succeeded\n"); 1281 printf("succeeded\n");
1282 break; 1282 break;
1283 1283
1284 default: 1284 default:
1285 printf("error %d\n", error); 1285 printf("error %d\n", error);
1286 break; 1286 break;
1287 } 1287 }
1288failed: 1288failed:
1289 printf("\n\n"); 1289 printf("\n\n");
1290 delay(5000000); /* 5 seconds */ 1290 delay(5000000); /* 5 seconds */
1291} 1291}
1292 1292
1293/* 1293/*
1294 * This is called by main to set dumplo and dumpsize. 1294 * This is called by main to set dumplo and dumpsize.
1295 * Dumps always skip the first PAGE_SIZE of disk space 1295 * Dumps always skip the first PAGE_SIZE of disk space
1296 * in case there might be a disk label stored there. 1296 * in case there might be a disk label stored there.
1297 * If there is extra space, put dump at the end to 1297 * If there is extra space, put dump at the end to
1298 * reduce the chance that swapping trashes it. 1298 * reduce the chance that swapping trashes it.
1299 * 1299 *
 1300 * Sparse dumps can't be placed as close to the end as possible, because 1300 * Sparse dumps can't be placed as close to the end as possible, because
1301 * savecore(8) has to know where to start reading in the dump device 1301 * savecore(8) has to know where to start reading in the dump device
1302 * before it has access to any of the crashed system's state. 1302 * before it has access to any of the crashed system's state.
1303 * 1303 *
1304 * Note also that a sparse dump will never be larger than a full one: 1304 * Note also that a sparse dump will never be larger than a full one:
1305 * in order to add a phys_ram_seg_t to the header, at least one page 1305 * in order to add a phys_ram_seg_t to the header, at least one page
1306 * must be removed. 1306 * must be removed.
1307 */ 1307 */
1308void 1308void
1309cpu_dumpconf(void) 1309cpu_dumpconf(void)
1310{ 1310{
1311 int nblks, dumpblks; /* size of dump area */ 1311 int nblks, dumpblks; /* size of dump area */
1312 1312
1313 if (dumpdev == NODEV) 1313 if (dumpdev == NODEV)
1314 goto bad; 1314 goto bad;
1315 nblks = bdev_size(dumpdev); 1315 nblks = bdev_size(dumpdev);
1316 if (nblks <= ctod(1)) 1316 if (nblks <= ctod(1))
1317 goto bad; 1317 goto bad;
1318 1318
1319 dumpblks = cpu_dumpsize(); 1319 dumpblks = cpu_dumpsize();
1320 if (dumpblks < 0) 1320 if (dumpblks < 0)
1321 goto bad; 1321 goto bad;
1322 1322
1323 /* dumpsize is in page units, and doesn't include headers. */ 1323 /* dumpsize is in page units, and doesn't include headers. */
1324 dumpsize = cpu_dump_mempagecnt(); 1324 dumpsize = cpu_dump_mempagecnt();
1325 1325
1326 dumpblks += ctod(dumpsize); 1326 dumpblks += ctod(dumpsize);
1327 1327
1328 /* If dump won't fit (incl. room for possible label), punt. */ 1328 /* If dump won't fit (incl. room for possible label), punt. */
1329 if (dumpblks > (nblks - ctod(1))) { 1329 if (dumpblks > (nblks - ctod(1))) {
1330#ifndef NO_SPARSE_DUMP 1330#ifndef NO_SPARSE_DUMP
1331 /* A sparse dump might (and hopefully will) fit. */ 1331 /* A sparse dump might (and hopefully will) fit. */
1332 dumplo = ctod(1); 1332 dumplo = ctod(1);
1333#else 1333#else
1334 /* But if we're not configured for that, punt. */ 1334 /* But if we're not configured for that, punt. */
1335 goto bad; 1335 goto bad;
1336#endif 1336#endif
1337 } else { 1337 } else {
1338 /* Put dump at end of partition */ 1338 /* Put dump at end of partition */
1339 dumplo = nblks - dumpblks; 1339 dumplo = nblks - dumpblks;
1340 } 1340 }
1341 1341
1342 1342
1343 /* Now that we've decided this will work, init ancillary stuff. */ 1343 /* Now that we've decided this will work, init ancillary stuff. */
1344 dump_misc_init(); 1344 dump_misc_init();
1345 return; 1345 return;
1346 1346
1347 bad: 1347 bad:
1348 dumpsize = 0; 1348 dumpsize = 0;
1349} 1349}
1350 1350
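The placement arithmetic, with hypothetical numbers: a 1 GiB dump partition
has nblks == 2097152 512-byte blocks; dumping 512 MiB of RAM with 4 KiB
pages gives dumpsize == 131072 pages, so ctod(dumpsize) == 1048576 blocks
plus cpu_dumpsize() == 1 block of header, i.e. dumpblks == 1048577. That
fits with room for the label page, so the dump lands at the end:
dumplo = 2097152 - 1048577 = 1048575.
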
1351/* 1351/*
1352 * Clear registers on exec 1352 * Clear registers on exec
1353 */ 1353 */
1354void 1354void
1355setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack) 1355setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
1356{ 1356{
1357 struct pcb *pcb = lwp_getpcb(l); 1357 struct pcb *pcb = lwp_getpcb(l);
1358 struct trapframe *tf; 1358 struct trapframe *tf;
1359 1359
1360#ifdef USER_LDT 1360#ifdef USER_LDT
1361 pmap_ldt_cleanup(l); 1361 pmap_ldt_cleanup(l);
1362#endif 1362#endif
1363 1363
1364 fpu_clear(l, pack->ep_osversion >= 699002600 1364 fpu_clear(l, pack->ep_osversion >= 699002600
1365 ? __NetBSD_NPXCW__ : __NetBSD_COMPAT_NPXCW__); 1365 ? __NetBSD_NPXCW__ : __NetBSD_COMPAT_NPXCW__);
1366 x86_dbregs_clear(l); 1366 x86_dbregs_clear(l);
1367 1367
1368 kpreempt_disable(); 1368 kpreempt_disable();
1369 pcb->pcb_flags = 0; 1369 pcb->pcb_flags = 0;
1370 l->l_proc->p_flag &= ~PK_32; 1370 l->l_proc->p_flag &= ~PK_32;
1371 l->l_md.md_flags = MDL_IRET; 1371 l->l_md.md_flags = MDL_IRET;
1372 cpu_segregs64_zero(l); 1372 cpu_segregs64_zero(l);
1373 kpreempt_enable(); 1373 kpreempt_enable();
1374 1374
1375 tf = l->l_md.md_regs; 1375 tf = l->l_md.md_regs;
1376 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL); 1376 tf->tf_ds = GSEL(GUDATA_SEL, SEL_UPL);
1377 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL); 1377 tf->tf_es = GSEL(GUDATA_SEL, SEL_UPL);
1378 tf->tf_rdi = 0; 1378 tf->tf_rdi = 0;
1379 tf->tf_rsi = 0; 1379 tf->tf_rsi = 0;
1380 tf->tf_rbp = 0; 1380 tf->tf_rbp = 0;
1381 tf->tf_rbx = l->l_proc->p_psstrp; 1381 tf->tf_rbx = l->l_proc->p_psstrp;
1382 tf->tf_rdx = 0; 1382 tf->tf_rdx = 0;
1383 tf->tf_rcx = 0; 1383 tf->tf_rcx = 0;
1384 tf->tf_rax = 0; 1384 tf->tf_rax = 0;
1385 tf->tf_rip = pack->ep_entry; 1385 tf->tf_rip = pack->ep_entry;
1386 tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL); 1386 tf->tf_cs = LSEL(LUCODE_SEL, SEL_UPL);
1387 tf->tf_rflags = PSL_USERSET; 1387 tf->tf_rflags = PSL_USERSET;
1388 tf->tf_rsp = stack; 1388 tf->tf_rsp = stack;
1389 tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL); 1389 tf->tf_ss = LSEL(LUDATA_SEL, SEL_UPL);
1390} 1390}
1391 1391
1392/* 1392/*
1393 * Initialize segments and descriptor tables 1393 * Initialize segments and descriptor tables
1394 */ 1394 */
1395char *ldtstore; 1395char *ldtstore;
1396char *gdtstore; 1396char *gdtstore;
1397 1397
1398void 1398void
1399setgate(struct gate_descriptor *gd, void *func, int ist, int type, int dpl, int sel) 1399setgate(struct gate_descriptor *gd, void *func, int ist, int type, int dpl, int sel)
1400{ 1400{
1401 1401
1402 kpreempt_disable(); 1402 kpreempt_disable();
1403 pmap_changeprot_local(idt_vaddr, VM_PROT_READ|VM_PROT_WRITE); 1403 pmap_changeprot_local(idt_vaddr, VM_PROT_READ|VM_PROT_WRITE);
1404 1404
1405 gd->gd_looffset = (uint64_t)func & 0xffff; 1405 gd->gd_looffset = (uint64_t)func & 0xffff;
1406 gd->gd_selector = sel; 1406 gd->gd_selector = sel;
1407 gd->gd_ist = ist; 1407 gd->gd_ist = ist;
1408 gd->gd_type = type; 1408 gd->gd_type = type;
1409 gd->gd_dpl = dpl; 1409 gd->gd_dpl = dpl;
1410 gd->gd_p = 1; 1410 gd->gd_p = 1;
1411 gd->gd_hioffset = (uint64_t)func >> 16; 1411 gd->gd_hioffset = (uint64_t)func >> 16;
1412 gd->gd_zero = 0; 1412 gd->gd_zero = 0;
1413 gd->gd_xx1 = 0; 1413 gd->gd_xx1 = 0;
1414 gd->gd_xx2 = 0; 1414 gd->gd_xx2 = 0;
1415 gd->gd_xx3 = 0; 1415 gd->gd_xx3 = 0;
1416 1416
1417 pmap_changeprot_local(idt_vaddr, VM_PROT_READ); 1417 pmap_changeprot_local(idt_vaddr, VM_PROT_READ);
1418 kpreempt_enable(); 1418 kpreempt_enable();
1419} 1419}
1420 1420
1421void 1421void
1422unsetgate(struct gate_descriptor *gd) 1422unsetgate(struct gate_descriptor *gd)
1423{ 1423{
1424 1424
1425 kpreempt_disable(); 1425 kpreempt_disable();
1426 pmap_changeprot_local(idt_vaddr, VM_PROT_READ|VM_PROT_WRITE); 1426 pmap_changeprot_local(idt_vaddr, VM_PROT_READ|VM_PROT_WRITE);
1427 1427
1428 memset(gd, 0, sizeof (*gd)); 1428 memset(gd, 0, sizeof (*gd));
1429 1429
1430 pmap_changeprot_local(idt_vaddr, VM_PROT_READ); 1430 pmap_changeprot_local(idt_vaddr, VM_PROT_READ);
1431 kpreempt_enable(); 1431 kpreempt_enable();
1432} 1432}
1433 1433
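setgate() splits the 64-bit handler address into the descriptor's 16-bit
low-offset and 48-bit high-offset fields, which the CPU reassembles when
the gate fires. A quick self-contained check of that split, using plain
integers rather than the packed descriptor (the handler address here is
hypothetical):

    #include <stdint.h>
    #include <assert.h>

    int
    main(void)
    {
        uint64_t func = 0xffffffff80123456ULL;  /* hypothetical handler */
        uint64_t lo = func & 0xffff;            /* gd_looffset */
        uint64_t hi = func >> 16;               /* gd_hioffset */

        assert(((hi << 16) | lo) == func);      /* reassembles exactly */
        return 0;
    }
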
1434void 1434void
1435setregion(struct region_descriptor *rd, void *base, uint16_t limit) 1435setregion(struct region_descriptor *rd, void *base, uint16_t limit)
1436{ 1436{
1437 rd->rd_limit = limit; 1437 rd->rd_limit = limit;
1438 rd->rd_base = (uint64_t)base; 1438 rd->rd_base = (uint64_t)base;
1439} 1439}
1440 1440
1441/* 1441/*
1442 * Note that the base and limit fields are ignored in long mode. 1442 * Note that the base and limit fields are ignored in long mode.
1443 */ 1443 */
1444void 1444void
1445set_mem_segment(struct mem_segment_descriptor *sd, void *base, size_t limit, 1445set_mem_segment(struct mem_segment_descriptor *sd, void *base, size_t limit,
1446 int type, int dpl, int gran, int def32, int is64) 1446 int type, int dpl, int gran, int def32, int is64)
1447{ 1447{
1448 sd->sd_lolimit = (unsigned)limit; 1448 sd->sd_lolimit = (unsigned)limit;
1449 sd->sd_lobase = (unsigned long)base; 1449 sd->sd_lobase = (unsigned long)base;
1450 sd->sd_type = type; 1450 sd->sd_type = type;
1451 sd->sd_dpl = dpl; 1451 sd->sd_dpl = dpl;
1452 sd->sd_p = 1; 1452 sd->sd_p = 1;
1453 sd->sd_hilimit = (unsigned)limit >> 16; 1453 sd->sd_hilimit = (unsigned)limit >> 16;
1454 sd->sd_avl = 0; 1454 sd->sd_avl = 0;
1455 sd->sd_long = is64; 1455 sd->sd_long = is64;
1456 sd->sd_def32 = def32; 1456 sd->sd_def32 = def32;
1457 sd->sd_gran = gran; 1457 sd->sd_gran = gran;
1458 sd->sd_hibase = (unsigned long)base >> 24; 1458 sd->sd_hibase = (unsigned long)base >> 24;
1459} 1459}
1460 1460
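set_mem_segment() likewise splits its arguments: the 20-bit limit across
sd_lolimit/sd_hilimit and the 32-bit base across sd_lobase/sd_hibase (at
bit 24). With gran set, the limit is counted in 4 KiB units, which is why
the flat kernel segments below pass 0xfffff (the full 4 GiB) and the
32-bit user segments pass x86_btop(VM_MAXUSER_ADDRESS32) - 1, a page
count, rather than a byte length.
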
1461void 1461void
1462set_sys_segment(struct sys_segment_descriptor *sd, void *base, size_t limit, 1462set_sys_segment(struct sys_segment_descriptor *sd, void *base, size_t limit,
1463 int type, int dpl, int gran) 1463 int type, int dpl, int gran)
1464{ 1464{
1465 memset(sd, 0, sizeof *sd); 1465 memset(sd, 0, sizeof *sd);
1466 sd->sd_lolimit = (unsigned)limit; 1466 sd->sd_lolimit = (unsigned)limit;
1467 sd->sd_lobase = (uint64_t)base; 1467 sd->sd_lobase = (uint64_t)base;
1468 sd->sd_type = type; 1468 sd->sd_type = type;
1469 sd->sd_dpl = dpl; 1469 sd->sd_dpl = dpl;
1470 sd->sd_p = 1; 1470 sd->sd_p = 1;
1471 sd->sd_hilimit = (unsigned)limit >> 16; 1471 sd->sd_hilimit = (unsigned)limit >> 16;
1472 sd->sd_gran = gran; 1472 sd->sd_gran = gran;
1473 sd->sd_hibase = (uint64_t)base >> 24; 1473 sd->sd_hibase = (uint64_t)base >> 24;
1474} 1474}
1475 1475
1476void 1476void
1477cpu_init_idt(void) 1477cpu_init_idt(void)
1478{ 1478{
1479 struct region_descriptor region; 1479 struct region_descriptor region;
1480 1480
1481 setregion(&region, idt, NIDT * sizeof(idt[0]) - 1); 1481 setregion(&region, idt, NIDT * sizeof(idt[0]) - 1);
1482 lidt(&region); 1482 lidt(&region);
1483} 1483}
1484 1484
1485#define IDTVEC(name) __CONCAT(X, name) 1485#define IDTVEC(name) __CONCAT(X, name)
1486typedef void (vector)(void); 1486typedef void (vector)(void);
1487extern vector IDTVEC(syscall); 1487extern vector IDTVEC(syscall);
1488extern vector IDTVEC(syscall32); 1488extern vector IDTVEC(syscall32);
1489extern vector IDTVEC(osyscall); 1489extern vector IDTVEC(osyscall);
1490extern vector *x86_exceptions[]; 1490extern vector *x86_exceptions[];
1491 1491
1492static void 1492static void
1493init_x86_64_ksyms(void) 1493init_x86_64_ksyms(void)
1494{ 1494{
1495#if NKSYMS || defined(DDB) || defined(MODULAR) 1495#if NKSYMS || defined(DDB) || defined(MODULAR)
1496 extern int end; 1496 extern int end;
1497 extern int *esym; 1497 extern int *esym;
1498#ifndef XENPV 1498#ifndef XENPV
1499 struct btinfo_symtab *symtab; 1499 struct btinfo_symtab *symtab;
1500 vaddr_t tssym, tesym; 1500 vaddr_t tssym, tesym;
1501#endif 1501#endif
1502 1502
1503#ifdef DDB 1503#ifdef DDB
1504 db_machine_init(); 1504 db_machine_init();
1505#endif 1505#endif
1506 1506
1507#ifndef XENPV 1507#ifndef XENPV
1508 symtab = lookup_bootinfo(BTINFO_SYMTAB); 1508 symtab = lookup_bootinfo(BTINFO_SYMTAB);
1509 if (symtab) { 1509 if (symtab) {
1510#ifdef KASLR 1510#ifdef KASLR
1511 tssym = bootspace.head.va; 1511 tssym = bootspace.head.va;
1512 tesym = bootspace.head.va; /* (unused...) */ 1512 tesym = bootspace.head.va; /* (unused...) */
1513#else 1513#else
1514 tssym = (vaddr_t)symtab->ssym + KERNBASE; 1514 tssym = (vaddr_t)symtab->ssym + KERNBASE;
1515 tesym = (vaddr_t)symtab->esym + KERNBASE; 1515 tesym = (vaddr_t)symtab->esym + KERNBASE;
1516#endif 1516#endif
1517 ksyms_addsyms_elf(symtab->nsym, (void *)tssym, (void *)tesym); 1517 ksyms_addsyms_elf(symtab->nsym, (void *)tssym, (void *)tesym);
1518 } else 1518 } else
1519 ksyms_addsyms_elf(*(long *)(void *)&end, 1519 ksyms_addsyms_elf(*(long *)(void *)&end,
1520 ((long *)(void *)&end) + 1, esym); 1520 ((long *)(void *)&end) + 1, esym);
1521#else /* XENPV */ 1521#else /* XENPV */
1522 esym = xen_start_info.mod_start ? 1522 esym = xen_start_info.mod_start ?
1523 (void *)xen_start_info.mod_start : 1523 (void *)xen_start_info.mod_start :
1524 (void *)xen_start_info.mfn_list; 1524 (void *)xen_start_info.mfn_list;
1525 ksyms_addsyms_elf(*(int *)(void *)&end, 1525 ksyms_addsyms_elf(*(int *)(void *)&end,
1526 ((int *)(void *)&end) + 1, esym); 1526 ((int *)(void *)&end) + 1, esym);
1527#endif /* XENPV */ 1527#endif /* XENPV */
1528#endif 1528#endif
1529} 1529}
1530 1530
1531void __noasan 1531void __noasan
1532init_bootspace(void) 1532init_bootspace(void)
1533{ 1533{
1534 extern char __rodata_start; 1534 extern char __rodata_start;
1535 extern char __data_start; 1535 extern char __data_start;
1536 extern char __kernel_end; 1536 extern char __kernel_end;
1537 size_t i = 0; 1537 size_t i = 0;
1538 1538
1539 memset(&bootspace, 0, sizeof(bootspace)); 1539 memset(&bootspace, 0, sizeof(bootspace));
1540 1540
1541 bootspace.head.va = KERNTEXTOFF; 1541 bootspace.head.va = KERNTEXTOFF;
1542 bootspace.head.pa = KERNTEXTOFF - KERNBASE; 1542 bootspace.head.pa = KERNTEXTOFF - KERNBASE;
1543 bootspace.head.sz = 0; 1543 bootspace.head.sz = 0;
1544 1544
1545 bootspace.segs[i].type = BTSEG_TEXT; 1545 bootspace.segs[i].type = BTSEG_TEXT;
1546 bootspace.segs[i].va = KERNTEXTOFF; 1546 bootspace.segs[i].va = KERNTEXTOFF;
1547 bootspace.segs[i].pa = KERNTEXTOFF - KERNBASE; 1547 bootspace.segs[i].pa = KERNTEXTOFF - KERNBASE;
1548 bootspace.segs[i].sz = (size_t)&__rodata_start - KERNTEXTOFF; 1548 bootspace.segs[i].sz = (size_t)&__rodata_start - KERNTEXTOFF;
1549 i++; 1549 i++;
1550 1550
1551 bootspace.segs[i].type = BTSEG_RODATA; 1551 bootspace.segs[i].type = BTSEG_RODATA;
1552 bootspace.segs[i].va = (vaddr_t)&__rodata_start; 1552 bootspace.segs[i].va = (vaddr_t)&__rodata_start;
1553 bootspace.segs[i].pa = (paddr_t)&__rodata_start - KERNBASE; 1553 bootspace.segs[i].pa = (paddr_t)&__rodata_start - KERNBASE;
1554 bootspace.segs[i].sz = (size_t)&__data_start - (size_t)&__rodata_start; 1554 bootspace.segs[i].sz = (size_t)&__data_start - (size_t)&__rodata_start;
1555 i++; 1555 i++;
1556 1556
1557 bootspace.segs[i].type = BTSEG_DATA; 1557 bootspace.segs[i].type = BTSEG_DATA;
1558 bootspace.segs[i].va = (vaddr_t)&__data_start; 1558 bootspace.segs[i].va = (vaddr_t)&__data_start;
1559 bootspace.segs[i].pa = (paddr_t)&__data_start - KERNBASE; 1559 bootspace.segs[i].pa = (paddr_t)&__data_start - KERNBASE;
1560 bootspace.segs[i].sz = (size_t)&__kernel_end - (size_t)&__data_start; 1560 bootspace.segs[i].sz = (size_t)&__kernel_end - (size_t)&__data_start;
1561 i++; 1561 i++;
1562 1562
1563 bootspace.boot.va = (vaddr_t)&__kernel_end; 1563 bootspace.boot.va = (vaddr_t)&__kernel_end;
1564 bootspace.boot.pa = (paddr_t)&__kernel_end - KERNBASE; 1564 bootspace.boot.pa = (paddr_t)&__kernel_end - KERNBASE;
1565 bootspace.boot.sz = (size_t)(atdevbase + IOM_SIZE) - 1565 bootspace.boot.sz = (size_t)(atdevbase + IOM_SIZE) -
1566 (size_t)&__kernel_end; 1566 (size_t)&__kernel_end;
1567 1567
1568 /* In locore.S, we allocated a tmp va. We will use it now. */ 1568 /* In locore.S, we allocated a tmp va. We will use it now. */
1569 bootspace.spareva = KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2; 1569 bootspace.spareva = KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2;
1570 1570
1571 /* Virtual address of the L4 page. */ 1571 /* Virtual address of the L4 page. */
1572 bootspace.pdir = (vaddr_t)(PDPpaddr + KERNBASE); 1572 bootspace.pdir = (vaddr_t)(PDPpaddr + KERNBASE);
1573 1573
1574 /* Kernel module map. */ 1574 /* Kernel module map. */
1575 bootspace.smodule = (vaddr_t)atdevbase + IOM_SIZE; 1575 bootspace.smodule = (vaddr_t)atdevbase + IOM_SIZE;
1576 bootspace.emodule = KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2; 1576 bootspace.emodule = KERNBASE + NKL2_KIMG_ENTRIES * NBPD_L2;
1577} 1577}
1578 1578
1579static void __noasan 1579static void __noasan
1580init_pte(void) 1580init_pte(void)
1581{ 1581{
1582#ifndef XENPV 1582#ifndef XENPV
1583 extern uint32_t nox_flag; 1583 extern uint32_t nox_flag;
1584 pd_entry_t *pdir = (pd_entry_t *)bootspace.pdir; 1584 pd_entry_t *pdir = (pd_entry_t *)bootspace.pdir;
1585 pdir[L4_SLOT_PTE] = PDPpaddr | PTE_W | ((uint64_t)nox_flag << 32) | 1585 pdir[L4_SLOT_PTE] = PDPpaddr | PTE_W | ((uint64_t)nox_flag << 32) |
1586 PTE_P; 1586 PTE_P;
1587#endif 1587#endif
1588 1588
1589 extern pd_entry_t *normal_pdes[3]; 1589 extern pd_entry_t *normal_pdes[3];
1590 normal_pdes[0] = L2_BASE; 1590 normal_pdes[0] = L2_BASE;
1591 normal_pdes[1] = L3_BASE; 1591 normal_pdes[1] = L3_BASE;
1592 normal_pdes[2] = L4_BASE; 1592 normal_pdes[2] = L4_BASE;
1593} 1593}
1594 1594
1595void __noasan 1595void __noasan
1596init_slotspace(void) 1596init_slotspace(void)
1597{ 1597{
1598 vaddr_t va; 1598 vaddr_t va;
1599 1599
1600 memset(&slotspace, 0, sizeof(slotspace)); 1600 memset(&slotspace, 0, sizeof(slotspace));
1601 1601
1602 /* User. [256, because we want to land in >= 256] */ 1602 /* User. [256, because we want to land in >= 256] */
1603 slotspace.area[SLAREA_USER].sslot = 0; 1603 slotspace.area[SLAREA_USER].sslot = 0;
1604 slotspace.area[SLAREA_USER].nslot = PDIR_SLOT_USERLIM+1; 1604 slotspace.area[SLAREA_USER].nslot = PDIR_SLOT_USERLIM+1;
1605 slotspace.area[SLAREA_USER].active = true; 1605 slotspace.area[SLAREA_USER].active = true;
1606 1606
1607#ifdef XENPV 1607#ifdef XENPV
1608 /* PTE. */ 1608 /* PTE. */
1609 slotspace.area[SLAREA_PTE].sslot = PDIR_SLOT_PTE; 1609 slotspace.area[SLAREA_PTE].sslot = PDIR_SLOT_PTE;
1610 slotspace.area[SLAREA_PTE].nslot = 1; 1610 slotspace.area[SLAREA_PTE].nslot = 1;
1611 slotspace.area[SLAREA_PTE].active = true; 1611 slotspace.area[SLAREA_PTE].active = true;
1612#endif 1612#endif
1613 1613
1614#ifdef __HAVE_PCPU_AREA 1614#ifdef __HAVE_PCPU_AREA
1615 /* Per-CPU. */ 1615 /* Per-CPU. */
1616 slotspace.area[SLAREA_PCPU].sslot = PDIR_SLOT_PCPU; 1616 slotspace.area[SLAREA_PCPU].sslot = PDIR_SLOT_PCPU;
1617 slotspace.area[SLAREA_PCPU].nslot = 1; 1617 slotspace.area[SLAREA_PCPU].nslot = 1;
1618 slotspace.area[SLAREA_PCPU].active = true; 1618 slotspace.area[SLAREA_PCPU].active = true;
1619#endif 1619#endif
1620 1620
1621#ifdef __HAVE_DIRECT_MAP 1621#ifdef __HAVE_DIRECT_MAP
1622 /* Direct Map. [Randomized later] */ 1622 /* Direct Map. [Randomized later] */
1623 slotspace.area[SLAREA_DMAP].active = false; 1623 slotspace.area[SLAREA_DMAP].active = false;
1624#endif 1624#endif
1625 1625
1626#ifdef XENPV 1626#ifdef XENPV
1627 /* Hypervisor. */ 1627 /* Hypervisor. */
1628 slotspace.area[SLAREA_HYPV].sslot = 256; 1628 slotspace.area[SLAREA_HYPV].sslot = 256;
1629 slotspace.area[SLAREA_HYPV].nslot = 17; 1629 slotspace.area[SLAREA_HYPV].nslot = 17;
1630 slotspace.area[SLAREA_HYPV].active = true; 1630 slotspace.area[SLAREA_HYPV].active = true;
1631#endif 1631#endif
1632 1632
1633#ifdef KASAN 1633#ifdef KASAN
1634 /* ASAN. */ 1634 /* ASAN. */
1635 slotspace.area[SLAREA_ASAN].sslot = L4_SLOT_KASAN; 1635 slotspace.area[SLAREA_ASAN].sslot = L4_SLOT_KASAN;
1636 slotspace.area[SLAREA_ASAN].nslot = NL4_SLOT_KASAN; 1636 slotspace.area[SLAREA_ASAN].nslot = NL4_SLOT_KASAN;
1637 slotspace.area[SLAREA_ASAN].active = true; 1637 slotspace.area[SLAREA_ASAN].active = true;
1638#endif 1638#endif
1639 1639
1640#ifdef KMSAN 1640#ifdef KMSAN
1641 /* MSAN. */ 1641 /* MSAN. */
1642 slotspace.area[SLAREA_MSAN].sslot = L4_SLOT_KMSAN; 1642 slotspace.area[SLAREA_MSAN].sslot = L4_SLOT_KMSAN;
1643 slotspace.area[SLAREA_MSAN].nslot = NL4_SLOT_KMSAN; 1643 slotspace.area[SLAREA_MSAN].nslot = NL4_SLOT_KMSAN;
1644 slotspace.area[SLAREA_MSAN].active = true; 1644 slotspace.area[SLAREA_MSAN].active = true;
1645#endif 1645#endif
1646 1646
1647 /* Kernel. */ 1647 /* Kernel. */
1648 slotspace.area[SLAREA_KERN].sslot = L4_SLOT_KERNBASE; 1648 slotspace.area[SLAREA_KERN].sslot = L4_SLOT_KERNBASE;
1649 slotspace.area[SLAREA_KERN].nslot = 1; 1649 slotspace.area[SLAREA_KERN].nslot = 1;
1650 slotspace.area[SLAREA_KERN].active = true; 1650 slotspace.area[SLAREA_KERN].active = true;
1651 1651
1652 /* Main. */ 1652 /* Main. */
1653 va = slotspace_rand(SLAREA_MAIN, NKL4_MAX_ENTRIES * NBPD_L4, 1653 va = slotspace_rand(SLAREA_MAIN, NKL4_MAX_ENTRIES * NBPD_L4,
1654 NBPD_L4); /* TODO: NBPD_L1 */ 1654 NBPD_L4); /* TODO: NBPD_L1 */
1655 vm_min_kernel_address = va; 1655 vm_min_kernel_address = va;
1656 vm_max_kernel_address = va + NKL4_MAX_ENTRIES * NBPD_L4; 1656 vm_max_kernel_address = va + NKL4_MAX_ENTRIES * NBPD_L4;
1657 1657
1658#ifndef XENPV 1658#ifndef XENPV
1659 /* PTE. */ 1659 /* PTE. */
1660 va = slotspace_rand(SLAREA_PTE, NBPD_L4, NBPD_L4); 1660 va = slotspace_rand(SLAREA_PTE, NBPD_L4, NBPD_L4);
1661 pte_base = (pd_entry_t *)va; 1661 pte_base = (pd_entry_t *)va;
1662#endif 1662#endif
1663} 1663}
1664 1664
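For scale: on amd64's 4-level paging each L4 slot covers NBPD_L4 == 512 GiB
of virtual space, and addresses are sign-extended, so slot 256 is the first
slot that lands in the kernel half of the address space (0xffff800000000000
and up), which is what the "[256, ...]" remark above is getting at.
KERNBASE at 0xffffffff80000000 sits in the last slot, 511.
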
1665void __noasan 1665void __noasan
1666init_x86_64(paddr_t first_avail) 1666init_x86_64(paddr_t first_avail)
1667{ 1667{
1668 extern void consinit(void); 1668 extern void consinit(void);
1669 struct region_descriptor region; 1669 struct region_descriptor region;
1670 struct mem_segment_descriptor *ldt_segp; 1670 struct mem_segment_descriptor *ldt_segp;
1671 int x; 1671 int x;
1672 struct pcb *pcb; 1672 struct pcb *pcb;
1673 extern vaddr_t lwp0uarea; 1673 extern vaddr_t lwp0uarea;
1674#ifndef XENPV 1674#ifndef XENPV
1675 extern paddr_t local_apic_pa; 1675 extern paddr_t local_apic_pa;
1676#endif 1676#endif
1677 1677
1678 KASSERT(first_avail % PAGE_SIZE == 0); 1678 KASSERT(first_avail % PAGE_SIZE == 0);
1679 1679
1680#ifdef XENPV 1680#ifdef XENPV
1681 KASSERT(HYPERVISOR_shared_info != NULL); 1681 KASSERT(HYPERVISOR_shared_info != NULL);
1682 cpu_info_primary.ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[0]; 1682 cpu_info_primary.ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[0];
1683#endif 1683#endif
1684 1684
1685 init_pte(); 1685 init_pte();
1686 1686
1687 kasan_early_init((void *)lwp0uarea); 1687 kasan_early_init((void *)lwp0uarea);
1688 1688
1689 uvm_lwp_setuarea(&lwp0, lwp0uarea); 1689 uvm_lwp_setuarea(&lwp0, lwp0uarea);
1690 1690
1691 cpu_probe(&cpu_info_primary); 1691 cpu_probe(&cpu_info_primary);
1692#ifdef SVS 1692#ifdef SVS
1693 svs_init(); 1693 svs_init();
1694#endif 1694#endif
1695 cpu_init_msrs(&cpu_info_primary, true); 1695 cpu_init_msrs(&cpu_info_primary, true);
1696#ifndef XEN 1696#ifndef XEN
1697 cpu_speculation_init(&cpu_info_primary); 1697 cpu_speculation_init(&cpu_info_primary);
1698#endif 1698#endif
1699 1699
1700 use_pae = 1; /* PAE always enabled in long mode */ 1700 use_pae = 1; /* PAE always enabled in long mode */
1701 1701
1702 pcb = lwp_getpcb(&lwp0); 1702 pcb = lwp_getpcb(&lwp0);
1703#ifdef XENPV 1703#ifdef XENPV
1704 mutex_init(&pte_lock, MUTEX_DEFAULT, IPL_VM); 1704 mutex_init(&pte_lock, MUTEX_DEFAULT, IPL_VM);
1705 pcb->pcb_cr3 = xen_start_info.pt_base - KERNBASE; 1705 pcb->pcb_cr3 = xen_start_info.pt_base - KERNBASE;
1706#else 1706#else
1707 pcb->pcb_cr3 = PDPpaddr; 1707 pcb->pcb_cr3 = PDPpaddr;
1708#endif 1708#endif
1709 1709
1710#if NISA > 0 || NPCI > 0 1710#if NISA > 0 || NPCI > 0
1711 x86_bus_space_init(); 1711 x86_bus_space_init();
1712#endif 1712#endif
1713 1713
1714 consinit(); /* XXX SHOULD NOT BE DONE HERE */ 1714 consinit(); /* XXX SHOULD NOT BE DONE HERE */
1715 1715
1716 /* 1716 /*
1717 * Initialize PAGE_SIZE-dependent variables. 1717 * Initialize PAGE_SIZE-dependent variables.
1718 */ 1718 */
1719 uvm_md_init(); 1719 uvm_md_init();
1720 1720
1721 uvmexp.ncolors = 2; 1721 uvmexp.ncolors = 2;
1722 1722
1723 avail_start = first_avail; 1723 avail_start = first_avail;
1724 1724
1725#ifndef XENPV 1725#ifndef XENPV
1726 /* 1726 /*
1727 * Low memory reservations: 1727 * Low memory reservations:
1728 * Page 0: BIOS data 1728 * Page 0: BIOS data
1729 * Page 1: BIOS callback (not used yet, for symmetry with i386) 1729 * Page 1: BIOS callback (not used yet, for symmetry with i386)
1730 * Page 2: MP bootstrap code (MP_TRAMPOLINE) 1730 * Page 2: MP bootstrap code (MP_TRAMPOLINE)
1731 * Page 3: ACPI wakeup code (ACPI_WAKEUP_ADDR) 1731 * Page 3: ACPI wakeup code (ACPI_WAKEUP_ADDR)
1732 * Page 4: Temporary page table for 0MB-4MB 1732 * Page 4: Temporary page table for 0MB-4MB
1733 * Page 5: Temporary page directory 1733 * Page 5: Temporary page directory
1734 * Page 6: Temporary page map level 3 1734 * Page 6: Temporary page map level 3
1735 * Page 7: Temporary page map level 4 1735 * Page 7: Temporary page map level 4
1736 */ 1736 */
1737 lowmem_rsvd = 8 * PAGE_SIZE; 1737 lowmem_rsvd = 8 * PAGE_SIZE;
1738 1738
1739 /* Initialize the memory clusters (needed in pmap_bootstrap). */ 1739 /* Initialize the memory clusters (needed in pmap_bootstrap). */
1740 init_x86_clusters(); 1740 init_x86_clusters();
1741#else 1741#else
1742 /* Parse Xen command line (replace bootinfo) */ 1742 /* Parse Xen command line (replace bootinfo) */
1743 xen_parse_cmdline(XEN_PARSE_BOOTFLAGS, NULL); 1743 xen_parse_cmdline(XEN_PARSE_BOOTFLAGS, NULL);
1744 1744
1745 avail_end = ctob(xen_start_info.nr_pages); 1745 avail_end = ctob(xen_start_info.nr_pages);
1746 pmap_pa_start = (KERNTEXTOFF - KERNBASE); 1746 pmap_pa_start = (KERNTEXTOFF - KERNBASE);
1747 pmap_pa_end = avail_end; 1747 pmap_pa_end = avail_end;
1748#endif 1748#endif
1749 1749
1750 /* 1750 /*
1751 * Call pmap initialization to make new kernel address space. 1751 * Call pmap initialization to make new kernel address space.
1752 * We must do this before loading pages into the VM system. 1752 * We must do this before loading pages into the VM system.
1753 */ 1753 */
1754 pmap_bootstrap(VM_MIN_KERNEL_ADDRESS); 1754 pmap_bootstrap(VM_MIN_KERNEL_ADDRESS);
1755 1755
1756#ifndef XENPV 1756#ifndef XENPV
1757 /* Internalize the physical pages into the VM system. */ 1757 /* Internalize the physical pages into the VM system. */
1758 init_x86_vm(avail_start); 1758 init_x86_vm(avail_start);
1759#else 1759#else
1760 physmem = xen_start_info.nr_pages; 1760 physmem = xen_start_info.nr_pages;
1761 uvm_page_physload(atop(avail_start), atop(avail_end), 1761 uvm_page_physload(atop(avail_start), atop(avail_end),
1762 atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT); 1762 atop(avail_start), atop(avail_end), VM_FREELIST_DEFAULT);
1763#endif 1763#endif
1764 1764
1765 init_x86_msgbuf(); 1765 init_x86_msgbuf();
1766 1766
1767 kasan_init(); 1767 kasan_init();
1768 kcsan_init(); 1768 kcsan_init();
1769 kmsan_init((void *)lwp0uarea); 1769 kmsan_init((void *)lwp0uarea);
1770 1770
1771 pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024); 1771 pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 32 * 1024 * 1024);
1772 1772
1773 kpreempt_disable(); 1773 kpreempt_disable();
1774 1774
1775#ifndef XENPV 1775#ifndef XENPV
1776 pmap_kenter_pa(local_apic_va, local_apic_pa, 1776 pmap_kenter_pa(local_apic_va, local_apic_pa,
1777 VM_PROT_READ|VM_PROT_WRITE, 0); 1777 VM_PROT_READ|VM_PROT_WRITE, 0);
1778 pmap_update(pmap_kernel()); 1778 pmap_update(pmap_kernel());
1779 memset((void *)local_apic_va, 0, PAGE_SIZE); 1779 memset((void *)local_apic_va, 0, PAGE_SIZE);
1780#endif 1780#endif
1781 1781
1782 pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 1782 pmap_kenter_pa(idt_vaddr, idt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
1783 pmap_kenter_pa(gdt_vaddr, gdt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 1783 pmap_kenter_pa(gdt_vaddr, gdt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
1784 pmap_kenter_pa(ldt_vaddr, ldt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0); 1784 pmap_kenter_pa(ldt_vaddr, ldt_paddr, VM_PROT_READ|VM_PROT_WRITE, 0);
1785 pmap_update(pmap_kernel()); 1785 pmap_update(pmap_kernel());
1786 memset((void *)idt_vaddr, 0, PAGE_SIZE); 1786 memset((void *)idt_vaddr, 0, PAGE_SIZE);
1787 memset((void *)gdt_vaddr, 0, PAGE_SIZE); 1787 memset((void *)gdt_vaddr, 0, PAGE_SIZE);
1788 memset((void *)ldt_vaddr, 0, PAGE_SIZE); 1788 memset((void *)ldt_vaddr, 0, PAGE_SIZE);
1789 1789
1790#ifndef XENPV 1790#ifndef XENPV
1791 pmap_changeprot_local(idt_vaddr, VM_PROT_READ); 1791 pmap_changeprot_local(idt_vaddr, VM_PROT_READ);
1792#endif 1792#endif
1793 1793
1794 pmap_update(pmap_kernel()); 1794 pmap_update(pmap_kernel());
1795 1795
1796 idt = (idt_descriptor_t *)idt_vaddr; 1796 idt = (idt_descriptor_t *)idt_vaddr;
1797 gdtstore = (char *)gdt_vaddr; 1797 gdtstore = (char *)gdt_vaddr;
1798 ldtstore = (char *)ldt_vaddr; 1798 ldtstore = (char *)ldt_vaddr;
1799 1799
1800 /* 1800 /*
1801 * Make GDT gates and memory segments. 1801 * Make GDT gates and memory segments.
1802 */ 1802 */
1803 set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0, 1803 set_mem_segment(GDT_ADDR_MEM(gdtstore, GCODE_SEL), 0,
1804 0xfffff, SDT_MEMERA, SEL_KPL, 1, 0, 1); 1804 0xfffff, SDT_MEMERA, SEL_KPL, 1, 0, 1);
1805 1805
1806 set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0, 1806 set_mem_segment(GDT_ADDR_MEM(gdtstore, GDATA_SEL), 0,
1807 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 0, 1); 1807 0xfffff, SDT_MEMRWA, SEL_KPL, 1, 0, 1);
1808 1808
1809 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0, 1809 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE_SEL), 0,
1810 x86_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1); 1810 x86_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMERA, SEL_UPL, 1, 0, 1);
1811 1811
1812 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0, 1812 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA_SEL), 0,
1813 x86_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1); 1813 x86_btop(VM_MAXUSER_ADDRESS) - 1, SDT_MEMRWA, SEL_UPL, 1, 0, 1);
1814 1814
1815#ifndef XENPV 1815#ifndef XENPV
1816 set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore, 1816 set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
1817 LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0); 1817 LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);
1818#endif 1818#endif
1819 1819
1820 /* 1820 /*
1821 * Make LDT memory segments. 1821 * Make LDT memory segments.
1822 */ 1822 */
1823 *(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) = 1823 *(struct mem_segment_descriptor *)(ldtstore + LUCODE_SEL) =
1824 *GDT_ADDR_MEM(gdtstore, GUCODE_SEL); 1824 *GDT_ADDR_MEM(gdtstore, GUCODE_SEL);
1825 *(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) = 1825 *(struct mem_segment_descriptor *)(ldtstore + LUDATA_SEL) =
1826 *GDT_ADDR_MEM(gdtstore, GUDATA_SEL); 1826 *GDT_ADDR_MEM(gdtstore, GUDATA_SEL);
1827 1827
1828 /* 1828 /*
1829 * 32 bit GDT entries. 1829 * 32 bit GDT entries.
1830 */ 1830 */
1831 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0, 1831 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUCODE32_SEL), 0,
1832 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0); 1832 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMERA, SEL_UPL, 1, 1, 0);
1833 1833
1834 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0, 1834 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUDATA32_SEL), 0,
1835 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0); 1835 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);
1836 1836
1837 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUFS_SEL), 0, 1837 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUFS_SEL), 0,
1838 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0); 1838 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);
1839 1839
1840 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUGS_SEL), 0, 1840 set_mem_segment(GDT_ADDR_MEM(gdtstore, GUGS_SEL), 0,
1841 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0); 1841 x86_btop(VM_MAXUSER_ADDRESS32) - 1, SDT_MEMRWA, SEL_UPL, 1, 1, 0);
1842 1842
1843 /* 1843 /*
1844 * 32 bit LDT entries. 1844 * 32 bit LDT entries.
1845 */ 1845 */
1846 ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL); 1846 ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUCODE32_SEL);
1847 set_mem_segment(ldt_segp, 0, x86_btop(VM_MAXUSER_ADDRESS32) - 1, 1847 set_mem_segment(ldt_segp, 0, x86_btop(VM_MAXUSER_ADDRESS32) - 1,
1848 SDT_MEMERA, SEL_UPL, 1, 1, 0); 1848 SDT_MEMERA, SEL_UPL, 1, 1, 0);
1849 ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL); 1849 ldt_segp = (struct mem_segment_descriptor *)(ldtstore + LUDATA32_SEL);
1850 set_mem_segment(ldt_segp, 0, x86_btop(VM_MAXUSER_ADDRESS32) - 1, 1850 set_mem_segment(ldt_segp, 0, x86_btop(VM_MAXUSER_ADDRESS32) - 1,
1851 SDT_MEMRWA, SEL_UPL, 1, 1, 0); 1851 SDT_MEMRWA, SEL_UPL, 1, 1, 0);
1852 1852
1853 /* CPU-specific IDT exceptions. */ 1853 /* CPU-specific IDT exceptions. */
1854 for (x = 0; x < NCPUIDT; x++) { 1854 for (x = 0; x < NCPUIDT; x++) {
1855 int sel, ist; 1855 int sel, ist;
1856 1856
1857 /* Reset to default. Special cases below */ 1857 /* Reset to default. Special cases below */
1858 sel = SEL_KPL; 1858 sel = SEL_KPL;
1859 ist = 0; 1859 ist = 0;
1860 1860
1861 idt_vec_reserve(x); 1861 idt_vec_reserve(x);
1862 1862
1863 switch (x) { 1863 switch (x) {
1864 case 1: /* DB */ 1864 case 1: /* DB */
1865 ist = 4; 1865 ist = 4;
1866 break; 1866 break;
1867 case 2: /* NMI */ 1867 case 2: /* NMI */
1868 ist = 3; 1868 ist = 3;
1869 break; 1869 break;
1870 case 3: 1870 case 3:
1871 case 4:  1871 case 4:
1872 sel = SEL_UPL; 1872 sel = SEL_UPL;
1873 break; 1873 break;
1874 case 8: /* double fault */ 1874 case 8: /* double fault */
1875 ist = 2; 1875 ist = 2;
1876 break; 1876 break;
1877#ifdef XENPV  1877#ifdef XENPV
1878 case 18: /* MCA */ 1878 case 18: /* MCA */
1879 sel |= 0x4; /* Auto EOI/mask */ 1879 sel |= 0x4; /* Auto EOI/mask */
1880 break; 1880 break;
1881#endif /* XENPV */  1881#endif /* XENPV */
1882 default: 1882 default:
1883 break; 1883 break;
1884 } 1884 }
1885 1885
1886 set_idtgate(&idt[x], x86_exceptions[x], ist, SDT_SYS386IGT, 1886 set_idtgate(&idt[x], x86_exceptions[x], ist, SDT_SYS386IGT,
1887 sel, GSEL(GCODE_SEL, SEL_KPL)); 1887 sel, GSEL(GCODE_SEL, SEL_KPL));
1888 } 1888 }
1889 1889
1890 /* new-style interrupt gate for syscalls */ 1890 /* new-style interrupt gate for syscalls */
1891 idt_vec_reserve(128); 1891 idt_vec_reserve(128);
1892 set_idtgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL, 1892 set_idtgate(&idt[128], &IDTVEC(osyscall), 0, SDT_SYS386IGT, SEL_UPL,
1893 GSEL(GCODE_SEL, SEL_KPL)); 1893 GSEL(GCODE_SEL, SEL_KPL));
1894 1894
1895 kpreempt_enable(); 1895 kpreempt_enable();
1896 1896
1897 setregion(&region, gdtstore, DYNSEL_START - 1); 1897 setregion(&region, gdtstore, DYNSEL_START - 1);
1898 lgdt(&region); 1898 lgdt(&region);
1899 1899
1900#ifdef XENPV 1900#ifdef XENPV
1901 /* Init Xen callbacks and syscall handlers */ 1901 /* Init Xen callbacks and syscall handlers */
1902 if (HYPERVISOR_set_callbacks( 1902 if (HYPERVISOR_set_callbacks(
1903 (unsigned long) hypervisor_callback, 1903 (unsigned long) hypervisor_callback,
1904 (unsigned long) failsafe_callback, 1904 (unsigned long) failsafe_callback,
1905 (unsigned long) Xsyscall)) 1905 (unsigned long) Xsyscall))
1906 panic("HYPERVISOR_set_callbacks() failed"); 1906 panic("HYPERVISOR_set_callbacks() failed");
1907#endif /* XENPV */ 1907#endif /* XENPV */
1908 1908
1909 cpu_init_idt(); 1909 cpu_init_idt();
1910 1910
1911 init_x86_64_ksyms(); 1911 init_x86_64_ksyms();
1912 1912
1913#ifndef XENPV 1913#ifndef XENPV
1914 intr_default_setup(); 1914 intr_default_setup();
1915#else 1915#else
1916 events_default_setup(); 1916 events_default_setup();
1917#endif 1917#endif
1918 1918
1919 splraise(IPL_HIGH); 1919 splraise(IPL_HIGH);
1920 x86_enable_intr(); 1920 x86_enable_intr();
1921 1921
1922#ifdef DDB 1922#ifdef DDB
1923 if (boothowto & RB_KDB) 1923 if (boothowto & RB_KDB)
1924 Debugger(); 1924 Debugger();
1925#endif 1925#endif
1926#ifdef KGDB 1926#ifdef KGDB
1927 kgdb_port_init(); 1927 kgdb_port_init();
1928 if (boothowto & RB_KDB) { 1928 if (boothowto & RB_KDB) {
1929 kgdb_debug_init = 1; 1929 kgdb_debug_init = 1;
1930 kgdb_connect(1); 1930 kgdb_connect(1);
1931 } 1931 }
1932#endif 1932#endif

cvs diff -r1.2 -r1.3 src/sys/compat/common/compat_sysctl_09_43.c

--- src/sys/compat/common/compat_sysctl_09_43.c 2019/01/27 02:08:39 1.2
+++ src/sys/compat/common/compat_sysctl_09_43.c 2019/12/06 08:35:21 1.3
@@ -1,163 +1,163 @@ @@ -1,163 +1,163 @@
1/* $NetBSD: compat_sysctl_09_43.c,v 1.2 2019/01/27 02:08:39 pgoyette Exp $ */ 1/* $NetBSD: compat_sysctl_09_43.c,v 1.3 2019/12/06 08:35:21 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1989, 1993 4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc. 6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed 7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph 8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc. 10 * the permission of UNIX System Laboratories, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
15 * 1. Redistributions of source code must retain the above copyright 15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer. 16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright 17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the 18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution. 19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors 20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software 21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission. 22 * without specific prior written permission.
23 * 23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * @(#)vfs_syscalls.c 8.28 (Berkeley) 12/10/94 36 * @(#)vfs_syscalls.c 8.28 (Berkeley) 12/10/94
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: compat_sysctl_09_43.c,v 1.2 2019/01/27 02:08:39 pgoyette Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: compat_sysctl_09_43.c,v 1.3 2019/12/06 08:35:21 maxv Exp $");
41 41
42#if defined(_KERNEL_OPT) 42#if defined(_KERNEL_OPT)
43#include "opt_compat_netbsd.h" 43#include "opt_compat_netbsd.h"
44#endif 44#endif
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47#include <sys/systm.h> 47#include <sys/systm.h>
48#include <sys/filedesc.h> 48#include <sys/filedesc.h>
49#include <sys/kernel.h> 49#include <sys/kernel.h>
50#include <sys/proc.h> 50#include <sys/proc.h>
51#include <sys/malloc.h> 51#include <sys/malloc.h>
52#include <sys/sysctl.h> 52#include <sys/sysctl.h>
53#include <sys/module.h> 53#include <sys/module.h>
54 54
55#include <sys/mount.h> 55#include <sys/mount.h>
56#include <sys/syscall.h> 56#include <sys/syscall.h>
57#include <sys/syscallvar.h> 57#include <sys/syscallvar.h>
58#include <sys/syscallargs.h> 58#include <sys/syscallargs.h>
59#include <sys/vfs_syscalls.h> 59#include <sys/vfs_syscalls.h>
60 60
61#include <compat/sys/mount.h> 61#include <compat/sys/mount.h>
62#include <compat/common/compat_mod.h> 62#include <compat/common/compat_mod.h>
63 63
64/* 64/*
65 * sysctl helper routine for vfs.generic.conf lookups. 65 * sysctl helper routine for vfs.generic.conf lookups.
66 */ 66 */
67#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44) 67#if defined(COMPAT_09) || defined(COMPAT_43) || defined(COMPAT_44)
68 68
69static int 69static int
70sysctl_vfs_generic_conf(SYSCTLFN_ARGS) 70sysctl_vfs_generic_conf(SYSCTLFN_ARGS)
71{ 71{
72 struct vfsconf vfc; 72 struct vfsconf vfc;
73 struct sysctlnode node; 73 struct sysctlnode node;
74 struct vfsops *vfsp; 74 struct vfsops *vfsp;
75 u_int vfsnum; 75 u_int vfsnum;
76 76
77 if (namelen != 1) 77 if (namelen != 1)
78 return (ENOTDIR); 78 return (ENOTDIR);
79 vfsnum = name[0]; 79 vfsnum = name[0];
80 if (vfsnum >= nmountcompatnames || 80 if (vfsnum >= nmountcompatnames ||
81 mountcompatnames[vfsnum] == NULL) 81 mountcompatnames[vfsnum] == NULL)
82 return (EOPNOTSUPP); 82 return (EOPNOTSUPP);
83 vfsp = vfs_getopsbyname(mountcompatnames[vfsnum]); 83 vfsp = vfs_getopsbyname(mountcompatnames[vfsnum]);
84 if (vfsp == NULL) 84 if (vfsp == NULL)
85 return (EOPNOTSUPP); 85 return (EOPNOTSUPP);
86 86
87 vfc.vfc_vfsops = vfsp; 87 vfc.vfc_vfsops = vfsp;
88 strncpy(vfc.vfc_name, vfsp->vfs_name, sizeof(vfc.vfc_name)); 88 strncpy(vfc.vfc_name, vfsp->vfs_name, sizeof(vfc.vfc_name));
89 vfc.vfc_typenum = vfsnum; 89 vfc.vfc_typenum = vfsnum;
90 vfc.vfc_refcount = vfsp->vfs_refcount; 90 vfc.vfc_refcount = vfsp->vfs_refcount;
91 vfc.vfc_flags = 0; 91 vfc.vfc_flags = 0;
92 vfc.vfc_mountroot = vfsp->vfs_mountroot; 92 vfc.vfc_mountroot = vfsp->vfs_mountroot;
93 vfc.vfc_next = NULL; 93 vfc.vfc_next = NULL;
94 vfs_delref(vfsp); 94 vfs_delref(vfsp);
95 95
96 node = *rnode; 96 node = *rnode;
97 node.sysctl_data = &vfc; 97 node.sysctl_data = &vfc;
98 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 98 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
99} 99}
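
The handler above follows the standard NetBSD sysctl helper pattern: copy *rnode into a stack node, point sysctl_data at a temporary struct, and let sysctl_lookup() do the copyout. Because it consumes one extra name component (the filesystem type number), old userland reached it with a four-level MIB. A minimal sketch of such a caller, assuming the historical struct vfsconf layout that <sys/mount.h> once exposed (the same fields the handler fills in above):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mount.h>		/* assumed to provide the old struct vfsconf */
#include <stdio.h>

int
main(void)
{
	/* vfs.generic.conf.<typenum>: query filesystem type 1 */
	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, 1 };
	struct vfsconf vfc;
	size_t len = sizeof(vfc);

	if (sysctl(mib, 4, &vfc, &len, NULL, 0) == -1) {
		perror("sysctl vfs.generic.conf");
		return 1;
	}
	printf("fs %d: %s (refcount %d)\n", vfc.vfc_typenum,
	    vfc.vfc_name, vfc.vfc_refcount);
	return 0;
}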
100 100
101/* 101/*
102 * Top level filesystem related information gathering. 102 * Top level filesystem related information gathering.
103 */ 103 */
104static int 104static int
105compat_sysctl_vfs(struct sysctllog **clog) 105compat_sysctl_vfs(struct sysctllog **clog)
106{ 106{
107 int error; 107 int error;
108 108
109 error = sysctl_createv(clog, 0, NULL, NULL, 109 error = sysctl_createv(clog, 0, NULL, NULL,
110 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, 110 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
111 CTLTYPE_INT, "maxtypenum", 111 CTLTYPE_INT, "maxtypenum",
112 SYSCTL_DESCR("Highest valid filesystem type number"), 112 SYSCTL_DESCR("Highest valid filesystem type number"),
113 NULL, nmountcompatnames, NULL, 0, 113 NULL, nmountcompatnames, NULL, 0,
114 CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM, CTL_EOL); 114 CTL_VFS, VFS_GENERIC, VFS_MAXTYPENUM, CTL_EOL);
115 if (error == EEXIST) 115 if (error == EEXIST)
116 error = 0; 116 error = 0;
117 if (error != 0) 117 if (error != 0)
118 return error; 118 return error;
119 119
120 error = sysctl_createv(clog, 0, NULL, NULL, 120 error = sysctl_createv(clog, 0, NULL, NULL,
121 CTLFLAG_PERMANENT, 121 CTLFLAG_PERMANENT,
122 CTLTYPE_STRUCT, "conf", 122 CTLTYPE_STRUCT, "conf",
123 SYSCTL_DESCR("Filesystem configuration information"), 123 SYSCTL_DESCR("Filesystem configuration information"),
124 sysctl_vfs_generic_conf, 0, NULL, 124 sysctl_vfs_generic_conf, 0, NULL,
125 sizeof(struct vfsconf), 125 sizeof(struct vfsconf),
126 CTL_VFS, VFS_GENERIC, VFS_CONF, CTL_EOL); 126 CTL_VFS, VFS_GENERIC, VFS_CONF, CTL_EOL);
127 127
128 return error; 128 return error;
129} 129}
130#endif 130#endif
131 131
132static struct sysctllog *clog = NULL; 132static struct sysctllog *compat_09_43_clog = NULL;
133 133
134int 134int
135compat_sysctl_09_43_init(void) 135compat_sysctl_09_43_init(void)
136{ 136{
137 137
138 return compat_sysctl_vfs(&clog); 138 return compat_sysctl_vfs(&compat_09_43_clog);
139} 139}
140 140
141int 141int
142compat_sysctl_09_43_fini(void) 142compat_sysctl_09_43_fini(void)
143{ 143{
144 144
145 sysctl_teardown(&clog); 145 sysctl_teardown(&compat_09_43_clog);
146 return 0; 146 return 0;
147} 147}
148 148
149MODULE (MODULE_CLASS_EXEC, compat_sysctl_09_43, NULL); 149MODULE (MODULE_CLASS_EXEC, compat_sysctl_09_43, NULL);
150 150
151static int 151static int
152compat_sysctl_09_43_modcmd(modcmd_t cmd, void *arg) 152compat_sysctl_09_43_modcmd(modcmd_t cmd, void *arg)
153{ 153{
154 154
155 switch (cmd) { 155 switch (cmd) {
156 case MODULE_CMD_INIT: 156 case MODULE_CMD_INIT:
157 return compat_sysctl_09_43_init(); 157 return compat_sysctl_09_43_init();
158 case MODULE_CMD_FINI: 158 case MODULE_CMD_FINI:
159 return compat_sysctl_09_43_fini(); 159 return compat_sysctl_09_43_fini();
160 default: 160 default:
161 return ENOTTY; 161 return ENOTTY;
162 } 162 }
163} 163}
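
Both hunks in this file apply the recipe the commit message describes: the file-scope static keeps its role but takes a name no local will collide with. A minimal sketch of the warning class being silenced, with illustrative names that are not from the NetBSD tree:

/* Sketch only: the shadowing pattern LGTM flags, and the rename fix. */
static int clog;		/* file-scope object with a collision-prone name */

static int
before(void)
{
	int clog = 0;		/* LGTM: "Local variable hides global variable" */
	return clog + 1;	/* operates on the local; file-scope clog untouched */
}

static int compat_clog;		/* the fix: a distinctive file-scope name */

static int
after(void)
{
	int clog = 0;		/* no longer hides anything */
	return clog + compat_clog;
}

Renaming the global rather than the locals keeps the conventional short names in the functions that do the actual work.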

cvs diff -r1.41 -r1.42 src/sys/kern/tty_subr.c (switch to unified diff)

--- src/sys/kern/tty_subr.c 2017/06/01 02:45:13 1.41
+++ src/sys/kern/tty_subr.c 2019/12/06 08:35:21 1.42
@@ -1,498 +1,498 @@ @@ -1,498 +1,498 @@
1/* $NetBSD: tty_subr.c,v 1.41 2017/06/01 02:45:13 chs Exp $ */ 1/* $NetBSD: tty_subr.c,v 1.42 2019/12/06 08:35:21 maxv Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1993, 1994 Theo de Raadt 4 * Copyright (c) 1993, 1994 Theo de Raadt
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Per Lindqvist <pgd@compuram.bbt.se> supplied an almost fully working 7 * Per Lindqvist <pgd@compuram.bbt.se> supplied an almost fully working
8 * set of true clist functions that this is very loosely based on. 8 * set of true clist functions that this is very loosely based on.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#include <sys/cdefs.h> 31#include <sys/cdefs.h>
32__KERNEL_RCSID(0, "$NetBSD: tty_subr.c,v 1.41 2017/06/01 02:45:13 chs Exp $"); 32__KERNEL_RCSID(0, "$NetBSD: tty_subr.c,v 1.42 2019/12/06 08:35:21 maxv Exp $");
33 33
34#include <sys/param.h> 34#include <sys/param.h>
35#include <sys/systm.h> 35#include <sys/systm.h>
36#include <sys/buf.h> 36#include <sys/buf.h>
37#include <sys/ioctl.h> 37#include <sys/ioctl.h>
38#include <sys/tty.h> 38#include <sys/tty.h>
39#include <sys/kmem.h> 39#include <sys/kmem.h>
40 40
41/* 41/*
42 * At compile time, choose: 42 * At compile time, choose:
43 * There are two ways the TTY_QUOTE bit can be stored. If QBITS is 43 * There are two ways the TTY_QUOTE bit can be stored. If QBITS is
44 * defined we allocate an array of bits -- 1/8th as much memory but 44 * defined we allocate an array of bits -- 1/8th as much memory but
45 * setbit(), clrbit(), and isset() take more CPU. If QBITS is 45 * setbit(), clrbit(), and isset() take more CPU. If QBITS is
46 * undefined, we just use an array of bytes. 46 * undefined, we just use an array of bytes.
47 * 47 *
48 * If TTY_QUOTE functionality isn't required by a line discipline, 48 * If TTY_QUOTE functionality isn't required by a line discipline,
49 * it can free c_cq and set it to NULL. This speeds things up, 49 * it can free c_cq and set it to NULL. This speeds things up,
50 * and also does not use any extra memory. This is useful for (say) 50 * and also does not use any extra memory. This is useful for (say)
51 * a SLIP line discipline that wants a 32K ring buffer for data 51 * a SLIP line discipline that wants a 32K ring buffer for data
52 * but doesn't need quoting. 52 * but doesn't need quoting.
53 */ 53 */
54#define QBITS 54#define QBITS
55 55
56#ifdef QBITS 56#ifdef QBITS
57#define QMEM(n) ((((n)-1)/NBBY)+1) 57#define QMEM(n) ((((n)-1)/NBBY)+1)
58#else 58#else
59#define QMEM(n) (n) 59#define QMEM(n) (n)
60#endif 60#endif
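
With QBITS defined, the quote flags cost one bit per queued character: QMEM(n) rounds n bits up to whole bytes, so a 1024-byte clist carries a 128-byte quote map instead of a second 1024-byte array. A standalone sketch of the arithmetic and of the setbit()/clrbit()/isset() macros the code below relies on; the macro definitions here mirror the classic BSD <sys/param.h> ones:

#include <stdio.h>
#include <string.h>

#define NBBY	8		/* bits per byte, as in <sys/types.h> */
#define QMEM(n)	((((n) - 1) / NBBY) + 1)

/* Classic BSD bitmap macros (mirroring <sys/param.h>). */
#define setbit(a, i)	((a)[(i) / NBBY] |= 1 << ((i) % NBBY))
#define clrbit(a, i)	((a)[(i) / NBBY] &= ~(1 << ((i) % NBBY)))
#define isset(a, i)	((a)[(i) / NBBY] & (1 << ((i) % NBBY)))

int
main(void)
{
	unsigned char quote[QMEM(1024)];	/* 128 bytes cover 1024 chars */

	memset(quote, 0, sizeof(quote));
	printf("QMEM(1024) = %zu bytes\n", sizeof(quote));
	setbit(quote, 100);			/* mark character 100 as quoted */
	printf("bit 100 is %s\n", isset(quote, 100) ? "set" : "clear");
	clrbit(quote, 100);
	printf("bit 100 is %s\n", isset(quote, 100) ? "set" : "clear");
	return 0;
}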
61 61
62#ifdef QBITS 62#ifdef QBITS
63static void clrbits(u_char *, unsigned int, unsigned int); 63static void clrbits(u_char *, unsigned int, unsigned int);
64#endif 64#endif
65 65
66/* 66/*
67 * Initialize a particular clist. Ok, they are really ring buffers, 67 * Initialize a particular clist. Ok, they are really ring buffers,
68 * of the specified length, with/without quoting support. 68 * of the specified length, with/without quoting support.
69 */ 69 */
70int 70int
71clalloc(struct clist *clp, int size, int quot) 71clalloc(struct clist *clp, int size, int quot)
72{ 72{
73 73
74 clp->c_cs = kmem_zalloc(size, KM_SLEEP); 74 clp->c_cs = kmem_zalloc(size, KM_SLEEP);
75 if (quot) 75 if (quot)
76 clp->c_cq = kmem_zalloc(QMEM(size), KM_SLEEP); 76 clp->c_cq = kmem_zalloc(QMEM(size), KM_SLEEP);
77 else 77 else
78 clp->c_cq = NULL; 78 clp->c_cq = NULL;
79 79
80 clp->c_cf = clp->c_cl = NULL; 80 clp->c_cf = clp->c_cl = NULL;
81 clp->c_ce = clp->c_cs + size; 81 clp->c_ce = clp->c_cs + size;
82 clp->c_cn = size; 82 clp->c_cn = size;
83 clp->c_cc = 0; 83 clp->c_cc = 0;
84 84
85 return (0); 85 return (0);
86} 86}
87 87
88void 88void
89clfree(struct clist *clp) 89clfree(struct clist *clp)
90{ 90{
91 if (clp->c_cs) 91 if (clp->c_cs)
92 kmem_free(clp->c_cs, clp->c_cn); 92 kmem_free(clp->c_cs, clp->c_cn);
93 if (clp->c_cq) 93 if (clp->c_cq)
94 kmem_free(clp->c_cq, QMEM(clp->c_cn)); 94 kmem_free(clp->c_cq, QMEM(clp->c_cn));
95 clp->c_cs = clp->c_cq = NULL; 95 clp->c_cs = clp->c_cq = NULL;
96} 96}
97 97
98/* 98/*
99 * Get a character from a clist. 99 * Get a character from a clist.
100 */ 100 */
101int 101int
102getc(struct clist *clp) 102getc(struct clist *clp)
103{ 103{
104 int c = -1; 104 int c = -1;
105 int s; 105 int s;
106 106
107 s = spltty(); 107 s = spltty();
108 if (clp->c_cc == 0) 108 if (clp->c_cc == 0)
109 goto out; 109 goto out;
110 110
111 c = *clp->c_cf & 0xff; 111 c = *clp->c_cf & 0xff;
112 if (clp->c_cq) { 112 if (clp->c_cq) {
113#ifdef QBITS 113#ifdef QBITS
114 if (isset(clp->c_cq, clp->c_cf - clp->c_cs) ) 114 if (isset(clp->c_cq, clp->c_cf - clp->c_cs) )
115 c |= TTY_QUOTE; 115 c |= TTY_QUOTE;
116#else 116#else
117 if (*(clp->c_cf - clp->c_cs + clp->c_cq)) 117 if (*(clp->c_cf - clp->c_cs + clp->c_cq))
118 c |= TTY_QUOTE; 118 c |= TTY_QUOTE;
119#endif 119#endif
120 } 120 }
121 *clp->c_cf = 0; /* wipe out to avoid information disclosure */ 121 *clp->c_cf = 0; /* wipe out to avoid information disclosure */
122 if (++clp->c_cf == clp->c_ce) 122 if (++clp->c_cf == clp->c_ce)
123 clp->c_cf = clp->c_cs; 123 clp->c_cf = clp->c_cs;
124 if (--clp->c_cc == 0) 124 if (--clp->c_cc == 0)
125 clp->c_cf = clp->c_cl = (u_char *)0; 125 clp->c_cf = clp->c_cl = (u_char *)0;
126out: 126out:
127 splx(s); 127 splx(s);
128 return c; 128 return c;
129} 129}
130 130
131/* 131/*
132 * Copy clist to buffer. 132 * Copy clist to buffer.
133 * Return number of bytes moved. 133 * Return number of bytes moved.
134 */ 134 */
135int 135int
136q_to_b(struct clist *clp, u_char *cp, int count) 136q_to_b(struct clist *clp, u_char *cp, int count)
137{ 137{
138 int cc; 138 int cc;
139 u_char *p = cp; 139 u_char *p = cp;
140 int s; 140 int s;
141 141
142 s = spltty(); 142 s = spltty();
143 /* optimize this while loop */ 143 /* optimize this while loop */
144 while (count > 0 && clp->c_cc > 0) { 144 while (count > 0 && clp->c_cc > 0) {
145 cc = clp->c_cl - clp->c_cf; 145 cc = clp->c_cl - clp->c_cf;
146 if (clp->c_cf >= clp->c_cl) 146 if (clp->c_cf >= clp->c_cl)
147 cc = clp->c_ce - clp->c_cf; 147 cc = clp->c_ce - clp->c_cf;
148 if (cc > count) 148 if (cc > count)
149 cc = count; 149 cc = count;
150 memcpy(p, clp->c_cf, cc); 150 memcpy(p, clp->c_cf, cc);
151 count -= cc; 151 count -= cc;
152 p += cc; 152 p += cc;
153 clp->c_cc -= cc; 153 clp->c_cc -= cc;
154 clp->c_cf += cc; 154 clp->c_cf += cc;
155 if (clp->c_cf == clp->c_ce) 155 if (clp->c_cf == clp->c_ce)
156 clp->c_cf = clp->c_cs; 156 clp->c_cf = clp->c_cs;
157 } 157 }
158 if (clp->c_cc == 0) 158 if (clp->c_cc == 0)
159 clp->c_cf = clp->c_cl = (u_char *)0; 159 clp->c_cf = clp->c_cl = (u_char *)0;
160 splx(s); 160 splx(s);
161 return p - cp; 161 return p - cp;
162} 162}
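
q_to_b() never copies byte by byte: each pass moves the longest contiguous run, so a full drain costs at most two memcpy() calls (the tail of the store, then the wrapped head). The run-length rule from lines 145-147, restated as a standalone helper over a hypothetical ring structure with the same pointer roles as struct clist:

/*
 * Sketch with hypothetical names: data between the read pointer (cf)
 * and the write pointer (cl) is contiguous unless it wraps past the
 * end of the store (ce).
 */
struct ring {
	unsigned char *cs;	/* start of store */
	unsigned char *ce;	/* end of store (one past last byte) */
	unsigned char *cf;	/* read pointer ("first") */
	unsigned char *cl;	/* write pointer ("last") */
	int cc;			/* byte count */
};

static int
contig_avail(const struct ring *r)
{
	if (r->cc == 0)
		return 0;
	if (r->cf >= r->cl)		/* wrapped: read up to end of store */
		return (int)(r->ce - r->cf);
	return (int)(r->cl - r->cf);	/* unwrapped: read up to write ptr */
}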
163 163
164/* 164/*
165 * Return count of contiguous characters in clist. 165 * Return count of contiguous characters in clist.
 166 * Stop counting if flag & character is non-zero. 166 * Stop counting if flag & character is non-zero.
167 */ 167 */
168int 168int
169ndqb(struct clist *clp, int flag) 169ndqb(struct clist *clp, int flag)
170{ 170{
171 int count = 0; 171 int count = 0;
172 int i; 172 int i;
173 int cc; 173 int cc;
174 int s; 174 int s;
175 175
176 s = spltty(); 176 s = spltty();
177 if ((cc = clp->c_cc) == 0) 177 if ((cc = clp->c_cc) == 0)
178 goto out; 178 goto out;
179 179
180 if (flag == 0) { 180 if (flag == 0) {
181 count = clp->c_cl - clp->c_cf; 181 count = clp->c_cl - clp->c_cf;
182 if (count <= 0) 182 if (count <= 0)
183 count = clp->c_ce - clp->c_cf; 183 count = clp->c_ce - clp->c_cf;
184 goto out; 184 goto out;
185 } 185 }
186 186
187 i = clp->c_cf - clp->c_cs; 187 i = clp->c_cf - clp->c_cs;
188 if (flag & TTY_QUOTE) { 188 if (flag & TTY_QUOTE) {
189 while (cc-- > 0 && !(clp->c_cs[i++] & (flag & ~TTY_QUOTE) || 189 while (cc-- > 0 && !(clp->c_cs[i++] & (flag & ~TTY_QUOTE) ||
190 isset(clp->c_cq, i))) { 190 isset(clp->c_cq, i))) {
191 count++; 191 count++;
192 if (i == clp->c_cn) 192 if (i == clp->c_cn)
193 break; 193 break;
194 } 194 }
195 } else { 195 } else {
196 while (cc-- > 0 && !(clp->c_cs[i++] & flag)) { 196 while (cc-- > 0 && !(clp->c_cs[i++] & flag)) {
197 count++; 197 count++;
198 if (i == clp->c_cn) 198 if (i == clp->c_cn)
199 break; 199 break;
200 } 200 }
201 } 201 }
202out: 202out:
203 splx(s); 203 splx(s);
204 return count; 204 return count;
205} 205}
206 206
207/* 207/*
208 * Flush count bytes from clist. 208 * Flush count bytes from clist.
209 */ 209 */
210void 210void
211ndflush(struct clist *clp, int count) 211ndflush(struct clist *clp, int count)
212{ 212{
213 int cc; 213 int cc;
214 int s; 214 int s;
215 215
216 s = spltty(); 216 s = spltty();
217 if (count == clp->c_cc) { 217 if (count == clp->c_cc) {
218 clp->c_cc = 0; 218 clp->c_cc = 0;
219 clp->c_cf = clp->c_cl = (u_char *)0; 219 clp->c_cf = clp->c_cl = (u_char *)0;
220 goto out; 220 goto out;
221 } 221 }
222 /* optimize this while loop */ 222 /* optimize this while loop */
223 while (count > 0 && clp->c_cc > 0) { 223 while (count > 0 && clp->c_cc > 0) {
224 cc = clp->c_cl - clp->c_cf; 224 cc = clp->c_cl - clp->c_cf;
225 if (clp->c_cf >= clp->c_cl) 225 if (clp->c_cf >= clp->c_cl)
226 cc = clp->c_ce - clp->c_cf; 226 cc = clp->c_ce - clp->c_cf;
227 if (cc > count) 227 if (cc > count)
228 cc = count; 228 cc = count;
229 count -= cc; 229 count -= cc;
230 clp->c_cc -= cc; 230 clp->c_cc -= cc;
231 clp->c_cf += cc; 231 clp->c_cf += cc;
232 if (clp->c_cf == clp->c_ce) 232 if (clp->c_cf == clp->c_ce)
233 clp->c_cf = clp->c_cs; 233 clp->c_cf = clp->c_cs;
234 } 234 }
235 if (clp->c_cc == 0) 235 if (clp->c_cc == 0)
236 clp->c_cf = clp->c_cl = (u_char *)0; 236 clp->c_cf = clp->c_cl = (u_char *)0;
237out: 237out:
238 splx(s); 238 splx(s);
239} 239}
240 240
241/* 241/*
242 * Put a character into the output queue. 242 * Put a character into the output queue.
243 */ 243 */
244int 244int
245putc(int c, struct clist *clp) 245putc(int c, struct clist *clp)
246{ 246{
247 int i; 247 int i;
248 int s; 248 int s;
249 249
250 s = spltty(); 250 s = spltty();
251 if (clp->c_cc == clp->c_cn) 251 if (clp->c_cc == clp->c_cn)
252 goto out; 252 goto out;
253 253
254 if (clp->c_cc == 0) { 254 if (clp->c_cc == 0) {
255 if (!clp->c_cs) { 255 if (!clp->c_cs) {
256#if defined(DIAGNOSTIC) || 1 256#if defined(DIAGNOSTIC) || 1
257 printf("putc: required clalloc\n"); 257 printf("putc: required clalloc\n");
258#endif 258#endif
259 if (clalloc(clp, clp->c_cn, 1)) { 259 if (clalloc(clp, clp->c_cn, 1)) {
260out: 260out:
261 splx(s); 261 splx(s);
262 return -1; 262 return -1;
263 } 263 }
264 } 264 }
265 clp->c_cf = clp->c_cl = clp->c_cs; 265 clp->c_cf = clp->c_cl = clp->c_cs;
266 } 266 }
267 267
268 *clp->c_cl = c & 0xff; 268 *clp->c_cl = c & 0xff;
269 i = clp->c_cl - clp->c_cs; 269 i = clp->c_cl - clp->c_cs;
270 if (clp->c_cq) { 270 if (clp->c_cq) {
271#ifdef QBITS 271#ifdef QBITS
272 if (c & TTY_QUOTE) 272 if (c & TTY_QUOTE)
273 setbit(clp->c_cq, i); 273 setbit(clp->c_cq, i);
274 else 274 else
275 clrbit(clp->c_cq, i); 275 clrbit(clp->c_cq, i);
276#else 276#else
 277 u_char *q = clp->c_cq + i; 277 u_char *q = clp->c_cq + i;
278 *q = (c & TTY_QUOTE) ? 1 : 0; 278 *q = (c & TTY_QUOTE) ? 1 : 0;
279#endif 279#endif
280 } 280 }
281 clp->c_cc++; 281 clp->c_cc++;
282 clp->c_cl++; 282 clp->c_cl++;
283 if (clp->c_cl == clp->c_ce) 283 if (clp->c_cl == clp->c_ce)
284 clp->c_cl = clp->c_cs; 284 clp->c_cl = clp->c_cs;
285 splx(s); 285 splx(s);
286 return 0; 286 return 0;
287} 287}
288 288
289#ifdef QBITS 289#ifdef QBITS
290/* 290/*
291 * optimized version of 291 * optimized version of
292 * 292 *
293 * for (i = 0; i < len; i++) 293 * for (i = 0; i < len; i++)
 294 * clrbit(cp, off + i); 294 * clrbit(cp, off + i);
295 */ 295 */
296static void 296static void
297clrbits(u_char *cp, unsigned int off, unsigned int len) 297clrbits(u_char *cp, unsigned int off, unsigned int len)
298{ 298{
299 unsigned int sbi, ebi; 299 unsigned int sbi, ebi;
300 u_char *scp, *ecp; 300 u_char *scp, *ecp;
301 unsigned int end; 301 unsigned int end;
302 unsigned char mask; 302 unsigned char mask;
303 303
304 scp = cp + off / NBBY; 304 scp = cp + off / NBBY;
305 sbi = off % NBBY; 305 sbi = off % NBBY;
306 end = off + len + NBBY - 1; 306 end = off + len + NBBY - 1;
307 ecp = cp + end / NBBY - 1; 307 ecp = cp + end / NBBY - 1;
308 ebi = end % NBBY + 1; 308 ebi = end % NBBY + 1;
309 if (scp >= ecp) { 309 if (scp >= ecp) {
310 mask = ((1 << len) - 1) << sbi; 310 mask = ((1 << len) - 1) << sbi;
311 *scp &= ~mask; 311 *scp &= ~mask;
312 } else { 312 } else {
313 mask = (1 << sbi) - 1; 313 mask = (1 << sbi) - 1;
314 *scp++ &= mask; 314 *scp++ &= mask;
315 315
316 mask = (1 << ebi) - 1; 316 mask = (1 << ebi) - 1;
317 *ecp &= ~mask; 317 *ecp &= ~mask;
318 318
319 while (scp < ecp) 319 while (scp < ecp)
320 *scp++ = 0x00; 320 *scp++ = 0x00;
321 } 321 }
322} 322}
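
The masked fast path above touches each byte exactly once: a partial first byte, a partial last byte, and whole-byte stores in between. An unoptimized reference version (the loop the comment above describes, with the corrected off + i) is a useful cross-check; this is a sketch, not NetBSD code:

#define NBBY 8				/* bits per byte */

/* Reference: clear len bits starting at bit off, one bit at a time. */
static void
clrbits_slow(unsigned char *cp, unsigned int off, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		cp[(off + i) / NBBY] &= ~(1U << ((off + i) % NBBY));
}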
323#endif 323#endif
324 324
325/* 325/*
326 * Copy buffer to clist. 326 * Copy buffer to clist.
 327 * Return number of bytes not transferred. 327 * Return number of bytes not transferred.
328 */ 328 */
329int 329int
330b_to_q(const u_char *cp, int count, struct clist *clp) 330b_to_q(const u_char *cp, int count, struct clist *clp)
331{ 331{
332 int cc; 332 int cc;
333 const u_char *p = cp; 333 const u_char *p = cp;
334 int s; 334 int s;
335 335
336 if (count <= 0) 336 if (count <= 0)
337 return 0; 337 return 0;
338 338
339 s = spltty(); 339 s = spltty();
340 if (clp->c_cc == clp->c_cn) 340 if (clp->c_cc == clp->c_cn)
341 goto out; 341 goto out;
342 342
343 if (clp->c_cc == 0) { 343 if (clp->c_cc == 0) {
344 if (!clp->c_cs) { 344 if (!clp->c_cs) {
345#if defined(DIAGNOSTIC) || 1 345#if defined(DIAGNOSTIC) || 1
346 printf("b_to_q: required clalloc\n"); 346 printf("b_to_q: required clalloc\n");
347#endif 347#endif
348 if (clalloc(clp, clp->c_cn, 1)) 348 if (clalloc(clp, clp->c_cn, 1))
349 goto out; 349 goto out;
350 } 350 }
351 clp->c_cf = clp->c_cl = clp->c_cs; 351 clp->c_cf = clp->c_cl = clp->c_cs;
352 } 352 }
353 353
354 /* optimize this while loop */ 354 /* optimize this while loop */
355 while (count > 0 && clp->c_cc < clp->c_cn) { 355 while (count > 0 && clp->c_cc < clp->c_cn) {
356 cc = clp->c_ce - clp->c_cl; 356 cc = clp->c_ce - clp->c_cl;
357 if (clp->c_cf > clp->c_cl) 357 if (clp->c_cf > clp->c_cl)
358 cc = clp->c_cf - clp->c_cl; 358 cc = clp->c_cf - clp->c_cl;
359 if (cc > count) 359 if (cc > count)
360 cc = count; 360 cc = count;
361 memcpy(clp->c_cl, p, cc); 361 memcpy(clp->c_cl, p, cc);
362 if (clp->c_cq) { 362 if (clp->c_cq) {
363#ifdef QBITS 363#ifdef QBITS
364 clrbits(clp->c_cq, clp->c_cl - clp->c_cs, cc); 364 clrbits(clp->c_cq, clp->c_cl - clp->c_cs, cc);
365#else 365#else
366 memset(clp->c_cl - clp->c_cs + clp->c_cq, 0, cc); 366 memset(clp->c_cl - clp->c_cs + clp->c_cq, 0, cc);
367#endif 367#endif
368 } 368 }
369 p += cc; 369 p += cc;
370 count -= cc; 370 count -= cc;
371 clp->c_cc += cc; 371 clp->c_cc += cc;
372 clp->c_cl += cc; 372 clp->c_cl += cc;
373 if (clp->c_cl == clp->c_ce) 373 if (clp->c_cl == clp->c_ce)
374 clp->c_cl = clp->c_cs; 374 clp->c_cl = clp->c_cs;
375 } 375 }
376out: 376out:
377 splx(s); 377 splx(s);
378 return count; 378 return count;
379} 379}
380 380
381static int cc; 381static int tty_global_cc;
382 382
383/* 383/*
384 * Given a non-NULL pointer into the clist return the pointer 384 * Given a non-NULL pointer into the clist return the pointer
385 * to the next character in the list or return NULL if no more chars. 385 * to the next character in the list or return NULL if no more chars.
386 * 386 *
387 * Callers must not allow getc's to happen between firstc's and getc's 387 * Callers must not allow getc's to happen between firstc's and getc's
388 * so that the pointer becomes invalid. Note that interrupts are NOT 388 * so that the pointer becomes invalid. Note that interrupts are NOT
389 * masked. 389 * masked.
390 */ 390 */
391u_char * 391u_char *
392nextc(struct clist *clp, u_char *cp, int *c) 392nextc(struct clist *clp, u_char *cp, int *c)
393{ 393{
394 394
395 if (clp->c_cf == cp) { 395 if (clp->c_cf == cp) {
396 /* 396 /*
397 * First time initialization. 397 * First time initialization.
398 */ 398 */
399 cc = clp->c_cc; 399 tty_global_cc = clp->c_cc;
400 } 400 }
401 if (cc == 0 || cp == NULL) 401 if (tty_global_cc == 0 || cp == NULL)
402 return NULL; 402 return NULL;
403 if (--cc == 0) 403 if (--tty_global_cc == 0)
404 return NULL; 404 return NULL;
405 if (++cp == clp->c_ce) 405 if (++cp == clp->c_ce)
406 cp = clp->c_cs; 406 cp = clp->c_cs;
407 *c = *cp & 0xff; 407 *c = *cp & 0xff;
408 if (clp->c_cq) { 408 if (clp->c_cq) {
409#ifdef QBITS 409#ifdef QBITS
410 if (isset(clp->c_cq, cp - clp->c_cs)) 410 if (isset(clp->c_cq, cp - clp->c_cs))
411 *c |= TTY_QUOTE; 411 *c |= TTY_QUOTE;
412#else 412#else
 413 if (*(cp - clp->c_cs + clp->c_cq)) 413 if (*(cp - clp->c_cs + clp->c_cq))
414 *c |= TTY_QUOTE; 414 *c |= TTY_QUOTE;
415#endif 415#endif
416 } 416 }
417 return cp; 417 return cp;
418} 418}
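
tty_global_cc, renamed from cc, is the file-scope cursor that firstc() and nextc() share between calls; the old name collided with the cc locals in q_to_b(), ndqb(), ndflush(), and b_to_q() above, which is what LGTM flagged. A sketch of the non-consuming scan the shared cursor supports, as a hypothetical kernel-side caller (per the comments, interrupts are NOT masked for you):

#include <sys/param.h>
#include <sys/tty.h>

/* Walk a clist without consuming it; *c carries TTY_QUOTE when set. */
static void
scan_clist(struct clist *clp)
{
	u_char *cp;
	int c;

	for (cp = firstc(clp, &c); cp != NULL; cp = nextc(clp, cp, &c)) {
		if (c & TTY_QUOTE)
			continue;	/* e.g. skip quoted characters */
		/* ... inspect c ... */
	}
}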
419 419
420/* 420/*
421 * Given a non-NULL pointer into the clist return the pointer 421 * Given a non-NULL pointer into the clist return the pointer
422 * to the first character in the list or return NULL if no more chars. 422 * to the first character in the list or return NULL if no more chars.
423 * 423 *
424 * Callers must not allow getc's to happen between firstc's and getc's 424 * Callers must not allow getc's to happen between firstc's and getc's
425 * so that the pointer becomes invalid. Note that interrupts are NOT 425 * so that the pointer becomes invalid. Note that interrupts are NOT
426 * masked. 426 * masked.
427 * 427 *
428 * *c is set to the NEXT character 428 * *c is set to the NEXT character
429 */ 429 */
430u_char * 430u_char *
431firstc(struct clist *clp, int *c) 431firstc(struct clist *clp, int *c)
432{ 432{
433 u_char *cp; 433 u_char *cp;
434 434
435 cc = clp->c_cc; 435 tty_global_cc = clp->c_cc;
436 if (cc == 0) 436 if (tty_global_cc == 0)
437 return NULL; 437 return NULL;
438 cp = clp->c_cf; 438 cp = clp->c_cf;
439 *c = *cp & 0xff; 439 *c = *cp & 0xff;
440 if (clp->c_cq) { 440 if (clp->c_cq) {
441#ifdef QBITS 441#ifdef QBITS
442 if (isset(clp->c_cq, cp - clp->c_cs)) 442 if (isset(clp->c_cq, cp - clp->c_cs))
443 *c |= TTY_QUOTE; 443 *c |= TTY_QUOTE;
444#else 444#else
445 if (*(cp - clp->c_cs + clp->c_cq)) 445 if (*(cp - clp->c_cs + clp->c_cq))
446 *c |= TTY_QUOTE; 446 *c |= TTY_QUOTE;
447#endif 447#endif
448 } 448 }
449 return clp->c_cf; 449 return clp->c_cf;
450} 450}
451 451
452/* 452/*
453 * Remove the last character in the clist and return it. 453 * Remove the last character in the clist and return it.
454 */ 454 */
455int 455int
456unputc(struct clist *clp) 456unputc(struct clist *clp)
457{ 457{
458 unsigned int c = -1; 458 unsigned int c = -1;
459 int s; 459 int s;
460 460
461 s = spltty(); 461 s = spltty();
462 if (clp->c_cc == 0) 462 if (clp->c_cc == 0)
463 goto out; 463 goto out;
464 464
465 if (clp->c_cl == clp->c_cs) 465 if (clp->c_cl == clp->c_cs)
466 clp->c_cl = clp->c_ce - 1; 466 clp->c_cl = clp->c_ce - 1;
467 else 467 else
468 --clp->c_cl; 468 --clp->c_cl;
469 clp->c_cc--; 469 clp->c_cc--;
470 470
471 c = *clp->c_cl & 0xff; 471 c = *clp->c_cl & 0xff;
472 if (clp->c_cq) { 472 if (clp->c_cq) {
473#ifdef QBITS 473#ifdef QBITS
474 if (isset(clp->c_cq, clp->c_cl - clp->c_cs)) 474 if (isset(clp->c_cq, clp->c_cl - clp->c_cs))
475 c |= TTY_QUOTE; 475 c |= TTY_QUOTE;
476#else 476#else
 477 if (*(clp->c_cl - clp->c_cs + clp->c_cq)) 477 if (*(clp->c_cl - clp->c_cs + clp->c_cq))
478 c |= TTY_QUOTE; 478 c |= TTY_QUOTE;
479#endif 479#endif
480 } 480 }
481 if (clp->c_cc == 0) 481 if (clp->c_cc == 0)
482 clp->c_cf = clp->c_cl = (u_char *)0; 482 clp->c_cf = clp->c_cl = (u_char *)0;
483out: 483out:
484 splx(s); 484 splx(s);
485 return c; 485 return c;
486} 486}
487 487
488/* 488/*
489 * Put the chars in the from queue on the end of the to queue. 489 * Put the chars in the from queue on the end of the to queue.
490 */ 490 */
491void 491void
492catq(struct clist *from, struct clist *to) 492catq(struct clist *from, struct clist *to)
493{ 493{
494 int c; 494 int c;
495 495
496 while ((c = getc(from)) != -1) 496 while ((c = getc(from)) != -1)
497 putc(c, to); 497 putc(c, to);
498} 498}