Sat Apr 18 15:06:18 2020 UTC ()
Add PVHVM multiprocessor support:
We need the hypervisor to be set up before cpus attaches.
Move hypervisor setup to a new function xen_hvm_init(), called at the
beginning of mainbus_attach(). This function searches the cfdata[] array
to see if the hypervisor device is enabled (so you can disable PV
support with
disable hypervisor
from userconf).
For HVM, ci_cpuid doesn't match the virtual CPU index needed by Xen.
Introduce ci_vcpuid to cpu_info. Introduce xen_hvm_init_cpu(), to be
called for each CPU in its context, which initializes ci_vcpuid and
ci_vcpu, and sets up the event callback.
Change Xen code to use ci_vcpuid.

Do not call lapic_calibrate_timer() for VM_GUEST_XENPVHVM, we will use
Xen timers.

Don't call lapic_initclocks() from cpu_hatch(); instead set
x86_cpu_initclock_func to lapic_initclocks() in lapic_calibrate_timer(),
and call *(x86_cpu_initclock_func)() from cpu_hatch().
Also call x86_cpu_initclock_func from cpu_attach() for the boot CPU.
As x86_cpu_initclock_func is called for all CPUs, x86_initclock_func can
be a NOP for lapic timer.

Reorganize Xen code for x86_initclock_func/x86_cpu_initclock_func.
Move x86_cpu_idle_xen() to hypervisor_machdep.c


(bouyer)
diff -r1.117.4.5 -r1.117.4.6 src/sys/arch/x86/include/cpu.h
diff -r1.51 -r1.51.10.1 src/sys/arch/x86/include/cpuvar.h
diff -r1.181.4.2 -r1.181.4.3 src/sys/arch/x86/x86/cpu.c
diff -r1.3.12.2 -r1.3.12.3 src/sys/arch/x86/x86/mainbus.c
diff -r1.49.10.2 -r1.49.10.3 src/sys/arch/xen/include/hypervisor.h
diff -r1.44 -r1.44.8.1 src/sys/arch/xen/include/xen.h
diff -r1.133 -r1.133.4.1 src/sys/arch/xen/x86/cpu.c
diff -r1.36.8.3 -r1.36.8.4 src/sys/arch/xen/x86/hypervisor_machdep.c
diff -r1.35.6.3 -r1.35.6.4 src/sys/arch/xen/x86/xen_ipi.c
diff -r1.6.12.2 -r1.6.12.3 src/sys/arch/xen/x86/xen_mainbus.c
diff -r1.88.2.4 -r1.88.2.5 src/sys/arch/xen/xen/evtchn.c
diff -r1.73.2.5 -r1.73.2.6 src/sys/arch/xen/xen/hypervisor.c
diff -r1.1.2.2 -r1.1.2.3 src/sys/arch/xen/xen/xen_clock.c

cvs diff -r1.117.4.5 -r1.117.4.6 src/sys/arch/x86/include/cpu.h (expand / switch to unified diff)

--- src/sys/arch/x86/include/cpu.h 2020/04/16 17:44:54 1.117.4.5
+++ src/sys/arch/x86/include/cpu.h 2020/04/18 15:06:18 1.117.4.6
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.h,v 1.117.4.5 2020/04/16 17:44:54 bouyer Exp $ */ 1/* $NetBSD: cpu.h,v 1.117.4.6 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1990 The Regents of the University of California. 4 * Copyright (c) 1990 The Regents of the University of California.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to Berkeley by 7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz. 8 * William Jolitz.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -213,26 +213,27 @@ struct cpu_info { @@ -213,26 +213,27 @@ struct cpu_info {
213 * stored to regularly by remote CPUs; when they were mixed with 213 * stored to regularly by remote CPUs; when they were mixed with
214 * other fields we observed frequent cache misses. 214 * other fields we observed frequent cache misses.
215 */ 215 */
216 int ci_want_resched __aligned(64); 216 int ci_want_resched __aligned(64);
217 uint32_t ci_ipis; /* interprocessor interrupts pending */ 217 uint32_t ci_ipis; /* interprocessor interrupts pending */
218 218
219 /* 219 /*
220 * These are largely static, and will be frequently fetched by other 220 * These are largely static, and will be frequently fetched by other
221 * CPUs. For that reason they get their own cache line, too. 221 * CPUs. For that reason they get their own cache line, too.
222 */ 222 */
223 uint32_t ci_flags __aligned(64);/* general flags */ 223 uint32_t ci_flags __aligned(64);/* general flags */
224 uint32_t ci_acpiid; /* our ACPI/MADT ID */ 224 uint32_t ci_acpiid; /* our ACPI/MADT ID */
225 uint32_t ci_initapicid; /* our initial APIC ID */ 225 uint32_t ci_initapicid; /* our initial APIC ID */
 226 uint32_t ci_vcpuid; /* our CPU id for hypervisor */
226 cpuid_t ci_cpuid; /* our CPU ID */ 227 cpuid_t ci_cpuid; /* our CPU ID */
227 struct cpu_info *ci_next; /* next cpu */ 228 struct cpu_info *ci_next; /* next cpu */
228 229
229 /* 230 /*
230 * This is stored frequently, and is fetched by remote CPUs. 231 * This is stored frequently, and is fetched by remote CPUs.
231 */ 232 */
232 struct lwp *ci_curlwp __aligned(64);/* general flags */ 233 struct lwp *ci_curlwp __aligned(64);/* general flags */
233 struct lwp *ci_onproc; /* current user LWP / kthread */ 234 struct lwp *ci_onproc; /* current user LWP / kthread */
234 235
235 /* Here ends the cachline-aligned sections. */ 236 /* Here ends the cachline-aligned sections. */
236 int ci_padout __aligned(64); 237 int ci_padout __aligned(64);
237 238
238#ifndef __HAVE_DIRECT_MAP 239#ifndef __HAVE_DIRECT_MAP
@@ -520,26 +521,27 @@ void x86_cpu_topology(struct cpu_info *) @@ -520,26 +521,27 @@ void x86_cpu_topology(struct cpu_info *)
520struct region_descriptor; 521struct region_descriptor;
521void lgdt(struct region_descriptor *); 522void lgdt(struct region_descriptor *);
522#ifdef XENPV 523#ifdef XENPV
523void lgdt_finish(void); 524void lgdt_finish(void);
524#endif 525#endif
525 526
526struct pcb; 527struct pcb;
527void savectx(struct pcb *); 528void savectx(struct pcb *);
528void lwp_trampoline(void); 529void lwp_trampoline(void);
529#ifdef XEN 530#ifdef XEN
530void xen_startrtclock(void); 531void xen_startrtclock(void);
531void xen_delay(unsigned int); 532void xen_delay(unsigned int);
532void xen_initclocks(void); 533void xen_initclocks(void);
 534void xen_cpu_initclocks(void);
533void xen_suspendclocks(struct cpu_info *); 535void xen_suspendclocks(struct cpu_info *);
534void xen_resumeclocks(struct cpu_info *); 536void xen_resumeclocks(struct cpu_info *);
535#endif /* XEN */ 537#endif /* XEN */
536/* clock.c */ 538/* clock.c */
537void initrtclock(u_long); 539void initrtclock(u_long);
538void startrtclock(void); 540void startrtclock(void);
539void i8254_delay(unsigned int); 541void i8254_delay(unsigned int);
540void i8254_microtime(struct timeval *); 542void i8254_microtime(struct timeval *);
541void i8254_initclocks(void); 543void i8254_initclocks(void);
542unsigned int gettick(void); 544unsigned int gettick(void);
543extern void (*x86_delay)(unsigned int); 545extern void (*x86_delay)(unsigned int);
544 546
545/* cpu.c */ 547/* cpu.c */

cvs diff -r1.51 -r1.51.10.1 src/sys/arch/x86/include/cpuvar.h (expand / switch to unified diff)

--- src/sys/arch/x86/include/cpuvar.h 2019/02/11 14:59:32 1.51
+++ src/sys/arch/x86/include/cpuvar.h 2020/04/18 15:06:18 1.51.10.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpuvar.h,v 1.51 2019/02/11 14:59:32 cherry Exp $ */ 1/* $NetBSD: cpuvar.h,v 1.51.10.1 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2000, 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000, 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by RedBack Networks Inc. 8 * by RedBack Networks Inc.
9 * 9 *
10 * Author: Bill Sommerfeld 10 * Author: Bill Sommerfeld
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -89,43 +89,44 @@ struct cpu_attach_args { @@ -89,43 +89,44 @@ struct cpu_attach_args {
89 int cpu_role; 89 int cpu_role;
90 const struct cpu_functions *cpu_func; 90 const struct cpu_functions *cpu_func;
91}; 91};
92 92
93struct cpufeature_attach_args { 93struct cpufeature_attach_args {
94 struct cpu_info *ci; 94 struct cpu_info *ci;
95 const char *name; 95 const char *name;
96}; 96};
97 97
98#ifdef _KERNEL 98#ifdef _KERNEL
99#include <sys/kcpuset.h> 99#include <sys/kcpuset.h>
100#if defined(_KERNEL_OPT) 100#if defined(_KERNEL_OPT)
101#include "opt_multiprocessor.h" 101#include "opt_multiprocessor.h"
 102#include "opt_xen.h"
102#endif /* defined(_KERNEL_OPT) */ 103#endif /* defined(_KERNEL_OPT) */
103 104
104extern int (*x86_ipi)(int, int, int); 105extern int (*x86_ipi)(int, int, int);
105int x86_ipi_init(int); 106int x86_ipi_init(int);
106int x86_ipi_startup(int, int); 107int x86_ipi_startup(int, int);
107void x86_errata(void); 108void x86_errata(void);
108 109
109void identifycpu(struct cpu_info *); 110void identifycpu(struct cpu_info *);
110void identifycpu_cpuids(struct cpu_info *); 111void identifycpu_cpuids(struct cpu_info *);
111void cpu_init(struct cpu_info *); 112void cpu_init(struct cpu_info *);
112void cpu_init_tss(struct cpu_info *); 113void cpu_init_tss(struct cpu_info *);
113void cpu_init_first(void); 114void cpu_init_first(void);
114 115
115void x86_cpu_idle_init(void); 116void x86_cpu_idle_init(void);
116void x86_cpu_idle_halt(void); 117void x86_cpu_idle_halt(void);
117void x86_cpu_idle_mwait(void); 118void x86_cpu_idle_mwait(void);
118#ifdef XENPV 119#ifdef XEN
119void x86_cpu_idle_xen(void); 120void x86_cpu_idle_xen(void);
120#endif 121#endif
121 122
122void cpu_get_tsc_freq(struct cpu_info *); 123void cpu_get_tsc_freq(struct cpu_info *);
123void pat_init(struct cpu_info *); 124void pat_init(struct cpu_info *);
124 125
125extern int cpu_vendor; 126extern int cpu_vendor;
126extern bool x86_mp_online; 127extern bool x86_mp_online;
127 128
128extern uint32_t cpu_feature[7]; 129extern uint32_t cpu_feature[7];
129 130
130#endif /* _KERNEL */ 131#endif /* _KERNEL */
131 132

cvs diff -r1.181.4.2 -r1.181.4.3 src/sys/arch/x86/x86/cpu.c (expand / switch to unified diff)

--- src/sys/arch/x86/x86/cpu.c 2020/04/16 09:45:56 1.181.4.2
+++ src/sys/arch/x86/x86/cpu.c 2020/04/18 15:06:18 1.181.4.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.181.4.2 2020/04/16 09:45:56 bouyer Exp $ */ 1/* $NetBSD: cpu.c,v 1.181.4.3 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc. 4 * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran. 8 * by Bill Sommerfeld of RedBack Networks Inc, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -52,53 +52,54 @@ @@ -52,53 +52,54 @@
52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 52 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 54 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE. 61 * SUCH DAMAGE.
62 */ 62 */
63 63
64#include <sys/cdefs.h> 64#include <sys/cdefs.h>
65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.181.4.2 2020/04/16 09:45:56 bouyer Exp $"); 65__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.181.4.3 2020/04/18 15:06:18 bouyer Exp $");
66 66
67#include "opt_ddb.h" 67#include "opt_ddb.h"
68#include "opt_mpbios.h" /* for MPDEBUG */ 68#include "opt_mpbios.h" /* for MPDEBUG */
69#include "opt_mtrr.h" 69#include "opt_mtrr.h"
70#include "opt_multiprocessor.h" 70#include "opt_multiprocessor.h"
71#include "opt_svs.h" 71#include "opt_svs.h"
72 72
73#include "lapic.h" 73#include "lapic.h"
74#include "ioapic.h" 74#include "ioapic.h"
75#include "acpica.h" 75#include "acpica.h"
76 76
77#include <sys/param.h> 77#include <sys/param.h>
78#include <sys/proc.h> 78#include <sys/proc.h>
79#include <sys/systm.h> 79#include <sys/systm.h>
80#include <sys/device.h> 80#include <sys/device.h>
81#include <sys/cpu.h> 81#include <sys/cpu.h>
82#include <sys/cpufreq.h> 82#include <sys/cpufreq.h>
83#include <sys/idle.h> 83#include <sys/idle.h>
84#include <sys/atomic.h> 84#include <sys/atomic.h>
85#include <sys/reboot.h> 85#include <sys/reboot.h>
86#include <sys/csan.h> 86#include <sys/csan.h>
87 87
88#include <uvm/uvm.h> 88#include <uvm/uvm.h>
89 89
90#include "acpica.h" /* for NACPICA, for mp_verbose */ 90#include "acpica.h" /* for NACPICA, for mp_verbose */
91 91
 92#include <x86/machdep.h>
92#include <machine/cpufunc.h> 93#include <machine/cpufunc.h>
93#include <machine/cpuvar.h> 94#include <machine/cpuvar.h>
94#include <machine/pmap.h> 95#include <machine/pmap.h>
95#include <machine/vmparam.h> 96#include <machine/vmparam.h>
96#if defined(MULTIPROCESSOR) 97#if defined(MULTIPROCESSOR)
97#include <machine/mpbiosvar.h> 98#include <machine/mpbiosvar.h>
98#endif 99#endif
99#include <machine/mpconfig.h> /* for mp_verbose */ 100#include <machine/mpconfig.h> /* for mp_verbose */
100#include <machine/pcb.h> 101#include <machine/pcb.h>
101#include <machine/specialreg.h> 102#include <machine/specialreg.h>
102#include <machine/segments.h> 103#include <machine/segments.h>
103#include <machine/gdt.h> 104#include <machine/gdt.h>
104#include <machine/mtrr.h> 105#include <machine/mtrr.h>
@@ -120,26 +121,30 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.18 @@ -120,26 +121,30 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.18
120#include <dev/ic/mc146818reg.h> 121#include <dev/ic/mc146818reg.h>
121#include <i386/isa/nvram.h> 122#include <i386/isa/nvram.h>
122#include <dev/isa/isareg.h> 123#include <dev/isa/isareg.h>
123 124
124#include "tsc.h" 125#include "tsc.h"
125 126
126#ifndef XENPV 127#ifndef XENPV
127#include "hyperv.h" 128#include "hyperv.h"
128#if NHYPERV > 0 129#if NHYPERV > 0
129#include <x86/x86/hypervvar.h> 130#include <x86/x86/hypervvar.h>
130#endif 131#endif
131#endif 132#endif
132 133
 134#ifdef XEN
 135#include <xen/hypervisor.h>
 136#endif
 137
133static int cpu_match(device_t, cfdata_t, void *); 138static int cpu_match(device_t, cfdata_t, void *);
134static void cpu_attach(device_t, device_t, void *); 139static void cpu_attach(device_t, device_t, void *);
135static void cpu_defer(device_t); 140static void cpu_defer(device_t);
136static int cpu_rescan(device_t, const char *, const int *); 141static int cpu_rescan(device_t, const char *, const int *);
137static void cpu_childdetached(device_t, device_t); 142static void cpu_childdetached(device_t, device_t);
138static bool cpu_stop(device_t); 143static bool cpu_stop(device_t);
139static bool cpu_suspend(device_t, const pmf_qual_t *); 144static bool cpu_suspend(device_t, const pmf_qual_t *);
140static bool cpu_resume(device_t, const pmf_qual_t *); 145static bool cpu_resume(device_t, const pmf_qual_t *);
141static bool cpu_shutdown(device_t, int); 146static bool cpu_shutdown(device_t, int);
142 147
143struct cpu_softc { 148struct cpu_softc {
144 device_t sc_dev; /* device tree glue */ 149 device_t sc_dev; /* device tree glue */
145 struct cpu_info *sc_info; /* pointer to CPU info */ 150 struct cpu_info *sc_info; /* pointer to CPU info */
@@ -432,50 +437,59 @@ cpu_attach(device_t parent, device_t sel @@ -432,50 +437,59 @@ cpu_attach(device_t parent, device_t sel
432 /* Basic init. */ 437 /* Basic init. */
433 cpu_intr_init(ci); 438 cpu_intr_init(ci);
434 cpu_get_tsc_freq(ci); 439 cpu_get_tsc_freq(ci);
435 cpu_init(ci); 440 cpu_init(ci);
436#ifdef i386 441#ifdef i386
437 cpu_set_tss_gates(ci); 442 cpu_set_tss_gates(ci);
438#endif 443#endif
439 pmap_cpu_init_late(ci); 444 pmap_cpu_init_late(ci);
440#if NLAPIC > 0 445#if NLAPIC > 0
441 if (caa->cpu_role != CPU_ROLE_SP) { 446 if (caa->cpu_role != CPU_ROLE_SP) {
442 /* Enable lapic. */ 447 /* Enable lapic. */
443 lapic_enable(); 448 lapic_enable();
444 lapic_set_lvt(); 449 lapic_set_lvt();
445 lapic_calibrate_timer(ci); 450 if (vm_guest != VM_GUEST_XENPVHVM)
 451 lapic_calibrate_timer(ci);
446 } 452 }
447#endif 453#endif
448 /* Make sure DELAY() is initialized. */ 454 /* Make sure DELAY() is initialized. */
449 DELAY(1); 455 DELAY(1);
450 kcsan_cpu_init(ci); 456 kcsan_cpu_init(ci);
451 again = true; 457 again = true;
452 } 458 }
453 459
454 /* further PCB init done later. */ 460 /* further PCB init done later. */
455 461
456 switch (caa->cpu_role) { 462 switch (caa->cpu_role) {
457 case CPU_ROLE_SP: 463 case CPU_ROLE_SP:
458 atomic_or_32(&ci->ci_flags, CPUF_SP); 464 atomic_or_32(&ci->ci_flags, CPUF_SP);
459 cpu_identify(ci); 465 cpu_identify(ci);
460 x86_errata(); 466 x86_errata();
461 x86_cpu_idle_init(); 467 x86_cpu_idle_init();
 468 (*x86_cpu_initclock_func)();
 469#ifdef XENPVHVM
 470 xen_hvm_init_cpu(ci);
 471#endif
462 break; 472 break;
463 473
464 case CPU_ROLE_BP: 474 case CPU_ROLE_BP:
465 atomic_or_32(&ci->ci_flags, CPUF_BSP); 475 atomic_or_32(&ci->ci_flags, CPUF_BSP);
466 cpu_identify(ci); 476 cpu_identify(ci);
467 x86_errata(); 477 x86_errata();
468 x86_cpu_idle_init(); 478 x86_cpu_idle_init();
 479#ifdef XENPVHVM
 480 xen_hvm_init_cpu(ci);
 481#endif
 482 (*x86_cpu_initclock_func)();
469 break; 483 break;
470 484
471#ifdef MULTIPROCESSOR 485#ifdef MULTIPROCESSOR
472 case CPU_ROLE_AP: 486 case CPU_ROLE_AP:
473 /* 487 /*
474 * report on an AP 488 * report on an AP
475 */ 489 */
476 cpu_intr_init(ci); 490 cpu_intr_init(ci);
477 gdt_alloc_cpu(ci); 491 gdt_alloc_cpu(ci);
478#ifdef i386 492#ifdef i386
479 cpu_set_tss_gates(ci); 493 cpu_set_tss_gates(ci);
480#endif 494#endif
481 pmap_cpu_init_late(ci); 495 pmap_cpu_init_late(ci);
@@ -961,39 +975,42 @@ cpu_hatch(void *v) @@ -961,39 +975,42 @@ cpu_hatch(void *v)
961 lcr3(pmap_pdirpa(pmap_kernel(), 0)); 975 lcr3(pmap_pdirpa(pmap_kernel(), 0));
962#endif 976#endif
963 977
964 pcb = lwp_getpcb(curlwp); 978 pcb = lwp_getpcb(curlwp);
965 pcb->pcb_cr3 = rcr3(); 979 pcb->pcb_cr3 = rcr3();
966 pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp); 980 pcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
967 lcr0(pcb->pcb_cr0); 981 lcr0(pcb->pcb_cr0);
968 982
969 cpu_init_idt(); 983 cpu_init_idt();
970 gdt_init_cpu(ci); 984 gdt_init_cpu(ci);
971#if NLAPIC > 0 985#if NLAPIC > 0
972 lapic_enable(); 986 lapic_enable();
973 lapic_set_lvt(); 987 lapic_set_lvt();
974 lapic_initclocks(); 
975#endif 988#endif
976 989
977 fpuinit(ci); 990 fpuinit(ci);
978 lldt(GSYSSEL(GLDT_SEL, SEL_KPL)); 991 lldt(GSYSSEL(GLDT_SEL, SEL_KPL));
979 ltr(ci->ci_tss_sel); 992 ltr(ci->ci_tss_sel);
980 993
981 /* 994 /*
982 * cpu_init will re-synchronize the TSC, and will detect any abnormal 995 * cpu_init will re-synchronize the TSC, and will detect any abnormal
983 * drift that would have been caused by the use of MONITOR/MWAIT 996 * drift that would have been caused by the use of MONITOR/MWAIT
984 * above. 997 * above.
985 */ 998 */
986 cpu_init(ci); 999 cpu_init(ci);
 1000#ifdef XENPVHVM
 1001 xen_hvm_init_cpu(ci);
 1002#endif
 1003 (*x86_cpu_initclock_func)();
987 cpu_get_tsc_freq(ci); 1004 cpu_get_tsc_freq(ci);
988 1005
989 s = splhigh(); 1006 s = splhigh();
990#if NLAPIC > 0 1007#if NLAPIC > 0
991 lapic_write_tpri(0); 1008 lapic_write_tpri(0);
992#endif 1009#endif
993 x86_enable_intr(); 1010 x86_enable_intr();
994 splx(s); 1011 splx(s);
995 x86_errata(); 1012 x86_errata();
996 1013
997 aprint_debug_dev(ci->ci_dev, "running\n"); 1014 aprint_debug_dev(ci->ci_dev, "running\n");
998 1015
999 kcsan_cpu_init(ci); 1016 kcsan_cpu_init(ci);

cvs diff -r1.3.12.2 -r1.3.12.3 src/sys/arch/x86/x86/mainbus.c (expand / switch to unified diff)

--- src/sys/arch/x86/x86/mainbus.c 2020/04/16 08:46:35 1.3.12.2
+++ src/sys/arch/x86/x86/mainbus.c 2020/04/18 15:06:18 1.3.12.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: mainbus.c,v 1.3.12.2 2020/04/16 08:46:35 bouyer Exp $ */ 1/* $NetBSD: mainbus.c,v 1.3.12.3 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -18,27 +18,27 @@ @@ -18,27 +18,27 @@
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30 30
31__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.3.12.2 2020/04/16 08:46:35 bouyer Exp $"); 31__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.3.12.3 2020/04/18 15:06:18 bouyer Exp $");
32 32
33#include <sys/param.h> 33#include <sys/param.h>
34#include <sys/systm.h> 34#include <sys/systm.h>
35#include <sys/device.h> 35#include <sys/device.h>
36#include <sys/reboot.h> 36#include <sys/reboot.h>
37 37
38#include <dev/pci/pcivar.h> 38#include <dev/pci/pcivar.h>
39 39
40#include <machine/cpuvar.h> 40#include <machine/cpuvar.h>
41#include <machine/mpbiosvar.h> 41#include <machine/mpbiosvar.h>
42#include <machine/mpacpi.h> 42#include <machine/mpacpi.h>
43#include <xen/hypervisor.h> 43#include <xen/hypervisor.h>
44 44
@@ -207,35 +207,43 @@ int @@ -207,35 +207,43 @@ int
207mainbus_match(device_t parent, cfdata_t match, void *aux) 207mainbus_match(device_t parent, cfdata_t match, void *aux)
208{ 208{
209 209
210 return 1; 210 return 1;
211} 211}
212 212
213void 213void
214mainbus_attach(device_t parent, device_t self, void *aux) 214mainbus_attach(device_t parent, device_t self, void *aux)
215{ 215{
216 216
217 aprint_naive("\n"); 217 aprint_naive("\n");
218 aprint_normal("\n"); 218 aprint_normal("\n");
219 219
 220#if defined(XENPVHVM)
 221 xen_hvm_init(); /* before attaching CPUs */
 222#endif
 223
220#if defined(XENPV) 224#if defined(XENPV)
221 if (xendomain_is_dom0()) { 225 if (xendomain_is_dom0()) {
222#endif /* XENPV */ 226#endif /* XENPV */
223 x86_cpubus_attach(self); 227 x86_cpubus_attach(self);
224 228
225#if defined(XENPV) 229#if defined(XENPV)
226 } 230 }
227#endif /* XENPV */ 231#endif /* XENPV */
228#if defined(XEN) 232#if defined(XEN)
 233 /*
 234 * before isa/pci probe, so that PV devices are not probed again
 235 * as emulated
 236 */
229 xen_mainbus_attach(parent, self, aux); 237 xen_mainbus_attach(parent, self, aux);
230#endif 238#endif
231#if defined(__i386__) && !defined(XENPV) 239#if defined(__i386__) && !defined(XENPV)
232 i386_mainbus_attach(parent, self, aux); 240 i386_mainbus_attach(parent, self, aux);
233#elif defined(__x86_64__) && !defined(XENPV) 241#elif defined(__x86_64__) && !defined(XENPV)
234 amd64_mainbus_attach(parent, self, aux); 242 amd64_mainbus_attach(parent, self, aux);
235#endif 243#endif
236} 244}
237 245
238int 246int
239mainbus_rescan(device_t self, const char *ifattr, const int *locators) 247mainbus_rescan(device_t self, const char *ifattr, const int *locators)
240{ 248{
241#if defined(__i386__) && !defined(XEN) 249#if defined(__i386__) && !defined(XEN)

cvs diff -r1.49.10.2 -r1.49.10.3 src/sys/arch/xen/include/hypervisor.h (expand / switch to unified diff)

--- src/sys/arch/xen/include/hypervisor.h 2020/04/16 08:46:35 1.49.10.2
+++ src/sys/arch/xen/include/hypervisor.h 2020/04/18 15:06:18 1.49.10.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: hypervisor.h,v 1.49.10.2 2020/04/16 08:46:35 bouyer Exp $ */ 1/* $NetBSD: hypervisor.h,v 1.49.10.3 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -48,26 +48,30 @@ @@ -48,26 +48,30 @@
48 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 48 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
49 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 49 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
50 * IN THE SOFTWARE. 50 * IN THE SOFTWARE.
51 */ 51 */
52 52
53 53
54#ifndef _XEN_HYPERVISOR_H_ 54#ifndef _XEN_HYPERVISOR_H_
55#define _XEN_HYPERVISOR_H_ 55#define _XEN_HYPERVISOR_H_
56 56
57#include "opt_xen.h" 57#include "opt_xen.h"
58#include "isa.h" 58#include "isa.h"
59#include "pci.h" 59#include "pci.h"
60 60
 61struct cpu_info;
 62
 63int xen_hvm_init(void);
 64int xen_hvm_init_cpu(struct cpu_info *);
61void xen_mainbus_attach(device_t, device_t, void *); 65void xen_mainbus_attach(device_t, device_t, void *);
62 66
63struct hypervisor_attach_args { 67struct hypervisor_attach_args {
64 const char *haa_busname; 68 const char *haa_busname;
65}; 69};
66 70
67struct xencons_attach_args { 71struct xencons_attach_args {
68 const char *xa_device; 72 const char *xa_device;
69}; 73};
70 74
71struct xen_npx_attach_args { 75struct xen_npx_attach_args {
72 const char *xa_device; 76 const char *xa_device;
73}; 77};

cvs diff -r1.44 -r1.44.8.1 src/sys/arch/xen/include/xen.h (expand / switch to unified diff)

--- src/sys/arch/xen/include/xen.h 2019/05/09 17:09:50 1.44
+++ src/sys/arch/xen/include/xen.h 2020/04/18 15:06:18 1.44.8.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen.h,v 1.44 2019/05/09 17:09:50 bouyer Exp $ */ 1/* $NetBSD: xen.h,v 1.44.8.1 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team) 5 * Copyright (c) 2003, 2004 Keir Fraser (on behalf of the Xen team)
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy 8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to 9 * of this software and associated documentation files (the "Software"), to
10 * deal in the Software without restriction, including without limitation the 10 * deal in the Software without restriction, including without limitation the
11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 11 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
12 * sell copies of the Software, and to permit persons to whom the Software is 12 * sell copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions: 13 * furnished to do so, subject to the following conditions:
14 *  14 *
@@ -60,28 +60,26 @@ void xen_parse_cmdline(int, union xen_cm @@ -60,28 +60,26 @@ void xen_parse_cmdline(int, union xen_cm
60 60
61void xenconscn_attach(void); 61void xenconscn_attach(void);
62 62
63void xenprivcmd_init(void); 63void xenprivcmd_init(void);
64 64
65void xbdback_init(void); 65void xbdback_init(void);
66void xennetback_init(void); 66void xennetback_init(void);
67void xen_shm_init(void); 67void xen_shm_init(void);
68 68
69void xenevt_event(int); 69void xenevt_event(int);
70void xenevt_setipending(int, int); 70void xenevt_setipending(int, int);
71void xenevt_notify(void); 71void xenevt_notify(void);
72 72
73void idle_block(void); 
74 
75/* xen_machdep.c */ 73/* xen_machdep.c */
76void sysctl_xen_suspend_setup(void); 74void sysctl_xen_suspend_setup(void);
77 75
78#include <sys/stdarg.h> 76#include <sys/stdarg.h>
79void printk(const char *, ...); 77void printk(const char *, ...);
80 78
81#endif 79#endif
82 80
83#endif /* _XEN_H */ 81#endif /* _XEN_H */
84 82
85/****************************************************************************** 83/******************************************************************************
86 * os.h 84 * os.h
87 *  85 *

cvs diff -r1.133 -r1.133.4.1 src/sys/arch/xen/x86/cpu.c (expand / switch to unified diff)

--- src/sys/arch/xen/x86/cpu.c 2020/02/24 12:20:29 1.133
+++ src/sys/arch/xen/x86/cpu.c 2020/04/18 15:06:18 1.133.4.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cpu.c,v 1.133 2020/02/24 12:20:29 rin Exp $ */ 1/* $NetBSD: cpu.c,v 1.133.4.1 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc. 4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi, 5 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi,
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by RedBack Networks Inc. 9 * by RedBack Networks Inc.
10 * 10 *
11 * Author: Bill Sommerfeld 11 * Author: Bill Sommerfeld
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -55,27 +55,27 @@ @@ -55,27 +55,27 @@
55 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 55 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE. 64 * SUCH DAMAGE.
65 */ 65 */
66 66
67#include <sys/cdefs.h> 67#include <sys/cdefs.h>
68__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.133 2020/02/24 12:20:29 rin Exp $"); 68__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.133.4.1 2020/04/18 15:06:18 bouyer Exp $");
69 69
70#include "opt_ddb.h" 70#include "opt_ddb.h"
71#include "opt_multiprocessor.h" 71#include "opt_multiprocessor.h"
72#include "opt_mpbios.h" /* for MPDEBUG */ 72#include "opt_mpbios.h" /* for MPDEBUG */
73#include "opt_mtrr.h" 73#include "opt_mtrr.h"
74#include "opt_xen.h" 74#include "opt_xen.h"
75 75
76#include "lapic.h" 76#include "lapic.h"
77#include "ioapic.h" 77#include "ioapic.h"
78 78
79#include <sys/param.h> 79#include <sys/param.h>
80#include <sys/proc.h> 80#include <sys/proc.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
@@ -307,27 +307,27 @@ vcpu_attach(device_t parent, device_t se @@ -307,27 +307,27 @@ vcpu_attach(device_t parent, device_t se
307 307
308 KASSERT(vcaa->vcaa_caa.cpu_func == NULL); 308 KASSERT(vcaa->vcaa_caa.cpu_func == NULL);
309 vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs; 309 vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs;
310 cpu_attach_common(parent, self, &vcaa->vcaa_caa); 310 cpu_attach_common(parent, self, &vcaa->vcaa_caa);
311 311
312 if (!pmf_device_register(self, NULL, NULL)) 312 if (!pmf_device_register(self, NULL, NULL))
313 aprint_error_dev(self, "couldn't establish power handler\n"); 313 aprint_error_dev(self, "couldn't establish power handler\n");
314} 314}
315 315
316static int 316static int
317vcpu_is_up(struct cpu_info *ci) 317vcpu_is_up(struct cpu_info *ci)
318{ 318{
319 KASSERT(ci != NULL); 319 KASSERT(ci != NULL);
320 return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_cpuid, NULL); 320 return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_vcpuid, NULL);
321} 321}
322 322
323static void 323static void
324cpu_vm_init(struct cpu_info *ci) 324cpu_vm_init(struct cpu_info *ci)
325{ 325{
326 int ncolors = 2, i; 326 int ncolors = 2, i;
327 327
328 for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) { 328 for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
329 struct x86_cache_info *cai; 329 struct x86_cache_info *cai;
330 int tcolors; 330 int tcolors;
331 331
332 cai = &ci->ci_cinfo[i]; 332 cai = &ci->ci_cinfo[i];
333 333
@@ -380,26 +380,27 @@ cpu_attach_common(device_t parent, devic @@ -380,26 +380,27 @@ cpu_attach_common(device_t parent, devic
380 ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE); 380 ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE);
381 memset(ci, 0, sizeof(*ci)); 381 memset(ci, 0, sizeof(*ci));
382 cpu_init_tss(ci); 382 cpu_init_tss(ci);
383 } else { 383 } else {
384 aprint_naive(": %s Processor\n", 384 aprint_naive(": %s Processor\n",
385 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); 385 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot");
386 ci = &cpu_info_primary; 386 ci = &cpu_info_primary;
387 } 387 }
388 388
389 ci->ci_self = ci; 389 ci->ci_self = ci;
390 sc->sc_info = ci; 390 sc->sc_info = ci;
391 ci->ci_dev = self; 391 ci->ci_dev = self;
392 ci->ci_cpuid = cpunum; 392 ci->ci_cpuid = cpunum;
 393 ci->ci_vcpuid = cpunum;
393 394
394 KASSERT(HYPERVISOR_shared_info != NULL); 395 KASSERT(HYPERVISOR_shared_info != NULL);
395 KASSERT(cpunum < XEN_LEGACY_MAX_VCPUS); 396 KASSERT(cpunum < XEN_LEGACY_MAX_VCPUS);
396 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum]; 397 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum];
397 398
398 KASSERT(ci->ci_func == 0); 399 KASSERT(ci->ci_func == 0);
399 ci->ci_func = caa->cpu_func; 400 ci->ci_func = caa->cpu_func;
400 aprint_normal("\n"); 401 aprint_normal("\n");
401 402
402 /* Must be called before mi_cpu_attach(). */ 403 /* Must be called before mi_cpu_attach(). */
403 cpu_vm_init(ci); 404 cpu_vm_init(ci);
404 405
405 if (caa->cpu_role == CPU_ROLE_AP) { 406 if (caa->cpu_role == CPU_ROLE_AP) {
@@ -445,32 +446,34 @@ cpu_attach_common(device_t parent, devic @@ -445,32 +446,34 @@ cpu_attach_common(device_t parent, devic
445 446
446 /* Make sure DELAY() is initialized. */ 447 /* Make sure DELAY() is initialized. */
447 DELAY(1); 448 DELAY(1);
448 again = true; 449 again = true;
449 } 450 }
450 451
451 /* further PCB init done later. */ 452 /* further PCB init done later. */
452 453
453 switch (caa->cpu_role) { 454 switch (caa->cpu_role) {
454 case CPU_ROLE_SP: 455 case CPU_ROLE_SP:
455 atomic_or_32(&ci->ci_flags, CPUF_SP); 456 atomic_or_32(&ci->ci_flags, CPUF_SP);
456 cpu_identify(ci); 457 cpu_identify(ci);
457 x86_cpu_idle_init(); 458 x86_cpu_idle_init();
 459 xen_cpu_initclocks();
458 break; 460 break;
459 461
460 case CPU_ROLE_BP: 462 case CPU_ROLE_BP:
461 atomic_or_32(&ci->ci_flags, CPUF_BSP); 463 atomic_or_32(&ci->ci_flags, CPUF_BSP);
462 cpu_identify(ci); 464 cpu_identify(ci);
463 x86_cpu_idle_init(); 465 x86_cpu_idle_init();
 466 xen_cpu_initclocks();
464 break; 467 break;
465 468
466 case CPU_ROLE_AP: 469 case CPU_ROLE_AP:
467 atomic_or_32(&ci->ci_flags, CPUF_AP); 470 atomic_or_32(&ci->ci_flags, CPUF_AP);
468 471
469 /* 472 /*
470 * report on an AP 473 * report on an AP
471 */ 474 */
472 475
473#if defined(MULTIPROCESSOR) 476#if defined(MULTIPROCESSOR)
474 /* interrupt handler stack */ 477 /* interrupt handler stack */
475 cpu_intr_init(ci); 478 cpu_intr_init(ci);
476 479
@@ -713,27 +716,27 @@ cpu_hatch(void *v) @@ -713,27 +716,27 @@ cpu_hatch(void *v)
713 /* Because the text may have been patched in x86_patch(). */ 716 /* Because the text may have been patched in x86_patch(). */
714 x86_flush(); 717 x86_flush();
715 tlbflushg(); 718 tlbflushg();
716 719
717 KASSERT((ci->ci_flags & CPUF_RUNNING) == 0); 720 KASSERT((ci->ci_flags & CPUF_RUNNING) == 0);
718 721
719 KASSERT(ci->ci_curlwp == ci->ci_data.cpu_idlelwp); 722 KASSERT(ci->ci_curlwp == ci->ci_data.cpu_idlelwp);
720 KASSERT(curlwp == ci->ci_data.cpu_idlelwp); 723 KASSERT(curlwp == ci->ci_data.cpu_idlelwp);
721 pcb = lwp_getpcb(curlwp); 724 pcb = lwp_getpcb(curlwp);
722 pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0); 725 pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0);
723 726
724 xen_ipi_init(); 727 xen_ipi_init();
725 728
726 xen_initclocks(); 729 xen_cpu_initclocks();
727 730
728#ifdef __x86_64__ 731#ifdef __x86_64__
729 fpuinit(ci); 732 fpuinit(ci);
730#endif 733#endif
731 734
732 lldt(GSEL(GLDT_SEL, SEL_KPL)); 735 lldt(GSEL(GLDT_SEL, SEL_KPL));
733 736
734 cpu_init(ci); 737 cpu_init(ci);
735 cpu_get_tsc_freq(ci); 738 cpu_get_tsc_freq(ci);
736 739
737 s = splhigh(); 740 s = splhigh();
738 x86_enable_intr(); 741 x86_enable_intr();
739 splx(s); 742 splx(s);
@@ -754,27 +757,27 @@ cpu_hatch(void *v) @@ -754,27 +757,27 @@ cpu_hatch(void *v)
754 * Dump CPU information from ddb. 757 * Dump CPU information from ddb.
755 */ 758 */
756void 759void
757cpu_debug_dump(void) 760cpu_debug_dump(void)
758{ 761{
759 struct cpu_info *ci; 762 struct cpu_info *ci;
760 CPU_INFO_ITERATOR cii; 763 CPU_INFO_ITERATOR cii;
761 764
762 db_printf("addr dev id flags ipis curlwp\n"); 765 db_printf("addr dev id flags ipis curlwp\n");
763 for (CPU_INFO_FOREACH(cii, ci)) { 766 for (CPU_INFO_FOREACH(cii, ci)) {
764 db_printf("%p %s %ld %x %x %10p\n", 767 db_printf("%p %s %ld %x %x %10p\n",
765 ci, 768 ci,
766 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev), 769 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev),
767 (long)ci->ci_cpuid, 770 (long)ci->ci_vcpuid,
768 ci->ci_flags, ci->ci_ipis, 771 ci->ci_flags, ci->ci_ipis,
769 ci->ci_curlwp); 772 ci->ci_curlwp);
770 } 773 }
771} 774}
772#endif /* DDB */ 775#endif /* DDB */
773 776
774#endif /* MULTIPROCESSOR */ 777#endif /* MULTIPROCESSOR */
775 778
776extern void hypervisor_callback(void); 779extern void hypervisor_callback(void);
777extern void failsafe_callback(void); 780extern void failsafe_callback(void);
778#ifdef __x86_64__ 781#ifdef __x86_64__
779typedef void (vector)(void); 782typedef void (vector)(void);
780extern vector Xsyscall, Xsyscall32; 783extern vector Xsyscall, Xsyscall32;
@@ -1001,40 +1004,40 @@ mp_cpu_start(struct cpu_info *ci, vaddr_ @@ -1001,40 +1004,40 @@ mp_cpu_start(struct cpu_info *ci, vaddr_
1001 struct vcpu_guest_context vcpuctx; 1004 struct vcpu_guest_context vcpuctx;
1002 1005
1003 KASSERT(ci != NULL); 1006 KASSERT(ci != NULL);
1004 KASSERT(ci != &cpu_info_primary); 1007 KASSERT(ci != &cpu_info_primary);
1005 KASSERT(ci->ci_flags & CPUF_AP); 1008 KASSERT(ci->ci_flags & CPUF_AP);
1006 1009
1007#ifdef __x86_64__ 1010#ifdef __x86_64__
1008 xen_init_amd64_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target); 1011 xen_init_amd64_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
1009#else 1012#else
1010 xen_init_i386_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target); 1013 xen_init_i386_vcpuctxt(ci, &vcpuctx, (void (*)(struct cpu_info *))target);
1011#endif 1014#endif
1012 1015
1013 /* Initialise the given vcpu to execute cpu_hatch(ci); */ 1016 /* Initialise the given vcpu to execute cpu_hatch(ci); */
1014 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise, ci->ci_cpuid, &vcpuctx))) { 1017 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise, ci->ci_vcpuid, &vcpuctx))) {
1015 aprint_error(": context initialisation failed. errno = %d\n", hyperror); 1018 aprint_error(": context initialisation failed. errno = %d\n", hyperror);
1016 return hyperror; 1019 return hyperror;
1017 } 1020 }
1018 1021
1019 /* Start it up */ 1022 /* Start it up */
1020 1023
1021 /* First bring it down */ 1024 /* First bring it down */
1022 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL))) { 1025 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL))) {
1023 aprint_error(": VCPUOP_down hypervisor command failed. errno = %d\n", hyperror); 1026 aprint_error(": VCPUOP_down hypervisor command failed. errno = %d\n", hyperror);
1024 return hyperror; 1027 return hyperror;
1025 } 1028 }
1026 1029
1027 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_cpuid, NULL))) { 1030 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_vcpuid, NULL))) {
1028 aprint_error(": VCPUOP_up hypervisor command failed. errno = %d\n", hyperror); 1031 aprint_error(": VCPUOP_up hypervisor command failed. errno = %d\n", hyperror);
1029 return hyperror; 1032 return hyperror;
1030 } 1033 }
1031 1034
1032 if (!vcpu_is_up(ci)) { 1035 if (!vcpu_is_up(ci)) {
1033 aprint_error(": did not come up\n"); 1036 aprint_error(": did not come up\n");
1034 return -1; 1037 return -1;
1035 } 1038 }
1036 1039
1037 return 0; 1040 return 0;
1038} 1041}
1039 1042
1040void 1043void
@@ -1076,41 +1079,26 @@ cpu_get_tsc_freq(struct cpu_info *ci) @@ -1076,41 +1079,26 @@ cpu_get_tsc_freq(struct cpu_info *ci)
1076 1079
1077 vcpu_tversion = tinfo->version; 1080 vcpu_tversion = tinfo->version;
1078 while (tinfo->version == vcpu_tversion); /* Wait for a time update. XXX: timeout ? */ 1081 while (tinfo->version == vcpu_tversion); /* Wait for a time update. XXX: timeout ? */
1079 1082
1080 uint64_t freq = 1000000000ULL << 32; 1083 uint64_t freq = 1000000000ULL << 32;
1081 freq = freq / (uint64_t)tinfo->tsc_to_system_mul; 1084 freq = freq / (uint64_t)tinfo->tsc_to_system_mul;
1082 if (tinfo->tsc_shift < 0) 1085 if (tinfo->tsc_shift < 0)
1083 freq = freq << -tinfo->tsc_shift; 1086 freq = freq << -tinfo->tsc_shift;
1084 else 1087 else
1085 freq = freq >> tinfo->tsc_shift; 1088 freq = freq >> tinfo->tsc_shift;
1086 ci->ci_data.cpu_cc_freq = freq; 1089 ci->ci_data.cpu_cc_freq = freq;
1087} 1090}
1088 1091
1089void 
1090x86_cpu_idle_xen(void) 
1091{ 
1092 struct cpu_info *ci = curcpu(); 
1093  
1094 KASSERT(ci->ci_ilevel == IPL_NONE); 
1095 
1096 x86_disable_intr(); 
1097 if (!__predict_false(ci->ci_want_resched)) { 
1098 idle_block(); 
1099 } else { 
1100 x86_enable_intr(); 
1101 } 
1102} 
1103 
1104/* 1092/*
1105 * Loads pmap for the current CPU. 1093 * Loads pmap for the current CPU.
1106 */ 1094 */
1107void 1095void
1108cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap) 1096cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
1109{ 1097{
1110 struct cpu_info *ci = curcpu(); 1098 struct cpu_info *ci = curcpu();
1111 cpuid_t cid = cpu_index(ci); 1099 cpuid_t cid = cpu_index(ci);
1112 int i; 1100 int i;
1113 1101
1114 KASSERT(pmap != pmap_kernel()); 1102 KASSERT(pmap != pmap_kernel());
1115 1103
1116 mutex_enter(&ci->ci_kpm_mtx); 1104 mutex_enter(&ci->ci_kpm_mtx);

cvs diff -r1.36.8.3 -r1.36.8.4 src/sys/arch/xen/x86/hypervisor_machdep.c (expand / switch to unified diff)

--- src/sys/arch/xen/x86/hypervisor_machdep.c 2020/04/16 17:50:52 1.36.8.3
+++ src/sys/arch/xen/x86/hypervisor_machdep.c 2020/04/18 15:06:18 1.36.8.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: hypervisor_machdep.c,v 1.36.8.3 2020/04/16 17:50:52 bouyer Exp $ */ 1/* $NetBSD: hypervisor_machdep.c,v 1.36.8.4 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * 4 *
5 * Copyright (c) 2004 Christian Limpach. 5 * Copyright (c) 2004 Christian Limpach.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -44,38 +44,41 @@ @@ -44,38 +44,41 @@
44 * all copies or substantial portions of the Software. 44 * all copies or substantial portions of the Software.
45 *  45 *
46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR  46 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,  47 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE  48 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
49 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER  49 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
50 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING  50 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
51 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER  51 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
52 * DEALINGS IN THE SOFTWARE. 52 * DEALINGS IN THE SOFTWARE.
53 */ 53 */
54 54
55 55
56#include <sys/cdefs.h> 56#include <sys/cdefs.h>
57__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36.8.3 2020/04/16 17:50:52 bouyer Exp $"); 57__KERNEL_RCSID(0, "$NetBSD: hypervisor_machdep.c,v 1.36.8.4 2020/04/18 15:06:18 bouyer Exp $");
58 58
59#include <sys/param.h> 59#include <sys/param.h>
60#include <sys/systm.h> 60#include <sys/systm.h>
61#include <sys/kmem.h> 61#include <sys/kmem.h>
62#include <sys/cpu.h> 62#include <sys/cpu.h>
63 63
64#include <uvm/uvm_extern.h> 64#include <uvm/uvm_extern.h>
65 65
66#include <machine/vmparam.h> 66#include <machine/vmparam.h>
67#include <machine/pmap.h> 67#include <machine/pmap.h>
68 68
 69#include <x86/machdep.h>
 70#include <x86/cpuvar.h>
 71
69#include <xen/xen.h> 72#include <xen/xen.h>
70#include <xen/intr.h> 73#include <xen/intr.h>
71#include <xen/hypervisor.h> 74#include <xen/hypervisor.h>
72#include <xen/evtchn.h> 75#include <xen/evtchn.h>
73#include <xen/xenpmap.h> 76#include <xen/xenpmap.h>
74 77
75#include "opt_xen.h" 78#include "opt_xen.h"
76#include "isa.h" 79#include "isa.h"
77#include "pci.h" 80#include "pci.h"
78 81
79#ifdef XENPV 82#ifdef XENPV
80/* 83/*
81 * arch-dependent p2m frame lists list (L3 and L2) 84 * arch-dependent p2m frame lists list (L3 and L2)
@@ -305,28 +308,28 @@ hypervisor_send_event(struct cpu_info *c @@ -305,28 +308,28 @@ hypervisor_send_event(struct cpu_info *c
305 308
306 if (__predict_false(ci == curcpu())) { 309 if (__predict_false(ci == curcpu())) {
307 xen_atomic_set_bit(&vci->evtchn_pending_sel, 310 xen_atomic_set_bit(&vci->evtchn_pending_sel,
308 ev >> LONG_SHIFT); 311 ev >> LONG_SHIFT);
309 xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0); 312 xen_atomic_set_bit(&vci->evtchn_upcall_pending, 0);
310 } 313 }
311 314
312 xen_atomic_clear_bit(&s->evtchn_mask[0], ev); 315 xen_atomic_clear_bit(&s->evtchn_mask[0], ev);
313 316
314 if (__predict_true(ci == curcpu())) { 317 if (__predict_true(ci == curcpu())) {
315 hypervisor_force_callback(); 318 hypervisor_force_callback();
316 } else { 319 } else {
317 if (__predict_false(xen_send_ipi(ci, XEN_IPI_HVCB))) { 320 if (__predict_false(xen_send_ipi(ci, XEN_IPI_HVCB))) {
318 panic("xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n", 321 panic("xen_send_ipi(cpu%d id %d, XEN_IPI_HVCB) failed\n",
319 (int) ci->ci_cpuid); 322 (int) ci->ci_cpuid, ci->ci_vcpuid);
320 } 323 }
321 } 324 }
322} 325}
323 326
324void 327void
325hypervisor_unmask_event(unsigned int ev) 328hypervisor_unmask_event(unsigned int ev)
326{ 329{
327 330
328 KASSERT(ev > 0 && ev < NR_EVENT_CHANNELS); 331 KASSERT(ev > 0 && ev < NR_EVENT_CHANNELS);
329 332
330#ifdef PORT_DEBUG 333#ifdef PORT_DEBUG
331 if (ev == PORT_DEBUG) 334 if (ev == PORT_DEBUG)
332 printf("hypervisor_unmask_event %d\n", ev); 335 printf("hypervisor_unmask_event %d\n", ev);
@@ -412,54 +415,83 @@ hypervisor_set_ipending(uint32_t imask,  @@ -412,54 +415,83 @@ hypervisor_set_ipending(uint32_t imask,
412 * from high to low, this ensure that all callbacks will have been 415 * from high to low, this ensure that all callbacks will have been
413 * called when we ack the event 416 * called when we ack the event
414 */ 417 */
415 sir = ffs(imask); 418 sir = ffs(imask);
416 KASSERT(sir > SIR_XENIPL_VM); 419 KASSERT(sir > SIR_XENIPL_VM);
417 sir--; 420 sir--;
418 KASSERT(sir <= SIR_XENIPL_HIGH); 421 KASSERT(sir <= SIR_XENIPL_HIGH);
419 KASSERT(ci->ci_isources[sir] != NULL); 422 KASSERT(ci->ci_isources[sir] != NULL);
420 ci->ci_isources[sir]->ipl_evt_mask1 |= 1UL << l1; 423 ci->ci_isources[sir]->ipl_evt_mask1 |= 1UL << l1;
421 ci->ci_isources[sir]->ipl_evt_mask2[l1] |= 1UL << l2; 424 ci->ci_isources[sir]->ipl_evt_mask2[l1] |= 1UL << l2;
422 if (__predict_false(ci != curcpu())) { 425 if (__predict_false(ci != curcpu())) {
423 if (xen_send_ipi(ci, XEN_IPI_HVCB)) { 426 if (xen_send_ipi(ci, XEN_IPI_HVCB)) {
424 panic("hypervisor_set_ipending: " 427 panic("hypervisor_set_ipending: "
425 "xen_send_ipi(cpu%d, XEN_IPI_HVCB) failed\n", 428 "xen_send_ipi(cpu%d id %d, XEN_IPI_HVCB) failed\n",
426 (int) ci->ci_cpuid); 429 (int) ci->ci_cpuid, ci->ci_vcpuid);
427 } 430 }
428 } 431 }
429} 432}
430 433
431void 434void
432hypervisor_machdep_attach(void) 435hypervisor_machdep_attach(void)
433{ 436{
434#ifdef XENPV 437#ifdef XENPV
435 /* dom0 does not require the arch-dependent P2M translation table */ 438 /* dom0 does not require the arch-dependent P2M translation table */
436 if (!xendomain_is_dom0()) { 439 if (!xendomain_is_dom0()) {
437 build_p2m_frame_list_list(); 440 build_p2m_frame_list_list();
438 sysctl_xen_suspend_setup(); 441 sysctl_xen_suspend_setup();
439 } 442 }
440#endif 443#endif
441} 444}
442 445
443void 446void
444hypervisor_machdep_resume(void) 447hypervisor_machdep_resume(void)
445{ 448{
446#ifdef XENPV 449#ifdef XENPV
447 /* dom0 does not require the arch-dependent P2M translation table */ 450 /* dom0 does not require the arch-dependent P2M translation table */
448 if (!xendomain_is_dom0()) 451 if (!xendomain_is_dom0())
449 update_p2m_frame_list_list(); 452 update_p2m_frame_list_list();
450#endif 453#endif
451} 454}
452 455
 456/*
 457 * idle_block()
 458 *
 459 * Called from the idle loop when we have nothing to do but wait
 460 * for an interrupt.
 461 */
 462static void
 463idle_block(void)
 464{
 465 KASSERT(curcpu()->ci_ipending == 0);
 466 HYPERVISOR_block();
 467 KASSERT(curcpu()->ci_ipending == 0);
 468}
 469
 470void
 471x86_cpu_idle_xen(void)
 472{
 473 struct cpu_info *ci = curcpu();
 474
 475 KASSERT(ci->ci_ilevel == IPL_NONE);
 476
 477 x86_disable_intr();
 478 if (!__predict_false(ci->ci_want_resched)) {
 479 idle_block();
 480 } else {
 481 x86_enable_intr();
 482 }
 483}
 484
453#ifdef XENPV 485#ifdef XENPV
454/* 486/*
455 * Generate the p2m_frame_list_list table, 487 * Generate the p2m_frame_list_list table,
456 * needed for guest save/restore 488 * needed for guest save/restore
457 */ 489 */
458static void 490static void
459build_p2m_frame_list_list(void) 491build_p2m_frame_list_list(void)
460{ 492{
461 int fpp; /* number of page (frame) pointer per page */ 493 int fpp; /* number of page (frame) pointer per page */
462 unsigned long max_pfn; 494 unsigned long max_pfn;
463 /* 495 /*
464 * The p2m list is composed of three levels of indirection, 496 * The p2m list is composed of three levels of indirection,
465 * each layer containing MFNs pointing to lower level pages 497 * each layer containing MFNs pointing to lower level pages

cvs diff -r1.35.6.3 -r1.35.6.4 src/sys/arch/xen/x86/xen_ipi.c (expand / switch to unified diff)

--- src/sys/arch/xen/x86/xen_ipi.c 2020/04/16 08:46:35 1.35.6.3
+++ src/sys/arch/xen/x86/xen_ipi.c 2020/04/18 15:06:18 1.35.6.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_ipi.c,v 1.35.6.3 2020/04/16 08:46:35 bouyer Exp $ */ 1/* $NetBSD: xen_ipi.c,v 1.35.6.4 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Cherry G. Mathew <cherry@zyx.in> 8 * by Cherry G. Mathew <cherry@zyx.in>
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -23,30 +23,30 @@ @@ -23,30 +23,30 @@
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> /* RCS ID macro */ 32#include <sys/cdefs.h> /* RCS ID macro */
33 33
34/*  34/*
35 * Based on: x86/ipi.c 35 * Based on: x86/ipi.c
36 * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.3 2020/04/16 08:46:35 bouyer Exp $"); 36 * __KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.4 2020/04/18 15:06:18 bouyer Exp $");
37 */ 37 */
38 38
39__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.3 2020/04/16 08:46:35 bouyer Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: xen_ipi.c,v 1.35.6.4 2020/04/18 15:06:18 bouyer Exp $");
40 40
41#include "opt_ddb.h" 41#include "opt_ddb.h"
42 42
43#include <sys/types.h> 43#include <sys/types.h>
44 44
45#include <sys/atomic.h> 45#include <sys/atomic.h>
46#include <sys/cpu.h> 46#include <sys/cpu.h>
47#include <sys/mutex.h> 47#include <sys/mutex.h>
48#include <sys/device.h> 48#include <sys/device.h>
49#include <sys/xcall.h> 49#include <sys/xcall.h>
50#include <sys/ipi.h> 50#include <sys/ipi.h>
51#include <sys/errno.h> 51#include <sys/errno.h>
52#include <sys/systm.h> 52#include <sys/systm.h>
@@ -122,27 +122,27 @@ xen_ipi_handler(void *arg) @@ -122,27 +122,27 @@ xen_ipi_handler(void *arg)
122} 122}
123 123
124/* Must be called once for every cpu that expects to send/recv ipis */ 124/* Must be called once for every cpu that expects to send/recv ipis */
125void 125void
126xen_ipi_init(void) 126xen_ipi_init(void)
127{ 127{
128 cpuid_t vcpu; 128 cpuid_t vcpu;
129 evtchn_port_t evtchn; 129 evtchn_port_t evtchn;
130 struct cpu_info *ci; 130 struct cpu_info *ci;
131 char intr_xname[INTRDEVNAMEBUF]; 131 char intr_xname[INTRDEVNAMEBUF];
132 132
133 ci = curcpu(); 133 ci = curcpu();
134 134
135 vcpu = ci->ci_cpuid; 135 vcpu = ci->ci_vcpuid;
136 KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS); 136 KASSERT(vcpu < XEN_LEGACY_MAX_VCPUS);
137 137
138 evtchn = bind_vcpu_to_evtch(vcpu); 138 evtchn = bind_vcpu_to_evtch(vcpu);
139 ci->ci_ipi_evtchn = evtchn; 139 ci->ci_ipi_evtchn = evtchn;
140 140
141 KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS); 141 KASSERT(evtchn != -1 && evtchn < NR_EVENT_CHANNELS);
142 142
143 snprintf(intr_xname, sizeof(intr_xname), "%s ipi", 143 snprintf(intr_xname, sizeof(intr_xname), "%s ipi",
144 device_xname(ci->ci_dev)); 144 device_xname(ci->ci_dev));
145 145
146 if (xen_intr_establish_xname(-1, &xen_pic, evtchn, IST_LEVEL, IPL_HIGH, 146 if (xen_intr_establish_xname(-1, &xen_pic, evtchn, IST_LEVEL, IPL_HIGH,
147 xen_ipi_handler, ci, true, intr_xname) == NULL) { 147 xen_ipi_handler, ci, true, intr_xname) == NULL) {
148 panic("%s: unable to register ipi handler\n", __func__); 148 panic("%s: unable to register ipi handler\n", __func__);
@@ -221,27 +221,27 @@ xen_broadcast_ipi(uint32_t ipimask) @@ -221,27 +221,27 @@ xen_broadcast_ipi(uint32_t ipimask)
221 cpu_name(ci)); 221 cpu_name(ci));
222 } 222 }
223 } 223 }
224 } 224 }
225} 225}
226 226
227/* MD wrapper for the xcall(9) callback. */ 227/* MD wrapper for the xcall(9) callback. */
228 228
229static void 229static void
230xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf) 230xen_ipi_halt(struct cpu_info *ci, struct intrframe *intrf)
231{ 231{
232 KASSERT(ci == curcpu()); 232 KASSERT(ci == curcpu());
233 KASSERT(ci != NULL); 233 KASSERT(ci != NULL);
234 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_cpuid, NULL)) { 234 if (HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL)) {
235 panic("%s shutdown failed.\n", device_xname(ci->ci_dev)); 235 panic("%s shutdown failed.\n", device_xname(ci->ci_dev));
236 } 236 }
237 237
238} 238}
239 239
240static void 240static void
241xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf) 241xen_ipi_synch_fpu(struct cpu_info *ci, struct intrframe *intrf)
242{ 242{
243 KASSERT(ci != NULL); 243 KASSERT(ci != NULL);
244 KASSERT(intrf != NULL); 244 KASSERT(intrf != NULL);
245 245
246 panic("%s: impossible", __func__); 246 panic("%s: impossible", __func__);
247} 247}

cvs diff -r1.6.12.2 -r1.6.12.3 src/sys/arch/xen/x86/xen_mainbus.c (expand / switch to unified diff)

--- src/sys/arch/xen/x86/xen_mainbus.c 2020/04/16 17:46:44 1.6.12.2
+++ src/sys/arch/xen/x86/xen_mainbus.c 2020/04/18 15:06:18 1.6.12.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_mainbus.c,v 1.6.12.2 2020/04/16 17:46:44 bouyer Exp $ */ 1/* $NetBSD: xen_mainbus.c,v 1.6.12.3 2020/04/18 15:06:18 bouyer Exp $ */
2/* NetBSD: mainbus.c,v 1.19 2017/05/23 08:54:39 nonaka Exp */ 2/* NetBSD: mainbus.c,v 1.19 2017/05/23 08:54:39 nonaka Exp */
3/* NetBSD: mainbus.c,v 1.53 2003/10/27 14:11:47 junyoung Exp */ 3/* NetBSD: mainbus.c,v 1.53 2003/10/27 14:11:47 junyoung Exp */
4 4
5/* 5/*
6 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 6 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -23,27 +23,27 @@ @@ -23,27 +23,27 @@
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: xen_mainbus.c,v 1.6.12.2 2020/04/16 17:46:44 bouyer Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: xen_mainbus.c,v 1.6.12.3 2020/04/18 15:06:18 bouyer Exp $");
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/device.h> 40#include <sys/device.h>
41 41
42#include <sys/bus.h> 42#include <sys/bus.h>
43 43
44#include "hypervisor.h" 44#include "hypervisor.h"
45#include "pci.h" 45#include "pci.h"
46 46
47#include "opt_xen.h" 47#include "opt_xen.h"
48#include "opt_mpbios.h" 48#include "opt_mpbios.h"
49#include "opt_pcifixup.h" 49#include "opt_pcifixup.h"
@@ -121,27 +121,27 @@ xen_mainbus_attach(device_t parent, devi @@ -121,27 +121,27 @@ xen_mainbus_attach(device_t parent, devi
121{ 121{
122 union xen_mainbus_attach_args mba; 122 union xen_mainbus_attach_args mba;
123 123
124 switch(vm_guest) { 124 switch(vm_guest) {
125 case VM_GUEST_XENPV: 125 case VM_GUEST_XENPV:
126#if NIPMI > 0 && defined(XENPV) 126#if NIPMI > 0 && defined(XENPV)
127 memset(&mba.mba_ipmi, 0, sizeof(mba.mba_ipmi)); 127 memset(&mba.mba_ipmi, 0, sizeof(mba.mba_ipmi));
128 mba.mba_ipmi.iaa_iot = x86_bus_space_io; 128 mba.mba_ipmi.iaa_iot = x86_bus_space_io;
129 mba.mba_ipmi.iaa_memt = x86_bus_space_mem; 129 mba.mba_ipmi.iaa_memt = x86_bus_space_mem;
130 if (ipmi_probe(&mba.mba_ipmi)) 130 if (ipmi_probe(&mba.mba_ipmi))
131 config_found_ia(self, "ipmibus", &mba.mba_ipmi, 0); 131 config_found_ia(self, "ipmibus", &mba.mba_ipmi, 0);
132#endif 132#endif
133 /* FALLTHROUGH */ 133 /* FALLTHROUGH */
134 case VM_GUEST_XENHVM: 134 case VM_GUEST_XENPVHVM:
135 mba.mba_haa.haa_busname = "hypervisor"; 135 mba.mba_haa.haa_busname = "hypervisor";
136 config_found_ia(self, "hypervisorbus", 136 config_found_ia(self, "hypervisorbus",
137 &mba.mba_haa, xen_mainbus_print); 137 &mba.mba_haa, xen_mainbus_print);
138 break; 138 break;
139 default: 139 default:
140 return; 140 return;
141 } 141 }
142 142
143 if (vm_guest == VM_GUEST_XENPV) { 143 if (vm_guest == VM_GUEST_XENPV) {
144 /* save/restore for Xen */ 144 /* save/restore for Xen */
145 if (!pmf_device_register(self, NULL, NULL)) 145 if (!pmf_device_register(self, NULL, NULL))
146 aprint_error_dev(self, 146 aprint_error_dev(self,
147 "couldn't establish power handler\n"); 147 "couldn't establish power handler\n");

cvs diff -r1.88.2.4 -r1.88.2.5 src/sys/arch/xen/xen/evtchn.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/evtchn.c 2020/04/16 08:46:36 1.88.2.4
+++ src/sys/arch/xen/xen/evtchn.c 2020/04/18 15:06:18 1.88.2.5
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: evtchn.c,v 1.88.2.4 2020/04/16 08:46:36 bouyer Exp $ */ 1/* $NetBSD: evtchn.c,v 1.88.2.5 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -44,27 +44,27 @@ @@ -44,27 +44,27 @@
44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 44 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 45 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 46 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 47 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 48 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 49 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 50 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 51 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 52 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 */ 53 */
54 54
55 55
56#include <sys/cdefs.h> 56#include <sys/cdefs.h>
57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.4 2020/04/16 08:46:36 bouyer Exp $"); 57__KERNEL_RCSID(0, "$NetBSD: evtchn.c,v 1.88.2.5 2020/04/18 15:06:18 bouyer Exp $");
58 58
59#include "opt_xen.h" 59#include "opt_xen.h"
60#include "isa.h" 60#include "isa.h"
61#include "pci.h" 61#include "pci.h"
62 62
63#include <sys/param.h> 63#include <sys/param.h>
64#include <sys/cpu.h> 64#include <sys/cpu.h>
65#include <sys/kernel.h> 65#include <sys/kernel.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/kmem.h> 69#include <sys/kmem.h>
70#include <sys/reboot.h> 70#include <sys/reboot.h>
@@ -554,86 +554,86 @@ bind_virq_to_evtch(int virq) @@ -554,86 +554,86 @@ bind_virq_to_evtch(int virq)
554 * Please re-visit this implementation when others are used. 554 * Please re-visit this implementation when others are used.
555 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs. 555 * Note: VIRQ_DEBUG is special-cased, and not used or bound on APs.
556 * XXX: event->virq/ipi can be unified in a linked-list 556 * XXX: event->virq/ipi can be unified in a linked-list
557 * implementation. 557 * implementation.
558 */ 558 */
559 struct cpu_info *ci = curcpu(); 559 struct cpu_info *ci = curcpu();
560 560
561 if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) { 561 if (virq == VIRQ_DEBUG && ci != &cpu_info_primary) {
562 mutex_spin_exit(&evtchn_lock); 562 mutex_spin_exit(&evtchn_lock);
563 return -1; 563 return -1;
564 } 564 }
565 565
566 if (virq == VIRQ_TIMER) { 566 if (virq == VIRQ_TIMER) {
567 evtchn = virq_timer_to_evtch[ci->ci_cpuid]; 567 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
568 } else { 568 } else {
569 evtchn = virq_to_evtch[virq]; 569 evtchn = virq_to_evtch[virq];
570 } 570 }
571 571
572 /* Allocate a channel if there is none already allocated */ 572 /* Allocate a channel if there is none already allocated */
573 if (evtchn == -1) { 573 if (evtchn == -1) {
574 op.cmd = EVTCHNOP_bind_virq; 574 op.cmd = EVTCHNOP_bind_virq;
575 op.u.bind_virq.virq = virq; 575 op.u.bind_virq.virq = virq;
576 op.u.bind_virq.vcpu = ci->ci_cpuid; 576 op.u.bind_virq.vcpu = ci->ci_vcpuid;
577 if (HYPERVISOR_event_channel_op(&op) != 0) 577 if (HYPERVISOR_event_channel_op(&op) != 0)
578 panic("Failed to bind virtual IRQ %d\n", virq); 578 panic("Failed to bind virtual IRQ %d\n", virq);
579 evtchn = op.u.bind_virq.port; 579 evtchn = op.u.bind_virq.port;
580 } 580 }
581 581
582 /* Set event channel */ 582 /* Set event channel */
583 if (virq == VIRQ_TIMER) { 583 if (virq == VIRQ_TIMER) {
584 virq_timer_to_evtch[ci->ci_cpuid] = evtchn; 584 virq_timer_to_evtch[ci->ci_vcpuid] = evtchn;
585 } else { 585 } else {
586 virq_to_evtch[virq] = evtchn; 586 virq_to_evtch[virq] = evtchn;
587 } 587 }
588 588
589 /* Increase ref counter */ 589 /* Increase ref counter */
590 evtch_bindcount[evtchn]++; 590 evtch_bindcount[evtchn]++;
591 591
592 mutex_spin_exit(&evtchn_lock); 592 mutex_spin_exit(&evtchn_lock);
593 593
594 return evtchn; 594 return evtchn;
595} 595}
596 596
597int 597int
598unbind_virq_from_evtch(int virq) 598unbind_virq_from_evtch(int virq)
599{ 599{
600 evtchn_op_t op; 600 evtchn_op_t op;
601 int evtchn; 601 int evtchn;
602 602
603 struct cpu_info *ci = curcpu(); 603 struct cpu_info *ci = curcpu();
604 604
605 if (virq == VIRQ_TIMER) { 605 if (virq == VIRQ_TIMER) {
606 evtchn = virq_timer_to_evtch[ci->ci_cpuid]; 606 evtchn = virq_timer_to_evtch[ci->ci_vcpuid];
607 } 607 }
608 else { 608 else {
609 evtchn = virq_to_evtch[virq]; 609 evtchn = virq_to_evtch[virq];
610 } 610 }
611 611
612 if (evtchn == -1) { 612 if (evtchn == -1) {
613 return -1; 613 return -1;
614 } 614 }
615 615
616 mutex_spin_enter(&evtchn_lock); 616 mutex_spin_enter(&evtchn_lock);
617 617
618 evtch_bindcount[evtchn]--; 618 evtch_bindcount[evtchn]--;
619 if (evtch_bindcount[evtchn] == 0) { 619 if (evtch_bindcount[evtchn] == 0) {
620 op.cmd = EVTCHNOP_close; 620 op.cmd = EVTCHNOP_close;
621 op.u.close.port = evtchn; 621 op.u.close.port = evtchn;
622 if (HYPERVISOR_event_channel_op(&op) != 0) 622 if (HYPERVISOR_event_channel_op(&op) != 0)
623 panic("Failed to unbind virtual IRQ %d\n", virq); 623 panic("Failed to unbind virtual IRQ %d\n", virq);
624 624
625 if (virq == VIRQ_TIMER) { 625 if (virq == VIRQ_TIMER) {
626 virq_timer_to_evtch[ci->ci_cpuid] = -1; 626 virq_timer_to_evtch[ci->ci_vcpuid] = -1;
627 } else { 627 } else {
628 virq_to_evtch[virq] = -1; 628 virq_to_evtch[virq] = -1;
629 } 629 }
630 } 630 }
631 631
632 mutex_spin_exit(&evtchn_lock); 632 mutex_spin_exit(&evtchn_lock);
633 633
634 return evtchn; 634 return evtchn;
635} 635}
636 636
637#if NPCI > 0 || NISA > 0 637#if NPCI > 0 || NISA > 0
638int 638int
639get_pirq_to_evtch(int pirq) 639get_pirq_to_evtch(int pirq)

cvs diff -r1.73.2.5 -r1.73.2.6 src/sys/arch/xen/xen/hypervisor.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/hypervisor.c 2020/04/16 20:21:04 1.73.2.5
+++ src/sys/arch/xen/xen/hypervisor.c 2020/04/18 15:06:18 1.73.2.6
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: hypervisor.c,v 1.73.2.5 2020/04/16 20:21:04 bouyer Exp $ */ 1/* $NetBSD: hypervisor.c,v 1.73.2.6 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2005 Manuel Bouyer. 4 * Copyright (c) 2005 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -43,47 +43,48 @@ @@ -43,47 +43,48 @@
43 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 43 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
44 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 44 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
45 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 45 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
46 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 46 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
47 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 47 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 48 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 49 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
51 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 51 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */ 52 */
53 53
54 54
55#include <sys/cdefs.h> 55#include <sys/cdefs.h>
56__KERNEL_RCSID(0, "$NetBSD: hypervisor.c,v 1.73.2.5 2020/04/16 20:21:04 bouyer Exp $"); 56__KERNEL_RCSID(0, "$NetBSD: hypervisor.c,v 1.73.2.6 2020/04/18 15:06:18 bouyer Exp $");
57 57
58#include <sys/param.h> 58#include <sys/param.h>
59#include <sys/systm.h> 59#include <sys/systm.h>
60#include <sys/device.h> 60#include <sys/device.h>
61#include <sys/sysctl.h> 61#include <sys/sysctl.h>
62 62
63#include "xenbus.h" 63#include "xenbus.h"
64#include "xencons.h" 64#include "xencons.h"
65#include "isa.h" 65#include "isa.h"
66#include "pci.h" 66#include "pci.h"
67#include "acpica.h" 67#include "acpica.h"
68 68
69#include "opt_xen.h" 69#include "opt_xen.h"
70#include "opt_mpbios.h" 70#include "opt_mpbios.h"
71 71
72#include <xen/xen.h> 72#include <xen/xen.h>
73#include <xen/hypervisor.h> 73#include <xen/hypervisor.h>
74#include <xen/evtchn.h> 74#include <xen/evtchn.h>
75#include <xen/include/public/version.h> 75#include <xen/include/public/version.h>
76#include <x86//pio.h> 76#include <x86/pio.h>
 77#include <x86/machdep.h>
77 78
78#include <sys/cpu.h> 79#include <sys/cpu.h>
79#include <sys/dirent.h> 80#include <sys/dirent.h>
80#include <sys/stat.h> 81#include <sys/stat.h>
81#include <sys/tree.h> 82#include <sys/tree.h>
82#include <sys/vnode.h> 83#include <sys/vnode.h>
83#include <miscfs/specfs/specdev.h> 84#include <miscfs/specfs/specdev.h>
84#include <miscfs/kernfs/kernfs.h> 85#include <miscfs/kernfs/kernfs.h>
85#include <xen/kernfs_machdep.h> 86#include <xen/kernfs_machdep.h>
86#include <dev/isa/isavar.h> 87#include <dev/isa/isavar.h>
87#include <xen/granttables.h> 88#include <xen/granttables.h>
88#include <xen/vcpuvar.h> 89#include <xen/vcpuvar.h>
89#if NPCI > 0 90#if NPCI > 0
@@ -174,331 +175,355 @@ struct x86_isa_chipset x86_isa_chipset; @@ -174,331 +175,355 @@ struct x86_isa_chipset x86_isa_chipset;
174typedef void (vector)(void); 175typedef void (vector)(void);
175extern vector IDTVEC(syscall); 176extern vector IDTVEC(syscall);
176extern vector IDTVEC(syscall32); 177extern vector IDTVEC(syscall32);
177extern vector IDTVEC(osyscall); 178extern vector IDTVEC(osyscall);
178extern vector *x86_exceptions[]; 179extern vector *x86_exceptions[];
179 180
180extern vector IDTVEC(hypervisor_pvhvm_callback); 181extern vector IDTVEC(hypervisor_pvhvm_callback);
181extern volatile struct xencons_interface *xencons_interface; /* XXX */ 182extern volatile struct xencons_interface *xencons_interface; /* XXX */
182extern struct xenstore_domain_interface *xenstore_interface; /* XXX */ 183extern struct xenstore_domain_interface *xenstore_interface; /* XXX */
183 184
184volatile shared_info_t *HYPERVISOR_shared_info __read_mostly; 185volatile shared_info_t *HYPERVISOR_shared_info __read_mostly;
185paddr_t HYPERVISOR_shared_info_pa; 186paddr_t HYPERVISOR_shared_info_pa;
186union start_info_union start_info_union __aligned(PAGE_SIZE); 187union start_info_union start_info_union __aligned(PAGE_SIZE);
187#endif 
188 
189extern void (*delay_func)(unsigned int); 
190extern void (*initclock_func)(void); 
191 188
 189static int xen_hvm_vec = 0;
 190#endif
192 191
193int xen_version; 192int xen_version;
194 193
195/* power management, for save/restore */ 194/* power management, for save/restore */
196static bool hypervisor_suspend(device_t, const pmf_qual_t *); 195static bool hypervisor_suspend(device_t, const pmf_qual_t *);
197static bool hypervisor_resume(device_t, const pmf_qual_t *); 196static bool hypervisor_resume(device_t, const pmf_qual_t *);
198 197
199/* from FreeBSD */ 198/* from FreeBSD */
200#define XEN_MAGIC_IOPORT 0x10 199#define XEN_MAGIC_IOPORT 0x10
201enum { 200enum {
202 XMI_MAGIC = 0x49d2, 201 XMI_MAGIC = 0x49d2,
203 XMI_UNPLUG_IDE_DISKS = 0x01, 202 XMI_UNPLUG_IDE_DISKS = 0x01,
204 XMI_UNPLUG_NICS = 0x02, 203 XMI_UNPLUG_NICS = 0x02,
205 XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04 204 XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
206};  205};
207 206
208/* 207
209 * Probe for the hypervisor; always succeeds. 208#ifdef XENPVHVM
210 */ 209
 210static bool
 211xen_check_hypervisordev(void)
 212{
 213 extern struct cfdata cfdata[];
 214 for (int i = 0; cfdata[i].cf_name != NULL; i++) {
 215 if (strcasecmp("hypervisor", cfdata[i].cf_name) == 0) {
 216 switch(cfdata[i].cf_fstate) {
 217 case FSTATE_NOTFOUND:
 218 case FSTATE_FOUND:
 219 case FSTATE_STAR:
 220 printf("xen_check_hypervisordev: enabled\n");
 221 return true;
 222 default:
 223 printf("xen_check_hypervisordev: disabled\n");
 224 return false;
 225 }
 226 }
 227 }
 228 printf("xen_check_hypervisordev: notfound\n");
 229 return 0;
 230}
211int 231int
212hypervisor_match(device_t parent, cfdata_t match, void *aux) 232xen_hvm_init(void)
213{ 233{
214 struct hypervisor_attach_args *haa = aux; 234 extern vaddr_t hypercall_page;
215 235 u_int descs[4];
216 /* Attach path sanity check */ 
217 if (strncmp(haa->haa_busname, "hypervisor", sizeof("hypervisor")) != 0) 
218 return 0; 
219 236
220#if defined(XENPVHVM) 
221 /* 237 /*
222 * The strategy here is to setup hypercall and all PVHVM 238 * We need to setup the HVM interfaces early, so that we can
223 * interfaces on match, or fail to match. 239 * properly setup the CPUs later (especially, all CPUs needs to
224 * Ideally this should happen under attach, but it's too late 240 * run x86_cpuid() locally to get their vcpuid.
225 * then and there's no way to bailout. 
226 * 241 *
227 * If match fails, hypervisor does not attach, and the domain 242 * if everything goes fine, we switch vm_guest to VM_GUEST_XENPVHVM
228 * can boot with the minimal PC AT ISA configuration just 
229 * enough to attach wd(4) and mount the rootfs. 
230 */ 243 */
231 int vec; 
232 extern vaddr_t hypercall_page; 
233 244
234 if (vm_guest == VM_GUEST_XENHVM) { 245 if (vm_guest != VM_GUEST_XENHVM)
235 aprint_normal("%s: Identified Guest XEN in HVM mode.\n", 246 return 0;
236 haa->haa_busname); 
237 
238 u_int descs[4]; 
239 x86_cpuid(XEN_CPUID_LEAF(2), descs); 
240 
241 /*  
242 * Given 32 bytes per hypercall stub, and an optimistic number 
243 * of 100 hypercalls ( the current max is 55), there shouldn't 
244 * be any reason to spill over the arbitrary number of 1 
245 * hypercall page. This is what we allocate in locore.S 
246 * anyway. Make sure the allocation matches the registration. 
247 */ 
248 247
249 KASSERT(descs[0] == 1); 248 /* check if hypervisor was disabled with userconf */
 249 if (!xen_check_hypervisordev())
 250 return 0;
250 251
251 /* XXX: vtophys(&hypercall_page) */ 252 aprint_normal("Identified Guest XEN in HVM mode.\n");
252 wrmsr(descs[1], (uintptr_t)&hypercall_page - KERNBASE); 
253 253
254 } else { 254 x86_cpuid(XEN_CPUID_LEAF(2), descs);
255 return 0; 
256 } 
257 255
258 if (-1 != HYPERVISOR_xen_version(XENVER_version, NULL)) { 256 /*
259 printf("%s: detected functional hypercall page.\n", 257 * Given 32 bytes per hypercall stub, and an optimistic number
260 haa->haa_busname); 258 * of 100 hypercalls ( the current max is 55), there shouldn't
 259 * be any reason to spill over the arbitrary number of 1
 260 * hypercall page. This is what we allocate in locore.S
 261 * anyway. Make sure the allocation matches the registration.
 262 */
 263
 264 KASSERT(descs[0] == 1);
261 265
 266 /* XXX: vtophys(&hypercall_page) */
 267 wrmsr(descs[1], (uintptr_t)&hypercall_page - KERNBASE);
 268
 269 if (-1 != HYPERVISOR_xen_version(XENVER_version, NULL)) {
 270 printf("Xen HVM: detected functional hypercall page.\n");
262 xen_init_features(); 271 xen_init_features();
263 } 272 }
264 273
265 /* Init various preset boot time data structures */ 274 /* Init various preset boot time data structures */
266 275
267 /* XEN xenstore shared page address, event channel */ 276 /* XEN xenstore shared page address, event channel */
268 struct xen_hvm_param xen_hvm_param; 277 struct xen_hvm_param xen_hvm_param;
269 278
270 xen_hvm_param.domid = DOMID_SELF; 279 xen_hvm_param.domid = DOMID_SELF;
271 xen_hvm_param.index = HVM_PARAM_STORE_PFN; 280 xen_hvm_param.index = HVM_PARAM_STORE_PFN;
272  281
273 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) { 282 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
274 aprint_error("%s: Unable to obtain xenstore page address\n", 283 aprint_error(
275 haa->haa_busname); 284 "Xen HVM: Unable to obtain xenstore page address\n");
276 return 0; 285 return 0;
277 } 286 }
278 287
279 /* Re-use PV field */ 288 /* Re-use PV field */
280 xen_start_info.store_mfn = xen_hvm_param.value; 289 xen_start_info.store_mfn = xen_hvm_param.value;
281 290
282 pmap_kenter_pa((vaddr_t) xenstore_interface, ptoa(xen_start_info.store_mfn), 291 pmap_kenter_pa((vaddr_t) xenstore_interface, ptoa(xen_start_info.store_mfn),
283 VM_PROT_READ|VM_PROT_WRITE, 0); 292 VM_PROT_READ|VM_PROT_WRITE, 0);
284 293
285 xen_hvm_param.domid = DOMID_SELF; 294 xen_hvm_param.domid = DOMID_SELF;
286 xen_hvm_param.index = HVM_PARAM_STORE_EVTCHN; 295 xen_hvm_param.index = HVM_PARAM_STORE_EVTCHN;
287 296
288 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) { 297 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
289 aprint_error("%s: Unable to obtain xenstore event channel\n", 298 aprint_error(
290 haa->haa_busname); 299 "Xen HVM: Unable to obtain xenstore event channel\n");
291 return 0; 300 return 0;
292 } 301 }
293 302
294 xen_start_info.store_evtchn = xen_hvm_param.value; 303 xen_start_info.store_evtchn = xen_hvm_param.value;
295 304
296 xen_hvm_param.domid = DOMID_SELF; 305 xen_hvm_param.domid = DOMID_SELF;
297 xen_hvm_param.index = HVM_PARAM_CONSOLE_PFN; 306 xen_hvm_param.index = HVM_PARAM_CONSOLE_PFN;
298  307
299 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) { 308 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
300 aprint_error("%s: Unable to obtain xencons page address\n", 309 aprint_error(
301 haa->haa_busname); 310 "Xen HVM: Unable to obtain xencons page address\n");
302 return 0; 311 return 0;
303 } 312 }
304 313
305 /* Re-use PV field */ 314 /* Re-use PV field */
306 xen_start_info.console.domU.mfn = xen_hvm_param.value; 315 xen_start_info.console.domU.mfn = xen_hvm_param.value;
307 316
308 pmap_kenter_pa((vaddr_t) xencons_interface, ptoa(xen_start_info.console.domU.mfn), 317 pmap_kenter_pa((vaddr_t) xencons_interface, ptoa(xen_start_info.console.domU.mfn),
309 VM_PROT_READ|VM_PROT_WRITE, 0); 318 VM_PROT_READ|VM_PROT_WRITE, 0);
310 319
311 xen_hvm_param.domid = DOMID_SELF; 320 xen_hvm_param.domid = DOMID_SELF;
312 xen_hvm_param.index = HVM_PARAM_CONSOLE_EVTCHN; 321 xen_hvm_param.index = HVM_PARAM_CONSOLE_EVTCHN;
313 322
314 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) { 323 if ( HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) {
315 aprint_error("%s: Unable to obtain xencons event channel\n", 324 aprint_error(
316 haa->haa_busname); 325 "Xen HVM: Unable to obtain xencons event channel\n");
317 return 0; 326 return 0;
318 } 327 }
319 328
320 xen_start_info.console.domU.evtchn = xen_hvm_param.value; 329 xen_start_info.console.domU.evtchn = xen_hvm_param.value;
321 330
322 /* HYPERVISOR_shared_info */ 331 /* HYPERVISOR_shared_info */
323 struct xen_add_to_physmap xmap = { 332 struct xen_add_to_physmap xmap = {
324 .domid = DOMID_SELF, 333 .domid = DOMID_SELF,
325 .space = XENMAPSPACE_shared_info, 334 .space = XENMAPSPACE_shared_info,
326 .idx = 0, /* Important - XEN checks for this */ 335 .idx = 0, /* Important - XEN checks for this */
327 .gpfn = atop(HYPERVISOR_shared_info_pa) 336 .gpfn = atop(HYPERVISOR_shared_info_pa)
328 }; 337 };
329 338
330 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0) { 339 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xmap) < 0) {
331 aprint_error("%s: Unable to register HYPERVISOR_shared_info\n", 340 aprint_error(
332 haa->haa_busname); 341 "Xen HVM: Unable to register HYPERVISOR_shared_info\n");
333 return 0; 342 return 0;
334 } 343 }
335 344
336 /* HYPERVISOR_shared_info va,pa has been allocated in pmap_bootstrap() */ 345 /* HYPERVISOR_shared_info va,pa has been allocated in pmap_bootstrap() */
337 pmap_kenter_pa((vaddr_t) HYPERVISOR_shared_info, 346 pmap_kenter_pa((vaddr_t) HYPERVISOR_shared_info,
338 HYPERVISOR_shared_info_pa, VM_PROT_READ|VM_PROT_WRITE, 0); 347 HYPERVISOR_shared_info_pa, VM_PROT_READ|VM_PROT_WRITE, 0);
339 348
340 cpu_info_primary.ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[0]; 
341 
342 /* 349 /*
343 * First register callback: here's why 350 * First register callback: here's why
344 * http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=7b5b8ca7dffde866d851f0b87b994e0b13e5b867 351 * http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=7b5b8ca7dffde866d851f0b87b994e0b13e5b867
345 */ 352 */
346 353
347 /* 354 /*
348 * Check for XENFEAT_hvm_callback_vector. Can't proceed 355 * Check for XENFEAT_hvm_callback_vector. Can't proceed
349 * without it. 356 * without it.
350 */ 357 */
351 if (!xen_feature(XENFEAT_hvm_callback_vector)) { 358 if (!xen_feature(XENFEAT_hvm_callback_vector)) {
352 aprint_error("%s: XENFEAT_hvm_callback_vector" 359 aprint_error("Xen HVM: XENFEAT_hvm_callback_vector"
353 "not available, cannot proceed", haa->haa_busname); 360 "not available, cannot proceed");
354  
355 return 0; 361 return 0;
356 } 362 }
357 363
358 /* Register event callback handler. */ 364 /*
 365 * prepare vector.
 366 * We don't really care where it is, as long as it's free
 367 */
 368 xen_hvm_vec = idt_vec_alloc(129, 255);
 369 idt_vec_set(xen_hvm_vec, &IDTVEC(hypervisor_pvhvm_callback));
359 370
360 /* We don't really care where it is, as long as it's free */ 371 events_default_setup();
361 vec = idt_vec_alloc(129, 255); 
362 372
363 idt_vec_set(vec, &IDTVEC(hypervisor_pvhvm_callback)); 373 delay_func = xen_delay;
 374 x86_initclock_func = xen_initclocks;
 375 x86_cpu_initclock_func = xen_cpu_initclocks;
 376 x86_cpu_idle_set(x86_cpu_idle_xen, "xen", true);
 377 vm_guest = VM_GUEST_XENPVHVM; /* Be more specific */
 378 return 1;
 379}
364 380
365 cpu_init_idt(); /* XXX remove and use only native one below ? */ 381int
 382xen_hvm_init_cpu(struct cpu_info *ci)
 383{
 384 u_int32_t descs[4];
 385 struct xen_hvm_param xen_hvm_param;
366 386
367 xen_hvm_param.domid = DOMID_SELF; 387 if (vm_guest != VM_GUEST_XENPVHVM)
368 xen_hvm_param.index = HVM_PARAM_CALLBACK_IRQ; 388 return 0;
369 389
370 /* val[63:56] = 2, val[7:0] = vec */ 390 KASSERT(ci == curcpu());
371 xen_hvm_param.value = ((int64_t)0x2 << 56) | vec; 
372 391
373 if (HYPERVISOR_hvm_op(HVMOP_set_param, &xen_hvm_param) < 0) { 392 descs[0] = 0;
374 aprint_error("%s: Unable to register event callback vector\n", 393 x86_cpuid(XEN_CPUID_LEAF(4), descs);
375 haa->haa_busname); 394 if (!(descs[0] & XEN_HVM_CPUID_VCPU_ID_PRESENT)) {
 395 aprint_error_dev(ci->ci_dev, "Xen HVM: can't get VCPU id\n");
 396 vm_guest = VM_GUEST_XENHVM;
376 return 0; 397 return 0;
377 } 398 }
 399 printf("cpu %s ci_acpiid %d vcpuid %d domid %d\n",
 400 device_xname(ci->ci_dev), ci->ci_acpiid, descs[1], descs[2]);
 401
 402 ci->ci_vcpuid = descs[1];
 403 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[ci->ci_vcpuid];
 404
 405 /* Register event callback handler. */
378 406
379 /* Print out value. */ 
380 xen_hvm_param.domid = DOMID_SELF; 407 xen_hvm_param.domid = DOMID_SELF;
381 xen_hvm_param.index = HVM_PARAM_CALLBACK_IRQ; 408 xen_hvm_param.index = HVM_PARAM_CALLBACK_IRQ;
382 xen_hvm_param.value = 0; 
383 409
384 if (HYPERVISOR_hvm_op(HVMOP_get_param, &xen_hvm_param) < 0) { 410 /* val[63:56] = 2, val[7:0] = vec */
385 printf("%s: Unable to get event callback vector\n", 411 xen_hvm_param.value = ((int64_t)0x2 << 56) | xen_hvm_vec;
386 haa->haa_busname); 412
 413 if (HYPERVISOR_hvm_op(HVMOP_set_param, &xen_hvm_param) < 0) {
 414 aprint_error_dev(ci->ci_dev,
 415 "Xen HVM: Unable to register event callback vector\n");
 416 vm_guest = VM_GUEST_XENHVM;
387 return 0; 417 return 0;
388 } 418 }
389 419
390 /* 420 return 1;
391 * Afterwards vector callback is done, register VCPU info 421}
392 * page. Here's why: 
393 * http://xenbits.xen.org/gitweb/?p=xen.git;a=commit;h=7b5b8ca7dffde866d851f0b87b994e0b13e5b867 
394 * XXX: Ideally this should happen at vcpu attach. 
395 */ 
396 struct vcpu_register_vcpu_info vrvi; 
397 422
398 paddr_t vcpu_info_pa = HYPERVISOR_shared_info_pa + 423#endif /* XENPVHVM */
399 offsetof(struct shared_info, vcpu_info); 424
400  425/*
401 vrvi.mfn = atop(vcpu_info_pa); 426 * Probe for the hypervisor; always succeeds.
402 vrvi.offset = vcpu_info_pa - trunc_page(vcpu_info_pa); 427 */
 428int
 429hypervisor_match(device_t parent, cfdata_t match, void *aux)
 430{
 431 struct hypervisor_attach_args *haa = aux;
403 432
404 if (HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, curcpu()->ci_cpuid /* VCPU0 */, 433 /* Attach path sanity check */
405 &vrvi) < 0) { 434 if (strncmp(haa->haa_busname, "hypervisor", sizeof("hypervisor")) != 0)
406 aprint_error("%s: Unable to register vcpu info page\n", 
407 haa->haa_busname); 
408 return 0; 435 return 0;
409 } 
410 436
 437
 438#ifdef XENPVHVM
 439 if (vm_guest != VM_GUEST_XENPVHVM)
 440 return 0;
 441#endif
 442 /* If we got here, it must mean we matched */
 443 return 1;
 444}
 445
 446#ifdef MULTIPROCESSOR
 447static int
 448hypervisor_vcpu_print(void *aux, const char *parent)
 449{
 450 /* Unconfigured cpus are ignored quietly. */
 451 return (QUIET);
 452}
 453#endif /* MULTIPROCESSOR */
 454
 455/*
 456 * Attach the hypervisor.
 457 */
 458void
 459hypervisor_attach(device_t parent, device_t self, void *aux)
 460{
 461
 462#if NPCI >0
 463#ifdef PCI_BUS_FIXUP
 464 int pci_maxbus = 0;
 465#endif
 466#endif /* NPCI */
 467 union hypervisor_attach_cookie hac;
 468 char xen_extra_version[XEN_EXTRAVERSION_LEN];
 469 static char xen_version_string[20];
 470 int rc;
 471 const struct sysctlnode *node = NULL;
 472
 473#ifdef XENPVHVM
411 /* 474 /*
412 * Set the boot device to xbd0a. 475 * Set the boot device to xbd0a.
413 * We claim this is a reasonable default which is picked up 476 * We claim this is a reasonable default which is picked up
414 * later as the rootfs device. 477 * later as the rootfs device.
415 * 478 *
416 * We need to do this because the HVM domain loader uses the 479 * We need to do this because the HVM domain loader uses the
417 * regular BIOS based native boot(8) procedure, which sets the 480 * regular BIOS based native boot(8) procedure, which sets the
418 * boot device to the native driver/partition of whatever was 481 * boot device to the native driver/partition of whatever was
419 * detected by the native bootloader. 482 * detected by the native bootloader.
420 */ 483 */
421 484
422 struct btinfo_rootdevice bi; 485 struct btinfo_rootdevice bi;
423 snprintf(bi.devname, 6, "xbd0a"); 486 snprintf(bi.devname, 6, "xbd0a");
424 bi.common.type = BTINFO_ROOTDEVICE; 487 bi.common.type = BTINFO_ROOTDEVICE;
425 bi.common.len = sizeof(struct btinfo_rootdevice); 488 bi.common.len = sizeof(struct btinfo_rootdevice);
426 489
427 /* From i386/multiboot.c */ 490 /* From i386/multiboot.c */
428 /* $NetBSD: hypervisor.c,v 1.73.2.5 2020/04/16 20:21:04 bouyer Exp $ */ 491 /* $NetBSD: hypervisor.c,v 1.73.2.6 2020/04/18 15:06:18 bouyer Exp $ */
429 int i, len; 492 int i, len;
430 vaddr_t data; 493 vaddr_t data;
431 extern struct bootinfo bootinfo; 494 extern struct bootinfo bootinfo;
432 struct bootinfo *bip = (struct bootinfo *)&bootinfo; 495 struct bootinfo *bip = (struct bootinfo *)&bootinfo;
433 len = bi.common.len; 496 len = bi.common.len;
434 497
435 data = (vaddr_t)&bip->bi_data; 498 data = (vaddr_t)&bip->bi_data;
436 for (i = 0; i < bip->bi_nentries; i++) { 499 for (i = 0; i < bip->bi_nentries; i++) {
437 struct btinfo_common *tmp; 500 struct btinfo_common *tmp;
438 501
439 tmp = (struct btinfo_common *)data; 502 tmp = (struct btinfo_common *)data;
440 data += tmp->len; 503 data += tmp->len;
441 } 504 }
442 if (data + len < (vaddr_t)&bip->bi_data + sizeof(bip->bi_data)) { 505 if (data + len < (vaddr_t)&bip->bi_data + sizeof(bip->bi_data)) {
443 memcpy((void *)data, &bi, len); 506 memcpy((void *)data, &bi, len);
444 bip->bi_nentries++; 507 bip->bi_nentries++;
445 } 508 }
446 509
447 /* disable emulated devices */ 510 /* disable emulated devices */
448 if (inw(XEN_MAGIC_IOPORT) == XMI_MAGIC) { 511 if (inw(XEN_MAGIC_IOPORT) == XMI_MAGIC) {
449 outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS | XMI_UNPLUG_NICS); 512 outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS | XMI_UNPLUG_NICS);
450 } else { 513 } else {
451 aprint_error("%s: Unable to disable emulated devices\n", 514 aprint_error_dev(self, "Unable to disable emulated devices\n");
452 haa->haa_busname); 
453 } 515 }
454 events_default_setup(); 
455 delay_func = xen_delay; 
456 initclock_func = xen_initclocks; 
457 vm_guest = VM_GUEST_XENPVHVM; /* Be more specific */ 
458 
459#endif /* XENPVHVM */ 516#endif /* XENPVHVM */
460 
461 /* If we got here, it must mean we matched */ 
462 return 1; 
463} 
464 
465#ifdef MULTIPROCESSOR 
466static int 
467hypervisor_vcpu_print(void *aux, const char *parent) 
468{ 
469 /* Unconfigured cpus are ignored quietly. */ 
470 return (QUIET); 
471} 
472#endif /* MULTIPROCESSOR */ 
473 
474/* 
475 * Attach the hypervisor. 
476 */ 
477void 
478hypervisor_attach(device_t parent, device_t self, void *aux) 
479{ 
480 
481#if NPCI >0 
482#ifdef PCI_BUS_FIXUP 
483 int pci_maxbus = 0; 
484#endif 
485#endif /* NPCI */ 
486 union hypervisor_attach_cookie hac; 
487 char xen_extra_version[XEN_EXTRAVERSION_LEN]; 
488 static char xen_version_string[20]; 
489 int rc; 
490 const struct sysctlnode *node = NULL; 
491 
492 xenkernfs_init(); 517 xenkernfs_init();
493 518
494 xen_version = HYPERVISOR_xen_version(XENVER_version, NULL); 519 xen_version = HYPERVISOR_xen_version(XENVER_version, NULL);
495 memset(xen_extra_version, 0, sizeof(xen_extra_version)); 520 memset(xen_extra_version, 0, sizeof(xen_extra_version));
496 HYPERVISOR_xen_version(XENVER_extraversion, xen_extra_version); 521 HYPERVISOR_xen_version(XENVER_extraversion, xen_extra_version);
497 rc = snprintf(xen_version_string, 20, "%d.%d%s", XEN_MAJOR(xen_version), 522 rc = snprintf(xen_version_string, 20, "%d.%d%s", XEN_MAJOR(xen_version),
498 XEN_MINOR(xen_version), xen_extra_version); 523 XEN_MINOR(xen_version), xen_extra_version);
499 aprint_normal(": Xen version %s\n", xen_version_string); 524 aprint_normal(": Xen version %s\n", xen_version_string);
500 if (rc >= 20) 525 if (rc >= 20)
501 aprint_debug(": xen_version_string truncated\n"); 526 aprint_debug(": xen_version_string truncated\n");
502 527
503 sysctl_createv(NULL, 0, NULL, &node, 0, 528 sysctl_createv(NULL, 0, NULL, &node, 0,
504 CTLTYPE_NODE, "xen", 529 CTLTYPE_NODE, "xen",

cvs diff -r1.1.2.2 -r1.1.2.3 src/sys/arch/xen/xen/xen_clock.c (expand / switch to unified diff)

--- src/sys/arch/xen/xen/xen_clock.c 2020/04/16 20:21:44 1.1.2.2
+++ src/sys/arch/xen/xen/xen_clock.c 2020/04/18 15:06:18 1.1.2.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xen_clock.c,v 1.1.2.2 2020/04/16 20:21:44 bouyer Exp $ */ 1/* $NetBSD: xen_clock.c,v 1.1.2.3 2020/04/18 15:06:18 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc. 4 * Copyright (c) 2017, 2018 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Taylor R. Campbell. 8 * by Taylor R. Campbell.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -26,27 +26,27 @@ @@ -26,27 +26,27 @@
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include "opt_xen.h" 32#include "opt_xen.h"
33 33
34#ifndef XEN_CLOCK_DEBUG 34#ifndef XEN_CLOCK_DEBUG
35#define XEN_CLOCK_DEBUG 0 35#define XEN_CLOCK_DEBUG 0
36#endif 36#endif
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: xen_clock.c,v 1.1.2.2 2020/04/16 20:21:44 bouyer Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: xen_clock.c,v 1.1.2.3 2020/04/18 15:06:18 bouyer Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/types.h> 42#include <sys/types.h>
43#include <sys/atomic.h> 43#include <sys/atomic.h>
44#include <sys/callout.h> 44#include <sys/callout.h>
45#include <sys/cpu.h> 45#include <sys/cpu.h>
46#include <sys/device.h> 46#include <sys/device.h>
47#include <sys/evcnt.h> 47#include <sys/evcnt.h>
48#include <sys/intr.h> 48#include <sys/intr.h>
49#include <sys/kernel.h> 49#include <sys/kernel.h>
50#include <sys/lwp.h> 50#include <sys/lwp.h>
51#include <sys/proc.h> 51#include <sys/proc.h>
52#include <sys/sysctl.h> 52#include <sys/sysctl.h>
@@ -105,40 +105,26 @@ static volatile uint64_t xen_global_syst @@ -105,40 +105,26 @@ static volatile uint64_t xen_global_syst
105 * NetBSD ticks, set the Xen hypervisor's wall clock time. 105 * NetBSD ticks, set the Xen hypervisor's wall clock time.
106 */ 106 */
107static struct { 107static struct {
108 struct callout ch; 108 struct callout ch;
109 int ticks; 109 int ticks;
110} xen_timepush; 110} xen_timepush;
111 111
112static void xen_timepush_init(void); 112static void xen_timepush_init(void);
113static void xen_timepush_intr(void *); 113static void xen_timepush_intr(void *);
114static int sysctl_xen_timepush(SYSCTLFN_ARGS); 114static int sysctl_xen_timepush(SYSCTLFN_ARGS);
115#endif 115#endif
116 116
117/* 117/*
118 * idle_block() 
119 * 
120 * Called from the idle loop when we have nothing to do but wait 
121 * for an interrupt. 
122 */ 
123void 
124idle_block(void) 
125{ 
126 KASSERT(curcpu()->ci_ipending == 0); 
127 HYPERVISOR_block(); 
128 KASSERT(curcpu()->ci_ipending == 0); 
129} 
130 
131/* 
132 * xen_rdtsc() 118 * xen_rdtsc()
133 * 119 *
134 * Read the local pCPU's tsc. 120 * Read the local pCPU's tsc.
135 */ 121 */
136static inline uint64_t 122static inline uint64_t
137xen_rdtsc(void) 123xen_rdtsc(void)
138{ 124{
139 uint32_t lo, hi; 125 uint32_t lo, hi;
140 126
141 asm volatile("rdtsc" : "=a"(lo), "=d"(hi)); 127 asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
142 128
143 return ((uint64_t)hi << 32) | lo; 129 return ((uint64_t)hi << 32) | lo;
144} 130}
@@ -511,26 +497,31 @@ xen_get_timecount(struct timecounter *tc @@ -511,26 +497,31 @@ xen_get_timecount(struct timecounter *tc
511/* 497/*
512 * xen_delay(n) 498 * xen_delay(n)
513 * 499 *
514 * Wait approximately n microseconds. 500 * Wait approximately n microseconds.
515 */ 501 */
516void 502void
517xen_delay(unsigned n) 503xen_delay(unsigned n)
518{ 504{
519 int bound; 505 int bound;
520 506
521 /* Bind to the CPU so we don't compare tsc on different CPUs. */ 507 /* Bind to the CPU so we don't compare tsc on different CPUs. */
522 bound = curlwp_bind(); 508 bound = curlwp_bind();
523 509
 510 if (curcpu()->ci_vcpu == NULL) {
 511 curlwp_bindx(bound);
 512 return;
 513 }
 514
524 /* Short wait (<500us) or long wait? */ 515 /* Short wait (<500us) or long wait? */
525 if (n < 500000) { 516 if (n < 500000) {
526 /* 517 /*
527 * Xen system time is not precise enough for short 518 * Xen system time is not precise enough for short
528 * delays, so use the tsc instead. 519 * delays, so use the tsc instead.
529 * 520 *
530 * We work with the current tsc frequency, and figure 521 * We work with the current tsc frequency, and figure
531 * that if it changes while we're delaying, we've 522 * that if it changes while we're delaying, we've
532 * probably delayed long enough -- up to 500us. 523 * probably delayed long enough -- up to 500us.
533 * 524 *
534 * We do not use cpu_frequency(ci), which uses a 525 * We do not use cpu_frequency(ci), which uses a
535 * quantity detected at boot time, and which may have 526 * quantity detected at boot time, and which may have
536 * changed by now if Xen has migrated this vCPU to 527 * changed by now if Xen has migrated this vCPU to
@@ -645,27 +636,27 @@ xen_resumeclocks(struct cpu_info *ci) @@ -645,27 +636,27 @@ xen_resumeclocks(struct cpu_info *ci)
645 /* XXX sketchy function pointer cast -- fix the API, please */ 636 /* XXX sketchy function pointer cast -- fix the API, please */
646 if (event_set_handler(evtch, 637 if (event_set_handler(evtch,
647 __FPTRCAST(int (*)(void *), xen_timer_handler), 638 __FPTRCAST(int (*)(void *), xen_timer_handler),
648 ci, IPL_CLOCK, NULL, intr_xname, true, false) != 0) 639 ci, IPL_CLOCK, NULL, intr_xname, true, false) != 0)
649 panic("failed to establish timer interrupt handler"); 640 panic("failed to establish timer interrupt handler");
650 641
651 hypervisor_unmask_event(evtch); 642 hypervisor_unmask_event(evtch);
652 643
653 aprint_verbose("Xen %s: using event channel %d\n", intr_xname, evtch); 644 aprint_verbose("Xen %s: using event channel %d\n", intr_xname, evtch);
654 645
655 /* Disarm the periodic timer on Xen>=3.1 which is allegedly buggy. */ 646 /* Disarm the periodic timer on Xen>=3.1 which is allegedly buggy. */
656 if (XEN_MAJOR(xen_version) > 3 || XEN_MINOR(xen_version) > 0) { 647 if (XEN_MAJOR(xen_version) > 3 || XEN_MINOR(xen_version) > 0) {
657 error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, 648 error = HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer,
658 ci->ci_cpuid, NULL); 649 ci->ci_vcpuid, NULL);
659 KASSERT(error == 0); 650 KASSERT(error == 0);
660 } 651 }
661 652
662 /* Pretend the last hardclock happened right now. */ 653 /* Pretend the last hardclock happened right now. */
663 ci->ci_xen_hardclock_systime_ns = xen_vcputime_systime_ns(); 654 ci->ci_xen_hardclock_systime_ns = xen_vcputime_systime_ns();
664 655
665 /* Arm the one-shot timer. */ 656 /* Arm the one-shot timer. */
666 error = HYPERVISOR_set_timer_op(ci->ci_xen_hardclock_systime_ns + 657 error = HYPERVISOR_set_timer_op(ci->ci_xen_hardclock_systime_ns +
667 NS_PER_TICK); 658 NS_PER_TICK);
668 KASSERT(error == 0); 659 KASSERT(error == 0);
669 660
670 /* We'd better not have switched CPUs. */ 661 /* We'd better not have switched CPUs. */
671 KASSERT(ci == curcpu()); 662 KASSERT(ci == curcpu());
@@ -728,74 +719,83 @@ again: @@ -728,74 +719,83 @@ again:
728 * time is in the past, so update our idea of what the Xen 719 * time is in the past, so update our idea of what the Xen
729 * system time is and try again. 720 * system time is and try again.
730 */ 721 */
731 next = ci->ci_xen_hardclock_systime_ns + NS_PER_TICK; 722 next = ci->ci_xen_hardclock_systime_ns + NS_PER_TICK;
732 error = HYPERVISOR_set_timer_op(next); 723 error = HYPERVISOR_set_timer_op(next);
733 if (error) 724 if (error)
734 goto again; 725 goto again;
735 726
736 /* Success! */ 727 /* Success! */
737 return 0; 728 return 0;
738} 729}
739 730
740/* 731/*
741 * xen_initclocks() 732 * xen_cpu_initclocks()
742 * 733 *
743 * Initialize the Xen clocks on the current CPU. 734 * Initialize the Xen clocks on the current CPU.
744 */ 735 */
745void 736void
746xen_initclocks(void) 737xen_cpu_initclocks(void)
747{ 738{
748 struct cpu_info *ci = curcpu(); 739 struct cpu_info *ci = curcpu();
749 740
750 /* If this is the primary CPU, do global initialization first. */ 741 /* If this is the primary CPU, do global initialization first. */
751 if (ci == &cpu_info_primary) { 742 if (ci == &cpu_info_primary) {
752 /* Initialize the systemwide Xen timecounter. */ 743 /* Initialize the systemwide Xen timecounter. */
753 tc_init(&xen_timecounter); 744 tc_init(&xen_timecounter);
754 
755#ifdef DOM0OPS 
756 /* 
757 * If this is a privileged dom0, start pushing the wall 
758 * clock time back to the Xen hypervisor. 
759 */ 
760 if (xendomain_is_privileged()) 
761 xen_timepush_init(); 
762#endif 
763 } 745 }
764 746
765 /* Attach the event counters. */ 747 /* Attach the event counters. */
766 evcnt_attach_dynamic(&ci->ci_xen_cpu_tsc_backwards_evcnt, 748 evcnt_attach_dynamic(&ci->ci_xen_cpu_tsc_backwards_evcnt,
767 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 749 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
768 "cpu tsc ran backwards"); 750 "cpu tsc ran backwards");
769 evcnt_attach_dynamic(&ci->ci_xen_tsc_delta_negative_evcnt, 751 evcnt_attach_dynamic(&ci->ci_xen_tsc_delta_negative_evcnt,
770 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 752 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
771 "tsc delta went negative"); 753 "tsc delta went negative");
772 evcnt_attach_dynamic(&ci->ci_xen_raw_systime_wraparound_evcnt, 754 evcnt_attach_dynamic(&ci->ci_xen_raw_systime_wraparound_evcnt,
773 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 755 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
774 "raw systime wrapped around"); 756 "raw systime wrapped around");
775 evcnt_attach_dynamic(&ci->ci_xen_raw_systime_backwards_evcnt, 757 evcnt_attach_dynamic(&ci->ci_xen_raw_systime_backwards_evcnt,
776 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 758 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
777 "raw systime went backwards"); 759 "raw systime went backwards");
778 evcnt_attach_dynamic(&ci->ci_xen_systime_backwards_hardclock_evcnt, 760 evcnt_attach_dynamic(&ci->ci_xen_systime_backwards_hardclock_evcnt,
779 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 761 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
780 "systime went backwards in hardclock"); 762 "systime went backwards in hardclock");
781 evcnt_attach_dynamic(&ci->ci_xen_missed_hardclock_evcnt, 763 evcnt_attach_dynamic(&ci->ci_xen_missed_hardclock_evcnt,
782 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev), 764 EVCNT_TYPE_INTR, NULL, device_xname(ci->ci_dev),
783 "missed hardclock"); 765 "missed hardclock");
784 766
785 /* Fire up the clocks. */ 767 /* Fire up the clocks. */
786 xen_resumeclocks(ci); 768 xen_resumeclocks(ci);
787} 769}
788 770
 771/*
 772 * xen_initclocks()
 773 *
 774 * Initialize the Xen global clock
 775 */
 776void
 777xen_initclocks(void)
 778{
 779#ifdef DOM0OPS
 780 /*
 781 * If this is a privileged dom0, start pushing the wall
 782 * clock time back to the Xen hypervisor.
 783 */
 784 if (xendomain_is_privileged())
 785 xen_timepush_init();
 786#endif
 787}
 788
789#ifdef DOM0OPS 789#ifdef DOM0OPS
790 790
791/* 791/*
792 * xen_timepush_init() 792 * xen_timepush_init()
793 * 793 *
794 * Initialize callout to periodically set Xen hypervisor's wall 794 * Initialize callout to periodically set Xen hypervisor's wall
795 * clock time. 795 * clock time.
796 */ 796 */
797static void 797static void
798xen_timepush_init(void) 798xen_timepush_init(void)
799{ 799{
800 struct sysctllog *log = NULL; 800 struct sysctllog *log = NULL;
801 const struct sysctlnode *node = NULL; 801 const struct sysctlnode *node = NULL;