Thu Jan 28 01:57:31 2021 UTC
Remove x86_genfb_mtrr_init. The PAT (Page Attribute Table) has been available
since the Pentium III, and this code had been #if notyet'd since shortly after
it was introduced.


(jmcneill)
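
For context, a minimal sketch (not part of this commit) of the PAT-era way to
get a write-combined framebuffer mapping on x86: bus_space_map(9) with
BUS_SPACE_MAP_PREFETCHABLE is expected to select write-combining through the
PAT, so no MTRR slot needs to be programmed. The helper name and variables
below are illustrative only, not from the tree.

#include <sys/bus.h>

/* Hypothetical helper: map a framebuffer write-combined via the PAT. */
static int
fb_map_wc(bus_space_tag_t bst, bus_addr_t fb_pa, bus_size_t fb_sz,
    bus_space_handle_t *bshp)
{
        /*
         * BUS_SPACE_MAP_PREFETCHABLE asks the MD code for a
         * write-combined mapping (via the PAT on x86);
         * BUS_SPACE_MAP_LINEAR lets the driver use
         * bus_space_vaddr() for direct pixel access.
         */
        return bus_space_map(bst, fb_pa, fb_sz,
            BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, bshp);
}

Unlike an MTRR, this attribute travels with the mapping itself, so drivers do
not contend for the handful of global variable-range MTRR slots.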
diff -r1.4 -r1.5 src/sys/arch/x86/include/genfb_machdep.h
diff -r1.87 -r1.88 src/sys/arch/x86/pci/pci_machdep.c
diff -r1.15 -r1.16 src/sys/arch/x86/x86/genfb_machdep.c
diff -r1.12 -r1.13 src/sys/arch/x86/x86/hyperv.c

cvs diff -r1.4 -r1.5 src/sys/arch/x86/include/genfb_machdep.h

--- src/sys/arch/x86/include/genfb_machdep.h 2019/11/30 05:28:28 1.4
+++ src/sys/arch/x86/include/genfb_machdep.h 2021/01/28 01:57:31 1.5
@@ -1,38 +1,37 @@
-/* $NetBSD: genfb_machdep.h,v 1.4 2019/11/30 05:28:28 nonaka Exp $ */
+/* $NetBSD: genfb_machdep.h,v 1.5 2021/01/28 01:57:31 jmcneill Exp $ */
 
 /*-
  * Copyright (c) 2009 Jared D. McNeill <jmcneill@invisible.ca>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef _X86_GENFB_MACHDEP_H
 #define _X86_GENFB_MACHDEP_H
 
 int x86_genfb_init(void);
 int x86_genfb_cnattach(void);
-void x86_genfb_mtrr_init(uint64_t, uint32_t);
 void x86_genfb_set_console_dev(device_t);
 void x86_genfb_ddb_trap_callback(int);
 
 #endif /* !_X86_GENFB_MACHDEP_H */
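
For contrast, a hedged sketch of the kind of MTRR-based setup the removed
(and never-enabled) x86_genfb_mtrr_init prototype stood for; this illustrates
the mtrr_set(9) API on an "options MTRR" kernel and is not the verbatim
deleted code. The helper name is hypothetical.

#include <sys/types.h>
#include <machine/mtrr.h>

/* Hypothetical helper: mark a framebuffer range write-combining. */
static void
fb_mtrr_wc(uint64_t physaddr, uint32_t size)
{
#ifdef MTRR
        struct mtrr mtrr;
        int n = 1;

        mtrr.base = physaddr;
        mtrr.len = size;
        mtrr.type = MTRR_TYPE_WC;       /* write-combining */
        mtrr.flags = MTRR_VALID;
        mtrr.owner = 0;                 /* kernel-owned range */

        /* Ignore failure: running uncached is slow but correct. */
        (void)mtrr_set(&mtrr, &n, NULL, MTRR_GETSET_KERNEL);
#endif
}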

cvs diff -r1.87 -r1.88 src/sys/arch/x86/pci/pci_machdep.c

--- src/sys/arch/x86/pci/pci_machdep.c 2020/05/04 15:55:56 1.87
+++ src/sys/arch/x86/pci/pci_machdep.c 2021/01/28 01:57:31 1.88
@@ -1,1244 +1,1242 @@ @@ -1,1244 +1,1242 @@
1/* $NetBSD: pci_machdep.c,v 1.87 2020/05/04 15:55:56 jdolecek Exp $ */ 1/* $NetBSD: pci_machdep.c,v 1.88 2021/01/28 01:57:31 jmcneill Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software 45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement: 46 * must display the following acknowledgement:
47 * This product includes software developed by Charles M. Hannum. 47 * This product includes software developed by Charles M. Hannum.
48 * 4. The name of the author may not be used to endorse or promote products 48 * 4. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission. 49 * derived from this software without specific prior written permission.
50 * 50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 */ 61 */
62 62
63/* 63/*
64 * Machine-specific functions for PCI autoconfiguration. 64 * Machine-specific functions for PCI autoconfiguration.
65 * 65 *
66 * On PCs, there are two methods of generating PCI configuration cycles. 66 * On PCs, there are two methods of generating PCI configuration cycles.
67 * We try to detect the appropriate mechanism for this machine and set 67 * We try to detect the appropriate mechanism for this machine and set
68 * up a few function pointers to access the correct method directly. 68 * up a few function pointers to access the correct method directly.
69 * 69 *
70 * The configuration method can be hard-coded in the config file by 70 * The configuration method can be hard-coded in the config file by
71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode 71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode
72 * as defined in section 3.6.4.1, `Generating Configuration Cycles'. 72 * as defined in section 3.6.4.1, `Generating Configuration Cycles'.
73 */ 73 */
74 74
75#include <sys/cdefs.h> 75#include <sys/cdefs.h>
76__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.87 2020/05/04 15:55:56 jdolecek Exp $"); 76__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.88 2021/01/28 01:57:31 jmcneill Exp $");
77 77
78#include <sys/types.h> 78#include <sys/types.h>
79#include <sys/param.h> 79#include <sys/param.h>
80#include <sys/time.h> 80#include <sys/time.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
82#include <sys/errno.h> 82#include <sys/errno.h>
83#include <sys/device.h> 83#include <sys/device.h>
84#include <sys/bus.h> 84#include <sys/bus.h>
85#include <sys/cpu.h> 85#include <sys/cpu.h>
86#include <sys/kmem.h> 86#include <sys/kmem.h>
87 87
88#include <uvm/uvm_extern.h> 88#include <uvm/uvm_extern.h>
89 89
90#include <machine/bus_private.h> 90#include <machine/bus_private.h>
91 91
92#include <machine/pio.h> 92#include <machine/pio.h>
93#include <machine/lock.h> 93#include <machine/lock.h>
94 94
95#include <dev/isa/isareg.h> 95#include <dev/isa/isareg.h>
96#include <dev/isa/isavar.h> 96#include <dev/isa/isavar.h>
97#include <dev/pci/pcivar.h> 97#include <dev/pci/pcivar.h>
98#include <dev/pci/pcireg.h> 98#include <dev/pci/pcireg.h>
99#include <dev/pci/pccbbreg.h> 99#include <dev/pci/pccbbreg.h>
100#include <dev/pci/pcidevs.h> 100#include <dev/pci/pcidevs.h>
101#include <dev/pci/ppbvar.h> 101#include <dev/pci/ppbvar.h>
102#include <dev/pci/genfb_pcivar.h> 102#include <dev/pci/genfb_pcivar.h>
103 103
104#include <dev/wsfb/genfbvar.h> 104#include <dev/wsfb/genfbvar.h>
105#include <arch/x86/include/genfb_machdep.h> 105#include <arch/x86/include/genfb_machdep.h>
106#include <dev/ic/vgareg.h> 106#include <dev/ic/vgareg.h>
107 107
108#include "acpica.h" 108#include "acpica.h"
109#include "genfb.h" 109#include "genfb.h"
110#include "isa.h" 110#include "isa.h"
111#include "opt_acpi.h" 111#include "opt_acpi.h"
112#include "opt_ddb.h" 112#include "opt_ddb.h"
113#include "opt_mpbios.h" 113#include "opt_mpbios.h"
114#include "opt_puc.h" 114#include "opt_puc.h"
115#include "opt_vga.h" 115#include "opt_vga.h"
116#include "pci.h" 116#include "pci.h"
117#include "wsdisplay.h" 117#include "wsdisplay.h"
118#include "com.h" 118#include "com.h"
119 119
120#ifdef DDB 120#ifdef DDB
121#include <machine/db_machdep.h> 121#include <machine/db_machdep.h>
122#include <ddb/db_sym.h> 122#include <ddb/db_sym.h>
123#include <ddb/db_extern.h> 123#include <ddb/db_extern.h>
124#endif 124#endif
125 125
126#ifdef VGA_POST 126#ifdef VGA_POST
127#include <x86/vga_post.h> 127#include <x86/vga_post.h>
128#endif 128#endif
129 129
130#include <x86/cpuvar.h> 130#include <x86/cpuvar.h>
131 131
132#include <machine/autoconf.h> 132#include <machine/autoconf.h>
133#include <machine/bootinfo.h> 133#include <machine/bootinfo.h>
134 134
135#ifdef MPBIOS 135#ifdef MPBIOS
136#include <machine/mpbiosvar.h> 136#include <machine/mpbiosvar.h>
137#endif 137#endif
138 138
139#if NACPICA > 0 139#if NACPICA > 0
140#include <machine/mpacpi.h> 140#include <machine/mpacpi.h>
141#if !defined(NO_PCI_EXTENDED_CONFIG) 141#if !defined(NO_PCI_EXTENDED_CONFIG)
142#include <dev/acpi/acpivar.h> 142#include <dev/acpi/acpivar.h>
143#include <dev/acpi/acpi_mcfg.h> 143#include <dev/acpi/acpi_mcfg.h>
144#endif 144#endif
145#endif 145#endif
146 146
147#include <machine/mpconfig.h> 147#include <machine/mpconfig.h>
148 148
149#if NCOM > 0 149#if NCOM > 0
150#include <dev/pci/puccn.h> 150#include <dev/pci/puccn.h>
151#endif 151#endif
152 152
153#ifndef XENPV 153#ifndef XENPV
154#include <x86/efi.h> 154#include <x86/efi.h>
155#endif 155#endif
156 156
157#include "opt_pci_conf_mode.h" 157#include "opt_pci_conf_mode.h"
158 158
159#ifdef PCI_CONF_MODE 159#ifdef PCI_CONF_MODE
160#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2) 160#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
161static int pci_mode = PCI_CONF_MODE; 161static int pci_mode = PCI_CONF_MODE;
162#else 162#else
163#error Invalid PCI configuration mode. 163#error Invalid PCI configuration mode.
164#endif 164#endif
165#else 165#else
166static int pci_mode = -1; 166static int pci_mode = -1;
167#endif 167#endif
168 168
169struct pci_conf_lock { 169struct pci_conf_lock {
170 uint32_t cl_cpuno; /* 0: unlocked 170 uint32_t cl_cpuno; /* 0: unlocked
171 * 1 + n: locked by CPU n (0 <= n) 171 * 1 + n: locked by CPU n (0 <= n)
172 */ 172 */
173 uint32_t cl_sel; /* the address that's being read. */ 173 uint32_t cl_sel; /* the address that's being read. */
174}; 174};
175 175
176static void pci_conf_unlock(struct pci_conf_lock *); 176static void pci_conf_unlock(struct pci_conf_lock *);
177static uint32_t pci_conf_selector(pcitag_t, int); 177static uint32_t pci_conf_selector(pcitag_t, int);
178static unsigned int pci_conf_port(pcitag_t, int); 178static unsigned int pci_conf_port(pcitag_t, int);
179static void pci_conf_select(uint32_t); 179static void pci_conf_select(uint32_t);
180static void pci_conf_lock(struct pci_conf_lock *, uint32_t); 180static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
181static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *); 181static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
182struct pci_bridge_hook_arg { 182struct pci_bridge_hook_arg {
183 void (*func)(pci_chipset_tag_t, pcitag_t, void *); 183 void (*func)(pci_chipset_tag_t, pcitag_t, void *);
184 void *arg; 184 void *arg;
185}; 185};
186 186
187#define PCI_MODE1_ENABLE 0x80000000UL 187#define PCI_MODE1_ENABLE 0x80000000UL
188#define PCI_MODE1_ADDRESS_REG 0x0cf8 188#define PCI_MODE1_ADDRESS_REG 0x0cf8
189#define PCI_MODE1_DATA_REG 0x0cfc 189#define PCI_MODE1_DATA_REG 0x0cfc
190 190
191#define PCI_MODE2_ENABLE_REG 0x0cf8 191#define PCI_MODE2_ENABLE_REG 0x0cf8
192#define PCI_MODE2_FORWARD_REG 0x0cfa 192#define PCI_MODE2_FORWARD_REG 0x0cfa
193 193
194#define _tag(b, d, f) \ 194#define _tag(b, d, f) \
195 {.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)} 195 {.mode1 = PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)}
196#define _qe(bus, dev, fcn, vend, prod) \ 196#define _qe(bus, dev, fcn, vend, prod) \
197 {_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)} 197 {_tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
198const struct { 198const struct {
199 pcitag_t tag; 199 pcitag_t tag;
200 pcireg_t id; 200 pcireg_t id;
201} pcim1_quirk_tbl[] = { 201} pcim1_quirk_tbl[] = {
202 _qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000), /* patchable */ 202 _qe(0, 0, 0, PCI_VENDOR_INVALID, 0x0000), /* patchable */
203 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1), 203 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
204 /* XXX Triflex2 not tested */ 204 /* XXX Triflex2 not tested */
205 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2), 205 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
206 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4), 206 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
207#if 0 207#if 0
208 /* Triton needed for Connectix Virtual PC */ 208 /* Triton needed for Connectix Virtual PC */
209 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX), 209 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
210 /* Connectix Virtual PC 5 has a 440BX */ 210 /* Connectix Virtual PC 5 has a 440BX */
211 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP), 211 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
212 /* Parallels Desktop for Mac */ 212 /* Parallels Desktop for Mac */
213 _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO), 213 _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
214 _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS), 214 _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
215 /* SIS 740 */ 215 /* SIS 740 */
216 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740), 216 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
217 /* SIS 741 */ 217 /* SIS 741 */
218 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741), 218 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
219 /* VIA Technologies VX900 */ 219 /* VIA Technologies VX900 */
220 _qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB) 220 _qe(0, 0, 0, PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VX900_HB)
221#endif 221#endif
222}; 222};
223#undef _tag 223#undef _tag
224#undef _qe 224#undef _qe
225 225
226/* arch/xen does not support MSI/MSI-X yet. */ 226/* arch/xen does not support MSI/MSI-X yet. */
227#ifdef __HAVE_PCI_MSI_MSIX 227#ifdef __HAVE_PCI_MSI_MSIX
228#define PCI_QUIRK_DISABLE_MSI 1 /* Neigher MSI nor MSI-X work */ 228#define PCI_QUIRK_DISABLE_MSI 1 /* Neigher MSI nor MSI-X work */
229#define PCI_QUIRK_DISABLE_MSIX 2 /* MSI-X does not work */ 229#define PCI_QUIRK_DISABLE_MSIX 2 /* MSI-X does not work */
230#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI and MSI-X works */ 230#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI and MSI-X works */
231 231
232#define _dme(vend, prod) \ 232#define _dme(vend, prod) \
233 { PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) } 233 { PCI_QUIRK_DISABLE_MSI, PCI_ID_CODE(vend, prod) }
234#define _dmxe(vend, prod) \ 234#define _dmxe(vend, prod) \
235 { PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) } 235 { PCI_QUIRK_DISABLE_MSIX, PCI_ID_CODE(vend, prod) }
236#define _emve(vend, prod) \ 236#define _emve(vend, prod) \
237 { PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) } 237 { PCI_QUIRK_ENABLE_MSI_VM, PCI_ID_CODE(vend, prod) }
238const struct { 238const struct {
239 int type; 239 int type;
240 pcireg_t id; 240 pcireg_t id;
241} pci_msi_quirk_tbl[] = { 241} pci_msi_quirk_tbl[] = {
242 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC), 242 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCMC),
243 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX), 243 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
244 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX), 244 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437MX),
245 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX), 245 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437VX),
246 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX), 246 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439HX),
247 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX), 247 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82439TX),
248 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX), 248 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX),
249 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP), 249 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_AGP),
250 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX), 250 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82440MX),
251 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX), 251 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX),
252 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX), 252 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX),
253 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP), 253 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_AGP),
254 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP), 254 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
255 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP), 255 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443GX_NOAGP),
256 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX), 256 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX),
257 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP), 257 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443LX_AGP),
258 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH), 258 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810_MCH),
259 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH), 259 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82810E_MCH),
260 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB), 260 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82815_FULL_HUB),
261 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH), 261 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82820_MCH),
262 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1), 262 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82830MP_IO_1),
263 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB), 263 _dme(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82840_HB),
264 _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB), 264 _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_PCHB),
265 _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB), 265 _dme(PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_PCHB),
266 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC), 266 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC751_SC),
267 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC), 267 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC761_SC),
268 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB), 268 _dme(PCI_VENDOR_AMD, PCI_PRODUCT_AMD_SC762_NB),
269 269
270 _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX), /* QEMU */ 270 _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82441FX), /* QEMU */
271 _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX), /* VMWare */ 271 _emve(PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX), /* VMWare */
272}; 272};
273#undef _dme 273#undef _dme
274#undef _dmxe 274#undef _dmxe
275#undef _emve 275#undef _emve
276#endif /* __HAVE_PCI_MSI_MSIX */ 276#endif /* __HAVE_PCI_MSI_MSIX */
277 277
278/* 278/*
279 * PCI doesn't have any special needs; just use the generic versions 279 * PCI doesn't have any special needs; just use the generic versions
280 * of these functions. 280 * of these functions.
281 */ 281 */
282struct x86_bus_dma_tag pci_bus_dma_tag = { 282struct x86_bus_dma_tag pci_bus_dma_tag = {
283 ._tag_needs_free = 0, 283 ._tag_needs_free = 0,
284#if defined(_LP64) || defined(PAE) 284#if defined(_LP64) || defined(PAE)
285 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD, 285 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD,
286 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD, 286 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD,
287 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD, 287 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD,
288#else 288#else
289 ._bounce_thresh = 0, 289 ._bounce_thresh = 0,
290 ._bounce_alloc_lo = 0, 290 ._bounce_alloc_lo = 0,
291 ._bounce_alloc_hi = 0, 291 ._bounce_alloc_hi = 0,
292#endif 292#endif
293 ._may_bounce = NULL, 293 ._may_bounce = NULL,
294}; 294};
295 295
296#ifdef _LP64 296#ifdef _LP64
297struct x86_bus_dma_tag pci_bus_dma64_tag = { 297struct x86_bus_dma_tag pci_bus_dma64_tag = {
298 ._tag_needs_free = 0, 298 ._tag_needs_free = 0,
299 ._bounce_thresh = 0, 299 ._bounce_thresh = 0,
300 ._bounce_alloc_lo = 0, 300 ._bounce_alloc_lo = 0,
301 ._bounce_alloc_hi = 0, 301 ._bounce_alloc_hi = 0,
302 ._may_bounce = NULL, 302 ._may_bounce = NULL,
303}; 303};
304#endif 304#endif
305 305
306static struct pci_conf_lock cl0 = { 306static struct pci_conf_lock cl0 = {
307 .cl_cpuno = 0UL 307 .cl_cpuno = 0UL
308 , .cl_sel = 0UL 308 , .cl_sel = 0UL
309}; 309};
310 310
311static struct pci_conf_lock * const cl = &cl0; 311static struct pci_conf_lock * const cl = &cl0;
312 312
313#if NGENFB > 0 && NACPICA > 0 && defined(VGA_POST) 313#if NGENFB > 0 && NACPICA > 0 && defined(VGA_POST)
314extern int acpi_md_vbios_reset; 314extern int acpi_md_vbios_reset;
315extern int acpi_md_vesa_modenum; 315extern int acpi_md_vesa_modenum;
316#endif 316#endif
317 317
318static struct genfb_colormap_callback gfb_cb; 318static struct genfb_colormap_callback gfb_cb;
319static struct genfb_pmf_callback pmf_cb; 319static struct genfb_pmf_callback pmf_cb;
320static struct genfb_mode_callback mode_cb; 320static struct genfb_mode_callback mode_cb;
321#ifdef VGA_POST 321#ifdef VGA_POST
322static struct vga_post *vga_posth = NULL; 322static struct vga_post *vga_posth = NULL;
323#endif 323#endif
324 324
325static void 325static void
326pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel) 326pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
327{ 327{
328 uint32_t cpuno; 328 uint32_t cpuno;
329 329
330 KASSERT(sel != 0); 330 KASSERT(sel != 0);
331 331
332 kpreempt_disable(); 332 kpreempt_disable();
333 cpuno = cpu_number() + 1; 333 cpuno = cpu_number() + 1;
334 /* If the kernel enters pci_conf_lock() through an interrupt 334 /* If the kernel enters pci_conf_lock() through an interrupt
335 * handler, then the CPU may already hold the lock. 335 * handler, then the CPU may already hold the lock.
336 * 336 *
337 * If the CPU does not already hold the lock, spin until 337 * If the CPU does not already hold the lock, spin until
338 * we can acquire it. 338 * we can acquire it.
339 */ 339 */
340 if (cpuno == cl->cl_cpuno) { 340 if (cpuno == cl->cl_cpuno) {
341 ocl->cl_cpuno = cpuno; 341 ocl->cl_cpuno = cpuno;
342 } else { 342 } else {
343#ifdef LOCKDEBUG 343#ifdef LOCKDEBUG
344 u_int spins = 0; 344 u_int spins = 0;
345#endif 345#endif
346 u_int count; 346 u_int count;
347 count = SPINLOCK_BACKOFF_MIN; 347 count = SPINLOCK_BACKOFF_MIN;
348 348
349 ocl->cl_cpuno = 0; 349 ocl->cl_cpuno = 0;
350 350
351 while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) { 351 while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
352 SPINLOCK_BACKOFF(count); 352 SPINLOCK_BACKOFF(count);
353#ifdef LOCKDEBUG 353#ifdef LOCKDEBUG
354 if (SPINLOCK_SPINOUT(spins)) { 354 if (SPINLOCK_SPINOUT(spins)) {
355 panic("%s: cpu %" PRId32 355 panic("%s: cpu %" PRId32
356 " spun out waiting for cpu %" PRId32, 356 " spun out waiting for cpu %" PRId32,
357 __func__, cpuno, cl->cl_cpuno); 357 __func__, cpuno, cl->cl_cpuno);
358 } 358 }
359#endif 359#endif
360 } 360 }
361 } 361 }
362 362
363 /* Only one CPU can be here, so an interlocked atomic_swap(3) 363 /* Only one CPU can be here, so an interlocked atomic_swap(3)
364 * is not necessary. 364 * is not necessary.
365 * 365 *
366 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel, 366 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
367 * and applying atomic_cas_32_ni() is not an atomic operation, 367 * and applying atomic_cas_32_ni() is not an atomic operation,
368 * however, any interrupt that, in the middle of the 368 * however, any interrupt that, in the middle of the
369 * operation, modifies cl->cl_sel, will also restore 369 * operation, modifies cl->cl_sel, will also restore
370 * cl->cl_sel. So cl->cl_sel will have the same value when 370 * cl->cl_sel. So cl->cl_sel will have the same value when
371 * we apply atomic_cas_32_ni() as when we evaluated it, 371 * we apply atomic_cas_32_ni() as when we evaluated it,
372 * before. 372 * before.
373 */ 373 */
374 ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel); 374 ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
375 pci_conf_select(sel); 375 pci_conf_select(sel);
376} 376}
377 377
378static void 378static void
379pci_conf_unlock(struct pci_conf_lock *ocl) 379pci_conf_unlock(struct pci_conf_lock *ocl)
380{ 380{
381 atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel); 381 atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
382 pci_conf_select(ocl->cl_sel); 382 pci_conf_select(ocl->cl_sel);
383 if (ocl->cl_cpuno != cl->cl_cpuno) 383 if (ocl->cl_cpuno != cl->cl_cpuno)
384 atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno); 384 atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
385 kpreempt_enable(); 385 kpreempt_enable();
386} 386}
387 387
388static uint32_t 388static uint32_t
389pci_conf_selector(pcitag_t tag, int reg) 389pci_conf_selector(pcitag_t tag, int reg)
390{ 390{
391 static const pcitag_t mode2_mask = { 391 static const pcitag_t mode2_mask = {
392 .mode2 = { 392 .mode2 = {
393 .enable = 0xff 393 .enable = 0xff
394 , .forward = 0xff 394 , .forward = 0xff
395 } 395 }
396 }; 396 };
397 397
398 switch (pci_mode) { 398 switch (pci_mode) {
399 case 1: 399 case 1:
400 return tag.mode1 | reg; 400 return tag.mode1 | reg;
401 case 2: 401 case 2:
402 return tag.mode1 & mode2_mask.mode1; 402 return tag.mode1 & mode2_mask.mode1;
403 default: 403 default:
404 panic("%s: mode %d not configured", __func__, pci_mode); 404 panic("%s: mode %d not configured", __func__, pci_mode);
405 } 405 }
406} 406}
407 407
408static unsigned int 408static unsigned int
409pci_conf_port(pcitag_t tag, int reg) 409pci_conf_port(pcitag_t tag, int reg)
410{ 410{
411 switch (pci_mode) { 411 switch (pci_mode) {
412 case 1: 412 case 1:
413 return PCI_MODE1_DATA_REG; 413 return PCI_MODE1_DATA_REG;
414 case 2: 414 case 2:
415 return tag.mode2.port | reg; 415 return tag.mode2.port | reg;
416 default: 416 default:
417 panic("%s: mode %d not configured", __func__, pci_mode); 417 panic("%s: mode %d not configured", __func__, pci_mode);
418 } 418 }
419} 419}
420 420
421static void 421static void
422pci_conf_select(uint32_t sel) 422pci_conf_select(uint32_t sel)
423{ 423{
424 pcitag_t tag; 424 pcitag_t tag;
425 425
426 switch (pci_mode) { 426 switch (pci_mode) {
427 case 1: 427 case 1:
428 outl(PCI_MODE1_ADDRESS_REG, sel); 428 outl(PCI_MODE1_ADDRESS_REG, sel);
429 return; 429 return;
430 case 2: 430 case 2:
431 tag.mode1 = sel; 431 tag.mode1 = sel;
432 outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable); 432 outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
433 if (tag.mode2.enable != 0) 433 if (tag.mode2.enable != 0)
434 outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward); 434 outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
435 return; 435 return;
436 default: 436 default:
437 panic("%s: mode %d not configured", __func__, pci_mode); 437 panic("%s: mode %d not configured", __func__, pci_mode);
438 } 438 }
439} 439}
440 440
441static int 441static int
442pci_mode_check(void) 442pci_mode_check(void)
443{ 443{
444 pcireg_t x; 444 pcireg_t x;
445 pcitag_t t; 445 pcitag_t t;
446 int device; 446 int device;
447 const int maxdev = pci_bus_maxdevs(NULL, 0); 447 const int maxdev = pci_bus_maxdevs(NULL, 0);
448 448
449 for (device = 0; device < maxdev; device++) { 449 for (device = 0; device < maxdev; device++) {
450 t = pci_make_tag(NULL, 0, device, 0); 450 t = pci_make_tag(NULL, 0, device, 0);
451 x = pci_conf_read(NULL, t, PCI_CLASS_REG); 451 x = pci_conf_read(NULL, t, PCI_CLASS_REG);
452 if (PCI_CLASS(x) == PCI_CLASS_BRIDGE && 452 if (PCI_CLASS(x) == PCI_CLASS_BRIDGE &&
453 PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST) 453 PCI_SUBCLASS(x) == PCI_SUBCLASS_BRIDGE_HOST)
454 return 0; 454 return 0;
455 x = pci_conf_read(NULL, t, PCI_ID_REG); 455 x = pci_conf_read(NULL, t, PCI_ID_REG);
456 switch (PCI_VENDOR(x)) { 456 switch (PCI_VENDOR(x)) {
457 case PCI_VENDOR_COMPAQ: 457 case PCI_VENDOR_COMPAQ:
458 case PCI_VENDOR_INTEL: 458 case PCI_VENDOR_INTEL:
459 case PCI_VENDOR_VIATECH: 459 case PCI_VENDOR_VIATECH:
460 return 0; 460 return 0;
461 } 461 }
462 } 462 }
463 return -1; 463 return -1;
464} 464}
465#ifdef __HAVE_PCI_MSI_MSIX 465#ifdef __HAVE_PCI_MSI_MSIX
466static int 466static int
467pci_has_msi_quirk(pcireg_t id, int type) 467pci_has_msi_quirk(pcireg_t id, int type)
468{ 468{
469 int i; 469 int i;
470 470
471 for (i = 0; i < __arraycount(pci_msi_quirk_tbl); i++) { 471 for (i = 0; i < __arraycount(pci_msi_quirk_tbl); i++) {
472 if (id == pci_msi_quirk_tbl[i].id && 472 if (id == pci_msi_quirk_tbl[i].id &&
473 type == pci_msi_quirk_tbl[i].type) 473 type == pci_msi_quirk_tbl[i].type)
474 return 1; 474 return 1;
475 } 475 }
476 476
477 return 0; 477 return 0;
478} 478}
479#endif 479#endif
480 480
481void 481void
482pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba) 482pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
483{ 483{
484#ifdef __HAVE_PCI_MSI_MSIX 484#ifdef __HAVE_PCI_MSI_MSIX
485 pci_chipset_tag_t pc = pba->pba_pc; 485 pci_chipset_tag_t pc = pba->pba_pc;
486 pcitag_t tag; 486 pcitag_t tag;
487 pcireg_t id, class; 487 pcireg_t id, class;
488#endif 488#endif
489 489
490 if (pba->pba_bus == 0) 490 if (pba->pba_bus == 0)
491 aprint_normal(": configuration mode %d", pci_mode); 491 aprint_normal(": configuration mode %d", pci_mode);
492#ifdef MPBIOS 492#ifdef MPBIOS
493 mpbios_pci_attach_hook(parent, self, pba); 493 mpbios_pci_attach_hook(parent, self, pba);
494#endif 494#endif
495#if NACPICA > 0 495#if NACPICA > 0
496 mpacpi_pci_attach_hook(parent, self, pba); 496 mpacpi_pci_attach_hook(parent, self, pba);
497#endif 497#endif
498#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG) 498#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
499 acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus); 499 acpimcfg_map_bus(self, pba->pba_pc, pba->pba_bus);
500#endif 500#endif
501 501
502#ifdef __HAVE_PCI_MSI_MSIX 502#ifdef __HAVE_PCI_MSI_MSIX
503 /* 503 /*
504 * In order to decide whether the system supports MSI we look 504 * In order to decide whether the system supports MSI we look
505 * at the host bridge, which should be device 0 function 0 on 505 * at the host bridge, which should be device 0 function 0 on
506 * bus 0. It is better to not enable MSI on systems that 506 * bus 0. It is better to not enable MSI on systems that
507 * support it than the other way around, so be conservative 507 * support it than the other way around, so be conservative
508 * here. So we don't enable MSI if we don't find a host 508 * here. So we don't enable MSI if we don't find a host
509 * bridge there. We also deliberately don't enable MSI on 509 * bridge there. We also deliberately don't enable MSI on
510 * chipsets from low-end manifacturers like VIA and SiS. 510 * chipsets from low-end manifacturers like VIA and SiS.
511 */ 511 */
512 tag = pci_make_tag(pc, 0, 0, 0); 512 tag = pci_make_tag(pc, 0, 0, 0);
513 id = pci_conf_read(pc, tag, PCI_ID_REG); 513 id = pci_conf_read(pc, tag, PCI_ID_REG);
514 class = pci_conf_read(pc, tag, PCI_CLASS_REG); 514 class = pci_conf_read(pc, tag, PCI_CLASS_REG);
515 515
516 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE || 516 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE ||
517 PCI_SUBCLASS(class) != PCI_SUBCLASS_BRIDGE_HOST) 517 PCI_SUBCLASS(class) != PCI_SUBCLASS_BRIDGE_HOST)
518 return; 518 return;
519 519
520 /* VMware and KVM use old chipset, but they can use MSI/MSI-X */ 520 /* VMware and KVM use old chipset, but they can use MSI/MSI-X */
521 if ((cpu_feature[1] & CPUID2_RAZ) 521 if ((cpu_feature[1] & CPUID2_RAZ)
522 && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) { 522 && (pci_has_msi_quirk(id, PCI_QUIRK_ENABLE_MSI_VM))) {
523 pba->pba_flags |= PCI_FLAGS_MSI_OKAY; 523 pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
524 pba->pba_flags |= PCI_FLAGS_MSIX_OKAY; 524 pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
525 } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) { 525 } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSI)) {
526 pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY; 526 pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
527 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY; 527 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
528 aprint_verbose("\n"); 528 aprint_verbose("\n");
529 aprint_verbose_dev(self, 529 aprint_verbose_dev(self,
530 "This pci host supports neither MSI nor MSI-X."); 530 "This pci host supports neither MSI nor MSI-X.");
531 } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) { 531 } else if (pci_has_msi_quirk(id, PCI_QUIRK_DISABLE_MSIX)) {
532 pba->pba_flags |= PCI_FLAGS_MSI_OKAY; 532 pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
533 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY; 533 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
534 aprint_verbose("\n"); 534 aprint_verbose("\n");
535 aprint_verbose_dev(self, 535 aprint_verbose_dev(self,
536 "This pci host does not support MSI-X."); 536 "This pci host does not support MSI-X.");
537 } else { 537 } else {
538 pba->pba_flags |= PCI_FLAGS_MSI_OKAY; 538 pba->pba_flags |= PCI_FLAGS_MSI_OKAY;
539 pba->pba_flags |= PCI_FLAGS_MSIX_OKAY; 539 pba->pba_flags |= PCI_FLAGS_MSIX_OKAY;
540 } 540 }
541 541
542 /* 542 /*
543 * Don't enable MSI on a HyperTransport bus. In order to 543 * Don't enable MSI on a HyperTransport bus. In order to
544 * determine that bus 0 is a HyperTransport bus, we look at 544 * determine that bus 0 is a HyperTransport bus, we look at
545 * device 24 function 0, which is the HyperTransport 545 * device 24 function 0, which is the HyperTransport
546 * host/primary interface integrated on most 64-bit AMD CPUs. 546 * host/primary interface integrated on most 64-bit AMD CPUs.
547 * If that device has a HyperTransport capability, bus 0 must 547 * If that device has a HyperTransport capability, bus 0 must
548 * be a HyperTransport bus and we disable MSI. 548 * be a HyperTransport bus and we disable MSI.
549 */ 549 */
550 if (24 < pci_bus_maxdevs(pc, 0)) { 550 if (24 < pci_bus_maxdevs(pc, 0)) {
551 tag = pci_make_tag(pc, 0, 24, 0); 551 tag = pci_make_tag(pc, 0, 24, 0);
552 if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) { 552 if (pci_get_capability(pc, tag, PCI_CAP_LDT, NULL, NULL)) {
553 pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY; 553 pba->pba_flags &= ~PCI_FLAGS_MSI_OKAY;
554 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY; 554 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
555 } 555 }
556 } 556 }
557 557
558#ifdef XENPV 558#ifdef XENPV
559 /* 559 /*
560 * XXX MSI-X doesn't work for XenPV yet - setup seems to be correct, 560 * XXX MSI-X doesn't work for XenPV yet - setup seems to be correct,
561 * XXX but no interrupts are actually delivered. 561 * XXX but no interrupts are actually delivered.
562 */ 562 */
563 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY; 563 pba->pba_flags &= ~PCI_FLAGS_MSIX_OKAY;
564#endif 564#endif
565 565
566#endif /* __HAVE_PCI_MSI_MSIX */ 566#endif /* __HAVE_PCI_MSI_MSIX */
567} 567}
568 568
569int 569int
570pci_bus_maxdevs(pci_chipset_tag_t pc, int busno) 570pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
571{ 571{
572 /* 572 /*
573 * Bus number is irrelevant. If Configuration Mechanism 2 is in 573 * Bus number is irrelevant. If Configuration Mechanism 2 is in
574 * use, can only have devices 0-15 on any bus. If Configuration 574 * use, can only have devices 0-15 on any bus. If Configuration
575 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal' 575 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal'
576 * range). 576 * range).
577 */ 577 */
578 if (pci_mode == 2) 578 if (pci_mode == 2)
579 return (16); 579 return (16);
580 else 580 else
581 return (32); 581 return (32);
582} 582}
583 583
584pcitag_t 584pcitag_t
585pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function) 585pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
586{ 586{
587 pci_chipset_tag_t ipc; 587 pci_chipset_tag_t ipc;
588 pcitag_t tag; 588 pcitag_t tag;
589 589
590 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 590 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
591 if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0) 591 if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
592 continue; 592 continue;
593 return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx, 593 return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
594 pc, bus, device, function); 594 pc, bus, device, function);
595 } 595 }
596 596
597 switch (pci_mode) { 597 switch (pci_mode) {
598 case 1: 598 case 1:
599 if (bus >= 256 || device >= 32 || function >= 8) 599 if (bus >= 256 || device >= 32 || function >= 8)
600 panic("%s: bad request(%d, %d, %d)", __func__, 600 panic("%s: bad request(%d, %d, %d)", __func__,
601 bus, device, function); 601 bus, device, function);
602 602
603 tag.mode1 = PCI_MODE1_ENABLE | 603 tag.mode1 = PCI_MODE1_ENABLE |
604 (bus << 16) | (device << 11) | (function << 8); 604 (bus << 16) | (device << 11) | (function << 8);
605 return tag; 605 return tag;
606 case 2: 606 case 2:
607 if (bus >= 256 || device >= 16 || function >= 8) 607 if (bus >= 256 || device >= 16 || function >= 8)
608 panic("%s: bad request(%d, %d, %d)", __func__, 608 panic("%s: bad request(%d, %d, %d)", __func__,
609 bus, device, function); 609 bus, device, function);
610 610
611 tag.mode2.port = 0xc000 | (device << 8); 611 tag.mode2.port = 0xc000 | (device << 8);
612 tag.mode2.enable = 0xf0 | (function << 1); 612 tag.mode2.enable = 0xf0 | (function << 1);
613 tag.mode2.forward = bus; 613 tag.mode2.forward = bus;
614 return tag; 614 return tag;
615 default: 615 default:
616 panic("%s: mode %d not configured", __func__, pci_mode); 616 panic("%s: mode %d not configured", __func__, pci_mode);
617 } 617 }
618} 618}
619 619
620void 620void
621pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag, 621pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
622 int *bp, int *dp, int *fp) 622 int *bp, int *dp, int *fp)
623{ 623{
624 pci_chipset_tag_t ipc; 624 pci_chipset_tag_t ipc;
625 625
626 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 626 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
627 if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0) 627 if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
628 continue; 628 continue;
629 (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx, 629 (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
630 pc, tag, bp, dp, fp); 630 pc, tag, bp, dp, fp);
631 return; 631 return;
632 } 632 }
633 633
634 switch (pci_mode) { 634 switch (pci_mode) {
635 case 1: 635 case 1:
636 if (bp != NULL) 636 if (bp != NULL)
637 *bp = (tag.mode1 >> 16) & 0xff; 637 *bp = (tag.mode1 >> 16) & 0xff;
638 if (dp != NULL) 638 if (dp != NULL)
639 *dp = (tag.mode1 >> 11) & 0x1f; 639 *dp = (tag.mode1 >> 11) & 0x1f;
640 if (fp != NULL) 640 if (fp != NULL)
641 *fp = (tag.mode1 >> 8) & 0x7; 641 *fp = (tag.mode1 >> 8) & 0x7;
642 return; 642 return;
643 case 2: 643 case 2:
644 if (bp != NULL) 644 if (bp != NULL)
645 *bp = tag.mode2.forward & 0xff; 645 *bp = tag.mode2.forward & 0xff;
646 if (dp != NULL) 646 if (dp != NULL)
647 *dp = (tag.mode2.port >> 8) & 0xf; 647 *dp = (tag.mode2.port >> 8) & 0xf;
648 if (fp != NULL) 648 if (fp != NULL)
649 *fp = (tag.mode2.enable >> 1) & 0x7; 649 *fp = (tag.mode2.enable >> 1) & 0x7;
650 return; 650 return;
651 default: 651 default:
652 panic("%s: mode %d not configured", __func__, pci_mode); 652 panic("%s: mode %d not configured", __func__, pci_mode);
653 } 653 }
654} 654}
655 655
656pcireg_t 656pcireg_t
657pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg) 657pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
658{ 658{
659 pci_chipset_tag_t ipc; 659 pci_chipset_tag_t ipc;
660 pcireg_t data; 660 pcireg_t data;
661 struct pci_conf_lock ocl; 661 struct pci_conf_lock ocl;
662 int dev; 662 int dev;
663 663
664 KASSERT((reg & 0x3) == 0); 664 KASSERT((reg & 0x3) == 0);
665 665
666 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 666 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
667 if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0) 667 if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
668 continue; 668 continue;
669 return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg); 669 return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
670 } 670 }
671 671
672 pci_decompose_tag(pc, tag, NULL, &dev, NULL); 672 pci_decompose_tag(pc, tag, NULL, &dev, NULL);
673 if (__predict_false(pci_mode == 2 && dev >= 16)) 673 if (__predict_false(pci_mode == 2 && dev >= 16))
674 return (pcireg_t) -1; 674 return (pcireg_t) -1;
675 675
676 if (reg < 0) 676 if (reg < 0)
677 return (pcireg_t) -1; 677 return (pcireg_t) -1;
678 if (reg >= PCI_CONF_SIZE) { 678 if (reg >= PCI_CONF_SIZE) {
679#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG) 679#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
680 if (reg >= PCI_EXTCONF_SIZE) 680 if (reg >= PCI_EXTCONF_SIZE)
681 return (pcireg_t) -1; 681 return (pcireg_t) -1;
682 acpimcfg_conf_read(pc, tag, reg, &data); 682 acpimcfg_conf_read(pc, tag, reg, &data);
683 return data; 683 return data;
684#else 684#else
685 return (pcireg_t) -1; 685 return (pcireg_t) -1;
686#endif 686#endif
687 } 687 }
688 688
689 pci_conf_lock(&ocl, pci_conf_selector(tag, reg)); 689 pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
690 data = inl(pci_conf_port(tag, reg)); 690 data = inl(pci_conf_port(tag, reg));
691 pci_conf_unlock(&ocl); 691 pci_conf_unlock(&ocl);
692 return data; 692 return data;
693} 693}
694 694
695void 695void
696pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data) 696pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
697{ 697{
698 pci_chipset_tag_t ipc; 698 pci_chipset_tag_t ipc;
699 struct pci_conf_lock ocl; 699 struct pci_conf_lock ocl;
700 int dev; 700 int dev;
701 701
702 KASSERT((reg & 0x3) == 0); 702 KASSERT((reg & 0x3) == 0);
703 703
704 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 704 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
705 if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0) 705 if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
706 continue; 706 continue;
707 (*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg, 707 (*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
708 data); 708 data);
709 return; 709 return;
710 } 710 }
711 711
712 pci_decompose_tag(pc, tag, NULL, &dev, NULL); 712 pci_decompose_tag(pc, tag, NULL, &dev, NULL);
713 if (__predict_false(pci_mode == 2 && dev >= 16)) { 713 if (__predict_false(pci_mode == 2 && dev >= 16)) {
714 return; 714 return;
715 } 715 }
716 716
717 if (reg < 0) 717 if (reg < 0)
718 return; 718 return;
719 if (reg >= PCI_CONF_SIZE) { 719 if (reg >= PCI_CONF_SIZE) {
720#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG) 720#if NACPICA > 0 && !defined(NO_PCI_EXTENDED_CONFIG)
721 if (reg >= PCI_EXTCONF_SIZE) 721 if (reg >= PCI_EXTCONF_SIZE)
722 return; 722 return;
723 acpimcfg_conf_write(pc, tag, reg, data); 723 acpimcfg_conf_write(pc, tag, reg, data);
724#endif 724#endif
725 return; 725 return;
726 } 726 }
727 727
728 pci_conf_lock(&ocl, pci_conf_selector(tag, reg)); 728 pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
729 outl(pci_conf_port(tag, reg), data); 729 outl(pci_conf_port(tag, reg), data);
730 pci_conf_unlock(&ocl); 730 pci_conf_unlock(&ocl);
731} 731}
732 732
733void 733void
734pci_mode_set(int mode) 734pci_mode_set(int mode)
735{ 735{
736 KASSERT(pci_mode == -1 || pci_mode == mode); 736 KASSERT(pci_mode == -1 || pci_mode == mode);
737 737
738 pci_mode = mode; 738 pci_mode = mode;
739} 739}
740 740
741int 741int
742pci_mode_detect(void) 742pci_mode_detect(void)
743{ 743{
744 uint32_t sav, val; 744 uint32_t sav, val;
745 int i; 745 int i;
746 pcireg_t idreg; 746 pcireg_t idreg;
747 747
748 if (pci_mode != -1) 748 if (pci_mode != -1)
749 return pci_mode; 749 return pci_mode;
750 750
751 /* 751 /*
752 * We try to divine which configuration mode the host bridge wants. 752 * We try to divine which configuration mode the host bridge wants.
753 */ 753 */
754 754
755 sav = inl(PCI_MODE1_ADDRESS_REG); 755 sav = inl(PCI_MODE1_ADDRESS_REG);
756 756
757 pci_mode = 1; /* assume this for now */ 757 pci_mode = 1; /* assume this for now */
758 /* 758 /*
759 * catch some known buggy implementations of mode 1 759 * catch some known buggy implementations of mode 1
760 */ 760 */
761 for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) { 761 for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
762 pcitag_t t; 762 pcitag_t t;
763 763
764 if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID) 764 if (PCI_VENDOR(pcim1_quirk_tbl[i].id) == PCI_VENDOR_INVALID)
765 continue; 765 continue;
766 t.mode1 = pcim1_quirk_tbl[i].tag.mode1; 766 t.mode1 = pcim1_quirk_tbl[i].tag.mode1;
767 idreg = pci_conf_read(NULL, t, PCI_ID_REG); /* needs "pci_mode" */ 767 idreg = pci_conf_read(NULL, t, PCI_ID_REG); /* needs "pci_mode" */
768 if (idreg == pcim1_quirk_tbl[i].id) { 768 if (idreg == pcim1_quirk_tbl[i].id) {
769#ifdef DEBUG 769#ifdef DEBUG
770 printf("%s: known mode 1 PCI chipset (%08x)\n", 770 printf("%s: known mode 1 PCI chipset (%08x)\n",
771 __func__, idreg); 771 __func__, idreg);
772#endif 772#endif
773 return (pci_mode); 773 return (pci_mode);
774 } 774 }
775 } 775 }
776 776
777#if 0 777#if 0
778 extern char cpu_brand_string[]; 778 extern char cpu_brand_string[];
779 const char *reason, *system_vendor, *system_product; 779 const char *reason, *system_vendor, *system_product;
780 if (memcmp(cpu_brand_string, "QEMU", 4) == 0) 780 if (memcmp(cpu_brand_string, "QEMU", 4) == 0)
781 /* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */ 781 /* PR 45671, https://bugs.launchpad.net/qemu/+bug/897771 */
782 reason = "QEMU"; 782 reason = "QEMU";
783 else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL && 783 else if ((system_vendor = pmf_get_platform("system-vendor")) != NULL &&
784 strcmp(system_vendor, "Xen") == 0 && 784 strcmp(system_vendor, "Xen") == 0 &&
785 (system_product = pmf_get_platform("system-product")) != NULL && 785 (system_product = pmf_get_platform("system-product")) != NULL &&
786 strcmp(system_product, "HVM domU") == 0) 786 strcmp(system_product, "HVM domU") == 0)
787 reason = "Xen"; 787 reason = "Xen";
788 else 788 else
789 reason = NULL; 789 reason = NULL;
790 790
791 if (reason) { 791 if (reason) {
792#ifdef DEBUG 792#ifdef DEBUG
793 printf("%s: forcing PCI mode 1 for %s\n", __func__, reason); 793 printf("%s: forcing PCI mode 1 for %s\n", __func__, reason);
794#endif 794#endif
795 return (pci_mode); 795 return (pci_mode);
796 } 796 }
797#endif 797#endif
798 /* 798 /*
799 * Strong check for standard compliant mode 1: 799 * Strong check for standard compliant mode 1:
800 * 1. bit 31 ("enable") can be set 800 * 1. bit 31 ("enable") can be set
801 * 2. byte/word access does not affect register 801 * 2. byte/word access does not affect register
802 */ 802 */
803 outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE); 803 outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
804 outb(PCI_MODE1_ADDRESS_REG + 3, 0); 804 outb(PCI_MODE1_ADDRESS_REG + 3, 0);
805 outw(PCI_MODE1_ADDRESS_REG + 2, 0); 805 outw(PCI_MODE1_ADDRESS_REG + 2, 0);
806 val = inl(PCI_MODE1_ADDRESS_REG); 806 val = inl(PCI_MODE1_ADDRESS_REG);
807 if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) { 807 if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
808#ifdef DEBUG 808#ifdef DEBUG
809 printf("%s: mode 1 enable failed (%x)\n", __func__, val); 809 printf("%s: mode 1 enable failed (%x)\n", __func__, val);
810#endif 810#endif
811 /* Try out mode 1 to see if we can find a host bridge. */ 811 /* Try out mode 1 to see if we can find a host bridge. */
812 if (pci_mode_check() == 0) { 812 if (pci_mode_check() == 0) {
813#ifdef DEBUG 813#ifdef DEBUG
814 printf("%s: mode 1 functional, using\n", __func__); 814 printf("%s: mode 1 functional, using\n", __func__);
815#endif 815#endif
816 return (pci_mode); 816 return (pci_mode);
817 } 817 }
818 goto not1; 818 goto not1;
819 } 819 }
820 outl(PCI_MODE1_ADDRESS_REG, 0); 820 outl(PCI_MODE1_ADDRESS_REG, 0);
821 val = inl(PCI_MODE1_ADDRESS_REG); 821 val = inl(PCI_MODE1_ADDRESS_REG);
822 if ((val & 0x80fffffc) != 0) 822 if ((val & 0x80fffffc) != 0)
823 goto not1; 823 goto not1;
824 return (pci_mode); 824 return (pci_mode);
825not1: 825not1:
826 outl(PCI_MODE1_ADDRESS_REG, sav); 826 outl(PCI_MODE1_ADDRESS_REG, sav);
827 827
828 /* 828 /*
829 * This mode 2 check is quite weak (and known to give false 829 * This mode 2 check is quite weak (and known to give false
830 * positives on some Compaq machines). 830 * positives on some Compaq machines).
831 * However, this doesn't matter, because this is the 831 * However, this doesn't matter, because this is the
832 * last test, and simply no PCI devices will be found if 832 * last test, and simply no PCI devices will be found if
833 * this happens. 833 * this happens.
834 */ 834 */
835 outb(PCI_MODE2_ENABLE_REG, 0); 835 outb(PCI_MODE2_ENABLE_REG, 0);
836 outb(PCI_MODE2_FORWARD_REG, 0); 836 outb(PCI_MODE2_FORWARD_REG, 0);
837 if (inb(PCI_MODE2_ENABLE_REG) != 0 || 837 if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
838 inb(PCI_MODE2_FORWARD_REG) != 0) 838 inb(PCI_MODE2_FORWARD_REG) != 0)
839 goto not2; 839 goto not2;
840 return (pci_mode = 2); 840 return (pci_mode = 2);
841not2: 841not2:
842 842
843 return (pci_mode = 0); 843 return (pci_mode = 0);
844} 844}
845 845
846void 846void
847pci_device_foreach(pci_chipset_tag_t pc, int maxbus, 847pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
848 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context) 848 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
849{ 849{
850 pci_device_foreach_min(pc, 0, maxbus, func, context); 850 pci_device_foreach_min(pc, 0, maxbus, func, context);
851} 851}
852 852
853void 853void
854pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus, 854pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
855 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context) 855 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
856{ 856{
857 const struct pci_quirkdata *qd; 857 const struct pci_quirkdata *qd;
858 int bus, device, function, maxdevs, nfuncs; 858 int bus, device, function, maxdevs, nfuncs;
859 pcireg_t id, bhlcr; 859 pcireg_t id, bhlcr;
860 pcitag_t tag; 860 pcitag_t tag;
861 861
862 for (bus = minbus; bus <= maxbus; bus++) { 862 for (bus = minbus; bus <= maxbus; bus++) {
863 maxdevs = pci_bus_maxdevs(pc, bus); 863 maxdevs = pci_bus_maxdevs(pc, bus);
864 for (device = 0; device < maxdevs; device++) { 864 for (device = 0; device < maxdevs; device++) {
865 tag = pci_make_tag(pc, bus, device, 0); 865 tag = pci_make_tag(pc, bus, device, 0);
866 id = pci_conf_read(pc, tag, PCI_ID_REG); 866 id = pci_conf_read(pc, tag, PCI_ID_REG);
867 867
868 /* Invalid vendor ID value? */ 868 /* Invalid vendor ID value? */
869 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) 869 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
870 continue; 870 continue;
871 /* XXX Not invalid, but we've done this ~forever. */ 871 /* XXX Not invalid, but we've done this ~forever. */
872 if (PCI_VENDOR(id) == 0) 872 if (PCI_VENDOR(id) == 0)
873 continue; 873 continue;
874 874
875 qd = pci_lookup_quirkdata(PCI_VENDOR(id), 875 qd = pci_lookup_quirkdata(PCI_VENDOR(id),
876 PCI_PRODUCT(id)); 876 PCI_PRODUCT(id));
877 877
878 bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG); 878 bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
879 if (PCI_HDRTYPE_MULTIFN(bhlcr) || 879 if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
880 (qd != NULL && 880 (qd != NULL &&
881 (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)) 881 (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
882 nfuncs = 8; 882 nfuncs = 8;
883 else 883 else
884 nfuncs = 1; 884 nfuncs = 1;
885 885
886 for (function = 0; function < nfuncs; function++) { 886 for (function = 0; function < nfuncs; function++) {
887 tag = pci_make_tag(pc, bus, device, function); 887 tag = pci_make_tag(pc, bus, device, function);
888 id = pci_conf_read(pc, tag, PCI_ID_REG); 888 id = pci_conf_read(pc, tag, PCI_ID_REG);
889 889
890 /* Invalid vendor ID value? */ 890 /* Invalid vendor ID value? */
891 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) 891 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
892 continue; 892 continue;
893 /* 893 /*
894 * XXX Not invalid, but we've done this 894 * XXX Not invalid, but we've done this
895 * ~forever. 895 * ~forever.
896 */ 896 */
897 if (PCI_VENDOR(id) == 0) 897 if (PCI_VENDOR(id) == 0)
898 continue; 898 continue;
899 (*func)(pc, tag, context); 899 (*func)(pc, tag, context);
900 } 900 }
901 } 901 }
902 } 902 }
903} 903}
904 904
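
[Editorial note: a usage sketch under stated assumptions -- count_fn and the
surrounding fragment are hypothetical, but the callback signature is the one
pci_device_foreach() expects.]

	static void
	count_fn(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
	{
		int *np = ctx;

		(*np)++;	/* called once per live PCI function */
	}

	...
		int nfn = 0;

		/* visit every function on buses 0..maxbus */
		pci_device_foreach(pc, maxbus, count_fn, &nfn);
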
905void 905void
906pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus, 906pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
907 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx) 907 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
908{ 908{
909 struct pci_bridge_hook_arg bridge_hook; 909 struct pci_bridge_hook_arg bridge_hook;
910 910
911 bridge_hook.func = func; 911 bridge_hook.func = func;
912 bridge_hook.arg = ctx; 912 bridge_hook.arg = ctx;
913 913
914 pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook, 914 pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
915 &bridge_hook); 915 &bridge_hook);
916} 916}
917 917
918static void 918static void
919pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx) 919pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
920{ 920{
921 struct pci_bridge_hook_arg *bridge_hook = (void *)ctx; 921 struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
922 pcireg_t reg; 922 pcireg_t reg;
923 923
924 reg = pci_conf_read(pc, tag, PCI_CLASS_REG); 924 reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
925 if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE && 925 if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
926 (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI || 926 (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
927 PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) { 927 PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
928 (*bridge_hook->func)(pc, tag, bridge_hook->arg); 928 (*bridge_hook->func)(pc, tag, bridge_hook->arg);
929 } 929 }
930} 930}
931 931
932static const void * 932static const void *
933bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit) 933bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
934{ 934{
935 switch (bit) { 935 switch (bit) {
936 case PCI_OVERRIDE_CONF_READ: 936 case PCI_OVERRIDE_CONF_READ:
937 return ov->ov_conf_read; 937 return ov->ov_conf_read;
938 case PCI_OVERRIDE_CONF_WRITE: 938 case PCI_OVERRIDE_CONF_WRITE:
939 return ov->ov_conf_write; 939 return ov->ov_conf_write;
940 case PCI_OVERRIDE_INTR_MAP: 940 case PCI_OVERRIDE_INTR_MAP:
941 return ov->ov_intr_map; 941 return ov->ov_intr_map;
942 case PCI_OVERRIDE_INTR_STRING: 942 case PCI_OVERRIDE_INTR_STRING:
943 return ov->ov_intr_string; 943 return ov->ov_intr_string;
944 case PCI_OVERRIDE_INTR_EVCNT: 944 case PCI_OVERRIDE_INTR_EVCNT:
945 return ov->ov_intr_evcnt; 945 return ov->ov_intr_evcnt;
946 case PCI_OVERRIDE_INTR_ESTABLISH: 946 case PCI_OVERRIDE_INTR_ESTABLISH:
947 return ov->ov_intr_establish; 947 return ov->ov_intr_establish;
948 case PCI_OVERRIDE_INTR_DISESTABLISH: 948 case PCI_OVERRIDE_INTR_DISESTABLISH:
949 return ov->ov_intr_disestablish; 949 return ov->ov_intr_disestablish;
950 case PCI_OVERRIDE_MAKE_TAG: 950 case PCI_OVERRIDE_MAKE_TAG:
951 return ov->ov_make_tag; 951 return ov->ov_make_tag;
952 case PCI_OVERRIDE_DECOMPOSE_TAG: 952 case PCI_OVERRIDE_DECOMPOSE_TAG:
953 return ov->ov_decompose_tag; 953 return ov->ov_decompose_tag;
954 default: 954 default:
955 return NULL; 955 return NULL;
956 } 956 }
957} 957}
958 958
959void 959void
960pci_chipset_tag_destroy(pci_chipset_tag_t pc) 960pci_chipset_tag_destroy(pci_chipset_tag_t pc)
961{ 961{
962 kmem_free(pc, sizeof(struct pci_chipset_tag)); 962 kmem_free(pc, sizeof(struct pci_chipset_tag));
963} 963}
964 964
965int 965int
966pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present, 966pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
967 const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp) 967 const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
968{ 968{
969 uint64_t bit, bits, nbits; 969 uint64_t bit, bits, nbits;
970 pci_chipset_tag_t pc; 970 pci_chipset_tag_t pc;
971 const void *fp; 971 const void *fp;
972 972
973 if (ov == NULL || present == 0) 973 if (ov == NULL || present == 0)
974 return EINVAL; 974 return EINVAL;
975 975
976 pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP); 976 pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
977 pc->pc_super = opc; 977 pc->pc_super = opc;
978 978
979 for (bits = present; bits != 0; bits = nbits) { 979 for (bits = present; bits != 0; bits = nbits) {
980 nbits = bits & (bits - 1); 980 nbits = bits & (bits - 1);
981 bit = nbits ^ bits; 981 bit = nbits ^ bits;
982 if ((fp = bit_to_function_pointer(ov, bit)) == NULL) { 982 if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
983#ifdef DEBUG 983#ifdef DEBUG
984 printf("%s: missing bit %" PRIx64 "\n", __func__, bit); 984 printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
985#endif 985#endif
986 goto einval; 986 goto einval;
987 } 987 }
988 } 988 }
989 989
990 pc->pc_ov = ov; 990 pc->pc_ov = ov;
991 pc->pc_present = present; 991 pc->pc_present = present;
992 pc->pc_ctx = ctx; 992 pc->pc_ctx = ctx;
993 993
994 *pcp = pc; 994 *pcp = pc;
995 995
996 return 0; 996 return 0;
997einval: 997einval:
998 kmem_free(pc, sizeof(struct pci_chipset_tag)); 998 kmem_free(pc, sizeof(struct pci_chipset_tag));
999 return EINVAL; 999 return EINVAL;
1000} 1000}
1001 1001
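
[Editorial note: a worked example of the bit-scan loop in
pci_chipset_tag_create() above, added for clarity.]

	/*
	 * Each pass isolates the lowest set bit of "bits".  With
	 * present = 1010 (binary):
	 *
	 *	bits  = 1010
	 *	nbits = bits & (bits - 1) = 1000   (lowest bit cleared)
	 *	bit   = nbits ^ bits      = 0010   (the isolated bit)
	 *
	 * next pass: bits = 1000, nbits = 0000, bit = 1000; then done.
	 * bit_to_function_pointer() is thus consulted exactly once per
	 * bit set in "present", and any missing override fails the
	 * whole create with EINVAL.
	 */
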
1002static void 1002static void
1003x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b) 1003x86_genfb_set_mapreg(void *opaque, int index, int r, int g, int b)
1004{ 1004{
1005 outb(IO_VGA + VGA_DAC_ADDRW, index); 1005 outb(IO_VGA + VGA_DAC_ADDRW, index);
1006 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2); 1006 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)r >> 2);
1007 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2); 1007 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)g >> 2);
1008 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2); 1008 outb(IO_VGA + VGA_DAC_PALETTE, (uint8_t)b >> 2);
1009} 1009}
1010 1010
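
[Editorial note: the ">> 2" in x86_genfb_set_mapreg() above reflects the VGA
DAC taking 6-bit color components, so each 8-bit r/g/b value is truncated to
its top six bits before being written to the palette data register.]
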
1011static bool 1011static bool
1012x86_genfb_setmode(struct genfb_softc *sc, int newmode) 1012x86_genfb_setmode(struct genfb_softc *sc, int newmode)
1013{ 1013{
1014#if NGENFB > 0 1014#if NGENFB > 0
1015# if NACPICA > 0 && defined(VGA_POST) 1015# if NACPICA > 0 && defined(VGA_POST)
1016 static int curmode = WSDISPLAYIO_MODE_EMUL; 1016 static int curmode = WSDISPLAYIO_MODE_EMUL;
1017# endif 1017# endif
1018 1018
1019 switch (newmode) { 1019 switch (newmode) {
1020 case WSDISPLAYIO_MODE_EMUL: 1020 case WSDISPLAYIO_MODE_EMUL:
1021 x86_genfb_mtrr_init(sc->sc_fboffset, 
1022 sc->sc_height * sc->sc_stride); 
1023# if NACPICA > 0 && defined(VGA_POST) 1021# if NACPICA > 0 && defined(VGA_POST)
1024 if (curmode != newmode) { 1022 if (curmode != newmode) {
1025 if (vga_posth != NULL && acpi_md_vesa_modenum != 0) { 1023 if (vga_posth != NULL && acpi_md_vesa_modenum != 0) {
1026 vga_post_set_vbe(vga_posth, 1024 vga_post_set_vbe(vga_posth,
1027 acpi_md_vesa_modenum); 1025 acpi_md_vesa_modenum);
1028 } 1026 }
1029 } 1027 }
1030# endif 1028# endif
1031 break; 1029 break;
1032 } 1030 }
1033 1031
1034# if NACPICA > 0 && defined(VGA_POST) 1032# if NACPICA > 0 && defined(VGA_POST)
1035 curmode = newmode; 1033 curmode = newmode;
1036# endif 1034# endif
1037#endif 1035#endif
1038 return true; 1036 return true;
1039} 1037}
1040 1038
1041static bool 1039static bool
1042x86_genfb_suspend(device_t dev, const pmf_qual_t *qual) 1040x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
1043{ 1041{
1044 return true; 1042 return true;
1045} 1043}
1046 1044
1047static bool 1045static bool
1048x86_genfb_resume(device_t dev, const pmf_qual_t *qual) 1046x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
1049{ 1047{
1050#if NGENFB > 0 1048#if NGENFB > 0
1051 struct pci_genfb_softc *psc = device_private(dev); 1049 struct pci_genfb_softc *psc = device_private(dev);
1052 1050
1053#if NACPICA > 0 && defined(VGA_POST) 1051#if NACPICA > 0 && defined(VGA_POST)
1054 if (vga_posth != NULL && acpi_md_vbios_reset == 2) { 1052 if (vga_posth != NULL && acpi_md_vbios_reset == 2) {
1055 vga_post_call(vga_posth); 1053 vga_post_call(vga_posth);
1056 if (acpi_md_vesa_modenum != 0) 1054 if (acpi_md_vesa_modenum != 0)
1057 vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum); 1055 vga_post_set_vbe(vga_posth, acpi_md_vesa_modenum);
1058 } 1056 }
1059#endif 1057#endif
1060 genfb_restore_palette(&psc->sc_gen); 1058 genfb_restore_palette(&psc->sc_gen);
1061#endif 1059#endif
1062 1060
1063 return true; 1061 return true;
1064} 1062}
1065 1063
1066static void 1064static void
1067populate_fbinfo(device_t dev, prop_dictionary_t dict) 1065populate_fbinfo(device_t dev, prop_dictionary_t dict)
1068{ 1066{
1069#if NWSDISPLAY > 0 && NGENFB > 0 1067#if NWSDISPLAY > 0 && NGENFB > 0
1070 extern struct vcons_screen x86_genfb_console_screen; 1068 extern struct vcons_screen x86_genfb_console_screen;
1071 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri; 1069 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
1072#endif 1070#endif
1073 const void *fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER); 1071 const void *fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER);
1074 struct btinfo_framebuffer fbinfo; 1072 struct btinfo_framebuffer fbinfo;
1075 1073
1076 if (fbptr == NULL) 1074 if (fbptr == NULL)
1077 return; 1075 return;
1078 1076
1079 memcpy(&fbinfo, fbptr, sizeof(fbinfo)); 1077 memcpy(&fbinfo, fbptr, sizeof(fbinfo));
1080 1078
1081 if (fbinfo.physaddr != 0) { 1079 if (fbinfo.physaddr != 0) {
1082 prop_dictionary_set_uint32(dict, "width", fbinfo.width); 1080 prop_dictionary_set_uint32(dict, "width", fbinfo.width);
1083 prop_dictionary_set_uint32(dict, "height", fbinfo.height); 1081 prop_dictionary_set_uint32(dict, "height", fbinfo.height);
1084 prop_dictionary_set_uint8(dict, "depth", fbinfo.depth); 1082 prop_dictionary_set_uint8(dict, "depth", fbinfo.depth);
1085 prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride); 1083 prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride);
1086 1084
1087 prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr); 1085 prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr);
1088#if NWSDISPLAY > 0 && NGENFB > 0 1086#if NWSDISPLAY > 0 && NGENFB > 0
1089 if (ri->ri_bits != NULL) { 1087 if (ri->ri_bits != NULL) {
1090 prop_dictionary_set_uint64(dict, "virtual_address", 1088 prop_dictionary_set_uint64(dict, "virtual_address",
1091 ri->ri_hwbits != NULL ? 1089 ri->ri_hwbits != NULL ?
1092 (vaddr_t)ri->ri_hworigbits : 1090 (vaddr_t)ri->ri_hworigbits :
1093 (vaddr_t)ri->ri_origbits); 1091 (vaddr_t)ri->ri_origbits);
1094 } 1092 }
1095#endif 1093#endif
1096 } 1094 }
1097#if notyet 1095#if notyet
1098 prop_dictionary_set_bool(dict, "splash",  1096 prop_dictionary_set_bool(dict, "splash",
1099 (fbinfo.flags & BI_FB_SPLASH) != 0); 1097 (fbinfo.flags & BI_FB_SPLASH) != 0);
1100#endif 1098#endif
1101 if (fbinfo.depth == 8) { 1099 if (fbinfo.depth == 8) {
1102 gfb_cb.gcc_cookie = NULL; 1100 gfb_cb.gcc_cookie = NULL;
1103 gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg; 1101 gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg;
1104 prop_dictionary_set_uint64(dict, "cmap_callback", 1102 prop_dictionary_set_uint64(dict, "cmap_callback",
1105 (uint64_t)(uintptr_t)&gfb_cb); 1103 (uint64_t)(uintptr_t)&gfb_cb);
1106 } 1104 }
1107 if (fbinfo.physaddr != 0) { 1105 if (fbinfo.physaddr != 0) {
1108 mode_cb.gmc_setmode = x86_genfb_setmode; 1106 mode_cb.gmc_setmode = x86_genfb_setmode;
1109 prop_dictionary_set_uint64(dict, "mode_callback", 1107 prop_dictionary_set_uint64(dict, "mode_callback",
1110 (uint64_t)(uintptr_t)&mode_cb); 1108 (uint64_t)(uintptr_t)&mode_cb);
1111 } 1109 }
1112 1110
1113#if NWSDISPLAY > 0 && NGENFB > 0 1111#if NWSDISPLAY > 0 && NGENFB > 0
1114 if (device_is_a(dev, "genfb")) { 1112 if (device_is_a(dev, "genfb")) {
1115 prop_dictionary_set_bool(dict, "enable_shadowfb", 1113 prop_dictionary_set_bool(dict, "enable_shadowfb",
1116 ri->ri_hwbits != NULL); 1114 ri->ri_hwbits != NULL);
1117 1115
1118 x86_genfb_set_console_dev(dev); 1116 x86_genfb_set_console_dev(dev);
1119#ifdef DDB 1117#ifdef DDB
1120 db_trap_callback = x86_genfb_ddb_trap_callback; 1118 db_trap_callback = x86_genfb_ddb_trap_callback;
1121#endif 1119#endif
1122 } 1120 }
1123#endif 1121#endif
1124} 1122}
1125 1123
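
[Editorial note: a summary of the effect of populate_fbinfo() above. Given a
valid bootinfo framebuffer entry, the device property dictionary ends up
carrying "width", "height", "depth", "linebytes" and "address" (plus
"virtual_address" when the genfb console has already mapped the
framebuffer), while "cmap_callback" and "mode_callback" point genfb at the
palette and mode-switch helpers defined earlier in this file.]
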
1126device_t 1124device_t
1127device_pci_register(device_t dev, void *aux) 1125device_pci_register(device_t dev, void *aux)
1128{ 1126{
1129 device_t parent = device_parent(dev); 1127 device_t parent = device_parent(dev);
1130 1128
1131 device_pci_props_register(dev, aux); 1129 device_pci_props_register(dev, aux);
1132 1130
1133 /* 1131 /*
1134 * Handle network interfaces here, the attachment information is 1132 * Handle network interfaces here, the attachment information is
1135 * not available driver-independently later. 1133 * not available driver-independently later.
1136 * 1134 *
1137 * For disks, there is nothing useful available at attach time. 1135 * For disks, there is nothing useful available at attach time.
1138 */ 1136 */
1139 if (device_class(dev) == DV_IFNET) { 1137 if (device_class(dev) == DV_IFNET) {
1140 struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF); 1138 struct btinfo_netif *bin = lookup_bootinfo(BTINFO_NETIF);
1141 if (bin == NULL) 1139 if (bin == NULL)
1142 return NULL; 1140 return NULL;
1143 1141
1144 /* 1142 /*
1145 * We don't check the driver name against the device name 1143 * We don't check the driver name against the device name
1146 * passed by the boot ROM. The ROM should stay usable if 1144 * passed by the boot ROM. The ROM should stay usable if
1147 * the driver becomes obsolete. The physical attachment 1145 * the driver becomes obsolete. The physical attachment
1148 * information (checked below) must be sufficient to 1146 * information (checked below) must be sufficient to
1149 * identify the device. 1147 * identify the device.
1150 */ 1148 */
1151 if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) { 1149 if (bin->bus == BI_BUS_PCI && device_is_a(parent, "pci")) {
1152 struct pci_attach_args *paa = aux; 1150 struct pci_attach_args *paa = aux;
1153 int b, d, f; 1151 int b, d, f;
1154 1152
1155 /* 1153 /*
1156 * Calculate BIOS representation of: 1154 * Calculate BIOS representation of:
1157 * 1155 *
1158 * <bus,device,function> 1156 * <bus,device,function>
1159 * 1157 *
1160 * and compare. 1158 * and compare.
1161 */ 1159 */
1162 pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f); 1160 pci_decompose_tag(paa->pa_pc, paa->pa_tag, &b, &d, &f);
1163 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1161 if (bin->addr.tag == ((b << 8) | (d << 3) | f))
1164 return dev; 1162 return dev;
1165 1163
1166#ifndef XENPV 1164#ifndef XENPV
1167 /* 1165 /*
1168 * efiboot reports parent ppb bus/device/function. 1166 * efiboot reports parent ppb bus/device/function.
1169 */ 1167 */
1170 device_t grand = device_parent(parent); 1168 device_t grand = device_parent(parent);
1171 if (efi_probe() && grand && device_is_a(grand, "ppb")) { 1169 if (efi_probe() && grand && device_is_a(grand, "ppb")) {
1172 struct ppb_softc *ppb_sc = device_private(grand); 1170 struct ppb_softc *ppb_sc = device_private(grand);
1173 pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag, 1171 pci_decompose_tag(ppb_sc->sc_pc, ppb_sc->sc_tag,
1174 &b, &d, &f); 1172 &b, &d, &f);
1175 if (bin->addr.tag == ((b << 8) | (d << 3) | f)) 1173 if (bin->addr.tag == ((b << 8) | (d << 3) | f))
1176 return dev; 1174 return dev;
1177 } 1175 }
1178#endif 1176#endif
1179 } 1177 }
1180 } 1178 }
1181 if (parent && device_is_a(parent, "pci") && 1179 if (parent && device_is_a(parent, "pci") &&
1182 x86_found_console == false) { 1180 x86_found_console == false) {
1183 struct pci_attach_args *pa = aux; 1181 struct pci_attach_args *pa = aux;
1184 1182
1185 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) { 1183 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_DISPLAY) {
1186 prop_dictionary_t dict = device_properties(dev); 1184 prop_dictionary_t dict = device_properties(dev);
1187 /* 1185 /*
1188 * framebuffer drivers other than genfb can work 1186 * framebuffer drivers other than genfb can work
1189 * without the address property 1187 * without the address property
1190 */ 1188 */
1191 populate_fbinfo(dev, dict); 1189 populate_fbinfo(dev, dict);
1192 1190
1193#if 1 && NWSDISPLAY > 0 && NGENFB > 0 1191#if 1 && NWSDISPLAY > 0 && NGENFB > 0
1194 /* XXX */ 1192 /* XXX */
1195 if (device_is_a(dev, "genfb")) { 1193 if (device_is_a(dev, "genfb")) {
1196 prop_dictionary_set_bool(dict, "is_console", 1194 prop_dictionary_set_bool(dict, "is_console",
1197 genfb_is_console()); 1195 genfb_is_console());
1198 } else 1196 } else
1199#endif 1197#endif
1200 prop_dictionary_set_bool(dict, "is_console", true); 1198 prop_dictionary_set_bool(dict, "is_console", true);
1201 1199
1202 prop_dictionary_set_bool(dict, "clear-screen", false); 1200 prop_dictionary_set_bool(dict, "clear-screen", false);
1203#if NWSDISPLAY > 0 && NGENFB > 0 1201#if NWSDISPLAY > 0 && NGENFB > 0
1204 extern struct vcons_screen x86_genfb_console_screen; 1202 extern struct vcons_screen x86_genfb_console_screen;
1205 prop_dictionary_set_uint16(dict, "cursor-row", 1203 prop_dictionary_set_uint16(dict, "cursor-row",
1206 x86_genfb_console_screen.scr_ri.ri_crow); 1204 x86_genfb_console_screen.scr_ri.ri_crow);
1207#endif 1205#endif
1208#if notyet 1206#if notyet
1209 prop_dictionary_set_bool(dict, "splash", 1207 prop_dictionary_set_bool(dict, "splash",
1210 (fbinfo->flags & BI_FB_SPLASH) != 0); 1208 (fbinfo->flags & BI_FB_SPLASH) != 0);
1211#endif 1209#endif
1212 pmf_cb.gpc_suspend = x86_genfb_suspend; 1210 pmf_cb.gpc_suspend = x86_genfb_suspend;
1213 pmf_cb.gpc_resume = x86_genfb_resume; 1211 pmf_cb.gpc_resume = x86_genfb_resume;
1214 prop_dictionary_set_uint64(dict, 1212 prop_dictionary_set_uint64(dict,
1215 "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb); 1213 "pmf_callback", (uint64_t)(uintptr_t)&pmf_cb);
1216#ifdef VGA_POST 1214#ifdef VGA_POST
1217 vga_posth = vga_post_init(pa->pa_bus, pa->pa_device, 1215 vga_posth = vga_post_init(pa->pa_bus, pa->pa_device,
1218 pa->pa_function); 1216 pa->pa_function);
1219#endif 1217#endif
1220 x86_found_console = true; 1218 x86_found_console = true;
1221 return NULL; 1219 return NULL;
1222 } 1220 }
1223 } 1221 }
1224 return NULL; 1222 return NULL;
1225} 1223}
1226 1224
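
[Editorial note: a worked example of the BIOS tag comparison in
device_pci_register() above. The tag packs bus into bits 15-8, device into
bits 7-3 and function into bits 2-0, so for bus 2, device 3, function 1:

	(2 << 8) | (3 << 3) | 1 == 0x0219

which is the value bin->addr.tag is matched against.]
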
1227#ifndef PUC_CNBUS 1225#ifndef PUC_CNBUS
1228#define PUC_CNBUS 0 1226#define PUC_CNBUS 0
1229#endif 1227#endif
1230 1228
1231#if NCOM > 0 1229#if NCOM > 0
1232int 1230int
1233cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa) 1231cpu_puc_cnprobe(struct consdev *cn, struct pci_attach_args *pa)
1234{ 1232{
1235 pci_mode_detect(); 1233 pci_mode_detect();
1236 pa->pa_iot = x86_bus_space_io; 1234 pa->pa_iot = x86_bus_space_io;
1237 pa->pa_memt = x86_bus_space_mem; 1235 pa->pa_memt = x86_bus_space_mem;
1238 pa->pa_pc = 0; 1236 pa->pa_pc = 0;
1239 pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1, 1237 pa->pa_tag = pci_make_tag(0, PUC_CNBUS, pci_bus_maxdevs(NULL, 0) - 1,
1240 0); 1238 0);
1241 1239
1242 return 0; 1240 return 0;
1243} 1241}
1244#endif 1242#endif
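
[Editorial note: cpu_puc_cnprobe() above fabricates just enough of a
pci_attach_args for the puc(4) console before autoconfiguration has run: the
I/O and memory bus space tags, a null chipset tag, and a tag for the highest
device number on bus PUC_CNBUS. pci_mode_detect() is called first so that
configuration-space accesses work at all.]
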

cvs diff -r1.15 -r1.16 src/sys/arch/x86/x86/genfb_machdep.c

--- src/sys/arch/x86/x86/genfb_machdep.c 2019/11/30 05:28:28 1.15
+++ src/sys/arch/x86/x86/genfb_machdep.c 2021/01/28 01:57:31 1.16
@@ -1,239 +1,200 @@ @@ -1,239 +1,200 @@
1/* $NetBSD: genfb_machdep.c,v 1.15 2019/11/30 05:28:28 nonaka Exp $ */ 1/* $NetBSD: genfb_machdep.c,v 1.16 2021/01/28 01:57:31 jmcneill Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009 Jared D. McNeill <jmcneill@invisible.ca> 4 * Copyright (c) 2009 Jared D. McNeill <jmcneill@invisible.ca>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * Early attach support for raster consoles 30 * Early attach support for raster consoles
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: genfb_machdep.c,v 1.15 2019/11/30 05:28:28 nonaka Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: genfb_machdep.c,v 1.16 2021/01/28 01:57:31 jmcneill Exp $");
35 
36#include "opt_mtrr.h" 
37 35
38#include <sys/param.h> 36#include <sys/param.h>
39#include <sys/conf.h> 37#include <sys/conf.h>
40#include <sys/device.h> 38#include <sys/device.h>
41#include <sys/ioctl.h> 39#include <sys/ioctl.h>
42#include <sys/kernel.h> 40#include <sys/kernel.h>
43#include <sys/kmem.h> 41#include <sys/kmem.h>
44#include <sys/lwp.h> 42#include <sys/lwp.h>
45 43
46#include <sys/bus.h> 44#include <sys/bus.h>
47#include <machine/bootinfo.h> 45#include <machine/bootinfo.h>
48#include <machine/mtrr.h> 
49 46
50#include <dev/wscons/wsconsio.h> 47#include <dev/wscons/wsconsio.h>
51#include <dev/wscons/wsdisplayvar.h> 48#include <dev/wscons/wsdisplayvar.h>
52#include <dev/rasops/rasops.h> 49#include <dev/rasops/rasops.h>
53#include <dev/wsfont/wsfont.h> 50#include <dev/wsfont/wsfont.h>
54#include <dev/wscons/wsdisplay_vconsvar.h> 51#include <dev/wscons/wsdisplay_vconsvar.h>
55 52
56#include <dev/wsfb/genfbvar.h> 53#include <dev/wsfb/genfbvar.h>
57#include <arch/x86/include/genfb_machdep.h> 54#include <arch/x86/include/genfb_machdep.h>
58 55
59#include "wsdisplay.h" 56#include "wsdisplay.h"
60#include "genfb.h" 57#include "genfb.h"
61#include "acpica.h" 58#include "acpica.h"
62 59
63#if NWSDISPLAY > 0 && NGENFB > 0 60#if NWSDISPLAY > 0 && NGENFB > 0
64struct vcons_screen x86_genfb_console_screen; 61struct vcons_screen x86_genfb_console_screen;
65bool x86_genfb_use_shadowfb = true; 62bool x86_genfb_use_shadowfb = true;
66 63
67#if NACPICA > 0 64#if NACPICA > 0
68extern int acpi_md_vesa_modenum; 65extern int acpi_md_vesa_modenum;
69#endif 66#endif
70 67
71static device_t x86_genfb_console_dev = NULL; 68static device_t x86_genfb_console_dev = NULL;
72 69
73static struct wsscreen_descr x86_genfb_stdscreen = { 70static struct wsscreen_descr x86_genfb_stdscreen = {
74 "std", 71 "std",
75 0, 0, 72 0, 0,
76 0, 73 0,
77 0, 0, 74 0, 0,
78 0, 75 0,
79 NULL 76 NULL
80}; 77};
81 78
82void 79void
83x86_genfb_set_console_dev(device_t dev) 80x86_genfb_set_console_dev(device_t dev)
84{ 81{
85 KASSERT(x86_genfb_console_dev == NULL); 82 KASSERT(x86_genfb_console_dev == NULL);
86 x86_genfb_console_dev = dev; 83 x86_genfb_console_dev = dev;
87} 84}
88 85
89void 86void
90x86_genfb_ddb_trap_callback(int where) 87x86_genfb_ddb_trap_callback(int where)
91{ 88{
92 if (x86_genfb_console_dev == NULL) 89 if (x86_genfb_console_dev == NULL)
93 return; 90 return;
94 91
95 if (where) { 92 if (where) {
96 genfb_enable_polling(x86_genfb_console_dev); 93 genfb_enable_polling(x86_genfb_console_dev);
97 } else { 94 } else {
98 genfb_disable_polling(x86_genfb_console_dev); 95 genfb_disable_polling(x86_genfb_console_dev);
99 } 96 }
100} 97}
101 98
102void 
103x86_genfb_mtrr_init(uint64_t physaddr, uint32_t size) 
104{ 
105#if notyet 
106#ifdef MTRR 
107 struct mtrr mtrr; 
108 int error, n; 
109 
110 if (mtrr_funcs == NULL) { 
111 aprint_debug("%s: no mtrr funcs\n", __func__); 
112 return; 
113 } 
114 
115 mtrr.base = physaddr; 
116 mtrr.len = size; 
117 mtrr.type = MTRR_TYPE_WC; 
118 mtrr.flags = MTRR_VALID; 
119 mtrr.owner = 0; 
120 
121 aprint_debug("%s: 0x%" PRIx64 "-0x%" PRIx64 "\n", __func__, 
122 mtrr.base, mtrr.base + mtrr.len - 1); 
123 
124 n = 1; 
125 KERNEL_LOCK(1, NULL); 
126 error = mtrr_set(&mtrr, &n, curlwp->l_proc, MTRR_GETSET_KERNEL); 
127 if (n != 0) 
128 mtrr_commit(); 
129 KERNEL_UNLOCK_ONE(NULL); 
130 
131 aprint_debug("%s: mtrr_set returned %d\n", __func__, error); 
132#else 
133 aprint_debug("%s: kernel lacks MTRR option\n", __func__); 
134#endif 
135#endif 
136} 
137 
138int 99int
139x86_genfb_init(void) 100x86_genfb_init(void)
140{ 101{
141 static int inited, attached; 102 static int inited, attached;
142 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri; 103 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
143 const struct btinfo_framebuffer *fbinfo; 104 const struct btinfo_framebuffer *fbinfo;
144 bus_space_tag_t t = x86_bus_space_mem; 105 bus_space_tag_t t = x86_bus_space_mem;
145 bus_space_handle_t h; 106 bus_space_handle_t h;
146 void *bits; 107 void *bits;
147 int err; 108 int err;
148 109
149 if (inited) 110 if (inited)
150 return attached; 111 return attached;
151 inited = 1; 112 inited = 1;
152 113
153 memset(&x86_genfb_console_screen, 0, sizeof(x86_genfb_console_screen)); 114 memset(&x86_genfb_console_screen, 0, sizeof(x86_genfb_console_screen));
154 115
155 fbinfo = lookup_bootinfo(BTINFO_FRAMEBUFFER); 116 fbinfo = lookup_bootinfo(BTINFO_FRAMEBUFFER);
156 if (fbinfo == NULL || fbinfo->physaddr == 0) 117 if (fbinfo == NULL || fbinfo->physaddr == 0)
157 return 0; 118 return 0;
158 119
159 err = _x86_memio_map(t, (bus_addr_t)fbinfo->physaddr, 120 err = _x86_memio_map(t, (bus_addr_t)fbinfo->physaddr,
160 fbinfo->height * fbinfo->stride, 121 fbinfo->height * fbinfo->stride,
161 BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &h); 122 BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE, &h);
162 if (err) { 123 if (err) {
163 aprint_error("x86_genfb_cnattach: couldn't map framebuffer\n"); 124 aprint_error("x86_genfb_cnattach: couldn't map framebuffer\n");
164 return 0; 125 return 0;
165 } 126 }
166 127
167 bits = bus_space_vaddr(t, h); 128 bits = bus_space_vaddr(t, h);
168 if (bits == NULL) { 129 if (bits == NULL) {
169 aprint_error("x86_genfb_cnattach: couldn't get fb vaddr\n"); 130 aprint_error("x86_genfb_cnattach: couldn't get fb vaddr\n");
170 return 0; 131 return 0;
171 } 132 }
172 133
173#if NACPICA > 0 134#if NACPICA > 0
174 acpi_md_vesa_modenum = fbinfo->vbemode; 135 acpi_md_vesa_modenum = fbinfo->vbemode;
175#endif 136#endif
176 137
177 wsfont_init(); 138 wsfont_init();
178 139
179 ri->ri_width = fbinfo->width; 140 ri->ri_width = fbinfo->width;
180 ri->ri_height = fbinfo->height; 141 ri->ri_height = fbinfo->height;
181 ri->ri_depth = fbinfo->depth; 142 ri->ri_depth = fbinfo->depth;
182 ri->ri_stride = fbinfo->stride; 143 ri->ri_stride = fbinfo->stride;
183 if (x86_genfb_use_shadowfb && lookup_bootinfo(BTINFO_EFI) != NULL) { 144 if (x86_genfb_use_shadowfb && lookup_bootinfo(BTINFO_EFI) != NULL) {
184 /* XXX The allocated memory is never released... */ 145 /* XXX The allocated memory is never released... */
185 ri->ri_bits = kmem_alloc(ri->ri_stride * ri->ri_height, 146 ri->ri_bits = kmem_alloc(ri->ri_stride * ri->ri_height,
186 KM_SLEEP); 147 KM_SLEEP);
187 ri->ri_hwbits = bits; 148 ri->ri_hwbits = bits;
188 } else 149 } else
189 ri->ri_bits = bits; 150 ri->ri_bits = bits;
190 ri->ri_flg = RI_CENTER | RI_FULLCLEAR | RI_CLEAR; 151 ri->ri_flg = RI_CENTER | RI_FULLCLEAR | RI_CLEAR;
191 rasops_init(ri, ri->ri_height / 8, ri->ri_width / 8); 152 rasops_init(ri, ri->ri_height / 8, ri->ri_width / 8);
192 ri->ri_caps = WSSCREEN_WSCOLORS; 153 ri->ri_caps = WSSCREEN_WSCOLORS;
193 rasops_reconfig(ri, ri->ri_height / ri->ri_font->fontheight, 154 rasops_reconfig(ri, ri->ri_height / ri->ri_font->fontheight,
194 ri->ri_width / ri->ri_font->fontwidth); 155 ri->ri_width / ri->ri_font->fontwidth);
195 156
196 x86_genfb_stdscreen.nrows = ri->ri_rows; 157 x86_genfb_stdscreen.nrows = ri->ri_rows;
197 x86_genfb_stdscreen.ncols = ri->ri_cols; 158 x86_genfb_stdscreen.ncols = ri->ri_cols;
198 x86_genfb_stdscreen.textops = &ri->ri_ops; 159 x86_genfb_stdscreen.textops = &ri->ri_ops;
199 x86_genfb_stdscreen.capabilities = ri->ri_caps; 160 x86_genfb_stdscreen.capabilities = ri->ri_caps;
200 161
201 attached = 1; 162 attached = 1;
202 return 1; 163 return 1;
203} 164}
204 165
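
[Editorial note: the shadow framebuffer allocated in x86_genfb_init() above
is a performance measure, assuming the usual rationale: the EFI framebuffer
is mapped prefetchable/write-combining, which makes CPU reads from it very
slow, so rasops keeps its working copy in ordinary memory (ri_bits) and
mirrors updates to the hardware buffer through ri_hwbits.]
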
205int 166int
206x86_genfb_cnattach(void) 167x86_genfb_cnattach(void)
207{ 168{
208 static int ncalls = 0; 169 static int ncalls = 0;
209 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri; 170 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
210 long defattr; 171 long defattr;
211 172
212 /* XXX jmcneill 173 /* XXX jmcneill
213 * Defer console initialization until UVM is initialized 174 * Defer console initialization until UVM is initialized
214 */ 175 */
215 ++ncalls; 176 ++ncalls;
216 if (ncalls < 3) 177 if (ncalls < 3)
217 return -1; 178 return -1;
218 179
219 if (!x86_genfb_init()) 180 if (!x86_genfb_init())
220 return 0; 181 return 0;
221 182
222 ri->ri_ops.allocattr(ri, 0, 0, 0, &defattr); 183 ri->ri_ops.allocattr(ri, 0, 0, 0, &defattr);
223 wsdisplay_preattach(&x86_genfb_stdscreen, ri, 0, 0, defattr); 184 wsdisplay_preattach(&x86_genfb_stdscreen, ri, 0, 0, defattr);
224 185
225 return 1; 186 return 1;
226} 187}
227#else /* NWSDISPLAY > 0 && NGENFB > 0 */ 188#else /* NWSDISPLAY > 0 && NGENFB > 0 */
228int 189int
229x86_genfb_init(void) 190x86_genfb_init(void)
230{ 191{
231 return 0; 192 return 0;
232} 193}
233 194
234int 195int
235x86_genfb_cnattach(void) 196x86_genfb_cnattach(void)
236{ 197{
237 return 0; 198 return 0;
238} 199}
239#endif 200#endif

cvs diff -r1.12 -r1.13 src/sys/arch/x86/x86/hyperv.c

--- src/sys/arch/x86/x86/hyperv.c 2020/10/12 12:11:03 1.12
+++ src/sys/arch/x86/x86/hyperv.c 2021/01/28 01:57:31 1.13
@@ -1,1197 +1,1189 @@ @@ -1,1197 +1,1189 @@
1/* $NetBSD: hyperv.c,v 1.12 2020/10/12 12:11:03 ryoon Exp $ */ 1/* $NetBSD: hyperv.c,v 1.13 2021/01/28 01:57:31 jmcneill Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp. 4 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
5 * Copyright (c) 2012 NetApp Inc. 5 * Copyright (c) 2012 NetApp Inc.
6 * Copyright (c) 2012 Citrix Inc. 6 * Copyright (c) 2012 Citrix Inc.
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following 13 * notice unmodified, this list of conditions, and the following
14 * disclaimer. 14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31/** 31/**
32 * Implements low-level interactions with Hyper-V/Azure 32 * Implements low-level interactions with Hyper-V/Azure
33 */ 33 */
34#include <sys/cdefs.h> 34#include <sys/cdefs.h>
35#ifdef __KERNEL_RCSID 35#ifdef __KERNEL_RCSID
36__KERNEL_RCSID(0, "$NetBSD: hyperv.c,v 1.12 2020/10/12 12:11:03 ryoon Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: hyperv.c,v 1.13 2021/01/28 01:57:31 jmcneill Exp $");
37#endif 37#endif
38#ifdef __FBSDID 38#ifdef __FBSDID
39__FBSDID("$FreeBSD: head/sys/dev/hyperv/vmbus/hyperv.c 331757 2018-03-30 02:25:12Z emaste $"); 39__FBSDID("$FreeBSD: head/sys/dev/hyperv/vmbus/hyperv.c 331757 2018-03-30 02:25:12Z emaste $");
40#endif 40#endif
41 41
42#ifdef _KERNEL_OPT 42#ifdef _KERNEL_OPT
43#include "lapic.h" 43#include "lapic.h"
44#include "genfb.h" 44#include "genfb.h"
45#include "opt_ddb.h" 45#include "opt_ddb.h"
46#include "vmbus.h" 46#include "vmbus.h"
47#include "wsdisplay.h" 47#include "wsdisplay.h"
48#endif 48#endif
49 49
50#include <sys/param.h> 50#include <sys/param.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52#include <sys/kernel.h> 52#include <sys/kernel.h>
53#include <sys/device.h> 53#include <sys/device.h>
54#include <sys/bus.h> 54#include <sys/bus.h>
55#include <sys/cpu.h> 55#include <sys/cpu.h>
56#include <sys/kmem.h> 56#include <sys/kmem.h>
57#include <sys/module.h> 57#include <sys/module.h>
58#include <sys/pmf.h> 58#include <sys/pmf.h>
59#include <sys/sysctl.h> 59#include <sys/sysctl.h>
60#include <sys/timetc.h> 60#include <sys/timetc.h>
61 61
62#include <uvm/uvm_extern.h> 62#include <uvm/uvm_extern.h>
63 63
64#include <machine/autoconf.h> 64#include <machine/autoconf.h>
65#include <machine/bootinfo.h> 65#include <machine/bootinfo.h>
66#include <machine/cpufunc.h> 66#include <machine/cpufunc.h>
67#include <machine/cputypes.h> 67#include <machine/cputypes.h>
68#include <machine/cpuvar.h> 68#include <machine/cpuvar.h>
69#include <machine/cpu_counter.h> 69#include <machine/cpu_counter.h>
70#include <x86/apicvar.h> 70#include <x86/apicvar.h>
71#include <x86/efi.h> 71#include <x86/efi.h>
72 72
73#include <dev/wsfb/genfbvar.h> 73#include <dev/wsfb/genfbvar.h>
74#include <x86/genfb_machdep.h> 74#include <x86/genfb_machdep.h>
75 75
76#include <x86/x86/hypervreg.h> 76#include <x86/x86/hypervreg.h>
77#include <x86/x86/hypervvar.h> 77#include <x86/x86/hypervvar.h>
78#include <dev/hyperv/vmbusvar.h> 78#include <dev/hyperv/vmbusvar.h>
79#include <dev/hyperv/genfb_vmbusvar.h> 79#include <dev/hyperv/genfb_vmbusvar.h>
80 80
81#ifdef DDB 81#ifdef DDB
82#include <machine/db_machdep.h> 82#include <machine/db_machdep.h>
83#include <ddb/db_sym.h> 83#include <ddb/db_sym.h>
84#include <ddb/db_extern.h> 84#include <ddb/db_extern.h>
85#endif 85#endif
86 86
87struct hyperv_softc { 87struct hyperv_softc {
88 device_t sc_dev; 88 device_t sc_dev;
89 89
90 struct sysctllog *sc_log; 90 struct sysctllog *sc_log;
91}; 91};
92 92
93struct hyperv_hypercall_ctx { 93struct hyperv_hypercall_ctx {
94 void *hc_addr; 94 void *hc_addr;
95 paddr_t hc_paddr; 95 paddr_t hc_paddr;
96}; 96};
97 97
98struct hyperv_percpu_data { 98struct hyperv_percpu_data {
99 int pd_idtvec; 99 int pd_idtvec;
100}; 100};
101 101
102static struct hyperv_hypercall_ctx hyperv_hypercall_ctx; 102static struct hyperv_hypercall_ctx hyperv_hypercall_ctx;
103 103
104static char hyperv_hypercall_page[PAGE_SIZE] 104static char hyperv_hypercall_page[PAGE_SIZE]
105 __section(".text") __aligned(PAGE_SIZE) = { 0xcc }; 105 __section(".text") __aligned(PAGE_SIZE) = { 0xcc };
106 106
107static u_int hyperv_get_timecount(struct timecounter *); 107static u_int hyperv_get_timecount(struct timecounter *);
108 108
109static u_int hyperv_ver_major; 109static u_int hyperv_ver_major;
110 110
111static u_int hyperv_features; /* CPUID_HV_MSR_ */ 111static u_int hyperv_features; /* CPUID_HV_MSR_ */
112static u_int hyperv_recommends; 112static u_int hyperv_recommends;
113 113
114static u_int hyperv_pm_features; 114static u_int hyperv_pm_features;
115static u_int hyperv_features3; 115static u_int hyperv_features3;
116 116
117static char hyperv_version_str[64]; 117static char hyperv_version_str[64];
118static char hyperv_features_str[256]; 118static char hyperv_features_str[256];
119static char hyperv_pm_features_str[256]; 119static char hyperv_pm_features_str[256];
120static char hyperv_features3_str[256]; 120static char hyperv_features3_str[256];
121 121
122uint32_t hyperv_vcpuid[MAXCPUS]; 122uint32_t hyperv_vcpuid[MAXCPUS];
123 123
124static struct timecounter hyperv_timecounter = { 124static struct timecounter hyperv_timecounter = {
125 .tc_get_timecount = hyperv_get_timecount, 125 .tc_get_timecount = hyperv_get_timecount,
126 .tc_counter_mask = 0xffffffff, 126 .tc_counter_mask = 0xffffffff,
127 .tc_frequency = HYPERV_TIMER_FREQ, 127 .tc_frequency = HYPERV_TIMER_FREQ,
128 .tc_name = "Hyper-V", 128 .tc_name = "Hyper-V",
129 .tc_quality = 2000, 129 .tc_quality = 2000,
130}; 130};
131 131
132static void hyperv_proc_dummy(void *, struct cpu_info *); 132static void hyperv_proc_dummy(void *, struct cpu_info *);
133 133
134struct hyperv_proc { 134struct hyperv_proc {
135 hyperv_proc_t func; 135 hyperv_proc_t func;
136 void *arg; 136 void *arg;
137}; 137};
138 138
139static struct hyperv_proc hyperv_event_proc = { 139static struct hyperv_proc hyperv_event_proc = {
140 .func = hyperv_proc_dummy, 140 .func = hyperv_proc_dummy,
141}; 141};
142 142
143static struct hyperv_proc hyperv_message_proc = { 143static struct hyperv_proc hyperv_message_proc = {
144 .func = hyperv_proc_dummy, 144 .func = hyperv_proc_dummy,
145}; 145};
146 146
147static int hyperv_match(device_t, cfdata_t, void *); 147static int hyperv_match(device_t, cfdata_t, void *);
148static void hyperv_attach(device_t, device_t, void *); 148static void hyperv_attach(device_t, device_t, void *);
149static int hyperv_detach(device_t, int); 149static int hyperv_detach(device_t, int);
150 150
151CFATTACH_DECL_NEW(hyperv, sizeof(struct hyperv_softc), 151CFATTACH_DECL_NEW(hyperv, sizeof(struct hyperv_softc),
152 hyperv_match, hyperv_attach, hyperv_detach, NULL); 152 hyperv_match, hyperv_attach, hyperv_detach, NULL);
153 153
154static void hyperv_hypercall_memfree(void); 154static void hyperv_hypercall_memfree(void);
155static bool hyperv_init_hypercall(void); 155static bool hyperv_init_hypercall(void);
156static int hyperv_sysctl_setup_root(struct hyperv_softc *); 156static int hyperv_sysctl_setup_root(struct hyperv_softc *);
157 157
158static u_int 158static u_int
159hyperv_get_timecount(struct timecounter *tc) 159hyperv_get_timecount(struct timecounter *tc)
160{ 160{
161 161
162 return (u_int)rdmsr(MSR_HV_TIME_REF_COUNT); 162 return (u_int)rdmsr(MSR_HV_TIME_REF_COUNT);
163} 163}
164 164
165static uint64_t 165static uint64_t
166hyperv_tc64_rdmsr(void) 166hyperv_tc64_rdmsr(void)
167{ 167{
168 168
169 return rdmsr(MSR_HV_TIME_REF_COUNT); 169 return rdmsr(MSR_HV_TIME_REF_COUNT);
170} 170}
171 171
172#ifdef __amd64__ 172#ifdef __amd64__
173/* 173/*
174 * Reference TSC 174 * Reference TSC
175 */ 175 */
176struct hyperv_ref_tsc { 176struct hyperv_ref_tsc {
177 struct hyperv_reftsc *tsc_ref; 177 struct hyperv_reftsc *tsc_ref;
178 paddr_t tsc_paddr; 178 paddr_t tsc_paddr;
179}; 179};
180 180
181static struct hyperv_ref_tsc hyperv_ref_tsc; 181static struct hyperv_ref_tsc hyperv_ref_tsc;
182 182
183static u_int hyperv_tsc_timecount(struct timecounter *); 183static u_int hyperv_tsc_timecount(struct timecounter *);
184 184
185static struct timecounter hyperv_tsc_timecounter = { 185static struct timecounter hyperv_tsc_timecounter = {
186 .tc_get_timecount = hyperv_tsc_timecount, 186 .tc_get_timecount = hyperv_tsc_timecount,
187 .tc_counter_mask = 0xffffffff, 187 .tc_counter_mask = 0xffffffff,
188 .tc_frequency = HYPERV_TIMER_FREQ, 188 .tc_frequency = HYPERV_TIMER_FREQ,
189 .tc_name = "Hyper-V-TSC", 189 .tc_name = "Hyper-V-TSC",
190 .tc_quality = 3000, 190 .tc_quality = 3000,
191}; 191};
192 192
193static __inline u_int 193static __inline u_int
194atomic_load_acq_int(volatile u_int *p) 194atomic_load_acq_int(volatile u_int *p)
195{ 195{
196 u_int r = *p; 196 u_int r = *p;
197 __insn_barrier(); 197 __insn_barrier();
198 return r; 198 return r;
199} 199}
200 200
201static uint64_t 201static uint64_t
202hyperv_tc64_tsc(void) 202hyperv_tc64_tsc(void)
203{ 203{
204 struct hyperv_reftsc *tsc_ref = hyperv_ref_tsc.tsc_ref; 204 struct hyperv_reftsc *tsc_ref = hyperv_ref_tsc.tsc_ref;
205 uint32_t seq; 205 uint32_t seq;
206 206
207 while ((seq = atomic_load_acq_int(&tsc_ref->tsc_seq)) != 0) { 207 while ((seq = atomic_load_acq_int(&tsc_ref->tsc_seq)) != 0) {
208 uint64_t disc, ret, tsc; 208 uint64_t disc, ret, tsc;
209 uint64_t scale = tsc_ref->tsc_scale; 209 uint64_t scale = tsc_ref->tsc_scale;
210 int64_t ofs = tsc_ref->tsc_ofs; 210 int64_t ofs = tsc_ref->tsc_ofs;
211 211
212 tsc = cpu_counter(); 212 tsc = cpu_counter();
213 213
214 /* ret = ((tsc * scale) >> 64) + ofs */ 214 /* ret = ((tsc * scale) >> 64) + ofs */
215 __asm__ __volatile__ ("mulq %3" : 215 __asm__ __volatile__ ("mulq %3" :
216 "=d" (ret), "=a" (disc) : 216 "=d" (ret), "=a" (disc) :
217 "a" (tsc), "r" (scale)); 217 "a" (tsc), "r" (scale));
218 ret += ofs; 218 ret += ofs;
219 219
220 __insn_barrier(); 220 __insn_barrier();
221 if (tsc_ref->tsc_seq == seq) 221 if (tsc_ref->tsc_seq == seq)
222 return ret; 222 return ret;
223 223
224 /* Sequence changed; re-sync. */ 224 /* Sequence changed; re-sync. */
225 } 225 }
 226 /* Fall back to the generic timecounter, i.e. rdmsr. */ 226 /* Fall back to the generic timecounter, i.e. rdmsr. */
227 return rdmsr(MSR_HV_TIME_REF_COUNT); 227 return rdmsr(MSR_HV_TIME_REF_COUNT);
228} 228}
229 229
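
[Editorial note: hyperv_tc64_tsc() above is a sequence-lock reader. The
hypervisor bumps tsc_seq whenever it rewrites tsc_scale/tsc_ofs, and the
reader retries until it observes the same nonzero sequence before and after
the computation, guaranteeing a consistent scale/offset pair. The inline
mulq keeps the high 64 bits of the 64x64 -> 128-bit product, i.e.
(tsc * scale) >> 64, to which the offset is then added.]
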
230static u_int 230static u_int
231hyperv_tsc_timecount(struct timecounter *tc __unused) 231hyperv_tsc_timecount(struct timecounter *tc __unused)
232{ 232{
233 233
234 return hyperv_tc64_tsc(); 234 return hyperv_tc64_tsc();
235} 235}
236 236
237static bool 237static bool
238hyperv_tsc_tcinit(void) 238hyperv_tsc_tcinit(void)
239{ 239{
240 uint64_t orig_msr, msr; 240 uint64_t orig_msr, msr;
241 241
242 if ((hyperv_features & 242 if ((hyperv_features &
243 (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC)) != 243 (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC)) !=
244 (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC) || 244 (CPUID_HV_MSR_TIME_REFCNT | CPUID_HV_MSR_REFERENCE_TSC) ||
245 (cpu_feature[0] & CPUID_SSE2) == 0) /* SSE2 for mfence/lfence */ 245 (cpu_feature[0] & CPUID_SSE2) == 0) /* SSE2 for mfence/lfence */
246 return false; 246 return false;
247 247
248 hyperv_ref_tsc.tsc_ref = (void *)uvm_km_alloc(kernel_map, 248 hyperv_ref_tsc.tsc_ref = (void *)uvm_km_alloc(kernel_map,
249 PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO); 249 PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
250 if (hyperv_ref_tsc.tsc_ref == NULL) { 250 if (hyperv_ref_tsc.tsc_ref == NULL) {
251 aprint_error("Hyper-V: reference TSC page allocation failed\n"); 251 aprint_error("Hyper-V: reference TSC page allocation failed\n");
252 return false; 252 return false;
253 } 253 }
254 254
255 if (!pmap_extract(pmap_kernel(), (vaddr_t)hyperv_ref_tsc.tsc_ref, 255 if (!pmap_extract(pmap_kernel(), (vaddr_t)hyperv_ref_tsc.tsc_ref,
256 &hyperv_ref_tsc.tsc_paddr)) { 256 &hyperv_ref_tsc.tsc_paddr)) {
257 aprint_error("Hyper-V: reference TSC page setup failed\n"); 257 aprint_error("Hyper-V: reference TSC page setup failed\n");
258 uvm_km_free(kernel_map, (vaddr_t)hyperv_ref_tsc.tsc_ref, 258 uvm_km_free(kernel_map, (vaddr_t)hyperv_ref_tsc.tsc_ref,
259 PAGE_SIZE, UVM_KMF_WIRED); 259 PAGE_SIZE, UVM_KMF_WIRED);
260 hyperv_ref_tsc.tsc_ref = NULL; 260 hyperv_ref_tsc.tsc_ref = NULL;
261 return false; 261 return false;
262 } 262 }
263 263
264 orig_msr = rdmsr(MSR_HV_REFERENCE_TSC); 264 orig_msr = rdmsr(MSR_HV_REFERENCE_TSC);
265 msr = MSR_HV_REFTSC_ENABLE | (orig_msr & MSR_HV_REFTSC_RSVD_MASK) | 265 msr = MSR_HV_REFTSC_ENABLE | (orig_msr & MSR_HV_REFTSC_RSVD_MASK) |
266 (atop(hyperv_ref_tsc.tsc_paddr) << MSR_HV_REFTSC_PGSHIFT); 266 (atop(hyperv_ref_tsc.tsc_paddr) << MSR_HV_REFTSC_PGSHIFT);
267 wrmsr(MSR_HV_REFERENCE_TSC, msr); 267 wrmsr(MSR_HV_REFERENCE_TSC, msr);
268 268
 269 /* Install a 64-bit timecounter method for other modules to use. */ 269 /* Install a 64-bit timecounter method for other modules to use. */
270 hyperv_tc64 = hyperv_tc64_tsc; 270 hyperv_tc64 = hyperv_tc64_tsc;
271 271
272 /* Register "enlightened" timecounter. */ 272 /* Register "enlightened" timecounter. */
273 tc_init(&hyperv_tsc_timecounter); 273 tc_init(&hyperv_tsc_timecounter);
274 274
275 return true; 275 return true;
276} 276}
277#endif /* __amd64__ */ 277#endif /* __amd64__ */
278 278
279static void 279static void
280delay_tc(unsigned int n) 280delay_tc(unsigned int n)
281{ 281{
282 struct timecounter *tc; 282 struct timecounter *tc;
283 uint64_t end, now; 283 uint64_t end, now;
284 u_int last, u; 284 u_int last, u;
285 285
286 tc = timecounter; 286 tc = timecounter;
287 if (tc->tc_quality <= 0) { 287 if (tc->tc_quality <= 0) {
288 x86_delay(n); 288 x86_delay(n);
289 return; 289 return;
290 } 290 }
291 291
292 now = 0; 292 now = 0;
293 end = tc->tc_frequency * n / 1000000; 293 end = tc->tc_frequency * n / 1000000;
294 last = tc->tc_get_timecount(tc) & tc->tc_counter_mask; 294 last = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
295 do { 295 do {
296 x86_pause(); 296 x86_pause();
297 u = tc->tc_get_timecount(tc) & tc->tc_counter_mask; 297 u = tc->tc_get_timecount(tc) & tc->tc_counter_mask;
298 if (u < last) 298 if (u < last)
299 now += tc->tc_counter_mask - last + u + 1; 299 now += tc->tc_counter_mask - last + u + 1;
300 else 300 else
301 now += u - last; 301 now += u - last;
302 last = u; 302 last = u;
303 } while (now < end); 303 } while (now < end);
304} 304}
305 305
306static void 306static void
307delay_msr(unsigned int n) 307delay_msr(unsigned int n)
308{ 308{
309 uint64_t end, now; 309 uint64_t end, now;
310 u_int last, u; 310 u_int last, u;
311 311
312 now = 0; 312 now = 0;
313 end = HYPERV_TIMER_FREQ * n / 1000000ULL; 313 end = HYPERV_TIMER_FREQ * n / 1000000ULL;
314 last = (u_int)rdmsr(MSR_HV_TIME_REF_COUNT); 314 last = (u_int)rdmsr(MSR_HV_TIME_REF_COUNT);
315 do { 315 do {
316 x86_pause(); 316 x86_pause();
317 u = (u_int)rdmsr(MSR_HV_TIME_REF_COUNT); 317 u = (u_int)rdmsr(MSR_HV_TIME_REF_COUNT);
318 if (u < last) 318 if (u < last)
319 now += 0xffffffff - last + u + 1; 319 now += 0xffffffff - last + u + 1;
320 else 320 else
321 now += u - last; 321 now += u - last;
322 last = u; 322 last = u;
323 } while (now < end); 323 } while (now < end);
324} 324}
325 325
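
[Editorial note: a worked example of the wraparound arithmetic shared by
delay_tc() and delay_msr() above. With a 32-bit counter (mask 0xffffffff), a
previous reading last = 0xfffffff0 and a new reading u = 0x10 give u < last,
so the elapsed count is

	mask - last + u + 1 = 0xf + 0x10 + 1 = 0x20

i.e. 32 ticks, exactly the distance travelled through the wrap.]
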
326static __inline uint64_t 326static __inline uint64_t
327hyperv_hypercall_md(volatile void *hc_addr, uint64_t in_val, uint64_t in_paddr, 327hyperv_hypercall_md(volatile void *hc_addr, uint64_t in_val, uint64_t in_paddr,
328 uint64_t out_paddr) 328 uint64_t out_paddr)
329{ 329{
330 uint64_t status; 330 uint64_t status;
331 331
332#ifdef __amd64__ 332#ifdef __amd64__
333 __asm__ __volatile__ ("mov %0, %%r8" : : "r" (out_paddr): "r8"); 333 __asm__ __volatile__ ("mov %0, %%r8" : : "r" (out_paddr): "r8");
334 __asm__ __volatile__ ("call *%3" : "=a" (status) : "c" (in_val), 334 __asm__ __volatile__ ("call *%3" : "=a" (status) : "c" (in_val),
335 "d" (in_paddr), "m" (hc_addr)); 335 "d" (in_paddr), "m" (hc_addr));
336#else 336#else
337 uint32_t in_val_hi = in_val >> 32; 337 uint32_t in_val_hi = in_val >> 32;
338 uint32_t in_val_lo = in_val & 0xFFFFFFFF; 338 uint32_t in_val_lo = in_val & 0xFFFFFFFF;
339 uint32_t status_hi, status_lo; 339 uint32_t status_hi, status_lo;
340 uint32_t in_paddr_hi = in_paddr >> 32; 340 uint32_t in_paddr_hi = in_paddr >> 32;
341 uint32_t in_paddr_lo = in_paddr & 0xFFFFFFFF; 341 uint32_t in_paddr_lo = in_paddr & 0xFFFFFFFF;
342 uint32_t out_paddr_hi = out_paddr >> 32; 342 uint32_t out_paddr_hi = out_paddr >> 32;
343 uint32_t out_paddr_lo = out_paddr & 0xFFFFFFFF; 343 uint32_t out_paddr_lo = out_paddr & 0xFFFFFFFF;
344 344
345 __asm__ __volatile__ ("call *%8" : "=d" (status_hi), "=a" (status_lo) : 345 __asm__ __volatile__ ("call *%8" : "=d" (status_hi), "=a" (status_lo) :
346 "d" (in_val_hi), "a" (in_val_lo), 346 "d" (in_val_hi), "a" (in_val_lo),
347 "b" (in_paddr_hi), "c" (in_paddr_lo), 347 "b" (in_paddr_hi), "c" (in_paddr_lo),
348 "D" (out_paddr_hi), "S" (out_paddr_lo), 348 "D" (out_paddr_hi), "S" (out_paddr_lo),
349 "m" (hc_addr)); 349 "m" (hc_addr));
350 status = status_lo | ((uint64_t)status_hi << 32); 350 status = status_lo | ((uint64_t)status_hi << 32);
351#endif 351#endif
352 352
353 return status; 353 return status;
354} 354}
355 355
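
[Editorial note: hyperv_hypercall_md() above follows the Hyper-V hypercall
calling convention: on amd64 the control word is passed in %rcx, the input
page address in %rdx and the output page address in %r8, with the status
returned in %rax; the i386 path splits each 64-bit value across a register
pair, as the asm constraints show.]
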
356uint64_t 356uint64_t
357hyperv_hypercall(uint64_t control, paddr_t in_paddr, paddr_t out_paddr) 357hyperv_hypercall(uint64_t control, paddr_t in_paddr, paddr_t out_paddr)
358{ 358{
359 359
360 if (hyperv_hypercall_ctx.hc_addr == NULL) 360 if (hyperv_hypercall_ctx.hc_addr == NULL)
361 return ~HYPERCALL_STATUS_SUCCESS; 361 return ~HYPERCALL_STATUS_SUCCESS;
362 362
363 return hyperv_hypercall_md(hyperv_hypercall_ctx.hc_addr, control, 363 return hyperv_hypercall_md(hyperv_hypercall_ctx.hc_addr, control,
364 in_paddr, out_paddr); 364 in_paddr, out_paddr);
365} 365}
366 366
367static bool 367static bool
368hyperv_probe(u_int *maxleaf, u_int *features, u_int *pm_features, 368hyperv_probe(u_int *maxleaf, u_int *features, u_int *pm_features,
369 u_int *features3) 369 u_int *features3)
370{ 370{
371 u_int regs[4]; 371 u_int regs[4];
372 372
373 if (vm_guest != VM_GUEST_HV) 373 if (vm_guest != VM_GUEST_HV)
374 return false; 374 return false;
375 375
376 x86_cpuid(CPUID_LEAF_HV_MAXLEAF, regs); 376 x86_cpuid(CPUID_LEAF_HV_MAXLEAF, regs);
377 *maxleaf = regs[0]; 377 *maxleaf = regs[0];
378 if (*maxleaf < CPUID_LEAF_HV_LIMITS) 378 if (*maxleaf < CPUID_LEAF_HV_LIMITS)
379 return false; 379 return false;
380 380
381 x86_cpuid(CPUID_LEAF_HV_INTERFACE, regs); 381 x86_cpuid(CPUID_LEAF_HV_INTERFACE, regs);
382 if (regs[0] != CPUID_HV_IFACE_HYPERV) 382 if (regs[0] != CPUID_HV_IFACE_HYPERV)
383 return false; 383 return false;
384 384
385 x86_cpuid(CPUID_LEAF_HV_FEATURES, regs); 385 x86_cpuid(CPUID_LEAF_HV_FEATURES, regs);
386 if (!(regs[0] & CPUID_HV_MSR_HYPERCALL)) { 386 if (!(regs[0] & CPUID_HV_MSR_HYPERCALL)) {
387 /* 387 /*
388 * Hyper-V w/o Hypercall is impossible; someone 388 * Hyper-V w/o Hypercall is impossible; someone
389 * is faking Hyper-V. 389 * is faking Hyper-V.
390 */ 390 */
391 return false; 391 return false;
392 } 392 }
393 393
394 *features = regs[0]; 394 *features = regs[0];
395 *pm_features = regs[2]; 395 *pm_features = regs[2];
396 *features3 = regs[3]; 396 *features3 = regs[3];
397 397
398 return true; 398 return true;
399} 399}
400 400
401static bool 401static bool
402hyperv_identify(void) 402hyperv_identify(void)
403{ 403{
404 char buf[256]; 404 char buf[256];
405 u_int regs[4]; 405 u_int regs[4];
406 u_int maxleaf; 406 u_int maxleaf;
407 407
408 if (!hyperv_probe(&maxleaf, &hyperv_features, &hyperv_pm_features, 408 if (!hyperv_probe(&maxleaf, &hyperv_features, &hyperv_pm_features,
409 &hyperv_features3)) 409 &hyperv_features3))
410 return false; 410 return false;
411 411
412 x86_cpuid(CPUID_LEAF_HV_IDENTITY, regs); 412 x86_cpuid(CPUID_LEAF_HV_IDENTITY, regs);
413 hyperv_ver_major = regs[1] >> 16; 413 hyperv_ver_major = regs[1] >> 16;
414 snprintf(hyperv_version_str, sizeof(hyperv_version_str), 414 snprintf(hyperv_version_str, sizeof(hyperv_version_str),
415 "%d.%d.%d [SP%d]", 415 "%d.%d.%d [SP%d]",
416 hyperv_ver_major, regs[1] & 0xffff, regs[0], regs[2]); 416 hyperv_ver_major, regs[1] & 0xffff, regs[0], regs[2]);
417 aprint_verbose("Hyper-V Version: %s\n", hyperv_version_str); 417 aprint_verbose("Hyper-V Version: %s\n", hyperv_version_str);
418 418
419 snprintb(hyperv_features_str, sizeof(hyperv_features_str), 419 snprintb(hyperv_features_str, sizeof(hyperv_features_str),
420 "\020" 420 "\020"
421 "\001VPRUNTIME" /* MSR_HV_VP_RUNTIME */ 421 "\001VPRUNTIME" /* MSR_HV_VP_RUNTIME */
422 "\002TMREFCNT" /* MSR_HV_TIME_REF_COUNT */ 422 "\002TMREFCNT" /* MSR_HV_TIME_REF_COUNT */
423 "\003SYNIC" /* MSRs for SynIC */ 423 "\003SYNIC" /* MSRs for SynIC */
424 "\004SYNTM" /* MSRs for SynTimer */ 424 "\004SYNTM" /* MSRs for SynTimer */
425 "\005APIC" /* MSR_HV_{EOI,ICR,TPR} */ 425 "\005APIC" /* MSR_HV_{EOI,ICR,TPR} */
426 "\006HYPERCALL" /* MSR_HV_{GUEST_OS_ID,HYPERCALL} */ 426 "\006HYPERCALL" /* MSR_HV_{GUEST_OS_ID,HYPERCALL} */
427 "\007VPINDEX" /* MSR_HV_VP_INDEX */ 427 "\007VPINDEX" /* MSR_HV_VP_INDEX */
428 "\010RESET" /* MSR_HV_RESET */ 428 "\010RESET" /* MSR_HV_RESET */
429 "\011STATS" /* MSR_HV_STATS_ */ 429 "\011STATS" /* MSR_HV_STATS_ */
430 "\012REFTSC" /* MSR_HV_REFERENCE_TSC */ 430 "\012REFTSC" /* MSR_HV_REFERENCE_TSC */
431 "\013IDLE" /* MSR_HV_GUEST_IDLE */ 431 "\013IDLE" /* MSR_HV_GUEST_IDLE */
432 "\014TMFREQ" /* MSR_HV_{TSC,APIC}_FREQUENCY */ 432 "\014TMFREQ" /* MSR_HV_{TSC,APIC}_FREQUENCY */
433 "\015DEBUG", /* MSR_HV_SYNTH_DEBUG_ */ 433 "\015DEBUG", /* MSR_HV_SYNTH_DEBUG_ */
434 hyperv_features); 434 hyperv_features);
435 aprint_verbose(" Features=%s\n", hyperv_features_str); 435 aprint_verbose(" Features=%s\n", hyperv_features_str);
436 snprintb(buf, sizeof(buf), 436 snprintb(buf, sizeof(buf),
437 "\020" 437 "\020"
438 "\005C3HPET", /* HPET is required for C3 state */ 438 "\005C3HPET", /* HPET is required for C3 state */
439 (hyperv_pm_features & ~CPUPM_HV_CSTATE_MASK)); 439 (hyperv_pm_features & ~CPUPM_HV_CSTATE_MASK));
440 snprintf(hyperv_pm_features_str, sizeof(hyperv_pm_features_str), 440 snprintf(hyperv_pm_features_str, sizeof(hyperv_pm_features_str),
441 "%s [C%u]", buf, CPUPM_HV_CSTATE(hyperv_pm_features)); 441 "%s [C%u]", buf, CPUPM_HV_CSTATE(hyperv_pm_features));
442 aprint_verbose(" PM Features=%s\n", hyperv_pm_features_str); 442 aprint_verbose(" PM Features=%s\n", hyperv_pm_features_str);
443 snprintb(hyperv_features3_str, sizeof(hyperv_features3_str), 443 snprintb(hyperv_features3_str, sizeof(hyperv_features3_str),
444 "\020" 444 "\020"
445 "\001MWAIT" /* MWAIT */ 445 "\001MWAIT" /* MWAIT */
446 "\002DEBUG" /* guest debug support */ 446 "\002DEBUG" /* guest debug support */
447 "\003PERFMON" /* performance monitor */ 447 "\003PERFMON" /* performance monitor */
448 "\004PCPUDPE" /* physical CPU dynamic partition event */ 448 "\004PCPUDPE" /* physical CPU dynamic partition event */
449 "\005XMMHC" /* hypercall input through XMM regs */ 449 "\005XMMHC" /* hypercall input through XMM regs */
450 "\006IDLE" /* guest idle support */ 450 "\006IDLE" /* guest idle support */
451 "\007SLEEP" /* hypervisor sleep support */ 451 "\007SLEEP" /* hypervisor sleep support */
452 "\010NUMA" /* NUMA distance query support */ 452 "\010NUMA" /* NUMA distance query support */
453 "\011TMFREQ" /* timer frequency query (TSC, LAPIC) */ 453 "\011TMFREQ" /* timer frequency query (TSC, LAPIC) */
454 "\012SYNCMC" /* inject synthetic machine checks */ 454 "\012SYNCMC" /* inject synthetic machine checks */
455 "\013CRASH" /* MSRs for guest crash */ 455 "\013CRASH" /* MSRs for guest crash */
456 "\014DEBUGMSR" /* MSRs for guest debug */ 456 "\014DEBUGMSR" /* MSRs for guest debug */
457 "\015NPIEP" /* NPIEP */ 457 "\015NPIEP" /* NPIEP */
458 "\016HVDIS", /* disabling hypervisor */ 458 "\016HVDIS", /* disabling hypervisor */
459 hyperv_features3); 459 hyperv_features3);
460 aprint_verbose(" Features3=%s\n", hyperv_features3_str); 460 aprint_verbose(" Features3=%s\n", hyperv_features3_str);
461 461
462 x86_cpuid(CPUID_LEAF_HV_RECOMMENDS, regs); 462 x86_cpuid(CPUID_LEAF_HV_RECOMMENDS, regs);
463 hyperv_recommends = regs[0]; 463 hyperv_recommends = regs[0];
464 aprint_verbose(" Recommends: %08x %08x\n", regs[0], regs[1]); 464 aprint_verbose(" Recommends: %08x %08x\n", regs[0], regs[1]);
465 465
466 x86_cpuid(CPUID_LEAF_HV_LIMITS, regs); 466 x86_cpuid(CPUID_LEAF_HV_LIMITS, regs);
467 aprint_verbose(" Limits: Vcpu:%d Lcpu:%d Int:%d\n", 467 aprint_verbose(" Limits: Vcpu:%d Lcpu:%d Int:%d\n",
468 regs[0], regs[1], regs[2]); 468 regs[0], regs[1], regs[2]);
469 469
470 if (maxleaf >= CPUID_LEAF_HV_HWFEATURES) { 470 if (maxleaf >= CPUID_LEAF_HV_HWFEATURES) {
471 x86_cpuid(CPUID_LEAF_HV_HWFEATURES, regs); 471 x86_cpuid(CPUID_LEAF_HV_HWFEATURES, regs);
472 aprint_verbose(" HW Features: %08x, AMD: %08x\n", 472 aprint_verbose(" HW Features: %08x, AMD: %08x\n",
473 regs[0], regs[3]); 473 regs[0], regs[3]);
474 } 474 }
475 475
476 return true; 476 return true;
477} 477}
478 478
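The "\020\001VPRUNTIME..." strings above use snprintb(3)'s old-style bitmask format: the leading octal escape selects the output base (\020 is 16, i.e. hex) and each following escape is the bit number plus one, naming that bit. A small userland illustration, assuming NetBSD's libutil (link with -lutil):

    /* Sketch: how the feature strings above get rendered by snprintb(3). */
    #include <stdio.h>
    #include <util.h>

    int
    main(void)
    {
        char buf[64];

        /* \020 = hex output; \001 names bit 0, \002 bit 1, \003 bit 2. */
        snprintb(buf, sizeof(buf),
            "\020\001VPRUNTIME\002TMREFCNT\003SYNIC", 0x5);
        printf("%s\n", buf);    /* prints: 0x5<VPRUNTIME,SYNIC> */
        return 0;
    }
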
479void 479void
480hyperv_early_init(void) 480hyperv_early_init(void)
481{ 481{
482 u_int features, pm_features, features3; 482 u_int features, pm_features, features3;
483 u_int maxleaf; 483 u_int maxleaf;
484 int i; 484 int i;
485 485
486 if (!hyperv_probe(&maxleaf, &features, &pm_features, &features3)) 486 if (!hyperv_probe(&maxleaf, &features, &pm_features, &features3))
487 return; 487 return;
488 488
489 if (features & CPUID_HV_MSR_TIME_REFCNT) 489 if (features & CPUID_HV_MSR_TIME_REFCNT)
490 x86_delay = delay_func = delay_msr; 490 x86_delay = delay_func = delay_msr;
491 491
492 if (features & CPUID_HV_MSR_VP_INDEX) { 492 if (features & CPUID_HV_MSR_VP_INDEX) {
493 /* Save virtual processor id. */ 493 /* Save virtual processor id. */
494 hyperv_vcpuid[0] = rdmsr(MSR_HV_VP_INDEX); 494 hyperv_vcpuid[0] = rdmsr(MSR_HV_VP_INDEX);
495 } else { 495 } else {
496 /* Set virtual processor id to 0 for compatibility. */ 496 /* Set virtual processor id to 0 for compatibility. */
497 hyperv_vcpuid[0] = 0; 497 hyperv_vcpuid[0] = 0;
498 } 498 }
499 for (i = 1; i < MAXCPUS; i++) 499 for (i = 1; i < MAXCPUS; i++)
500 hyperv_vcpuid[i] = hyperv_vcpuid[0]; 500 hyperv_vcpuid[i] = hyperv_vcpuid[0];
501} 501}
502 502
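When CPUID_HV_MSR_TIME_REFCNT is present, the boot-time delay function is switched to delay_msr (defined earlier in this file, outside this hunk), which presumably spins on MSR_HV_TIME_REF_COUNT; the TLFS defines that register as a monotonic counter in 100 ns units, so 1 us is 10 ticks. A hedged sketch of that style of delay, with the privileged MSR read stubbed out:

    #include <stdint.h>

    /* Stub standing in for rdmsr(MSR_HV_TIME_REF_COUNT); real reads need ring 0. */
    static uint64_t fake_ticks;
    static uint64_t
    read_ref_count(void)
    {
        return fake_ticks += 100;   /* pretend 100ns-unit counter */
    }

    /* Busy-wait for 'us' microseconds: 1 us == 10 ticks of a 100 ns counter. */
    static void
    delay_refcnt(unsigned int us)
    {
        uint64_t end = read_ref_count() + (uint64_t)us * 10;

        while (read_ref_count() < end)
            continue;
    }

    int
    main(void)
    {
        delay_refcnt(5);
        return 0;
    }
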
503void 503void
504hyperv_init_cpu(struct cpu_info *ci) 504hyperv_init_cpu(struct cpu_info *ci)
505{ 505{
506 u_int features, pm_features, features3; 506 u_int features, pm_features, features3;
507 u_int maxleaf; 507 u_int maxleaf;
508 508
509 if (!hyperv_probe(&maxleaf, &features, &pm_features, &features3)) 509 if (!hyperv_probe(&maxleaf, &features, &pm_features, &features3))
510 return; 510 return;
511 511
512 if (features & CPUID_HV_MSR_VP_INDEX) 512 if (features & CPUID_HV_MSR_VP_INDEX)
513 hyperv_vcpuid[ci->ci_index] = rdmsr(MSR_HV_VP_INDEX); 513 hyperv_vcpuid[ci->ci_index] = rdmsr(MSR_HV_VP_INDEX);
514} 514}
515 515
516uint32_t 516uint32_t
517hyperv_get_vcpuid(cpuid_t cpu) 517hyperv_get_vcpuid(cpuid_t cpu)
518{ 518{
519 519
520 if (cpu < MAXCPUS) 520 if (cpu < MAXCPUS)
521 return hyperv_vcpuid[cpu]; 521 return hyperv_vcpuid[cpu];
522 return 0; 522 return 0;
523} 523}
524 524
525static bool 525static bool
526hyperv_init(void) 526hyperv_init(void)
527{ 527{
528 528
529 if (!hyperv_identify()) { 529 if (!hyperv_identify()) {
530 /* Not Hyper-V; reset guest id to the generic one. */ 530 /* Not Hyper-V; reset guest id to the generic one. */
531 if (vm_guest == VM_GUEST_HV) 531 if (vm_guest == VM_GUEST_HV)
532 vm_guest = VM_GUEST_VM; 532 vm_guest = VM_GUEST_VM;
533 return false; 533 return false;
534 } 534 }
535 535
536 /* Set guest id */ 536 /* Set guest id */
537 wrmsr(MSR_HV_GUEST_OS_ID, MSR_HV_GUESTID_OSTYPE_NETBSD | 537 wrmsr(MSR_HV_GUEST_OS_ID, MSR_HV_GUESTID_OSTYPE_NETBSD |
538 (uint64_t)__NetBSD_Version__ << MSR_HV_GUESTID_VERSION_SHIFT); 538 (uint64_t)__NetBSD_Version__ << MSR_HV_GUESTID_VERSION_SHIFT);
539 539
540 if (hyperv_features & CPUID_HV_MSR_TIME_REFCNT) { 540 if (hyperv_features & CPUID_HV_MSR_TIME_REFCNT) {
541 /* Register Hyper-V timecounter */ 541 /* Register Hyper-V timecounter */
542 tc_init(&hyperv_timecounter); 542 tc_init(&hyperv_timecounter);
543 543
544 /* 544 /*
545 * Install a 64-bit timecounter method for other modules to use. 545 * Install a 64-bit timecounter method for other modules to use.
546 */ 546 */
547 hyperv_tc64 = hyperv_tc64_rdmsr; 547 hyperv_tc64 = hyperv_tc64_rdmsr;
548#ifdef __amd64__ 548#ifdef __amd64__
549 hyperv_tsc_tcinit(); 549 hyperv_tsc_tcinit();
550#endif 550#endif
551 551
552 /* delay with timecounter */ 552 /* delay with timecounter */
553 x86_delay = delay_func = delay_tc; 553 x86_delay = delay_func = delay_tc;
554 } 554 }
555 555
556#if NLAPIC > 0 556#if NLAPIC > 0
557 if ((hyperv_features & CPUID_HV_MSR_TIME_FREQ) && 557 if ((hyperv_features & CPUID_HV_MSR_TIME_FREQ) &&
558 (hyperv_features3 & CPUID3_HV_TIME_FREQ)) 558 (hyperv_features3 & CPUID3_HV_TIME_FREQ))
559 lapic_per_second = rdmsr(MSR_HV_APIC_FREQUENCY); 559 lapic_per_second = rdmsr(MSR_HV_APIC_FREQUENCY);
560#endif 560#endif
561 561
562 return hyperv_init_hypercall(); 562 return hyperv_init_hypercall();
563} 563}
564 564
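hyperv_init() writes MSR_HV_GUEST_OS_ID before enabling anything else; per the TLFS, the hypervisor ignores attempts to enable the hypercall page while the guest OS ID is still zero. A sketch of the value composition follows; the OS-type constant and shift below are placeholders, not the real MSR_HV_GUESTID_* values:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field layout; see MSR_HV_GUESTID_* for the real encoding. */
    #define GUESTID_OSTYPE_NETBSD   ((uint64_t)2 << 48)     /* assumed */
    #define GUESTID_VERSION_SHIFT   16                      /* assumed */

    int
    main(void)
    {
        uint64_t version = 999007700;   /* shape of __NetBSD_Version__ */
        uint64_t id = GUESTID_OSTYPE_NETBSD |
            (version << GUESTID_VERSION_SHIFT);

        /* The kernel hands this value to wrmsr(MSR_HV_GUEST_OS_ID, ...). */
        printf("guest os id: 0x%016llx\n", (unsigned long long)id);
        return 0;
    }
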
565static bool 565static bool
566hyperv_is_initialized(void) 566hyperv_is_initialized(void)
567{ 567{
568 uint64_t msr; 568 uint64_t msr;
569 569
570 if (vm_guest != VM_GUEST_HV) 570 if (vm_guest != VM_GUEST_HV)
571 return false; 571 return false;
572 if (rdmsr_safe(MSR_HV_HYPERCALL, &msr) == EFAULT) 572 if (rdmsr_safe(MSR_HV_HYPERCALL, &msr) == EFAULT)
573 return false; 573 return false;
574 return (msr & MSR_HV_HYPERCALL_ENABLE) ? true : false; 574 return (msr & MSR_HV_HYPERCALL_ENABLE) ? true : false;
575} 575}
576 576
577static int 577static int
578hyperv_match(device_t parent, cfdata_t cf, void *aux) 578hyperv_match(device_t parent, cfdata_t cf, void *aux)
579{ 579{
580 struct cpufeature_attach_args *cfaa = aux; 580 struct cpufeature_attach_args *cfaa = aux;
581 struct cpu_info *ci = cfaa->ci; 581 struct cpu_info *ci = cfaa->ci;
582 582
583 if (strcmp(cfaa->name, "vm") != 0) 583 if (strcmp(cfaa->name, "vm") != 0)
584 return 0; 584 return 0;
585 if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0) 585 if ((ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) == 0)
586 return 0; 586 return 0;
587 if (vm_guest != VM_GUEST_HV) 587 if (vm_guest != VM_GUEST_HV)
588 return 0; 588 return 0;
589 589
590 return 1; 590 return 1;
591} 591}
592 592
593static void 593static void
594hyperv_attach(device_t parent, device_t self, void *aux) 594hyperv_attach(device_t parent, device_t self, void *aux)
595{ 595{
596 struct hyperv_softc *sc = device_private(self); 596 struct hyperv_softc *sc = device_private(self);
597 597
598 sc->sc_dev = self; 598 sc->sc_dev = self;
599 599
600 aprint_naive("\n"); 600 aprint_naive("\n");
601 aprint_normal(": Hyper-V\n"); 601 aprint_normal(": Hyper-V\n");
602 602
603 if (!hyperv_is_initialized()) { 603 if (!hyperv_is_initialized()) {
604 if (rdmsr(MSR_HV_GUEST_OS_ID) == 0) { 604 if (rdmsr(MSR_HV_GUEST_OS_ID) == 0) {
605 if (!hyperv_init()) { 605 if (!hyperv_init()) {
606 aprint_error_dev(self, "initialization failed\n"); 606 aprint_error_dev(self, "initialization failed\n");
607 return; 607 return;
608 } 608 }
609 } 609 }
610 hyperv_init_hypercall(); 610 hyperv_init_hypercall();
611 } 611 }
612 612
613 (void) pmf_device_register(self, NULL, NULL); 613 (void) pmf_device_register(self, NULL, NULL);
614 614
615 (void) hyperv_sysctl_setup_root(sc); 615 (void) hyperv_sysctl_setup_root(sc);
616} 616}
617 617
618static int 618static int
619hyperv_detach(device_t self, int flags) 619hyperv_detach(device_t self, int flags)
620{ 620{
621 struct hyperv_softc *sc = device_private(self); 621 struct hyperv_softc *sc = device_private(self);
622 uint64_t hc; 622 uint64_t hc;
623 623
624 /* Disable Hypercall */ 624 /* Disable Hypercall */
625 hc = rdmsr(MSR_HV_HYPERCALL); 625 hc = rdmsr(MSR_HV_HYPERCALL);
626 wrmsr(MSR_HV_HYPERCALL, hc & MSR_HV_HYPERCALL_RSVD_MASK); 626 wrmsr(MSR_HV_HYPERCALL, hc & MSR_HV_HYPERCALL_RSVD_MASK);
627 hyperv_hypercall_memfree(); 627 hyperv_hypercall_memfree();
628 628
629 if (hyperv_features & CPUID_HV_MSR_TIME_REFCNT) 629 if (hyperv_features & CPUID_HV_MSR_TIME_REFCNT)
630 tc_detach(&hyperv_timecounter); 630 tc_detach(&hyperv_timecounter);
631 631
632 wrmsr(MSR_HV_GUEST_OS_ID, 0); 632 wrmsr(MSR_HV_GUEST_OS_ID, 0);
633 633
634 pmf_device_deregister(self); 634 pmf_device_deregister(self);
635 635
636 if (sc->sc_log != NULL) { 636 if (sc->sc_log != NULL) {
637 sysctl_teardown(&sc->sc_log); 637 sysctl_teardown(&sc->sc_log);
638 sc->sc_log = NULL; 638 sc->sc_log = NULL;
639 } 639 }
640 640
641 return 0; 641 return 0;
642} 642}
643 643
644void 644void
645hyperv_intr(void) 645hyperv_intr(void)
646{ 646{
647 struct cpu_info *ci = curcpu(); 647 struct cpu_info *ci = curcpu();
648 648
649 (*hyperv_event_proc.func)(hyperv_event_proc.arg, ci); 649 (*hyperv_event_proc.func)(hyperv_event_proc.arg, ci);
650 (*hyperv_message_proc.func)(hyperv_message_proc.arg, ci); 650 (*hyperv_message_proc.func)(hyperv_message_proc.arg, ci);
651} 651}
652 652
653void hyperv_hypercall_intr(struct trapframe *); 653void hyperv_hypercall_intr(struct trapframe *);
654void 654void
655hyperv_hypercall_intr(struct trapframe *frame __unused) 655hyperv_hypercall_intr(struct trapframe *frame __unused)
656{ 656{
657 struct cpu_info *ci = curcpu(); 657 struct cpu_info *ci = curcpu();
658 658
659 ci->ci_isources[LIR_HV]->is_evcnt.ev_count++; 659 ci->ci_isources[LIR_HV]->is_evcnt.ev_count++;
660 660
661 hyperv_intr(); 661 hyperv_intr();
662} 662}
663 663
664static void 664static void
665hyperv_proc_dummy(void *arg __unused, struct cpu_info *ci __unused) 665hyperv_proc_dummy(void *arg __unused, struct cpu_info *ci __unused)
666{ 666{
667} 667}
668 668
669void 669void
670hyperv_set_event_proc(void (*func)(void *, struct cpu_info *), void *arg) 670hyperv_set_event_proc(void (*func)(void *, struct cpu_info *), void *arg)
671{ 671{
672 672
673 hyperv_event_proc.func = func; 673 hyperv_event_proc.func = func;
674 hyperv_event_proc.arg = arg; 674 hyperv_event_proc.arg = arg;
675} 675}
676 676
677void 677void
678hyperv_set_message_proc(void (*func)(void *, struct cpu_info *), void *arg) 678hyperv_set_message_proc(void (*func)(void *, struct cpu_info *), void *arg)
679{ 679{
680 680
681 hyperv_message_proc.func = func; 681 hyperv_message_proc.func = func;
682 hyperv_message_proc.arg = arg; 682 hyperv_message_proc.arg = arg;
683} 683}
684 684
685static void 685static void
686hyperv_hypercall_memfree(void) 686hyperv_hypercall_memfree(void)
687{ 687{
688 688
689 hyperv_hypercall_ctx.hc_addr = NULL; 689 hyperv_hypercall_ctx.hc_addr = NULL;
690} 690}
691 691
692static bool 692static bool
693hyperv_init_hypercall(void) 693hyperv_init_hypercall(void)
694{ 694{
695 uint64_t hc, hc_orig; 695 uint64_t hc, hc_orig;
696 696
697 hyperv_hypercall_ctx.hc_addr = hyperv_hypercall_page; 697 hyperv_hypercall_ctx.hc_addr = hyperv_hypercall_page;
698 hyperv_hypercall_ctx.hc_paddr = vtophys((vaddr_t)hyperv_hypercall_page); 698 hyperv_hypercall_ctx.hc_paddr = vtophys((vaddr_t)hyperv_hypercall_page);
699 KASSERT(hyperv_hypercall_ctx.hc_paddr != 0); 699 KASSERT(hyperv_hypercall_ctx.hc_paddr != 0);
700 700
701 /* Get the 'reserved' bits, which must be preserved. */ 701 /* Get the 'reserved' bits, which must be preserved. */
702 hc_orig = rdmsr(MSR_HV_HYPERCALL); 702 hc_orig = rdmsr(MSR_HV_HYPERCALL);
703 703
704 /* 704 /*
705 * Set up the Hypercall page. 705 * Set up the Hypercall page.
706 * 706 *
707 * NOTE: 'reserved' bits MUST be preserved. 707 * NOTE: 'reserved' bits MUST be preserved.
708 */ 708 */
709 hc = (atop(hyperv_hypercall_ctx.hc_paddr) << MSR_HV_HYPERCALL_PGSHIFT) | 709 hc = (atop(hyperv_hypercall_ctx.hc_paddr) << MSR_HV_HYPERCALL_PGSHIFT) |
710 (hc_orig & MSR_HV_HYPERCALL_RSVD_MASK) | 710 (hc_orig & MSR_HV_HYPERCALL_RSVD_MASK) |
711 MSR_HV_HYPERCALL_ENABLE; 711 MSR_HV_HYPERCALL_ENABLE;
712 wrmsr(MSR_HV_HYPERCALL, hc); 712 wrmsr(MSR_HV_HYPERCALL, hc);
713 713
714 /* 714 /*
715 * Confirm that the Hypercall page did get set up. 715 * Confirm that the Hypercall page did get set up.
716 */ 716 */
717 hc = rdmsr(MSR_HV_HYPERCALL); 717 hc = rdmsr(MSR_HV_HYPERCALL);
718 if (!(hc & MSR_HV_HYPERCALL_ENABLE)) { 718 if (!(hc & MSR_HV_HYPERCALL_ENABLE)) {
719 aprint_error("Hyper-V: Hypercall setup failed\n"); 719 aprint_error("Hyper-V: Hypercall setup failed\n");
720 hyperv_hypercall_memfree(); 720 hyperv_hypercall_memfree();
721 /* Can't perform any Hyper-V specific actions */ 721 /* Can't perform any Hyper-V specific actions */
722 vm_guest = VM_GUEST_VM; 722 vm_guest = VM_GUEST_VM;
723 return false; 723 return false;
724 } 724 }
725 725
726 return true; 726 return true;
727} 727}
728 728
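The MSR value composed in hyperv_init_hypercall() has three pieces: the page frame number of the hypercall page shifted into place (atop() is essentially paddr >> PAGE_SHIFT on x86), the hypervisor-owned 'reserved' bits carried over from the old value, and the enable bit. A standalone sketch; the masks and shifts are assumptions mirroring the MSR_HV_HYPERCALL_* macros:

    #include <stdint.h>
    #include <stdio.h>

    #define HC_ENABLE       0x0001ULL   /* assumed ..._ENABLE */
    #define HC_RSVD_MASK    0x0ffeULL   /* assumed ..._RSVD_MASK */
    #define HC_PGSHIFT      12          /* assumed ..._PGSHIFT */
    #define PAGE_SHIFT      12

    int
    main(void)
    {
        uint64_t hc_paddr = 0x1234000;  /* page-aligned hypercall page */
        uint64_t hc_orig = 0x0aaa;      /* stand-in for rdmsr(MSR_HV_HYPERCALL) */
        uint64_t hc;

        /* PFN in the high bits, reserved bits preserved, enable bit set. */
        hc = ((hc_paddr >> PAGE_SHIFT) << HC_PGSHIFT) |
            (hc_orig & HC_RSVD_MASK) | HC_ENABLE;
        printf("MSR_HV_HYPERCALL <- 0x%016llx\n", (unsigned long long)hc);
        return 0;
    }
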
729int 729int
730hyperv_hypercall_enabled(void) 730hyperv_hypercall_enabled(void)
731{ 731{
732 732
733 return hyperv_is_initialized(); 733 return hyperv_is_initialized();
734} 734}
735 735
736int 736int
737hyperv_synic_supported(void) 737hyperv_synic_supported(void)
738{ 738{
739 739
740 return (hyperv_features & CPUID_HV_MSR_SYNIC) ? 1 : 0; 740 return (hyperv_features & CPUID_HV_MSR_SYNIC) ? 1 : 0;
741} 741}
742 742
743int 743int
744hyperv_is_gen1(void) 744hyperv_is_gen1(void)
745{ 745{
746 746
747 return !efi_probe(); 747 return !efi_probe();
748} 748}
749 749
750void 750void
751hyperv_send_eom(void) 751hyperv_send_eom(void)
752{ 752{
753 753
754 wrmsr(MSR_HV_EOM, 0); 754 wrmsr(MSR_HV_EOM, 0);
755} 755}
756 756
757void 757void
758vmbus_init_interrupts_md(struct vmbus_softc *sc) 758vmbus_init_interrupts_md(struct vmbus_softc *sc)
759{ 759{
760 extern void Xintr_hyperv_hypercall(void); 760 extern void Xintr_hyperv_hypercall(void);
761 struct vmbus_percpu_data *pd; 761 struct vmbus_percpu_data *pd;
762 struct hyperv_percpu_data *hv_pd; 762 struct hyperv_percpu_data *hv_pd;
763 struct idt_vec *iv; 763 struct idt_vec *iv;
764 cpuid_t cid; 764 cpuid_t cid;
765 765
766 if (idt_vec_is_pcpu()) 766 if (idt_vec_is_pcpu())
767 return; 767 return;
768 /* 768 /*
769 * All resources required by the Hyper-V ISR are set up; now find a 769 * All resources required by the Hyper-V ISR are set up; now find a
770 * free IDT vector for the Hyper-V ISR and set it up. 770 * free IDT vector for the Hyper-V ISR and set it up.
771 */ 771 */
772 iv = &(cpu_info_primary.ci_idtvec); 772 iv = &(cpu_info_primary.ci_idtvec);
773 cid = cpu_index(&cpu_info_primary); 773 cid = cpu_index(&cpu_info_primary);
774 pd = &sc->sc_percpu[cid]; 774 pd = &sc->sc_percpu[cid];
775 775
776 hv_pd = kmem_zalloc(sizeof(*hv_pd), KM_SLEEP); 776 hv_pd = kmem_zalloc(sizeof(*hv_pd), KM_SLEEP);
777 mutex_enter(&cpu_lock); 777 mutex_enter(&cpu_lock);
778 hv_pd->pd_idtvec = idt_vec_alloc(iv, 778 hv_pd->pd_idtvec = idt_vec_alloc(iv,
779 APIC_LEVEL(NIPL), IDT_INTR_HIGH); 779 APIC_LEVEL(NIPL), IDT_INTR_HIGH);
780 mutex_exit(&cpu_lock); 780 mutex_exit(&cpu_lock);
781 KASSERT(hv_pd->pd_idtvec > 0); 781 KASSERT(hv_pd->pd_idtvec > 0);
782 idt_vec_set(iv, hv_pd->pd_idtvec, Xintr_hyperv_hypercall); 782 idt_vec_set(iv, hv_pd->pd_idtvec, Xintr_hyperv_hypercall);
783 pd->md_cookie = (void *)hv_pd; 783 pd->md_cookie = (void *)hv_pd;
784} 784}
785 785
786void 786void
787vmbus_deinit_interrupts_md(struct vmbus_softc *sc) 787vmbus_deinit_interrupts_md(struct vmbus_softc *sc)
788{ 788{
789 struct vmbus_percpu_data *pd; 789 struct vmbus_percpu_data *pd;
790 struct hyperv_percpu_data *hv_pd; 790 struct hyperv_percpu_data *hv_pd;
791 struct idt_vec *iv; 791 struct idt_vec *iv;
792 cpuid_t cid; 792 cpuid_t cid;
793 793
794 if (idt_vec_is_pcpu()) 794 if (idt_vec_is_pcpu())
795 return; 795 return;
796 796
797 iv = &(cpu_info_primary.ci_idtvec); 797 iv = &(cpu_info_primary.ci_idtvec);
798 cid = cpu_index(&cpu_info_primary); 798 cid = cpu_index(&cpu_info_primary);
799 pd = &sc->sc_percpu[cid]; 799 pd = &sc->sc_percpu[cid];
800 hv_pd = pd->md_cookie; 800 hv_pd = pd->md_cookie;
801 801
802 if (hv_pd->pd_idtvec > 0) 802 if (hv_pd->pd_idtvec > 0)
803 idt_vec_free(iv, hv_pd->pd_idtvec); 803 idt_vec_free(iv, hv_pd->pd_idtvec);
804 804
805 pd->md_cookie = NULL; 805 pd->md_cookie = NULL;
806 kmem_free(hv_pd, sizeof(*hv_pd)); 806 kmem_free(hv_pd, sizeof(*hv_pd));
807} 807}
808 808
809void 809void
810vmbus_init_synic_md(struct vmbus_softc *sc, cpuid_t cpu) 810vmbus_init_synic_md(struct vmbus_softc *sc, cpuid_t cpu)
811{ 811{
812 extern void Xintr_hyperv_hypercall(void); 812 extern void Xintr_hyperv_hypercall(void);
813 struct vmbus_percpu_data *pd, *pd0; 813 struct vmbus_percpu_data *pd, *pd0;
814 struct hyperv_percpu_data *hv_pd; 814 struct hyperv_percpu_data *hv_pd;
815 struct cpu_info *ci; 815 struct cpu_info *ci;
816 struct idt_vec *iv; 816 struct idt_vec *iv;
817 uint64_t val, orig; 817 uint64_t val, orig;
818 uint32_t sint; 818 uint32_t sint;
819 int hyperv_idtvec; 819 int hyperv_idtvec;
820 820
821 pd = &sc->sc_percpu[cpu]; 821 pd = &sc->sc_percpu[cpu];
822 822
823 hv_pd = kmem_zalloc(sizeof(*hv_pd), KM_SLEEP); 823 hv_pd = kmem_zalloc(sizeof(*hv_pd), KM_SLEEP);
824 pd->md_cookie = (void *)hv_pd; 824 pd->md_cookie = (void *)hv_pd;
825 825
826 /* Allocate IDT vector for ISR and set it up. */ 826 /* Allocate IDT vector for ISR and set it up. */
827 if (idt_vec_is_pcpu()) { 827 if (idt_vec_is_pcpu()) {
828 ci = curcpu(); 828 ci = curcpu();
829 iv = &ci->ci_idtvec; 829 iv = &ci->ci_idtvec;
830 830
831 mutex_enter(&cpu_lock); 831 mutex_enter(&cpu_lock);
832 hyperv_idtvec = idt_vec_alloc(iv, APIC_LEVEL(NIPL), IDT_INTR_HIGH); 832 hyperv_idtvec = idt_vec_alloc(iv, APIC_LEVEL(NIPL), IDT_INTR_HIGH);
833 mutex_exit(&cpu_lock); 833 mutex_exit(&cpu_lock);
834 KASSERT(hyperv_idtvec > 0); 834 KASSERT(hyperv_idtvec > 0);
835 idt_vec_set(iv, hyperv_idtvec, Xintr_hyperv_hypercall); 835 idt_vec_set(iv, hyperv_idtvec, Xintr_hyperv_hypercall);
836 836
837 /* Reuse the hv_pd allocated above rather than leaking it. */ 837 /* Reuse the hv_pd allocated above rather than leaking it. */
838 hv_pd->pd_idtvec = hyperv_idtvec; 838 hv_pd->pd_idtvec = hyperv_idtvec;
839 pd->md_cookie = hv_pd; 839 pd->md_cookie = hv_pd;
840 } else { 840 } else {
841 pd0 = &sc->sc_percpu[cpu_index(&cpu_info_primary)]; 841 pd0 = &sc->sc_percpu[cpu_index(&cpu_info_primary)];
842 hv_pd = pd0->md_cookie; 842 hv_pd = pd0->md_cookie;
843 hyperv_idtvec = hv_pd->pd_idtvec; 843 hyperv_idtvec = hv_pd->pd_idtvec;
844 } 844 }
845 845
846 /* 846 /*
847 * Set up the SynIC message page. 847 * Set up the SynIC message page.
848 */ 848 */
849 orig = rdmsr(MSR_HV_SIMP); 849 orig = rdmsr(MSR_HV_SIMP);
850 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) | 850 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
851 (atop(hyperv_dma_get_paddr(&pd->simp_dma)) << MSR_HV_SIMP_PGSHIFT); 851 (atop(hyperv_dma_get_paddr(&pd->simp_dma)) << MSR_HV_SIMP_PGSHIFT);
852 wrmsr(MSR_HV_SIMP, val); 852 wrmsr(MSR_HV_SIMP, val);
853 853
854 /* 854 /*
855 * Set up the SynIC event flags page. 855 * Set up the SynIC event flags page.
856 */ 856 */
857 orig = rdmsr(MSR_HV_SIEFP); 857 orig = rdmsr(MSR_HV_SIEFP);
858 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) | 858 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
859 (atop(hyperv_dma_get_paddr(&pd->siep_dma)) << MSR_HV_SIEFP_PGSHIFT); 859 (atop(hyperv_dma_get_paddr(&pd->siep_dma)) << MSR_HV_SIEFP_PGSHIFT);
860 wrmsr(MSR_HV_SIEFP, val); 860 wrmsr(MSR_HV_SIEFP, val);
861 861
862 /* 862 /*
863 * Configure and unmask SINT for message and event flags. 863 * Configure and unmask SINT for message and event flags.
864 */ 864 */
865 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 865 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
866 orig = rdmsr(sint); 866 orig = rdmsr(sint);
867 val = hyperv_idtvec | MSR_HV_SINT_AUTOEOI | 867 val = hyperv_idtvec | MSR_HV_SINT_AUTOEOI |
868 (orig & MSR_HV_SINT_RSVD_MASK); 868 (orig & MSR_HV_SINT_RSVD_MASK);
869 wrmsr(sint, val); 869 wrmsr(sint, val);
870 870
871 /* 871 /*
872 * Configure and unmask SINT for timer. 872 * Configure and unmask SINT for timer.
873 */ 873 */
874 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 874 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
875 orig = rdmsr(sint); 875 orig = rdmsr(sint);
876 val = hyperv_idtvec | MSR_HV_SINT_AUTOEOI | 876 val = hyperv_idtvec | MSR_HV_SINT_AUTOEOI |
877 (orig & MSR_HV_SINT_RSVD_MASK); 877 (orig & MSR_HV_SINT_RSVD_MASK);
878 wrmsr(sint, val); 878 wrmsr(sint, val);
879 879
880 /* 880 /*
881 * All done; enable SynIC. 881 * All done; enable SynIC.
882 */ 882 */
883 orig = rdmsr(MSR_HV_SCONTROL); 883 orig = rdmsr(MSR_HV_SCONTROL);
884 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK); 884 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
885 wrmsr(MSR_HV_SCONTROL, val); 885 wrmsr(MSR_HV_SCONTROL, val);
886} 886}
887 887
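Every MSR write in vmbus_init_synic_md() has the same shape: read the current value, keep the reserved bits, OR in the new fields, and write the result back. A generic sketch of that pattern with the privileged MSR accessors stubbed; the MSR number and mask in main() are illustrative assumptions:

    #include <stdint.h>

    /* Stubs standing in for the kernel's rdmsr()/wrmsr() privileged ops. */
    static uint64_t fake_msr;
    static uint64_t stub_rdmsr(uint32_t msr) { (void)msr; return fake_msr; }
    static void stub_wrmsr(uint32_t msr, uint64_t v) { (void)msr; fake_msr = v; }

    /* Read-modify-write an MSR, preserving its reserved bits. */
    static void
    msr_rmw_preserve(uint32_t msr, uint64_t rsvd_mask, uint64_t new_bits)
    {
        uint64_t orig = stub_rdmsr(msr);

        stub_wrmsr(msr, (orig & rsvd_mask) | new_bits);
    }

    int
    main(void)
    {
        /* e.g. enable a SIMP-style page: PFN field plus an enable bit. */
        msr_rmw_preserve(0x40000083 /* assumed SIMP MSR number */,
            0x0ffe /* assumed reserved mask */, 0x1 /* enable */);
        return 0;
    }
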
888void 888void
889vmbus_deinit_synic_md(struct vmbus_softc *sc, cpuid_t cpu) 889vmbus_deinit_synic_md(struct vmbus_softc *sc, cpuid_t cpu)
890{ 890{
891 struct vmbus_percpu_data *pd; 891 struct vmbus_percpu_data *pd;
892 struct hyperv_percpu_data *hv_pd; 892 struct hyperv_percpu_data *hv_pd;
893 struct cpu_info *ci; 893 struct cpu_info *ci;
894 struct idt_vec *iv; 894 struct idt_vec *iv;
895 uint64_t orig; 895 uint64_t orig;
896 uint32_t sint; 896 uint32_t sint;
897 897
898 /* 898 /*
899 * Disable SynIC. 899 * Disable SynIC.
900 */ 900 */
901 orig = rdmsr(MSR_HV_SCONTROL); 901 orig = rdmsr(MSR_HV_SCONTROL);
902 wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK)); 902 wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));
903 903
904 /* 904 /*
905 * Mask message and event flags SINT. 905 * Mask message and event flags SINT.
906 */ 906 */
907 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 907 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
908 orig = rdmsr(sint); 908 orig = rdmsr(sint);
909 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 909 wrmsr(sint, orig | MSR_HV_SINT_MASKED);
910 910
911 /* 911 /*
912 * Mask timer SINT. 912 * Mask timer SINT.
913 */ 913 */
914 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 914 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
915 orig = rdmsr(sint); 915 orig = rdmsr(sint);
916 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 916 wrmsr(sint, orig | MSR_HV_SINT_MASKED);
917 917
918 /* 918 /*
919 * Tear down the SynIC message page. 919 * Tear down the SynIC message page.
920 */ 920 */
921 orig = rdmsr(MSR_HV_SIMP); 921 orig = rdmsr(MSR_HV_SIMP);
922 wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK)); 922 wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));
923 923
924 /* 924 /*
925 * Tear down the SynIC event flags page. 925 * Tear down the SynIC event flags page.
926 */ 926 */
927 orig = rdmsr(MSR_HV_SIEFP); 927 orig = rdmsr(MSR_HV_SIEFP);
928 wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); 928 wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
929 929
930 /* 930 /*
931 * Free the IDT vector. 931 * Free the IDT vector.
932 */ 932 */
933 if (idt_vec_is_pcpu()) { 933 if (idt_vec_is_pcpu()) {
934 ci = curcpu(); 934 ci = curcpu();
935 iv = &ci->ci_idtvec; 935 iv = &ci->ci_idtvec;
936 pd = &sc->sc_percpu[cpu_index(ci)]; 936 pd = &sc->sc_percpu[cpu_index(ci)];
937 hv_pd = pd->md_cookie; 937 hv_pd = pd->md_cookie;
938 938
939 if (hv_pd->pd_idtvec > 0) 939 if (hv_pd->pd_idtvec > 0)
940 idt_vec_free(iv, hv_pd->pd_idtvec); 940 idt_vec_free(iv, hv_pd->pd_idtvec);
941 941
942 pd->md_cookie = NULL; 942 pd->md_cookie = NULL;
943 kmem_free(hv_pd, sizeof(*hv_pd)); 943 kmem_free(hv_pd, sizeof(*hv_pd));
944 } 944 }
945} 945}
946 946
947static int 947static int
948hyperv_sysctl_setup(struct hyperv_softc *sc, 948hyperv_sysctl_setup(struct hyperv_softc *sc,
949 const struct sysctlnode *hyperv_node) 949 const struct sysctlnode *hyperv_node)
950{ 950{
951 int error; 951 int error;
952 952
953 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL, 953 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL,
954 CTLFLAG_READONLY, CTLTYPE_STRING, "version", NULL, 954 CTLFLAG_READONLY, CTLTYPE_STRING, "version", NULL,
955 NULL, 0, hyperv_version_str, 955 NULL, 0, hyperv_version_str,
956 0, CTL_CREATE, CTL_EOL); 956 0, CTL_CREATE, CTL_EOL);
957 if (error) 957 if (error)
958 return error; 958 return error;
959 959
960 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL, 960 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL,
961 CTLFLAG_READONLY, CTLTYPE_STRING, "features", NULL, 961 CTLFLAG_READONLY, CTLTYPE_STRING, "features", NULL,
962 NULL, 0, hyperv_features_str, 962 NULL, 0, hyperv_features_str,
963 0, CTL_CREATE, CTL_EOL); 963 0, CTL_CREATE, CTL_EOL);
964 if (error) 964 if (error)
965 return error; 965 return error;
966 966
967 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL, 967 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL,
968 CTLFLAG_READONLY, CTLTYPE_STRING, "pm_features", NULL, 968 CTLFLAG_READONLY, CTLTYPE_STRING, "pm_features", NULL,
969 NULL, 0, hyperv_pm_features_str, 969 NULL, 0, hyperv_pm_features_str,
970 0, CTL_CREATE, CTL_EOL); 970 0, CTL_CREATE, CTL_EOL);
971 if (error) 971 if (error)
972 return error; 972 return error;
973 973
974 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL, 974 error = sysctl_createv(&sc->sc_log, 0, &hyperv_node, NULL,
975 CTLFLAG_READONLY, CTLTYPE_STRING, "features3", NULL, 975 CTLFLAG_READONLY, CTLTYPE_STRING, "features3", NULL,
976 NULL, 0, hyperv_features3_str, 976 NULL, 0, hyperv_features3_str,
977 0, CTL_CREATE, CTL_EOL); 977 0, CTL_CREATE, CTL_EOL);
978 if (error) 978 if (error)
979 return error; 979 return error;
980 980
981 return 0; 981 return 0;
982} 982}
983 983
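The nodes created above are read-only strings under machdep.hyperv, so any NetBSD userland program can read them with sysctlbyname(3). A minimal consumer:

    #include <sys/param.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    int
    main(void)
    {
        char buf[256] = "";
        size_t len = sizeof(buf);

        if (sysctlbyname("machdep.hyperv.version", buf, &len, NULL, 0) == -1) {
            perror("machdep.hyperv.version");   /* not a Hyper-V guest? */
            return 1;
        }
        printf("Hyper-V version: %s\n", buf);
        return 0;
    }
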
984static int 984static int
985hyperv_sysctl_setup_root(struct hyperv_softc *sc) 985hyperv_sysctl_setup_root(struct hyperv_softc *sc)
986{ 986{
987 const struct sysctlnode *machdep_node, *hyperv_node; 987 const struct sysctlnode *machdep_node, *hyperv_node;
988 int error; 988 int error;
989 989
990 error = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node, 990 error = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node,
991 CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL, 991 CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
992 NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL); 992 NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
993 if (error) 993 if (error)
994 goto fail; 994 goto fail;
995 995
996 error = sysctl_createv(&sc->sc_log, 0, &machdep_node, &hyperv_node, 996 error = sysctl_createv(&sc->sc_log, 0, &machdep_node, &hyperv_node,
997 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hyperv", NULL, 997 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hyperv", NULL,
998 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL); 998 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
999 if (error) 999 if (error)
1000 goto fail; 1000 goto fail;
1001 1001
1002 error = hyperv_sysctl_setup(sc, hyperv_node); 1002 error = hyperv_sysctl_setup(sc, hyperv_node);
1003 if (error) 1003 if (error)
1004 goto fail; 1004 goto fail;
1005 1005
1006 return 0; 1006 return 0;
1007 1007
1008fail: 1008fail:
1009 sysctl_teardown(&sc->sc_log); 1009 sysctl_teardown(&sc->sc_log);
1010 sc->sc_log = NULL; 1010 sc->sc_log = NULL;
1011 return error; 1011 return error;
1012} 1012}
1013 1013
1014MODULE(MODULE_CLASS_DRIVER, hyperv, NULL); 1014MODULE(MODULE_CLASS_DRIVER, hyperv, NULL);
1015 1015
1016#ifdef _MODULE 1016#ifdef _MODULE
1017#include "ioconf.c" 1017#include "ioconf.c"
1018#endif 1018#endif
1019 1019
1020static int 1020static int
1021hyperv_modcmd(modcmd_t cmd, void *aux) 1021hyperv_modcmd(modcmd_t cmd, void *aux)
1022{ 1022{
1023 int rv = 0; 1023 int rv = 0;
1024 1024
1025 switch (cmd) { 1025 switch (cmd) {
1026 case MODULE_CMD_INIT: 1026 case MODULE_CMD_INIT:
1027#ifdef _MODULE 1027#ifdef _MODULE
1028 rv = config_init_component(cfdriver_ioconf_hyperv, 1028 rv = config_init_component(cfdriver_ioconf_hyperv,
1029 cfattach_ioconf_hyperv, cfdata_ioconf_hyperv); 1029 cfattach_ioconf_hyperv, cfdata_ioconf_hyperv);
1030#endif 1030#endif
1031 hyperv_init(); 1031 hyperv_init();
1032 break; 1032 break;
1033 1033
1034 case MODULE_CMD_FINI: 1034 case MODULE_CMD_FINI:
1035#ifdef _MODULE 1035#ifdef _MODULE
1036 rv = config_fini_component(cfdriver_ioconf_hyperv, 1036 rv = config_fini_component(cfdriver_ioconf_hyperv,
1037 cfattach_ioconf_hyperv, cfdata_ioconf_hyperv); 1037 cfattach_ioconf_hyperv, cfdata_ioconf_hyperv);
1038#endif 1038#endif
1039 break; 1039 break;
1040 1040
1041 default: 1041 default:
1042 rv = ENOTTY; 1042 rv = ENOTTY;
1043 break; 1043 break;
1044 } 1044 }
1045 1045
1046 return rv; 1046 return rv;
1047} 1047}
1048 1048
1049#if NVMBUS > 0 1049#if NVMBUS > 0
1050/* 1050/*
1051 * genfb at vmbus 1051 * genfb at vmbus
1052 */ 1052 */
1053static struct genfb_pmf_callback pmf_cb; 1053static struct genfb_pmf_callback pmf_cb;
1054static struct genfb_mode_callback mode_cb; 1054static struct genfb_mode_callback mode_cb;
1055 1055
1056static bool 1056static bool
1057x86_genfb_setmode(struct genfb_softc *sc, int newmode) 1057x86_genfb_setmode(struct genfb_softc *sc, int newmode)
1058{ 1058{
1059#if NGENFB > 0 
1060 switch (newmode) { 
1061 case WSDISPLAYIO_MODE_EMUL: 
1062 x86_genfb_mtrr_init(sc->sc_fboffset, 
1063 sc->sc_height * sc->sc_stride); 
1064 break; 
1065 } 
1066#endif 
1067 return true; 1059 return true;
1068} 1060}
1069 1061
1070static bool 1062static bool
1071x86_genfb_suspend(device_t dev, const pmf_qual_t *qual) 1063x86_genfb_suspend(device_t dev, const pmf_qual_t *qual)
1072{ 1064{
1073 return true; 1065 return true;
1074} 1066}
1075 1067
1076static bool 1068static bool
1077x86_genfb_resume(device_t dev, const pmf_qual_t *qual) 1069x86_genfb_resume(device_t dev, const pmf_qual_t *qual)
1078{ 1070{
1079#if NGENFB > 0 1071#if NGENFB > 0
1080 struct genfb_vmbus_softc *sc = device_private(dev); 1072 struct genfb_vmbus_softc *sc = device_private(dev);
1081 1073
1082 genfb_restore_palette(&sc->sc_gen); 1074 genfb_restore_palette(&sc->sc_gen);
1083#endif 1075#endif
1084 return true; 1076 return true;
1085} 1077}
1086 1078
1087static void 1079static void
1088populate_fbinfo(device_t dev, prop_dictionary_t dict) 1080populate_fbinfo(device_t dev, prop_dictionary_t dict)
1089{ 1081{
1090#if NWSDISPLAY > 0 && NGENFB > 0 1082#if NWSDISPLAY > 0 && NGENFB > 0
1091 extern struct vcons_screen x86_genfb_console_screen; 1083 extern struct vcons_screen x86_genfb_console_screen;
1092 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri; 1084 struct rasops_info *ri = &x86_genfb_console_screen.scr_ri;
1093#endif 1085#endif
1094 const void *fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER); 1086 const void *fbptr = lookup_bootinfo(BTINFO_FRAMEBUFFER);
1095 struct btinfo_framebuffer fbinfo; 1087 struct btinfo_framebuffer fbinfo;
1096 1088
1097 if (fbptr == NULL) 1089 if (fbptr == NULL)
1098 return; 1090 return;
1099 1091
1100 memcpy(&fbinfo, fbptr, sizeof(fbinfo)); 1092 memcpy(&fbinfo, fbptr, sizeof(fbinfo));
1101 1093
1102 if (fbinfo.physaddr != 0) { 1094 if (fbinfo.physaddr != 0) {
1103 prop_dictionary_set_uint32(dict, "width", fbinfo.width); 1095 prop_dictionary_set_uint32(dict, "width", fbinfo.width);
1104 prop_dictionary_set_uint32(dict, "height", fbinfo.height); 1096 prop_dictionary_set_uint32(dict, "height", fbinfo.height);
1105 prop_dictionary_set_uint8(dict, "depth", fbinfo.depth); 1097 prop_dictionary_set_uint8(dict, "depth", fbinfo.depth);
1106 prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride); 1098 prop_dictionary_set_uint16(dict, "linebytes", fbinfo.stride);
1107 1099
1108 prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr); 1100 prop_dictionary_set_uint64(dict, "address", fbinfo.physaddr);
1109#if NWSDISPLAY > 0 && NGENFB > 0 1101#if NWSDISPLAY > 0 && NGENFB > 0
1110 if (ri->ri_bits != NULL) { 1102 if (ri->ri_bits != NULL) {
1111 prop_dictionary_set_uint64(dict, "virtual_address", 1103 prop_dictionary_set_uint64(dict, "virtual_address",
1112 ri->ri_hwbits != NULL ? 1104 ri->ri_hwbits != NULL ?
1113 (vaddr_t)ri->ri_hworigbits : 1105 (vaddr_t)ri->ri_hworigbits :
1114 (vaddr_t)ri->ri_origbits); 1106 (vaddr_t)ri->ri_origbits);
1115 } 1107 }
1116#endif 1108#endif
1117 } 1109 }
1118#if notyet 1110#if notyet
1119 prop_dictionary_set_bool(dict, "splash", 1111 prop_dictionary_set_bool(dict, "splash",
1120 (fbinfo.flags & BI_FB_SPLASH) != 0); 1112 (fbinfo.flags & BI_FB_SPLASH) != 0);
1121#endif 1113#endif
1122#if 0 1114#if 0
1123 if (fbinfo.depth == 8) { 1115 if (fbinfo.depth == 8) {
1124 gfb_cb.gcc_cookie = NULL; 1116 gfb_cb.gcc_cookie = NULL;
1125 gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg; 1117 gfb_cb.gcc_set_mapreg = x86_genfb_set_mapreg;
1126 prop_dictionary_set_uint64(dict, "cmap_callback", 1118 prop_dictionary_set_uint64(dict, "cmap_callback",
1127 (uint64_t)(uintptr_t)&gfb_cb); 1119 (uint64_t)(uintptr_t)&gfb_cb);
1128 } 1120 }
1129#endif 1121#endif
1130 if (fbinfo.physaddr != 0) { 1122 if (fbinfo.physaddr != 0) {
1131 mode_cb.gmc_setmode = x86_genfb_setmode; 1123 mode_cb.gmc_setmode = x86_genfb_setmode;
1132 prop_dictionary_set_uint64(dict, "mode_callback", 1124 prop_dictionary_set_uint64(dict, "mode_callback",
1133 (uint64_t)(uintptr_t)&mode_cb); 1125 (uint64_t)(uintptr_t)&mode_cb);
1134 } 1126 }
1135 1127
1136#if NWSDISPLAY > 0 && NGENFB > 0 1128#if NWSDISPLAY > 0 && NGENFB > 0
1137 if (device_is_a(dev, "genfb")) { 1129 if (device_is_a(dev, "genfb")) {
1138 prop_dictionary_set_bool(dict, "enable_shadowfb", 1130 prop_dictionary_set_bool(dict, "enable_shadowfb",
1139 ri->ri_hwbits != NULL); 1131 ri->ri_hwbits != NULL);
1140 1132
1141 x86_genfb_set_console_dev(dev); 1133 x86_genfb_set_console_dev(dev);
1142#ifdef DDB 1134#ifdef DDB
1143 db_trap_callback = x86_genfb_ddb_trap_callback; 1135 db_trap_callback = x86_genfb_ddb_trap_callback;
1144#endif 1136#endif
1145 } 1137 }
1146#endif 1138#endif
1147} 1139}
1148#endif 1140#endif
1149 1141
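populate_fbinfo() hands the framebuffer geometry to genfb through proplib device properties. The same calls work in a standalone program, which makes the dictionary shape easy to inspect; a sketch with made-up geometry, assuming NetBSD's proplib (link with -lprop):

    #include <prop/proplib.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        prop_dictionary_t dict = prop_dictionary_create();
        char *xml;

        /* Same keys populate_fbinfo() sets, with made-up values. */
        prop_dictionary_set_uint32(dict, "width", 1024);
        prop_dictionary_set_uint32(dict, "height", 768);
        prop_dictionary_set_uint8(dict, "depth", 32);
        prop_dictionary_set_uint16(dict, "linebytes", 4096);
        prop_dictionary_set_uint64(dict, "address", 0xf8000000ULL);

        xml = prop_dictionary_externalize(dict);    /* XML plist form */
        printf("%s", xml);
        free(xml);
        prop_object_release(dict);
        return 0;
    }
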
1150device_t 1142device_t
1151device_hyperv_register(device_t dev, void *aux) 1143device_hyperv_register(device_t dev, void *aux)
1152{ 1144{
1153#if NVMBUS > 0 1145#if NVMBUS > 0
1154 device_t parent = device_parent(dev); 1146 device_t parent = device_parent(dev);
1155 1147
1156 if (parent && device_is_a(parent, "vmbus") && !x86_found_console) { 1148 if (parent && device_is_a(parent, "vmbus") && !x86_found_console) {
1157 struct vmbus_attach_args *aa = aux; 1149 struct vmbus_attach_args *aa = aux;
1158 1150
1159 if (memcmp(aa->aa_type, &hyperv_guid_video, 1151 if (memcmp(aa->aa_type, &hyperv_guid_video,
1160 sizeof(*aa->aa_type)) == 0) { 1152 sizeof(*aa->aa_type)) == 0) {
1161 prop_dictionary_t dict = device_properties(dev); 1153 prop_dictionary_t dict = device_properties(dev);
1162 1154
1163 /* Initialize genfb for serial console */ 1155 /* Initialize genfb for serial console */
1164 x86_genfb_init(); 1156 x86_genfb_init();
1165 1157
1166 /* 1158 /*
1167 * framebuffer drivers other than genfb can work 1159 * framebuffer drivers other than genfb can work
1168 * without the address property 1160 * without the address property
1169 */ 1161 */
1170 populate_fbinfo(dev, dict); 1162 populate_fbinfo(dev, dict);
1171 1163
1172#if 1 && NWSDISPLAY > 0 && NGENFB > 0 1164#if 1 && NWSDISPLAY > 0 && NGENFB > 0
1173 /* XXX */ 1165 /* XXX */
1174 if (device_is_a(dev, "genfb")) { 1166 if (device_is_a(dev, "genfb")) {
1175 prop_dictionary_set_bool(dict, "is_console", 1167 prop_dictionary_set_bool(dict, "is_console",
1176 genfb_is_console()); 1168 genfb_is_console());
1177 } else 1169 } else
1178#endif 1170#endif
1179 prop_dictionary_set_bool(dict, "is_console", true); 1171 prop_dictionary_set_bool(dict, "is_console", true);
1180 1172
1181 prop_dictionary_set_bool(dict, "clear-screen", false); 1173 prop_dictionary_set_bool(dict, "clear-screen", false);
1182#if NWSDISPLAY > 0 && NGENFB > 0 1174#if NWSDISPLAY > 0 && NGENFB > 0
1183 extern struct vcons_screen x86_genfb_console_screen; 1175 extern struct vcons_screen x86_genfb_console_screen;
1184 prop_dictionary_set_uint16(dict, "cursor-row", 1176 prop_dictionary_set_uint16(dict, "cursor-row",
1185 x86_genfb_console_screen.scr_ri.ri_crow); 1177 x86_genfb_console_screen.scr_ri.ri_crow);
1186#endif 1178#endif
1187 pmf_cb.gpc_suspend = x86_genfb_suspend; 1179 pmf_cb.gpc_suspend = x86_genfb_suspend;
1188 pmf_cb.gpc_resume = x86_genfb_resume; 1180 pmf_cb.gpc_resume = x86_genfb_resume;
1189 prop_dictionary_set_uint64(dict, "pmf_callback", 1181 prop_dictionary_set_uint64(dict, "pmf_callback",
1190 (uint64_t)(uintptr_t)&pmf_cb); 1182 (uint64_t)(uintptr_t)&pmf_cb);
1191 x86_found_console = true; 1183 x86_found_console = true;
1192 return NULL; 1184 return NULL;
1193 } 1185 }
1194 } 1186 }
1195#endif 1187#endif
1196 return NULL; 1188 return NULL;
1197} 1189}