Sun Aug 28 06:04:18 2011 UTC ()
Add some code for grovelling in the PCI configuration space for all
of the memory & I/O space reserved by the PCI BIOS for PCI devices
(including bridges) and recording that information for later use.

The code adds between 13 KB and 50 KB to the kernel (the amount depends on
the architecture and, bizarrely, the kernel configuration), so I am going to
move it from pci_machdep.c into its own module on Monday.


(dyoung)
diff -r1.7 -r1.8 src/sys/arch/x86/include/pci_machdep_common.h
diff -r1.47 -r1.48 src/sys/arch/x86/pci/pci_machdep.c

cvs diff -r1.7 -r1.8 src/sys/arch/x86/include/pci_machdep_common.h (switch to unified diff)

--- src/sys/arch/x86/include/pci_machdep_common.h 2011/08/01 11:08:03 1.7
+++ src/sys/arch/x86/include/pci_machdep_common.h 2011/08/28 06:04:17 1.8
@@ -1,146 +1,150 @@ @@ -1,146 +1,150 @@
1/* $NetBSD: pci_machdep_common.h,v 1.7 2011/08/01 11:08:03 drochner Exp $ */ 1/* $NetBSD: pci_machdep_common.h,v 1.8 2011/08/28 06:04:17 dyoung Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 4 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
5 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 5 * Copyright (c) 1994 Charles M. Hannum. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software 15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement: 16 * must display the following acknowledgement:
17 * This product includes software developed by Charles M. Hannum. 17 * This product includes software developed by Charles M. Hannum.
18 * 4. The name of the author may not be used to endorse or promote products 18 * 4. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission. 19 * derived from this software without specific prior written permission.
20 * 20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#ifndef _X86_PCI_MACHDEP_COMMON_H_ 33#ifndef _X86_PCI_MACHDEP_COMMON_H_
34#define _X86_PCI_MACHDEP_COMMON_H_ 34#define _X86_PCI_MACHDEP_COMMON_H_
35 35
36/* 36/*
37 * Machine-specific definitions for PCI autoconfiguration. 37 * Machine-specific definitions for PCI autoconfiguration.
38 */ 38 */
39#define __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 39#define __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
40#ifndef XEN 40#ifndef XEN
41#define __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH 41#define __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH
42#endif 42#endif
43 43
44/* 44/*
45 * i386-specific PCI structure and type definitions. 45 * i386-specific PCI structure and type definitions.
46 * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE. 46 * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE.
47 * 47 *
48 * Configuration tag; created from a {bus,device,function} triplet by 48 * Configuration tag; created from a {bus,device,function} triplet by
49 * pci_make_tag(), and passed to pci_conf_read() and pci_conf_write(). 49 * pci_make_tag(), and passed to pci_conf_read() and pci_conf_write().
50 * We could instead always pass the {bus,device,function} triplet to 50 * We could instead always pass the {bus,device,function} triplet to
51 * the read and write routines, but this would cause extra overhead. 51 * the read and write routines, but this would cause extra overhead.
52 * 52 *
53 * Mode 2 is historical and deprecated by the Revision 2.0 specification. 53 * Mode 2 is historical and deprecated by the Revision 2.0 specification.
54 */ 54 */
55union x86_pci_tag_u { 55union x86_pci_tag_u {
56 uint32_t mode1; 56 uint32_t mode1;
57 struct { 57 struct {
58 uint16_t port; 58 uint16_t port;
59 uint8_t enable; 59 uint8_t enable;
60 uint8_t forward; 60 uint8_t forward;
61 } mode2; 61 } mode2;
62}; 62};
63 63
64extern struct x86_bus_dma_tag pci_bus_dma_tag; 64extern struct x86_bus_dma_tag pci_bus_dma_tag;
65#ifdef _LP64 65#ifdef _LP64
66extern struct x86_bus_dma_tag pci_bus_dma64_tag; 66extern struct x86_bus_dma_tag pci_bus_dma64_tag;
67#endif 67#endif
68 68
69struct pci_attach_args; 69struct pci_attach_args;
70struct pci_chipset_tag; 70struct pci_chipset_tag;
71 71
72/* 72/*
73 * Types provided to machine-independent PCI code 73 * Types provided to machine-independent PCI code
74 */ 74 */
75typedef struct pci_chipset_tag *pci_chipset_tag_t; 75typedef struct pci_chipset_tag *pci_chipset_tag_t;
76typedef union x86_pci_tag_u pcitag_t; 76typedef union x86_pci_tag_u pcitag_t;
77 77
78struct pci_chipset_tag { 78struct pci_chipset_tag {
79 pci_chipset_tag_t pc_super; 79 pci_chipset_tag_t pc_super;
80 uint64_t pc_present; 80 uint64_t pc_present;
81 const struct pci_overrides *pc_ov; 81 const struct pci_overrides *pc_ov;
82 void *pc_ctx; 82 void *pc_ctx;
83}; 83};
84 84
85/* 85/*
86 * i386-specific PCI variables and functions. 86 * i386-specific PCI variables and functions.
87 * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE. 87 * NOT TO BE USED DIRECTLY BY MACHINE INDEPENDENT CODE.
88 */ 88 */
89int pci_bus_flags(void); 89int pci_bus_flags(void);
90int pci_mode_detect(void); 90int pci_mode_detect(void);
91void pci_mode_set(int); 91void pci_mode_set(int);
92 92
93/* 93/*
94 * Functions provided to machine-independent PCI code. 94 * Functions provided to machine-independent PCI code.
95 */ 95 */
96void pci_attach_hook(device_t, device_t, 96void pci_attach_hook(device_t, device_t,
97 struct pcibus_attach_args *); 97 struct pcibus_attach_args *);
98int pci_bus_maxdevs(pci_chipset_tag_t, int); 98int pci_bus_maxdevs(pci_chipset_tag_t, int);
99pcitag_t pci_make_tag(pci_chipset_tag_t, int, int, int); 99pcitag_t pci_make_tag(pci_chipset_tag_t, int, int, int);
100void pci_decompose_tag(pci_chipset_tag_t, pcitag_t, 100void pci_decompose_tag(pci_chipset_tag_t, pcitag_t,
101 int *, int *, int *); 101 int *, int *, int *);
102pcireg_t pci_conf_read(pci_chipset_tag_t, pcitag_t, int); 102pcireg_t pci_conf_read(pci_chipset_tag_t, pcitag_t, int);
103void pci_conf_write(pci_chipset_tag_t, pcitag_t, int, 103void pci_conf_write(pci_chipset_tag_t, pcitag_t, int,
104 pcireg_t); 104 pcireg_t);
105int pci_intr_map(const struct pci_attach_args *, 105int pci_intr_map(const struct pci_attach_args *,
106 pci_intr_handle_t *); 106 pci_intr_handle_t *);
107const char *pci_intr_string(pci_chipset_tag_t, pci_intr_handle_t); 107const char *pci_intr_string(pci_chipset_tag_t, pci_intr_handle_t);
108const struct evcnt *pci_intr_evcnt(pci_chipset_tag_t, pci_intr_handle_t); 108const struct evcnt *pci_intr_evcnt(pci_chipset_tag_t, pci_intr_handle_t);
109void *pci_intr_establish(pci_chipset_tag_t, pci_intr_handle_t, 109void *pci_intr_establish(pci_chipset_tag_t, pci_intr_handle_t,
110 int, int (*)(void *), void *); 110 int, int (*)(void *), void *);
111void pci_intr_disestablish(pci_chipset_tag_t, void *); 111void pci_intr_disestablish(pci_chipset_tag_t, void *);
112 112
113/* experimental MSI support */ 113/* experimental MSI support */
114void *pci_msi_establish(struct pci_attach_args *, int, int (*)(void *), void *); 114void *pci_msi_establish(struct pci_attach_args *, int, int (*)(void *), void *);
115void pci_msi_disestablish(void *); 115void pci_msi_disestablish(void *);
116 116
117/* 117/*
118 * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED 118 * ALL OF THE FOLLOWING ARE MACHINE-DEPENDENT, AND SHOULD NOT BE USED
119 * BY PORTABLE CODE. 119 * BY PORTABLE CODE.
120 */ 120 */
121 121
122/* Extract Bus Number for a host bridge or -1 if unknown. */ 122/* Extract Bus Number for a host bridge or -1 if unknown. */
123int pchb_get_bus_number(pci_chipset_tag_t, pcitag_t); 123int pchb_get_bus_number(pci_chipset_tag_t, pcitag_t);
124 124
125/* 125/*
126 * Section 6.2.4, `Miscellaneous Functions' of the PCI Specification, 126 * Section 6.2.4, `Miscellaneous Functions' of the PCI Specification,
127 * says that 255 means `unknown' or `no connection' to the interrupt 127 * says that 255 means `unknown' or `no connection' to the interrupt
128 * controller on a PC. 128 * controller on a PC.
129 */ 129 */
130#define X86_PCI_INTERRUPT_LINE_NO_CONNECTION 0xff 130#define X86_PCI_INTERRUPT_LINE_NO_CONNECTION 0xff
131 131
132void pci_device_foreach(pci_chipset_tag_t, int, 132void pci_device_foreach(pci_chipset_tag_t, int,
133 void (*)(pci_chipset_tag_t, pcitag_t, void*), 133 void (*)(pci_chipset_tag_t, pcitag_t, void*),
134 void *); 134 void *);
135  135
136void pci_device_foreach_min(pci_chipset_tag_t, int, int, 136void pci_device_foreach_min(pci_chipset_tag_t, int, int,
137 void (*)(pci_chipset_tag_t, pcitag_t, void*), 137 void (*)(pci_chipset_tag_t, pcitag_t, void*),
138 void *); 138 void *);
139  139
140void pci_bridge_foreach(pci_chipset_tag_t, int, int, 140void pci_bridge_foreach(pci_chipset_tag_t, int, int,
141 void (*) (pci_chipset_tag_t, pcitag_t, void *), void *); 141 void (*) (pci_chipset_tag_t, pcitag_t, void *), void *);
142 142
143void pci_mmio_range_infer(pci_chipset_tag_t, int, int, bus_addr_t *, 143void pci_ranges_infer(pci_chipset_tag_t, int, int, bus_addr_t *,
144 bus_size_t *); 144 bus_size_t *, bus_addr_t *, bus_size_t *);
 145
 146extern prop_dictionary_t pci_rsrc_dict;
 147prop_dictionary_t pci_rsrc_filter(prop_dictionary_t,
 148 bool (*)(void *, prop_dictionary_t), void *arg);
145 149
146#endif /* _X86_PCI_MACHDEP_COMMON_H_ */ 150#endif /* _X86_PCI_MACHDEP_COMMON_H_ */

cvs diff -r1.47 -r1.48 src/sys/arch/x86/pci/pci_machdep.c (switch to unified diff)

--- src/sys/arch/x86/pci/pci_machdep.c 2011/08/28 04:59:37 1.47
+++ src/sys/arch/x86/pci/pci_machdep.c 2011/08/28 06:04:18 1.48
@@ -1,837 +1,1752 @@ @@ -1,837 +1,1752 @@
1/* $NetBSD: pci_machdep.c,v 1.47 2011/08/28 04:59:37 dyoung Exp $ */ 1/* $NetBSD: pci_machdep.c,v 1.48 2011/08/28 06:04:18 dyoung Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center. 9 * NASA Ames Research Center.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution. 18 * documentation and/or other materials provided with the distribution.
19 * 19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33/* 33/*
34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved. 34 * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved. 35 * Copyright (c) 1994 Charles M. Hannum. All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions 38 * modification, are permitted provided that the following conditions
39 * are met: 39 * are met:
40 * 1. Redistributions of source code must retain the above copyright 40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer. 41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright 42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the 43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution. 44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software 45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement: 46 * must display the following acknowledgement:
47 * This product includes software developed by Charles M. Hannum. 47 * This product includes software developed by Charles M. Hannum.
48 * 4. The name of the author may not be used to endorse or promote products 48 * 4. The name of the author may not be used to endorse or promote products
49 * derived from this software without specific prior written permission. 49 * derived from this software without specific prior written permission.
50 * 50 *
51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 53 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 54 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 55 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 56 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 */ 61 */
62 62
63/* 63/*
64 * Machine-specific functions for PCI autoconfiguration. 64 * Machine-specific functions for PCI autoconfiguration.
65 * 65 *
66 * On PCs, there are two methods of generating PCI configuration cycles. 66 * On PCs, there are two methods of generating PCI configuration cycles.
67 * We try to detect the appropriate mechanism for this machine and set 67 * We try to detect the appropriate mechanism for this machine and set
68 * up a few function pointers to access the correct method directly. 68 * up a few function pointers to access the correct method directly.
69 * 69 *
70 * The configuration method can be hard-coded in the config file by 70 * The configuration method can be hard-coded in the config file by
71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode 71 * using `options PCI_CONF_MODE=N', where `N' is the configuration mode
72 * as defined section 3.6.4.1, `Generating Configuration Cycles'. 72 * as defined section 3.6.4.1, `Generating Configuration Cycles'.
73 */ 73 */
74 74
75#include <sys/cdefs.h> 75#include <sys/cdefs.h>
76__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.47 2011/08/28 04:59:37 dyoung Exp $"); 76__KERNEL_RCSID(0, "$NetBSD: pci_machdep.c,v 1.48 2011/08/28 06:04:18 dyoung Exp $");
77 77
78#include <sys/types.h> 78#include <sys/types.h>
79#include <sys/param.h> 79#include <sys/param.h>
80#include <sys/time.h> 80#include <sys/time.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
82#include <sys/errno.h> 82#include <sys/errno.h>
83#include <sys/device.h> 83#include <sys/device.h>
84#include <sys/bus.h> 84#include <sys/bus.h>
85#include <sys/cpu.h> 85#include <sys/cpu.h>
86#include <sys/kmem.h> 86#include <sys/kmem.h>
87 87
 88#include <prop/proplib.h>
 89#include <ppath/ppath.h>
 90
88#include <uvm/uvm_extern.h> 91#include <uvm/uvm_extern.h>
89 92
90#include <machine/bus_private.h> 93#include <machine/bus_private.h>
91 94
92#include <machine/pio.h> 95#include <machine/pio.h>
93#include <machine/lock.h> 96#include <machine/lock.h>
94 97
95#include <dev/isa/isareg.h> 98#include <dev/isa/isareg.h>
96#include <dev/isa/isavar.h> 99#include <dev/isa/isavar.h>
97#include <dev/pci/pcivar.h> 100#include <dev/pci/pcivar.h>
98#include <dev/pci/pcireg.h> 101#include <dev/pci/pcireg.h>
99#include <dev/pci/pccbbreg.h> 102#include <dev/pci/pccbbreg.h>
100#include <dev/pci/pcidevs.h> 103#include <dev/pci/pcidevs.h>
101 104
102#include "acpica.h" 105#include "acpica.h"
103#include "opt_mpbios.h" 106#include "opt_mpbios.h"
104#include "opt_acpi.h" 107#include "opt_acpi.h"
105 108
106#ifdef MPBIOS 109#ifdef MPBIOS
107#include <machine/mpbiosvar.h> 110#include <machine/mpbiosvar.h>
108#endif 111#endif
109 112
110#if NACPICA > 0 113#if NACPICA > 0
111#include <machine/mpacpi.h> 114#include <machine/mpacpi.h>
112#endif 115#endif
113 116
114#include <machine/mpconfig.h> 117#include <machine/mpconfig.h>
115 118
116#include "opt_pci_conf_mode.h" 119#include "opt_pci_conf_mode.h"
117 120
118#ifdef __i386__ 121#ifdef __i386__
119#include "opt_xbox.h" 122#include "opt_xbox.h"
120#ifdef XBOX 123#ifdef XBOX
121#include <machine/xbox.h> 124#include <machine/xbox.h>
122#endif 125#endif
123#endif 126#endif
124 127
125#ifdef PCI_CONF_MODE 128#ifdef PCI_CONF_MODE
126#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2) 129#if (PCI_CONF_MODE == 1) || (PCI_CONF_MODE == 2)
127static int pci_mode = PCI_CONF_MODE; 130static int pci_mode = PCI_CONF_MODE;
128#else 131#else
129#error Invalid PCI configuration mode. 132#error Invalid PCI configuration mode.
130#endif 133#endif
131#else 134#else
132static int pci_mode = -1; 135static int pci_mode = -1;
133#endif 136#endif
134 137
135struct pci_conf_lock { 138struct pci_conf_lock {
136 uint32_t cl_cpuno; /* 0: unlocked 139 uint32_t cl_cpuno; /* 0: unlocked
137 * 1 + n: locked by CPU n (0 <= n) 140 * 1 + n: locked by CPU n (0 <= n)
138 */ 141 */
139 uint32_t cl_sel; /* the address that's being read. */ 142 uint32_t cl_sel; /* the address that's being read. */
140}; 143};
141 144
142static void pci_conf_unlock(struct pci_conf_lock *); 145static void pci_conf_unlock(struct pci_conf_lock *);
143static uint32_t pci_conf_selector(pcitag_t, int); 146static uint32_t pci_conf_selector(pcitag_t, int);
144static unsigned int pci_conf_port(pcitag_t, int); 147static unsigned int pci_conf_port(pcitag_t, int);
145static void pci_conf_select(uint32_t); 148static void pci_conf_select(uint32_t);
146static void pci_conf_lock(struct pci_conf_lock *, uint32_t); 149static void pci_conf_lock(struct pci_conf_lock *, uint32_t);
147static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *); 150static void pci_bridge_hook(pci_chipset_tag_t, pcitag_t, void *);
148struct pci_bridge_hook_arg { 151struct pci_bridge_hook_arg {
149 void (*func)(pci_chipset_tag_t, pcitag_t, void *);  152 void (*func)(pci_chipset_tag_t, pcitag_t, void *);
150 void *arg;  153 void *arg;
151};  154};
152 155
153#define PCI_MODE1_ENABLE 0x80000000UL 156#define PCI_MODE1_ENABLE 0x80000000UL
154#define PCI_MODE1_ADDRESS_REG 0x0cf8 157#define PCI_MODE1_ADDRESS_REG 0x0cf8
155#define PCI_MODE1_DATA_REG 0x0cfc 158#define PCI_MODE1_DATA_REG 0x0cfc
156 159
157#define PCI_MODE2_ENABLE_REG 0x0cf8 160#define PCI_MODE2_ENABLE_REG 0x0cf8
158#define PCI_MODE2_FORWARD_REG 0x0cfa 161#define PCI_MODE2_FORWARD_REG 0x0cfa
159 162
160#define _m1tag(b, d, f) \ 163#define _m1tag(b, d, f) \
161 (PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8)) 164 (PCI_MODE1_ENABLE | ((b) << 16) | ((d) << 11) | ((f) << 8))
162#define _qe(bus, dev, fcn, vend, prod) \ 165#define _qe(bus, dev, fcn, vend, prod) \
163 {_m1tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)} 166 {_m1tag(bus, dev, fcn), PCI_ID_CODE(vend, prod)}
164struct { 167struct {
165 uint32_t tag; 168 uint32_t tag;
166 pcireg_t id; 169 pcireg_t id;
167} pcim1_quirk_tbl[] = { 170} pcim1_quirk_tbl[] = {
168 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1), 171 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX1),
169 /* XXX Triflex2 not tested */ 172 /* XXX Triflex2 not tested */
170 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2), 173 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX2),
171 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4), 174 _qe(0, 0, 0, PCI_VENDOR_COMPAQ, PCI_PRODUCT_COMPAQ_TRIFLEX4),
172 /* Triton needed for Connectix Virtual PC */ 175 /* Triton needed for Connectix Virtual PC */
173 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX), 176 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82437FX),
174 /* Connectix Virtual PC 5 has a 440BX */ 177 /* Connectix Virtual PC 5 has a 440BX */
175 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP), 178 _qe(0, 0, 0, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82443BX_NOAGP),
176 /* Parallels Desktop for Mac */ 179 /* Parallels Desktop for Mac */
177 _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO), 180 _qe(0, 2, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_VIDEO),
178 _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS), 181 _qe(0, 3, 0, PCI_VENDOR_PARALLELS, PCI_PRODUCT_PARALLELS_TOOLS),
179 /* SIS 740 */ 182 /* SIS 740 */
180 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740), 183 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_740),
181 /* SIS 741 */ 184 /* SIS 741 */
182 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741), 185 _qe(0, 0, 0, PCI_VENDOR_SIS, PCI_PRODUCT_SIS_741),
183 {0, 0xffffffff} /* patchable */ 186 {0, 0xffffffff} /* patchable */
184}; 187};
185#undef _m1tag 188#undef _m1tag
186#undef _id 189#undef _id
187#undef _qe 190#undef _qe
188 191
189/* 192/*
190 * PCI doesn't have any special needs; just use the generic versions 193 * PCI doesn't have any special needs; just use the generic versions
191 * of these functions. 194 * of these functions.
192 */ 195 */
193struct x86_bus_dma_tag pci_bus_dma_tag = { 196struct x86_bus_dma_tag pci_bus_dma_tag = {
194 ._tag_needs_free = 0, 197 ._tag_needs_free = 0,
195#if defined(_LP64) || defined(PAE) 198#if defined(_LP64) || defined(PAE)
196 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD, 199 ._bounce_thresh = PCI32_DMA_BOUNCE_THRESHOLD,
197 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD, 200 ._bounce_alloc_lo = ISA_DMA_BOUNCE_THRESHOLD,
198 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD, 201 ._bounce_alloc_hi = PCI32_DMA_BOUNCE_THRESHOLD,
199#else 202#else
200 ._bounce_thresh = 0, 203 ._bounce_thresh = 0,
201 ._bounce_alloc_lo = 0, 204 ._bounce_alloc_lo = 0,
202 ._bounce_alloc_hi = 0, 205 ._bounce_alloc_hi = 0,
203#endif 206#endif
204 ._may_bounce = NULL, 207 ._may_bounce = NULL,
205 208
206 ._dmamap_create = _bus_dmamap_create, 209 ._dmamap_create = _bus_dmamap_create,
207 ._dmamap_destroy = _bus_dmamap_destroy, 210 ._dmamap_destroy = _bus_dmamap_destroy,
208 ._dmamap_load = _bus_dmamap_load, 211 ._dmamap_load = _bus_dmamap_load,
209 ._dmamap_load_mbuf = _bus_dmamap_load_mbuf, 212 ._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
210 ._dmamap_load_uio = _bus_dmamap_load_uio, 213 ._dmamap_load_uio = _bus_dmamap_load_uio,
211 ._dmamap_load_raw = _bus_dmamap_load_raw, 214 ._dmamap_load_raw = _bus_dmamap_load_raw,
212 ._dmamap_unload = _bus_dmamap_unload, 215 ._dmamap_unload = _bus_dmamap_unload,
213 ._dmamap_sync = _bus_dmamap_sync, 216 ._dmamap_sync = _bus_dmamap_sync,
214 217
215 ._dmamem_alloc = _bus_dmamem_alloc, 218 ._dmamem_alloc = _bus_dmamem_alloc,
216 ._dmamem_free = _bus_dmamem_free, 219 ._dmamem_free = _bus_dmamem_free,
217 ._dmamem_map = _bus_dmamem_map, 220 ._dmamem_map = _bus_dmamem_map,
218 ._dmamem_unmap = _bus_dmamem_unmap, 221 ._dmamem_unmap = _bus_dmamem_unmap,
219 ._dmamem_mmap = _bus_dmamem_mmap, 222 ._dmamem_mmap = _bus_dmamem_mmap,
220 223
221 ._dmatag_subregion = _bus_dmatag_subregion, 224 ._dmatag_subregion = _bus_dmatag_subregion,
222 ._dmatag_destroy = _bus_dmatag_destroy, 225 ._dmatag_destroy = _bus_dmatag_destroy,
223}; 226};
224 227
225#ifdef _LP64 228#ifdef _LP64
226struct x86_bus_dma_tag pci_bus_dma64_tag = { 229struct x86_bus_dma_tag pci_bus_dma64_tag = {
227 ._tag_needs_free = 0, 230 ._tag_needs_free = 0,
228 ._bounce_thresh = 0, 231 ._bounce_thresh = 0,
229 ._bounce_alloc_lo = 0, 232 ._bounce_alloc_lo = 0,
230 ._bounce_alloc_hi = 0, 233 ._bounce_alloc_hi = 0,
231 ._may_bounce = NULL, 234 ._may_bounce = NULL,
232 235
233 ._dmamap_create = _bus_dmamap_create, 236 ._dmamap_create = _bus_dmamap_create,
234 ._dmamap_destroy = _bus_dmamap_destroy, 237 ._dmamap_destroy = _bus_dmamap_destroy,
235 ._dmamap_load = _bus_dmamap_load, 238 ._dmamap_load = _bus_dmamap_load,
236 ._dmamap_load_mbuf = _bus_dmamap_load_mbuf, 239 ._dmamap_load_mbuf = _bus_dmamap_load_mbuf,
237 ._dmamap_load_uio = _bus_dmamap_load_uio, 240 ._dmamap_load_uio = _bus_dmamap_load_uio,
238 ._dmamap_load_raw = _bus_dmamap_load_raw, 241 ._dmamap_load_raw = _bus_dmamap_load_raw,
239 ._dmamap_unload = _bus_dmamap_unload, 242 ._dmamap_unload = _bus_dmamap_unload,
240 ._dmamap_sync = NULL, 243 ._dmamap_sync = NULL,
241 244
242 ._dmamem_alloc = _bus_dmamem_alloc, 245 ._dmamem_alloc = _bus_dmamem_alloc,
243 ._dmamem_free = _bus_dmamem_free, 246 ._dmamem_free = _bus_dmamem_free,
244 ._dmamem_map = _bus_dmamem_map, 247 ._dmamem_map = _bus_dmamem_map,
245 ._dmamem_unmap = _bus_dmamem_unmap, 248 ._dmamem_unmap = _bus_dmamem_unmap,
246 ._dmamem_mmap = _bus_dmamem_mmap, 249 ._dmamem_mmap = _bus_dmamem_mmap,
247 250
248 ._dmatag_subregion = _bus_dmatag_subregion, 251 ._dmatag_subregion = _bus_dmatag_subregion,
249 ._dmatag_destroy = _bus_dmatag_destroy, 252 ._dmatag_destroy = _bus_dmatag_destroy,
250}; 253};
251#endif 254#endif
252 255
253static struct pci_conf_lock cl0 = { 256static struct pci_conf_lock cl0 = {
254 .cl_cpuno = 0UL 257 .cl_cpuno = 0UL
255 , .cl_sel = 0UL 258 , .cl_sel = 0UL
256}; 259};
257 260
258static struct pci_conf_lock * const cl = &cl0; 261static struct pci_conf_lock * const cl = &cl0;
259 262
/*
 * Acquire the global PCI configuration lock and program the
 * configuration-space selector with `sel'.  The previous owner and
 * selector are saved in *ocl so that pci_conf_unlock() can restore
 * them, which makes the lock effectively recursive for an interrupt
 * handler running on the CPU that already holds it.  Preemption is
 * disabled here and re-enabled in pci_conf_unlock().
 */
static void
pci_conf_lock(struct pci_conf_lock *ocl, uint32_t sel)
{
	uint32_t cpuno;

	KASSERT(sel != 0);

	kpreempt_disable();
	/* cpu_number() + 1: owner 0 is reserved to mean "unowned". */
	cpuno = cpu_number() + 1;
	/* If the kernel enters pci_conf_lock() through an interrupt
	 * handler, then the CPU may already hold the lock.
	 *
	 * If the CPU does not already hold the lock, spin until
	 * we can acquire it.
	 */
	if (cpuno == cl->cl_cpuno) {
		ocl->cl_cpuno = cpuno;
	} else {
		u_int spins;

		/* Mark the outermost hold; unlock will release. */
		ocl->cl_cpuno = 0;

		spins = SPINLOCK_BACKOFF_MIN;
		while (atomic_cas_32(&cl->cl_cpuno, 0, cpuno) != 0) {
			SPINLOCK_BACKOFF(spins);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins)) {
				panic("%s: cpu %" PRId32
				    " spun out waiting for cpu %" PRId32,
				    __func__, cpuno, cl->cl_cpuno);
			}
#endif	/* LOCKDEBUG */
		}
	}

	/* Only one CPU can be here, so an interlocked atomic_swap(3)
	 * is not necessary.
	 *
	 * Evaluating atomic_cas_32_ni()'s argument, cl->cl_sel,
	 * and applying atomic_cas_32_ni() is not an atomic operation,
	 * however, any interrupt that, in the middle of the
	 * operation, modifies cl->cl_sel, will also restore
	 * cl->cl_sel.  So cl->cl_sel will have the same value when
	 * we apply atomic_cas_32_ni() as when we evaluated it,
	 * before.
	 */
	ocl->cl_sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, sel);
	pci_conf_select(sel);
}
309 312
/*
 * Undo a matching pci_conf_lock(): restore the saved selector,
 * release ownership if this was the outermost hold on this CPU
 * (ocl->cl_cpuno == 0), and re-enable preemption.
 */
static void
pci_conf_unlock(struct pci_conf_lock *ocl)
{
	uint32_t sel;

	/* Swap the saved selector back in; result intentionally unused. */
	sel = atomic_cas_32_ni(&cl->cl_sel, cl->cl_sel, ocl->cl_sel);
	pci_conf_select(ocl->cl_sel);
	/* Nested (interrupt) holds saved the same owner; skip the CAS. */
	if (ocl->cl_cpuno != cl->cl_cpuno)
		atomic_cas_32(&cl->cl_cpuno, cl->cl_cpuno, ocl->cl_cpuno);
	kpreempt_enable();
}
321 324
/*
 * Encode tag+register as the 32-bit "selector" value consumed by
 * pci_conf_select().  For mode 2 only the enable and forward bytes
 * are kept: the register offset is folded into the I/O port address
 * by pci_conf_port() instead.
 */
static uint32_t
pci_conf_selector(pcitag_t tag, int reg)
{
	/* Union pun: mask is built through .mode2, applied as .mode1. */
	static const pcitag_t mode2_mask = {
		.mode2 = {
			.enable = 0xff
			, .forward = 0xff
		}
	};

	switch (pci_mode) {
	case 1:
		return tag.mode1 | reg;
	case 2:
		return tag.mode1 & mode2_mask.mode1;
	default:
		panic("%s: mode not configured", __func__);
	}
}
341 344
342static unsigned int 345static unsigned int
343pci_conf_port(pcitag_t tag, int reg) 346pci_conf_port(pcitag_t tag, int reg)
344{ 347{
345 switch (pci_mode) { 348 switch (pci_mode) {
346 case 1: 349 case 1:
347 return PCI_MODE1_DATA_REG; 350 return PCI_MODE1_DATA_REG;
348 case 2: 351 case 2:
349 return tag.mode2.port | reg; 352 return tag.mode2.port | reg;
350 default: 353 default:
351 panic("%s: mode not configured", __func__); 354 panic("%s: mode not configured", __func__);
352 } 355 }
353} 356}
354 357
/*
 * Program the configuration-space selection registers from a selector
 * previously built by pci_conf_selector().  For mode 1 this is a
 * single 32-bit write; for mode 2 the enable byte is written first and
 * the forward (bus) byte is written only while enabled, so the order
 * of the two outb()s matters.
 */
static void
pci_conf_select(uint32_t sel)
{
	pcitag_t tag;

	switch (pci_mode) {
	case 1:
		outl(PCI_MODE1_ADDRESS_REG, sel);
		return;
	case 2:
		/* Unpack the selector through the tag union. */
		tag.mode1 = sel;
		outb(PCI_MODE2_ENABLE_REG, tag.mode2.enable);
		if (tag.mode2.enable != 0)
			outb(PCI_MODE2_FORWARD_REG, tag.mode2.forward);
		return;
	default:
		panic("%s: mode not configured", __func__);
	}
}
374 377
/*
 * Machine-dependent hook called while attaching a PCI bus instance.
 * Reports the configuration mechanism in use on the root bus and lets
 * the MP BIOS and/or ACPI code take note of the new bus.
 */
void
pci_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba)
{

	if (pba->pba_bus == 0)
		aprint_normal(": configuration mode %d", pci_mode);
#ifdef MPBIOS
	mpbios_pci_attach_hook(parent, self, pba);
#endif
#if NACPICA > 0
	mpacpi_pci_attach_hook(parent, self, pba);
#endif
}
388 391
389int 392int
390pci_bus_maxdevs(pci_chipset_tag_t pc, int busno) 393pci_bus_maxdevs(pci_chipset_tag_t pc, int busno)
391{ 394{
392 395
393#if defined(__i386__) && defined(XBOX) 396#if defined(__i386__) && defined(XBOX)
394 /* 397 /*
395 * Scanning above the first device is fatal on the Microsoft Xbox. 398 * Scanning above the first device is fatal on the Microsoft Xbox.
396 * If busno=1, only allow for one device. 399 * If busno=1, only allow for one device.
397 */ 400 */
398 if (arch_i386_is_xbox) { 401 if (arch_i386_is_xbox) {
399 if (busno == 1) 402 if (busno == 1)
400 return 1; 403 return 1;
401 else if (busno > 1) 404 else if (busno > 1)
402 return 0; 405 return 0;
403 } 406 }
404#endif 407#endif
405 408
406 /* 409 /*
407 * Bus number is irrelevant. If Configuration Mechanism 2 is in 410 * Bus number is irrelevant. If Configuration Mechanism 2 is in
408 * use, can only have devices 0-15 on any bus. If Configuration 411 * use, can only have devices 0-15 on any bus. If Configuration
409 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal' 412 * Mechanism 1 is in use, can have devices 0-32 (i.e. the `normal'
410 * range). 413 * range).
411 */ 414 */
412 if (pci_mode == 2) 415 if (pci_mode == 2)
413 return (16); 416 return (16);
414 else 417 else
415 return (32); 418 return (32);
416} 419}
417 420
/*
 * Build a configuration tag for bus/device/function.  A chipset
 * override installed anywhere in the pc_super chain takes precedence
 * over the mode-1/mode-2 encodings below.
 */
pcitag_t
pci_make_tag(pci_chipset_tag_t pc, int bus, int device, int function)
{
	pci_chipset_tag_t ipc;
	pcitag_t tag;

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_MAKE_TAG) == 0)
			continue;
		return (*ipc->pc_ov->ov_make_tag)(ipc->pc_ctx,
		    pc, bus, device, function);
	}

	switch (pci_mode) {
	case 1:
		/* Mechanism 1: enable | bus | device | function. */
		if (bus >= 256 || device >= 32 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode1 = PCI_MODE1_ENABLE |
			(bus << 16) | (device << 11) | (function << 8);
		return tag;
	case 2:
		/* Mechanism 2: the device selects an I/O port range,
		 * the enable byte carries the function, and the
		 * forward byte carries the bus. */
		if (bus >= 256 || device >= 16 || function >= 8)
			panic("%s: bad request", __func__);

		tag.mode2.port = 0xc000 | (device << 8);
		tag.mode2.enable = 0xf0 | (function << 1);
		tag.mode2.forward = bus;
		return tag;
	default:
		panic("%s: mode not configured", __func__);
	}
}
451 454
452void 455void
453pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag, 456pci_decompose_tag(pci_chipset_tag_t pc, pcitag_t tag,
454 int *bp, int *dp, int *fp) 457 int *bp, int *dp, int *fp)
455{ 458{
456 pci_chipset_tag_t ipc; 459 pci_chipset_tag_t ipc;
457 460
458 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) { 461 for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
459 if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0) 462 if ((ipc->pc_present & PCI_OVERRIDE_DECOMPOSE_TAG) == 0)
460 continue; 463 continue;
461 (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx, 464 (*ipc->pc_ov->ov_decompose_tag)(ipc->pc_ctx,
462 pc, tag, bp, dp, fp); 465 pc, tag, bp, dp, fp);
463 return; 466 return;
464 } 467 }
465 468
466 switch (pci_mode) { 469 switch (pci_mode) {
467 case 1: 470 case 1:
468 if (bp != NULL) 471 if (bp != NULL)
469 *bp = (tag.mode1 >> 16) & 0xff; 472 *bp = (tag.mode1 >> 16) & 0xff;
470 if (dp != NULL) 473 if (dp != NULL)
471 *dp = (tag.mode1 >> 11) & 0x1f; 474 *dp = (tag.mode1 >> 11) & 0x1f;
472 if (fp != NULL) 475 if (fp != NULL)
473 *fp = (tag.mode1 >> 8) & 0x7; 476 *fp = (tag.mode1 >> 8) & 0x7;
474 return; 477 return;
475 case 2: 478 case 2:
476 if (bp != NULL) 479 if (bp != NULL)
477 *bp = tag.mode2.forward & 0xff; 480 *bp = tag.mode2.forward & 0xff;
478 if (dp != NULL) 481 if (dp != NULL)
479 *dp = (tag.mode2.port >> 8) & 0xf; 482 *dp = (tag.mode2.port >> 8) & 0xf;
480 if (fp != NULL) 483 if (fp != NULL)
481 *fp = (tag.mode2.enable >> 1) & 0x7; 484 *fp = (tag.mode2.enable >> 1) & 0x7;
482 return; 485 return;
483 default: 486 default:
484 panic("%s: mode not configured", __func__); 487 panic("%s: mode not configured", __func__);
485 } 488 }
486} 489}
487 490
/*
 * Read a 32-bit configuration register.  Chipset overrides take
 * precedence; otherwise the access is serialized through the global
 * configuration lock and performed with port I/O.  `reg' must be
 * 32-bit aligned.
 */
pcireg_t
pci_conf_read(pci_chipset_tag_t pc, pcitag_t tag, int reg)
{
	pci_chipset_tag_t ipc;
	pcireg_t data;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_READ) == 0)
			continue;
		return (*ipc->pc_ov->ov_conf_read)(ipc->pc_ctx, pc, tag, reg);
	}

#if defined(__i386__) && defined(XBOX)
	/* Xbox quirk: 0:0:1 and 0:0:2 must not be touched. */
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return (pcireg_t)-1;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	data = inl(pci_conf_port(tag, reg));
	pci_conf_unlock(&ocl);
	return data;
}
517 520
/*
 * Write a 32-bit configuration register.  Chipset overrides take
 * precedence; otherwise the access is serialized through the global
 * configuration lock and performed with port I/O.  `reg' must be
 * 32-bit aligned.
 */
void
pci_conf_write(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t data)
{
	pci_chipset_tag_t ipc;
	struct pci_conf_lock ocl;

	KASSERT((reg & 0x3) == 0);

	for (ipc = pc; ipc != NULL; ipc = ipc->pc_super) {
		if ((ipc->pc_present & PCI_OVERRIDE_CONF_WRITE) == 0)
			continue;
		(*ipc->pc_ov->ov_conf_write)(ipc->pc_ctx, pc, tag, reg,
		    data);
		return;
	}

#if defined(__i386__) && defined(XBOX)
	/* Xbox quirk: 0:0:1 and 0:0:2 must not be touched. */
	if (arch_i386_is_xbox) {
		int bus, dev, fn;
		pci_decompose_tag(pc, tag, &bus, &dev, &fn);
		if (bus == 0 && dev == 0 && (fn == 1 || fn == 2))
			return;
	}
#endif

	pci_conf_lock(&ocl, pci_conf_selector(tag, reg));
	outl(pci_conf_port(tag, reg), data);
	pci_conf_unlock(&ocl);
}
547 550
/*
 * Force the configuration mechanism to `mode'.  Legal only before
 * pci_mode_detect() has run, or with the mode already established;
 * changing an established mode is a bug (KASSERT).
 */
void
pci_mode_set(int mode)
{
	KASSERT(pci_mode == -1 || pci_mode == mode);

	pci_mode = mode;
}
555 558
/*
 * Probe which PCI configuration mechanism the host bridge implements.
 * Caches the answer in the global pci_mode: 1, 2, or 0 (no PCI).
 * The probe pokes the mechanism-1 address register and the
 * mechanism-2 enable/forward registers, restoring what it clobbers
 * on the failure paths.
 */
int
pci_mode_detect(void)
{
	uint32_t sav, val;
	int i;
	pcireg_t idreg;

	if (pci_mode != -1)
		return pci_mode;

	/*
	 * We try to divine which configuration mode the host bridge wants.
	 */

	sav = inl(PCI_MODE1_ADDRESS_REG);

	pci_mode = 1; /* assume this for now */
	/*
	 * catch some known buggy implementations of mode 1
	 */
	for (i = 0; i < __arraycount(pcim1_quirk_tbl); i++) {
		pcitag_t t;

		/* Table is terminated by a zero tag. */
		if (!pcim1_quirk_tbl[i].tag)
			break;
		t.mode1 = pcim1_quirk_tbl[i].tag;
		idreg = pci_conf_read(0, t, PCI_ID_REG); /* needs "pci_mode" */
		if (idreg == pcim1_quirk_tbl[i].id) {
#ifdef DEBUG
			printf("known mode 1 PCI chipset (%08x)\n",
			    idreg);
#endif
			return (pci_mode);
		}
	}

	/*
	 * Strong check for standard compliant mode 1:
	 * 1. bit 31 ("enable") can be set
	 * 2. byte/word access does not affect register
	 */
	outl(PCI_MODE1_ADDRESS_REG, PCI_MODE1_ENABLE);
	outb(PCI_MODE1_ADDRESS_REG + 3, 0);
	outw(PCI_MODE1_ADDRESS_REG + 2, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != PCI_MODE1_ENABLE) {
#ifdef DEBUG
		printf("pci_mode_detect: mode 1 enable failed (%x)\n",
		    val);
#endif
		goto not1;
	}
	outl(PCI_MODE1_ADDRESS_REG, 0);
	val = inl(PCI_MODE1_ADDRESS_REG);
	if ((val & 0x80fffffc) != 0)
		goto not1;
	return (pci_mode);
not1:
	/* Restore the address register we clobbered. */
	outl(PCI_MODE1_ADDRESS_REG, sav);

	/*
	 * This mode 2 check is quite weak (and known to give false
	 * positives on some Compaq machines).
	 * However, this doesn't matter, because this is the
	 * last test, and simply no PCI devices will be found if
	 * this happens.
	 */
	outb(PCI_MODE2_ENABLE_REG, 0);
	outb(PCI_MODE2_FORWARD_REG, 0);
	if (inb(PCI_MODE2_ENABLE_REG) != 0 ||
	    inb(PCI_MODE2_FORWARD_REG) != 0)
		goto not2;
	return (pci_mode = 2);
not2:

	return (pci_mode = 0);
}
633 636
634/* 637/*
635 * Determine which flags should be passed to the primary PCI bus's 638 * Determine which flags should be passed to the primary PCI bus's
636 * autoconfiguration node. We use this to detect broken chipsets 639 * autoconfiguration node. We use this to detect broken chipsets
637 * which cannot safely use memory-mapped device access. 640 * which cannot safely use memory-mapped device access.
638 */ 641 */
639int 642int
640pci_bus_flags(void) 643pci_bus_flags(void)
641{ 644{
642 int rval = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY | 645 int rval = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY |
643 PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY; 646 PCI_FLAGS_MRL_OKAY | PCI_FLAGS_MRM_OKAY | PCI_FLAGS_MWI_OKAY;
644 int device, maxndevs; 647 int device, maxndevs;
645 pcitag_t tag; 648 pcitag_t tag;
646 pcireg_t id; 649 pcireg_t id;
647 650
648 maxndevs = pci_bus_maxdevs(NULL, 0); 651 maxndevs = pci_bus_maxdevs(NULL, 0);
649 652
650 for (device = 0; device < maxndevs; device++) { 653 for (device = 0; device < maxndevs; device++) {
651 tag = pci_make_tag(NULL, 0, device, 0); 654 tag = pci_make_tag(NULL, 0, device, 0);
652 id = pci_conf_read(NULL, tag, PCI_ID_REG); 655 id = pci_conf_read(NULL, tag, PCI_ID_REG);
653 656
654 /* Invalid vendor ID value? */ 657 /* Invalid vendor ID value? */
655 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) 658 if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
656 continue; 659 continue;
657 /* XXX Not invalid, but we've done this ~forever. */ 660 /* XXX Not invalid, but we've done this ~forever. */
658 if (PCI_VENDOR(id) == 0) 661 if (PCI_VENDOR(id) == 0)
659 continue; 662 continue;
660 663
661 switch (PCI_VENDOR(id)) { 664 switch (PCI_VENDOR(id)) {
662 case PCI_VENDOR_SIS: 665 case PCI_VENDOR_SIS:
663 switch (PCI_PRODUCT(id)) { 666 switch (PCI_PRODUCT(id)) {
664 case PCI_PRODUCT_SIS_85C496: 667 case PCI_PRODUCT_SIS_85C496:
665 goto disable_mem; 668 goto disable_mem;
666 } 669 }
667 break; 670 break;
668 } 671 }
669 } 672 }
670 673
671 return (rval); 674 return (rval);
672 675
673 disable_mem: 676 disable_mem:
674 printf("Warning: broken PCI-Host bridge detected; " 677 printf("Warning: broken PCI-Host bridge detected; "
675 "disabling memory-mapped access\n"); 678 "disabling memory-mapped access\n");
676 rval &= ~(PCI_FLAGS_MEM_OKAY|PCI_FLAGS_MRL_OKAY|PCI_FLAGS_MRM_OKAY| 679 rval &= ~(PCI_FLAGS_MEM_OKAY|PCI_FLAGS_MRL_OKAY|PCI_FLAGS_MRM_OKAY|
677 PCI_FLAGS_MWI_OKAY); 680 PCI_FLAGS_MWI_OKAY);
678 return (rval); 681 return (rval);
679} 682}
680 683
/*
 * Call (*func)(pc, tag, context) for every PCI function found on
 * buses 0..maxbus; convenience wrapper for pci_device_foreach_min().
 */
void
pci_device_foreach(pci_chipset_tag_t pc, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	pci_device_foreach_min(pc, 0, maxbus, func, context);
}
687 690
/*
 * Call (*func)(pc, tag, context) once for every PCI function present
 * on buses minbus..maxbus inclusive.  Function 0 of each device is
 * probed first; functions 1-7 are scanned only when the header marks
 * the device multi-function or a quirk entry forces it.
 */
void
pci_device_foreach_min(pci_chipset_tag_t pc, int minbus, int maxbus,
    void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *context)
{
	const struct pci_quirkdata *qd;
	int bus, device, function, maxdevs, nfuncs;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (bus = minbus; bus <= maxbus; bus++) {
		maxdevs = pci_bus_maxdevs(pc, bus);
		for (device = 0; device < maxdevs; device++) {
			tag = pci_make_tag(pc, bus, device, 0);
			id = pci_conf_read(pc, tag, PCI_ID_REG);

			/* Invalid vendor ID value? */
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;

			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			/* Decide how many functions to scan. */
			bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
			if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
			    (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
				nfuncs = 8;
			else
				nfuncs = 1;

			for (function = 0; function < nfuncs; function++) {
				tag = pci_make_tag(pc, bus, device, function);
				id = pci_conf_read(pc, tag, PCI_ID_REG);

				/* Invalid vendor ID value? */
				if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
					continue;
				/*
				 * XXX Not invalid, but we've done this
				 * ~forever.
				 */
				if (PCI_VENDOR(id) == 0)
					continue;
				(*func)(pc, tag, context);
			}
		}
	}
}
739 742
740void 743void
741pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus, 744pci_bridge_foreach(pci_chipset_tag_t pc, int minbus, int maxbus,
742 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx) 745 void (*func)(pci_chipset_tag_t, pcitag_t, void *), void *ctx)
743{ 746{
744 struct pci_bridge_hook_arg bridge_hook; 747 struct pci_bridge_hook_arg bridge_hook;
745 748
746 bridge_hook.func = func; 749 bridge_hook.func = func;
747 bridge_hook.arg = ctx;  750 bridge_hook.arg = ctx;
748 751
749 pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook, 752 pci_device_foreach_min(pc, minbus, maxbus, pci_bridge_hook,
750 &bridge_hook);  753 &bridge_hook);
751} 754}
752 755
/* Which kind of configuration register an allocation was derived from. */
typedef enum pci_alloc_regtype {
	PCI_ALLOC_REGTYPE_NONE = 0		/* no backing register */
	, PCI_ALLOC_REGTYPE_BAR = 1		/* base address register */
	, PCI_ALLOC_REGTYPE_WIN = 2		/* PCI-PCI bridge window */
	, PCI_ALLOC_REGTYPE_CBWIN = 3		/* CardBus bridge window */
	, PCI_ALLOC_REGTYPE_VGA_EN = 4		/* legacy VGA enable */
} pci_alloc_regtype_t;

/* Which address space an allocation occupies. */
typedef enum pci_alloc_space {
	PCI_ALLOC_SPACE_IO = 0
	, PCI_ALLOC_SPACE_MEM = 1
} pci_alloc_space_t;

/* Flag bits for pci_alloc.pal_flags. */
typedef enum pci_alloc_flags {
	PCI_ALLOC_F_PREFETCHABLE = 0x1		/* prefetchable memory */
} pci_alloc_flags_t;

/*
 * One address-space range reserved for a PCI device or bridge,
 * together with the configuration register(s) it was inferred from
 * (offset, observed value, and the bits that matter).
 */
typedef struct pci_alloc {
	TAILQ_ENTRY(pci_alloc) pal_link;	/* list linkage */
	pcitag_t pal_tag;			/* owning device */
	uint64_t pal_addr;			/* start of range */
	uint64_t pal_size;			/* length in bytes */
	pci_alloc_regtype_t pal_type;		/* origin of the range */
	struct pci_alloc_reg {
		int r_ofs;			/* config-space offset */
		pcireg_t r_val;			/* value observed */
		pcireg_t r_mask;		/* significant bits */
	} pal_reg[3];
	pci_alloc_space_t pal_space;		/* I/O or memory */
	pci_alloc_flags_t pal_flags;		/* PCI_ALLOC_F_* */
} pci_alloc_t;

typedef struct pci_alloc_reg pci_alloc_reg_t;

TAILQ_HEAD(pci_alloc_list, pci_alloc);

typedef struct pci_alloc_list pci_alloc_list_t;
 793
 794static pci_alloc_t *
 795pci_alloc_dup(const pci_alloc_t *pal)
 796{
 797 pci_alloc_t *npal;
 798
 799 if ((npal = kmem_alloc(sizeof(*npal), KM_SLEEP)) == NULL)
 800 return NULL;
 801
 802 *npal = *pal;
 803
 804 return npal;
 805}
 806
 807static bool
 808pci_alloc_linkdup(pci_alloc_list_t *pals, const pci_alloc_t *pal)
 809{
 810 pci_alloc_t *npal;
 811
 812 if ((npal = pci_alloc_dup(pal)) == NULL)
 813 return false;
 814
 815 TAILQ_INSERT_TAIL(pals, npal, pal_link);
 816
 817 return true;
 818}
 819
/*
 * State threaded through the io/mmio range-inference walks:
 * the chipset tag to use, the allocations collected so far, and
 * the running minimum/maximum of each address space.
 */
struct range_infer_ctx {
	pci_chipset_tag_t ric_pc;	/* chipset for config access */
	pci_alloc_list_t ric_pals;	/* allocations recorded so far */
	bus_addr_t ric_mmio_bottom;	/* lowest MMIO address seen */
	bus_addr_t ric_mmio_top;	/* highest MMIO end seen */
	bus_addr_t ric_io_bottom;	/* lowest I/O address seen */
	bus_addr_t ric_io_top;		/* highest I/O end seen */
};
 828
 829#if 1
 830static bool
 831io_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
 832{
 833 if (ric->ric_io_bottom > pal->pal_addr)
 834 ric->ric_io_bottom = pal->pal_addr;
 835 if (ric->ric_io_top < pal->pal_addr + pal->pal_size)
 836 ric->ric_io_top = pal->pal_addr + pal->pal_size;
 837
 838 return pci_alloc_linkdup(&ric->ric_pals, pal);
 839}
 840
/*
 * Record the I/O range decoded by one I/O BAR.  `curbar' is the BAR's
 * original value and `sizebar' the readback after writing all-ones
 * (standard BAR sizing).  A zero-sized BAR is skipped without error.
 */
static bool
io_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		.pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_BAR
		, .pal_reg = {{
			.r_mask = ~(pcireg_t)0
		}}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_IO_ADDR(curbar);
	pal.pal_size = PCI_MAPREG_IO_SIZE(sizebar);

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return (pal.pal_size == 0) || io_range_extend(ric, &pal);
}
 869
/*
 * If the bridge at bus/dev/fun forwards legacy VGA cycles (I/O
 * decoding enabled in the command register AND the VGA enable bit set
 * in the bridge control register), record the two legacy VGA I/O
 * ranges, 0x3b0-0x3bb and 0x3c0-0x3df.  Returns true on success or
 * when VGA forwarding is off (nothing to record).
 */
static bool
io_range_extend_by_vga_enable(struct range_infer_ctx *ric,
    int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
{
	pci_alloc_reg_t *r;
	pci_alloc_t tpal = {
		.pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
		, .pal_reg = {{
			.r_ofs = PCI_COMMAND_STATUS_REG
			, .r_mask = PCI_COMMAND_IO_ENABLE
		}, {
			.r_ofs = PCI_BRIDGE_CONTROL_REG
			, .r_mask =
			    PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
		}}
	}, pal[2];

	aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);

	if ((csr & PCI_COMMAND_IO_ENABLE) == 0 ||
	    (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
		aprint_debug("%s: %d.%d.%d I/O or VGA disabled\n",
		    __func__, bus, dev, fun);
		return true;
	}

	r = &tpal.pal_reg[0];
	tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_val = csr;
	r[1].r_val = bcr;

	/* Both records share the tag/registers, differing in range. */
	pal[0] = pal[1] = tpal;

	pal[0].pal_addr = 0x3b0;
	pal[0].pal_size = 0x3bb - 0x3b0 + 1;

	pal[1].pal_addr = 0x3c0;
	pal[1].pal_size = 0x3df - 0x3c0 + 1;

	/* XXX add aliases for pal[0..1] */

	return io_range_extend(ric, &pal[0]) && io_range_extend(ric, &pal[1]);
}
 915
/*
 * Record a PCI-PCI bridge I/O window from the bridge's STATIO
 * register, plus the IOHIGH register when the bridge implements
 * 32-bit I/O addressing.  Windows have 4KB granularity; an inverted
 * window (base > limit) is treated as absent.
 */
static bool
io_range_extend_by_win(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, int ofshigh,
    pcireg_t io, pcireg_t iohigh)
{
	const int fourkb = 4 * 1024;
	pcireg_t baser, limitr;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		.pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_WIN
		, .pal_reg = {{
			.r_mask = ~(pcireg_t)0
		}}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = io;

	/* The ">> 4" drops each field's low nibble; the remainder
	 * counts 4KB units (see the multiply by fourkb below). */
	baser = ((io >> PCI_BRIDGE_STATIO_IOBASE_SHIFT) &
	    PCI_BRIDGE_STATIO_IOBASE_MASK) >> 4;
	limitr = ((io >> PCI_BRIDGE_STATIO_IOLIMIT_SHIFT) &
	    PCI_BRIDGE_STATIO_IOLIMIT_MASK) >> 4;

	if (PCI_BRIDGE_IO_32BITS(io)) {
		/* 32-bit I/O: IOHIGH supplies the upper address bits. */
		pcireg_t baseh, limith;

		r[1].r_mask = ~(pcireg_t)0;
		r[1].r_ofs = ofshigh;
		r[1].r_val = iohigh;

		baseh = (iohigh >> PCI_BRIDGE_IOHIGH_BASE_SHIFT) & PCI_BRIDGE_IOHIGH_BASE_MASK;
		limith = (iohigh >> PCI_BRIDGE_IOHIGH_LIMIT_SHIFT) & PCI_BRIDGE_IOHIGH_LIMIT_MASK;

		baser |= baseh << 4;
		limitr |= limith << 4;
	}

	/* XXX check with the PCI standard */
	if (baser > limitr)
		return true;

	pal.pal_addr = baser * fourkb;
	pal.pal_size = (limitr - baser + 1) * fourkb;

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}
 970
/*
 * Record a CardBus bridge I/O window given its base and limit
 * registers (at `ofs' and `ofs' + 4).  An inverted window
 * (base > limit) is treated as absent.
 */
static bool
io_range_extend_by_cbwin(struct range_infer_ctx *ric,
    int bus, int dev, int fun, int ofs, pcireg_t base0, pcireg_t limit0)
{
	pcireg_t base, limit;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		.pal_flags = 0
		, .pal_space = PCI_ALLOC_SPACE_IO
		, .pal_type = PCI_ALLOC_REGTYPE_CBWIN
		, .pal_reg = {{
			.r_mask = ~(pcireg_t)0
		}, {
			.r_mask = ~(pcireg_t)0
		}}
	};

	r = &pal.pal_reg[0];

	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r[0].r_ofs = ofs;
	r[0].r_val = base0;
	r[1].r_ofs = ofs + 4;
	r[1].r_val = limit0;

	/* Address bits occupy bits 31:2 of both registers. */
	base = base0 & __BITS(31, 2);
	limit = limit0 & __BITS(31, 2);

	if (base > limit)
		return true;

	pal.pal_addr = base;
	pal.pal_size = limit - base + 4; /* XXX */

	aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	return io_range_extend(ric, &pal);
}
 1010
/*
 * pci_device_foreach() callback: infer the I/O space reserved for the
 * device at `tag' by the BIOS and extend the context's I/O range
 * accordingly.  Sources examined: PCI-PCI bridge I/O windows and the
 * VGA enable, CardBus bridge I/O windows, and the device's I/O BARs.
 */
static void
io_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bhlcr, limit, io;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* The header type determines how many BARs to probe and which
	 * window registers exist. */
	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract I/O windows */
		ok = ok && io_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_STATIO_REG,
		    PCI_BRIDGE_IOHIGH_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_STATIO_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_IOHIGH_REG));
		ok = ok && io_range_extend_by_vga_enable(ric, bus, dev, fun,
		    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
		    pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract I/O windows */
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT0);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE0, io, limit);
		io = pci_conf_read(pc, tag, PCI_CB_IOBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_IOLIMIT1);
		ok = ok && io_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_IOBASE1, io, limit);
		/* NOTE(review): presumably the single CardBus BAR
		 * (socket registers); confirm against the CardBus spec. */
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

	/*
	 * Standard BAR sizing probe: write all-ones, read back the size
	 * mask, then restore the original value.
	 * NOTE(review): the BAR briefly holds 0xffffffff while the
	 * device may be decoding -- assumed safe at this stage of boot;
	 * confirm.
	 */
	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;	/* unimplemented BAR */
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_IO)
			continue;	/* memory BARs handled elsewhere */

		ok = ok && io_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("I/O range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}
 1080#endif
 1081
 1082static bool
 1083mmio_range_extend(struct range_infer_ctx *ric, const pci_alloc_t *pal)
 1084{
 1085 if (ric->ric_mmio_bottom > pal->pal_addr)
 1086 ric->ric_mmio_bottom = pal->pal_addr;
 1087 if (ric->ric_mmio_top < pal->pal_addr + pal->pal_size)
 1088 ric->ric_mmio_top = pal->pal_addr + pal->pal_size;
 1089
 1090 return pci_alloc_linkdup(&ric->ric_pals, pal);
 1091}
 1092
/*
 * Extend the inferred MMIO range by one memory BAR.  `curbar' is the
 * BAR's original value and `sizebar' the value read back after the
 * caller's all-ones sizing probe.  Returns true on success or when the
 * BAR is empty; false when the BAR type is unsupported or recording
 * fails.
 */
static bool
mmio_range_extend_by_bar(struct range_infer_ctx *ric, int bus, int dev, int fun,
    int ofs, pcireg_t curbar, pcireg_t sizebar)
{
	int type;
	bool prefetchable;
	pci_alloc_reg_t *r;
	pci_alloc_t pal = {
		.pal_flags = 0
	    , .pal_space = PCI_ALLOC_SPACE_MEM
	    , .pal_type = PCI_ALLOC_REGTYPE_BAR
	    , .pal_reg = {{
		.r_mask = ~(pcireg_t)0
	      }}
	};

	r = &pal.pal_reg[0];

	/* Record the register this allocation was derived from. */
	pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
	r->r_ofs = ofs;
	r->r_val = curbar;

	pal.pal_addr = PCI_MAPREG_MEM_ADDR(curbar);

	type = PCI_MAPREG_MEM_TYPE(curbar);
	prefetchable = PCI_MAPREG_MEM_PREFETCHABLE(curbar);

	if (prefetchable)
		pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;

	switch (type) {
	case PCI_MAPREG_MEM_TYPE_32BIT:
		pal.pal_size = PCI_MAPREG_MEM_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_64BIT:
		/*
		 * NOTE(review): `curbar' and `sizebar' are single
		 * pcireg_t values, so only the low 32 bits of a 64-bit
		 * BAR's address/size are considered here; the upper-half
		 * register is never folded in -- TODO confirm this is
		 * adequate for the firmware being grovelled.
		 */
		pal.pal_size = PCI_MAPREG_MEM64_SIZE(sizebar);
		break;
	case PCI_MAPREG_MEM_TYPE_32BIT_1M:
	default:
		/* Below-1MB and unknown BAR types are not recorded. */
		aprint_debug("%s: ignored memory type %d at %d.%d.%d\n",
		    __func__, type, bus, dev, fun);
		return false;
	}

	aprint_debug("%s: %d.%d.%d base at %" PRIx64 " size %" PRIx64 "\n",
	    __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);

	/* A zero-size BAR reserves nothing; that is not a failure. */
	return (pal.pal_size == 0) || mmio_range_extend(ric, &pal);
}
 1142
 1143static bool
 1144mmio_range_extend_by_vga_enable(struct range_infer_ctx *ric,
 1145 int bus, int dev, int fun, pcireg_t csr, pcireg_t bcr)
 1146{
 1147 pci_alloc_reg_t *r;
 1148 pci_alloc_t tpal = {
 1149 .pal_flags = PCI_ALLOC_F_PREFETCHABLE /* XXX a guess */
 1150 , .pal_space = PCI_ALLOC_SPACE_MEM
 1151 , .pal_type = PCI_ALLOC_REGTYPE_VGA_EN
 1152 , .pal_reg = {{
 1153 .r_ofs = PCI_COMMAND_STATUS_REG
 1154 , .r_mask = PCI_COMMAND_MEM_ENABLE
 1155 }, {
 1156 .r_ofs = PCI_BRIDGE_CONTROL_REG
 1157 , .r_mask =
 1158 PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT
 1159 }}
 1160 }, pal;
 1161
 1162 aprint_debug("%s: %d.%d.%d enter\n", __func__, bus, dev, fun);
 1163
 1164 if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 ||
 1165 (bcr & (PCI_BRIDGE_CONTROL_VGA << PCI_BRIDGE_CONTROL_SHIFT)) == 0) {
 1166 aprint_debug("%s: %d.%d.%d memory or VGA disabled\n",
 1167 __func__, bus, dev, fun);
 1168 return true;
 1169 }
 1170
 1171 r = &tpal.pal_reg[0];
 1172 tpal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
 1173 r[0].r_val = csr;
 1174 r[1].r_val = bcr;
 1175
 1176 pal = tpal;
 1177
 1178 pal.pal_addr = 0xa0000;
 1179 pal.pal_size = 0xbffff - 0xa0000 + 1;
 1180
 1181 return mmio_range_extend(ric, &pal);
 1182}
 1183
 1184static bool
 1185mmio_range_extend_by_win(struct range_infer_ctx *ric,
 1186 int bus, int dev, int fun, int ofs, pcireg_t mem)
 1187{
 1188 const int onemeg = 1024 * 1024;
 1189 pcireg_t baser, limitr;
 1190 pci_alloc_reg_t *r;
 1191 pci_alloc_t pal = {
 1192 .pal_flags = 0
 1193 , .pal_space = PCI_ALLOC_SPACE_MEM
 1194 , .pal_type = PCI_ALLOC_REGTYPE_WIN
 1195 , .pal_reg = {{
 1196 .r_mask = ~(pcireg_t)0
 1197 }}
 1198 };
 1199
 1200 r = &pal.pal_reg[0];
 1201
 1202 pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
 1203 r->r_ofs = ofs;
 1204 r->r_val = mem;
 1205
 1206 baser = (mem >> PCI_BRIDGE_MEMORY_BASE_SHIFT) &
 1207 PCI_BRIDGE_MEMORY_BASE_MASK;
 1208 limitr = (mem >> PCI_BRIDGE_MEMORY_LIMIT_SHIFT) &
 1209 PCI_BRIDGE_MEMORY_LIMIT_MASK;
 1210
 1211 /* XXX check with the PCI standard */
 1212 if (baser > limitr || limitr == 0)
 1213 return true;
 1214
 1215 pal.pal_addr = baser * onemeg;
 1216 pal.pal_size = (limitr - baser + 1) * onemeg;
 1217
 1218 aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
 1219 __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);
 1220
 1221 return mmio_range_extend(ric, &pal);
 1222}
 1223
 1224static bool
 1225mmio_range_extend_by_prememwin(struct range_infer_ctx *ric,
 1226 int bus, int dev, int fun, int ofs, pcireg_t mem,
 1227 int hibaseofs, pcireg_t hibase,
 1228 int hilimitofs, pcireg_t hilimit)
 1229{
 1230 const int onemeg = 1024 * 1024;
 1231 uint64_t baser, limitr;
 1232 pci_alloc_reg_t *r;
 1233 pci_alloc_t pal = {
 1234 .pal_flags = PCI_ALLOC_F_PREFETCHABLE
 1235 , .pal_space = PCI_ALLOC_SPACE_MEM
 1236 , .pal_type = PCI_ALLOC_REGTYPE_WIN
 1237 , .pal_reg = {{
 1238 .r_mask = ~(pcireg_t)0
 1239 }}
 1240 };
 1241
 1242 r = &pal.pal_reg[0];
 1243
 1244 pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
 1245 r[0].r_ofs = ofs;
 1246 r[0].r_val = mem;
 1247
 1248 baser = (mem >> PCI_BRIDGE_PREFETCHMEM_BASE_SHIFT) &
 1249 PCI_BRIDGE_PREFETCHMEM_BASE_MASK;
 1250 limitr = (mem >> PCI_BRIDGE_PREFETCHMEM_LIMIT_SHIFT) &
 1251 PCI_BRIDGE_PREFETCHMEM_LIMIT_MASK;
 1252
 1253 if (PCI_BRIDGE_PREFETCHMEM_64BITS(mem)) {
 1254 r[1].r_mask = r[2].r_mask = ~(pcireg_t)0;
 1255 r[1].r_ofs = hibaseofs;
 1256 r[1].r_val = hibase;
 1257 r[2].r_ofs = hilimitofs;
 1258 r[2].r_val = hilimit;
 1259
 1260 baser |= hibase << 12;
 1261 limitr |= hibase << 12;
 1262 }
 1263
 1264 /* XXX check with the PCI standard */
 1265 if (baser > limitr || limitr == 0)
 1266 return true;
 1267
 1268 pal.pal_addr = baser * onemeg;
 1269 pal.pal_size = (limitr - baser + 1) * onemeg;
 1270
 1271 aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
 1272 __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);
 1273
 1274 return mmio_range_extend(ric, &pal);
 1275}
 1276
 1277static bool
 1278mmio_range_extend_by_cbwin(struct range_infer_ctx *ric,
 1279 int bus, int dev, int fun, int ofs, pcireg_t base, pcireg_t limit,
 1280 bool prefetchable)
 1281{
 1282 pci_alloc_reg_t *r;
 1283 pci_alloc_t pal = {
 1284 .pal_flags = 0
 1285 , .pal_space = PCI_ALLOC_SPACE_MEM
 1286 , .pal_type = PCI_ALLOC_REGTYPE_CBWIN
 1287 , .pal_reg = {{
 1288 .r_mask = ~(pcireg_t)0
 1289 }, {
 1290 .r_mask = ~(pcireg_t)0
 1291 }}
 1292 };
 1293
 1294 r = &pal.pal_reg[0];
 1295
 1296 if (prefetchable)
 1297 pal.pal_flags |= PCI_ALLOC_F_PREFETCHABLE;
 1298
 1299 pal.pal_tag = pci_make_tag(ric->ric_pc, bus, dev, fun);
 1300 r[0].r_ofs = ofs;
 1301 r[0].r_val = base;
 1302 r[1].r_ofs = ofs + 4;
 1303 r[1].r_val = limit;
 1304
 1305 if (base > limit)
 1306 return true;
 1307
 1308 if (limit == 0)
 1309 return true;
 1310
 1311 pal.pal_addr = base;
 1312 pal.pal_size = limit - base + 4096;
 1313
 1314 aprint_debug("%s: %d.%d.%d window at %" PRIx64 " size %" PRIx64 "\n",
 1315 __func__, bus, dev, fun, pal.pal_addr, pal.pal_size);
 1316
 1317 return mmio_range_extend(ric, &pal);
 1318}
 1319
/*
 * pci_device_foreach() callback: infer the memory-mapped space reserved
 * for the device at `tag' by the BIOS and extend the context's MMIO
 * range accordingly.  Sources examined: PCI-PCI bridge memory and
 * prefetchable-memory windows plus the VGA enable, CardBus bridge
 * memory windows, and the device's memory BARs.
 */
static void
mmio_range_infer(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
{
	struct range_infer_ctx *ric = ctx;
	pcireg_t bcr, bhlcr, limit, mem, premem, hiprebase, hiprelimit;
	int bar, bus, dev, fun, hdrtype, nbar;
	bool ok = true;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	hdrtype = PCI_HDRTYPE_TYPE(bhlcr);

	pci_decompose_tag(pc, tag, &bus, &dev, &fun);

	/* The header type determines how many BARs to probe and which
	 * window registers exist. */
	switch (hdrtype) {
	case PCI_HDRTYPE_PPB:
		nbar = 2;
		/* Extract memory windows */
		ok = ok && mmio_range_extend_by_win(ric, bus, dev, fun,
		    PCI_BRIDGE_MEMORY_REG,
		    pci_conf_read(pc, tag, PCI_BRIDGE_MEMORY_REG));
		premem = pci_conf_read(pc, tag, PCI_BRIDGE_PREFETCHMEM_REG);
		if (PCI_BRIDGE_PREFETCHMEM_64BITS(premem)) {
			/* 64-bit window: also fetch the upper 32 bits
			 * of base and limit. */
			aprint_debug("%s: 64-bit prefetchable memory window "
			    "at %d.%d.%d\n", __func__, bus, dev, fun);
			hiprebase = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHBASE32_REG);
			hiprelimit = pci_conf_read(pc, tag,
			    PCI_BRIDGE_PREFETCHLIMIT32_REG);
		} else
			hiprebase = hiprelimit = 0;
		ok = ok &&
		    mmio_range_extend_by_prememwin(ric, bus, dev, fun,
			PCI_BRIDGE_PREFETCHMEM_REG, premem,
			PCI_BRIDGE_PREFETCHBASE32_REG, hiprebase,
			PCI_BRIDGE_PREFETCHLIMIT32_REG, hiprelimit) &&
		    mmio_range_extend_by_vga_enable(ric, bus, dev, fun,
			pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG),
			pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG));
		break;
	case PCI_HDRTYPE_PCB:
		/* Extract memory windows */
		bcr = pci_conf_read(pc, tag, PCI_BRIDGE_CONTROL_REG);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE0);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT0);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE0, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN0) != 0);
		mem = pci_conf_read(pc, tag, PCI_CB_MEMBASE1);
		limit = pci_conf_read(pc, tag, PCI_CB_MEMLIMIT1);
		ok = ok && mmio_range_extend_by_cbwin(ric, bus, dev, fun,
		    PCI_CB_MEMBASE1, mem, limit,
		    (bcr & CB_BCR_PREFETCH_MEMWIN1) != 0);
		nbar = 1;
		break;
	case PCI_HDRTYPE_DEVICE:
		nbar = 6;
		break;
	default:
		aprint_debug("%s: unknown header type %d at %d.%d.%d\n",
		    __func__, hdrtype, bus, dev, fun);
		return;
	}

	/*
	 * Standard BAR sizing probe: write all-ones, read back the size
	 * mask, restore the original value.
	 * NOTE(review): the upper dword of a 64-bit BAR is probed as if
	 * it were an independent BAR on the next iteration -- TODO
	 * confirm this cannot mis-record an allocation.
	 */
	for (bar = 0; bar < nbar; bar++) {
		pcireg_t basebar, sizebar;

		basebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), 0xffffffff);
		sizebar = pci_conf_read(pc, tag, PCI_BAR(bar));
		pci_conf_write(pc, tag, PCI_BAR(bar), basebar);

		if (sizebar == 0)
			continue;	/* unimplemented BAR */
		if (PCI_MAPREG_TYPE(sizebar) != PCI_MAPREG_TYPE_MEM)
			continue;	/* I/O BARs handled elsewhere */

		ok = ok && mmio_range_extend_by_bar(ric, bus, dev, fun,
		    PCI_BAR(bar), basebar, sizebar);
	}
	if (!ok) {
		aprint_verbose("MMIO range inference failed at PCI %d.%d.%d\n",
		    bus, dev, fun);
	}
}
 1405
 1406static const char *
 1407pci_alloc_regtype_string(const pci_alloc_regtype_t t)
 1408{
 1409 switch (t) {
 1410 case PCI_ALLOC_REGTYPE_BAR:
 1411 return "bar";
 1412 case PCI_ALLOC_REGTYPE_WIN:
 1413 case PCI_ALLOC_REGTYPE_CBWIN:
 1414 return "window";
 1415 case PCI_ALLOC_REGTYPE_VGA_EN:
 1416 return "vga-enable";
 1417 default:
 1418 return "<unknown>";
 1419 }
 1420}
 1421
 1422static void
 1423pci_alloc_print(pci_chipset_tag_t pc, const pci_alloc_t *pal)
 1424{
 1425 int bus, dev, fun;
 1426 const pci_alloc_reg_t *r;
 1427
 1428 pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);
 1429 r = &pal->pal_reg[0];
 1430
 1431 aprint_normal("%s range [0x%08" PRIx64 ", 0x%08" PRIx64 ")"
 1432 " at %d.%d.%d %s%s 0x%02x\n",
 1433 (pal->pal_space == PCI_ALLOC_SPACE_IO) ? "IO" : "MMIO",
 1434 pal->pal_addr, pal->pal_addr + pal->pal_size,
 1435 bus, dev, fun,
 1436 (pal->pal_flags & PCI_ALLOC_F_PREFETCHABLE) ? "prefetchable " : "",
 1437 pci_alloc_regtype_string(pal->pal_type),
 1438 r->r_ofs);
 1439}
 1440
/*
 * Global PCI-resources dictionary built by pci_ranges_infer(): holds a
 * "memory" and an "io" sub-dictionary, each with the inferred start,
 * size, and the "bios-reservations" array.  NULL until (and unless)
 * pci_ranges_infer() builds it successfully.
 */
prop_dictionary_t pci_rsrc_dict = NULL;
 1442
/*
 * Append to the proplib array `rsvns' one dictionary per allocation on
 * `pals' that belongs to address space `space'.  Each dictionary holds
 * the configuration registers the allocation was derived from ("regs"),
 * plus its type, address, size, and bus/device/function.  Returns true
 * on success, false as soon as any proplib operation fails.
 */
static bool
pci_range_record(pci_chipset_tag_t pc, prop_array_t rsvns,
    pci_alloc_list_t *pals, pci_alloc_space_t space)
{
	int bus, dev, fun, i;
	prop_array_t regs;
	prop_dictionary_t reg;
	const pci_alloc_t *pal;
	const pci_alloc_reg_t *r;
	prop_dictionary_t rsvn;

	TAILQ_FOREACH(pal, pals, pal_link) {
		bool ok = true;

		r = &pal->pal_reg[0];

		/* Only allocations in the requested space. */
		if (pal->pal_space != space)
			continue;

		if ((rsvn = prop_dictionary_create()) == NULL)
			return false;

		if ((regs = prop_array_create()) == NULL) {
			prop_object_release(rsvn);
			return false;
		}

		/*
		 * NOTE(review): after this set succeeds, `regs' is never
		 * released in this function -- presumably rsvn retains
		 * its own reference, so ours leaks; confirm against
		 * proplib retain/release semantics (cf. the XXX in
		 * pci_ranges_infer()).
		 */
		if (!prop_dictionary_set(rsvn, "regs", regs)) {
			prop_object_release(rsvn);
			prop_object_release(regs);
			return false;
		}

		/* One "offset"/"val"/"mask" dictionary per register; a
		 * zero mask marks the end of the used entries. */
		for (i = 0; i < __arraycount(pal->pal_reg); i++) {
			r = &pal->pal_reg[i];

			if (r->r_mask == 0)
				break;

			ok = (reg = prop_dictionary_create()) != NULL;
			if (!ok)
				break;

			/* NOTE(review): `reg' is not released after a
			 * successful prop_array_add -- same retain
			 * question as `regs' above. */
			ok = prop_dictionary_set_uint16(reg, "offset",
			    r->r_ofs) &&
			    prop_dictionary_set_uint32(reg, "val", r->r_val) &&
			    prop_dictionary_set_uint32(reg, "mask",
			    r->r_mask) && prop_array_add(regs, reg);
			if (!ok) {
				prop_object_release(reg);
				break;
			}
		}

		pci_decompose_tag(pc, pal->pal_tag, &bus, &dev, &fun);

		/* Fill in the reservation's identity and publish it. */
		ok = ok &&
		    prop_dictionary_set_cstring_nocopy(rsvn, "type",
			pci_alloc_regtype_string(pal->pal_type)) &&
		    prop_dictionary_set_uint64(rsvn, "address",
			pal->pal_addr) &&
		    prop_dictionary_set_uint64(rsvn, "size", pal->pal_size) &&
		    prop_dictionary_set_uint8(rsvn, "bus", bus) &&
		    prop_dictionary_set_uint8(rsvn, "device", dev) &&
		    prop_dictionary_set_uint8(rsvn, "function", fun) &&
		    prop_array_add(rsvns, rsvn);
		prop_object_release(rsvn);
		if (!ok)
			return false;
	}
	return true;
}
 1515
 1516prop_dictionary_t
 1517pci_rsrc_filter(prop_dictionary_t rsrcs0,
 1518 bool (*predicate)(void *, prop_dictionary_t), void *arg)
 1519{
 1520 int i, space;
 1521 prop_dictionary_t rsrcs;
 1522 prop_array_t rsvns;
 1523 ppath_t *op, *p;
 1524
 1525 if ((rsrcs = prop_dictionary_copy(rsrcs0)) == NULL)
 1526 return NULL;
 1527
 1528 for (space = 0; space < 2; space++) {
 1529 op = p = ppath_create();
 1530 p = ppath_push_key(p, (space == 0) ? "memory" : "io");
 1531 p = ppath_push_key(p, "bios-reservations");
 1532 if (p == NULL) {
 1533 ppath_release(op);
 1534 return NULL;
 1535 }
 1536 if ((rsvns = ppath_lookup(rsrcs0, p)) == NULL) {
 1537 printf("%s: reservations not found\n", __func__);
 1538 ppath_release(p);
 1539 return NULL;
 1540 }
 1541 for (i = prop_array_count(rsvns); --i >= 0; ) {
 1542 prop_dictionary_t rsvn;
 1543
 1544 if ((p = ppath_push_idx(p, i)) == NULL) {
 1545 printf("%s: ppath_push_idx\n", __func__);
 1546 ppath_release(op);
 1547 prop_object_release(rsrcs);
 1548 return NULL;
 1549 }
 1550
 1551 rsvn = ppath_lookup(rsrcs0, p);
 1552
 1553 KASSERT(rsvn != NULL);
 1554
 1555 if (!(*predicate)(arg, rsvn)) {
 1556 ppath_copydel_object((prop_object_t)rsrcs0,
 1557 (prop_object_t *)&rsrcs, p);
 1558 }
 1559
 1560 if ((p = ppath_pop(p, NULL)) == NULL) {
 1561 printf("%s: ppath_pop\n", __func__);
 1562 ppath_release(p);
 1563 prop_object_release(rsrcs);
 1564 return NULL;
 1565 }
 1566 }
 1567 ppath_release(op);
 1568 }
 1569 return rsrcs;
 1570}
 1571
/*
 * Grovel the configuration space of every device on buses
 * [minbus, maxbus] to infer the memory and I/O space the BIOS reserved
 * for PCI.  Returns the overall extents through the optional
 * iobasep/iosizep/membasep/memsizep pointers, prints each inferred
 * allocation, and records everything in the global pci_rsrc_dict
 * ("memory" and "io" sub-dictionaries, each with "start", "size", and a
 * "bios-reservations" array).  On any proplib failure, pci_rsrc_dict is
 * left NULL but the extents are still returned.
 */
void
pci_ranges_infer(pci_chipset_tag_t pc, int minbus, int maxbus,
    bus_addr_t *iobasep, bus_size_t *iosizep,
    bus_addr_t *membasep, bus_size_t *memsizep)
{
	prop_dictionary_t iodict = NULL, memdict = NULL;
	prop_array_t iorsvns, memrsvns;
	/* Start with an empty (inverted) envelope in both spaces. */
	struct range_infer_ctx ric = {
		.ric_io_bottom = ~((bus_addr_t)0)
	    , .ric_io_top = 0
	    , .ric_mmio_bottom = ~((bus_addr_t)0)
	    , .ric_mmio_top = 0
	    , .ric_pals = TAILQ_HEAD_INITIALIZER(ric.ric_pals)
	};
	const pci_alloc_t *pal;

	ric.ric_pc = pc;
	/* Visit every device once per space to collect allocations. */
	pci_device_foreach_min(pc, minbus, maxbus, mmio_range_infer, &ric);
	pci_device_foreach_min(pc, minbus, maxbus, io_range_infer, &ric);
	if (membasep != NULL)
		*membasep = ric.ric_mmio_bottom;
	if (memsizep != NULL)
		*memsizep = ric.ric_mmio_top - ric.ric_mmio_bottom;
	if (iobasep != NULL)
		*iobasep = ric.ric_io_bottom;
	if (iosizep != NULL)
		*iosizep = ric.ric_io_top - ric.ric_io_bottom;
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of memory-mapped PCI space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_mmio_top - ric.ric_mmio_bottom),
	    (uintmax_t)ric.ric_mmio_bottom);
	aprint_verbose("%s: inferred %" PRIuMAX
	    " bytes of PCI I/O space at 0x%" PRIxMAX "\n", __func__,
	    (uintmax_t)(ric.ric_io_top - ric.ric_io_bottom),
	    (uintmax_t)ric.ric_io_bottom);
	TAILQ_FOREACH(pal, &ric.ric_pals, pal_link)
		pci_alloc_print(pc, pal);

	/*
	 * Build pci_rsrc_dict.  Each step of this else-if chain runs
	 * only if every earlier step succeeded; a failure prints why
	 * and skips the rest.
	 */
	if ((memdict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI MMIO "
		    "resources dictionary\n", __func__);
	} else if ((memrsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(memdict, "bios-reservations",
	    memrsvns)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, memrsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_MEM)) {
		aprint_error("%s: could not record PCI BIOS memory "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(memdict,
	    "start", ric.ric_mmio_bottom) ||
	    !prop_dictionary_set_uint64(memdict, "size",
	    ric.ric_mmio_top - ric.ric_mmio_bottom)) {
		aprint_error("%s: could not record PCI memory min & max\n",
		    __func__);
	} else if ((iodict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI I/O "
		    "resources dictionary\n", __func__);
	} else if ((iorsvns = prop_array_create()) == NULL) {
		aprint_error("%s: could not create PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!prop_dictionary_set(iodict, "bios-reservations",
	    iorsvns)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations array\n", __func__);
	} else if (!pci_range_record(pc, iorsvns, &ric.ric_pals,
	    PCI_ALLOC_SPACE_IO)) {
		aprint_error("%s: could not record PCI BIOS I/O "
		    "reservations\n", __func__);
	} else if (!prop_dictionary_set_uint64(iodict,
	    "start", ric.ric_io_bottom) ||
	    !prop_dictionary_set_uint64(iodict, "size",
	    ric.ric_io_top - ric.ric_io_bottom)) {
		aprint_error("%s: could not record PCI I/O min & max\n",
		    __func__);
	} else if ((pci_rsrc_dict = prop_dictionary_create()) == NULL) {
		aprint_error("%s: could not create PCI resources dictionary\n",
		    __func__);
	} else if (!prop_dictionary_set(pci_rsrc_dict, "memory", memdict) ||
	    !prop_dictionary_set(pci_rsrc_dict, "io", iodict)) {
		aprint_error("%s: could not record PCI memory- or I/O-"
		    "resources dictionary\n", __func__);
		prop_object_release(pci_rsrc_dict);
		pci_rsrc_dict = NULL;
	}

	/* Drop our references; pci_rsrc_dict (if built) retains them. */
	if (iodict != NULL)
		prop_object_release(iodict);
	if (memdict != NULL)
		prop_object_release(memdict);
	/* XXX release iorsvns, memrsvns */
}
 1667
753static void 1668static void
754pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx) 1669pci_bridge_hook(pci_chipset_tag_t pc, pcitag_t tag, void *ctx)
755{ 1670{
756 struct pci_bridge_hook_arg *bridge_hook = (void *)ctx; 1671 struct pci_bridge_hook_arg *bridge_hook = (void *)ctx;
757 pcireg_t reg; 1672 pcireg_t reg;
758 1673
759 reg = pci_conf_read(pc, tag, PCI_CLASS_REG); 1674 reg = pci_conf_read(pc, tag, PCI_CLASS_REG);
760 if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE && 1675 if (PCI_CLASS(reg) == PCI_CLASS_BRIDGE &&
761 (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI || 1676 (PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_PCI ||
762 PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) { 1677 PCI_SUBCLASS(reg) == PCI_SUBCLASS_BRIDGE_CARDBUS)) {
763 (*bridge_hook->func)(pc, tag, bridge_hook->arg); 1678 (*bridge_hook->func)(pc, tag, bridge_hook->arg);
764 } 1679 }
765} 1680}
766 1681
767static const void * 1682static const void *
768bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit) 1683bit_to_function_pointer(const struct pci_overrides *ov, uint64_t bit)
769{ 1684{
770 switch (bit) { 1685 switch (bit) {
771 case PCI_OVERRIDE_CONF_READ: 1686 case PCI_OVERRIDE_CONF_READ:
772 return ov->ov_conf_read; 1687 return ov->ov_conf_read;
773 case PCI_OVERRIDE_CONF_WRITE: 1688 case PCI_OVERRIDE_CONF_WRITE:
774 return ov->ov_conf_write; 1689 return ov->ov_conf_write;
775 case PCI_OVERRIDE_INTR_MAP: 1690 case PCI_OVERRIDE_INTR_MAP:
776 return ov->ov_intr_map; 1691 return ov->ov_intr_map;
777 case PCI_OVERRIDE_INTR_STRING: 1692 case PCI_OVERRIDE_INTR_STRING:
778 return ov->ov_intr_string; 1693 return ov->ov_intr_string;
779 case PCI_OVERRIDE_INTR_EVCNT: 1694 case PCI_OVERRIDE_INTR_EVCNT:
780 return ov->ov_intr_evcnt; 1695 return ov->ov_intr_evcnt;
781 case PCI_OVERRIDE_INTR_ESTABLISH: 1696 case PCI_OVERRIDE_INTR_ESTABLISH:
782 return ov->ov_intr_establish; 1697 return ov->ov_intr_establish;
783 case PCI_OVERRIDE_INTR_DISESTABLISH: 1698 case PCI_OVERRIDE_INTR_DISESTABLISH:
784 return ov->ov_intr_disestablish; 1699 return ov->ov_intr_disestablish;
785 case PCI_OVERRIDE_MAKE_TAG: 1700 case PCI_OVERRIDE_MAKE_TAG:
786 return ov->ov_make_tag; 1701 return ov->ov_make_tag;
787 case PCI_OVERRIDE_DECOMPOSE_TAG: 1702 case PCI_OVERRIDE_DECOMPOSE_TAG:
788 return ov->ov_decompose_tag; 1703 return ov->ov_decompose_tag;
789 default: 1704 default:
790 return NULL; 1705 return NULL;
791 } 1706 }
792} 1707}
793 1708
794void 1709void
795pci_chipset_tag_destroy(pci_chipset_tag_t pc) 1710pci_chipset_tag_destroy(pci_chipset_tag_t pc)
796{ 1711{
797 kmem_free(pc, sizeof(struct pci_chipset_tag)); 1712 kmem_free(pc, sizeof(struct pci_chipset_tag));
798} 1713}
799 1714
800int 1715int
801pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present, 1716pci_chipset_tag_create(pci_chipset_tag_t opc, const uint64_t present,
802 const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp) 1717 const struct pci_overrides *ov, void *ctx, pci_chipset_tag_t *pcp)
803{ 1718{
804 uint64_t bit, bits, nbits; 1719 uint64_t bit, bits, nbits;
805 pci_chipset_tag_t pc; 1720 pci_chipset_tag_t pc;
806 const void *fp; 1721 const void *fp;
807 1722
808 if (ov == NULL || present == 0) 1723 if (ov == NULL || present == 0)
809 return EINVAL; 1724 return EINVAL;
810 1725
811 pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP); 1726 pc = kmem_alloc(sizeof(struct pci_chipset_tag), KM_SLEEP);
812 1727
813 if (pc == NULL) 1728 if (pc == NULL)
814 return ENOMEM; 1729 return ENOMEM;
815 1730
816 pc->pc_super = opc; 1731 pc->pc_super = opc;
817 1732
818 for (bits = present; bits != 0; bits = nbits) { 1733 for (bits = present; bits != 0; bits = nbits) {
819 nbits = bits & (bits - 1); 1734 nbits = bits & (bits - 1);
820 bit = nbits ^ bits; 1735 bit = nbits ^ bits;
821 if ((fp = bit_to_function_pointer(ov, bit)) == NULL) { 1736 if ((fp = bit_to_function_pointer(ov, bit)) == NULL) {
822 printf("%s: missing bit %" PRIx64 "\n", __func__, bit); 1737 printf("%s: missing bit %" PRIx64 "\n", __func__, bit);
823 goto einval; 1738 goto einval;
824 } 1739 }
825 } 1740 }
826 1741
827 pc->pc_ov = ov; 1742 pc->pc_ov = ov;
828 pc->pc_present = present; 1743 pc->pc_present = present;
829 pc->pc_ctx = ctx; 1744 pc->pc_ctx = ctx;
830 1745
831 *pcp = pc; 1746 *pcp = pc;
832 1747
833 return 0; 1748 return 0;
834einval: 1749einval:
835 kmem_free(pc, sizeof(struct pci_chipset_tag)); 1750 kmem_free(pc, sizeof(struct pci_chipset_tag));
836 return EINVAL; 1751 return EINVAL;
837} 1752}