Thu Oct 13 17:11:09 2016 UTC ()
provide intr xname


(jdolecek)
diff -r1.37 -r1.38 src/sys/dev/pci/ahcisata_pci.c
diff -r1.40 -r1.41 src/sys/dev/pci/bha_pci.c
diff -r1.61 -r1.62 src/sys/dev/pci/pciide_common.c
diff -r1.50 -r1.51 src/sys/dev/pci/piixpm.c

cvs diff -r1.37 -r1.38 src/sys/dev/pci/ahcisata_pci.c (switch to unified diff)

--- src/sys/dev/pci/ahcisata_pci.c 2016/08/23 09:47:50 1.37
+++ src/sys/dev/pci/ahcisata_pci.c 2016/10/13 17:11:09 1.38
@@ -1,365 +1,366 @@ @@ -1,365 +1,366 @@
1/* $NetBSD: ahcisata_pci.c,v 1.37 2016/08/23 09:47:50 msaitoh Exp $ */ 1/* $NetBSD: ahcisata_pci.c,v 1.38 2016/10/13 17:11:09 jdolecek Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2006 Manuel Bouyer. 4 * Copyright (c) 2006 Manuel Bouyer.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * 25 *
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: ahcisata_pci.c,v 1.37 2016/08/23 09:47:50 msaitoh Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: ahcisata_pci.c,v 1.38 2016/10/13 17:11:09 jdolecek Exp $");
30 30
31#include <sys/types.h> 31#include <sys/types.h>
32#include <sys/malloc.h> 32#include <sys/malloc.h>
33#include <sys/param.h> 33#include <sys/param.h>
34#include <sys/kernel.h> 34#include <sys/kernel.h>
35#include <sys/systm.h> 35#include <sys/systm.h>
36#include <sys/disklabel.h> 36#include <sys/disklabel.h>
37#include <sys/pmf.h> 37#include <sys/pmf.h>
38 38
39#include <dev/pci/pcivar.h> 39#include <dev/pci/pcivar.h>
40#include <dev/pci/pcidevs.h> 40#include <dev/pci/pcidevs.h>
41#include <dev/pci/pciidereg.h> 41#include <dev/pci/pciidereg.h>
42#include <dev/pci/pciidevar.h> 42#include <dev/pci/pciidevar.h>
43#include <dev/ic/ahcisatavar.h> 43#include <dev/ic/ahcisatavar.h>
44 44
45struct ahci_pci_quirk {  45struct ahci_pci_quirk {
46 pci_vendor_id_t vendor; /* Vendor ID */ 46 pci_vendor_id_t vendor; /* Vendor ID */
47 pci_product_id_t product; /* Product ID */ 47 pci_product_id_t product; /* Product ID */
48 int quirks; /* quirks; same as sc_ahci_quirks */ 48 int quirks; /* quirks; same as sc_ahci_quirks */
49}; 49};
50 50
51static const struct ahci_pci_quirk ahci_pci_quirks[] = { 51static const struct ahci_pci_quirk ahci_pci_quirks[] = {
52 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA, 52 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA,
53 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 53 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
54 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA2, 54 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA2,
55 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 55 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
56 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA3, 56 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA3,
57 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 57 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
58 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA4, 58 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_SATA4,
59 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 59 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
60 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1, 60 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_1,
61 AHCI_QUIRK_BADPMP }, 61 AHCI_QUIRK_BADPMP },
62 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2, 62 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_2,
63 AHCI_QUIRK_BADPMP }, 63 AHCI_QUIRK_BADPMP },
64 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3, 64 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_3,
65 AHCI_QUIRK_BADPMP }, 65 AHCI_QUIRK_BADPMP },
66 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4, 66 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_AHCI_4,
67 AHCI_QUIRK_BADPMP }, 67 AHCI_QUIRK_BADPMP },
68 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA, 68 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA,
69 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 69 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
70 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA2, 70 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA2,
71 AHCI_QUIRK_BADPMP }, 71 AHCI_QUIRK_BADPMP },
72 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA3, 72 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA3,
73 AHCI_QUIRK_BADPMP }, 73 AHCI_QUIRK_BADPMP },
74 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA4, 74 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_SATA4,
75 AHCI_QUIRK_BADPMP }, 75 AHCI_QUIRK_BADPMP },
76 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1, 76 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_1,
77 AHCI_QUIRK_BADPMP }, 77 AHCI_QUIRK_BADPMP },
78 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_2, 78 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_2,
79 AHCI_QUIRK_BADPMP }, 79 AHCI_QUIRK_BADPMP },
80 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_3, 80 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_3,
81 AHCI_QUIRK_BADPMP }, 81 AHCI_QUIRK_BADPMP },
82 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_4, 82 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_4,
83 AHCI_QUIRK_BADPMP }, 83 AHCI_QUIRK_BADPMP },
84 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_5, 84 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_5,
85 AHCI_QUIRK_BADPMP }, 85 AHCI_QUIRK_BADPMP },
86 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_6, 86 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_6,
87 AHCI_QUIRK_BADPMP }, 87 AHCI_QUIRK_BADPMP },
88 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_7, 88 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_7,
89 AHCI_QUIRK_BADPMP }, 89 AHCI_QUIRK_BADPMP },
90 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_8, 90 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_AHCI_8,
91 AHCI_QUIRK_BADPMP }, 91 AHCI_QUIRK_BADPMP },
92 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_1, 92 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_1,
93 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 93 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
94 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_2, 94 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_2,
95 AHCI_QUIRK_BADPMP }, 95 AHCI_QUIRK_BADPMP },
96 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_3, 96 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_3,
97 AHCI_QUIRK_BADPMP }, 97 AHCI_QUIRK_BADPMP },
98 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_4, 98 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_4,
99 AHCI_QUIRK_BADPMP }, 99 AHCI_QUIRK_BADPMP },
100 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_5, 100 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_5,
101 AHCI_QUIRK_BADPMP }, 101 AHCI_QUIRK_BADPMP },
102 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_6, 102 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_6,
103 AHCI_QUIRK_BADPMP }, 103 AHCI_QUIRK_BADPMP },
104 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_7, 104 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_7,
105 AHCI_QUIRK_BADPMP }, 105 AHCI_QUIRK_BADPMP },
106 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_8, 106 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_8,
107 AHCI_QUIRK_BADPMP }, 107 AHCI_QUIRK_BADPMP },
108 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_9, 108 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_9,
109 AHCI_QUIRK_BADPMP }, 109 AHCI_QUIRK_BADPMP },
110 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_10, 110 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_10,
111 AHCI_QUIRK_BADPMP }, 111 AHCI_QUIRK_BADPMP },
112 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_11, 112 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_11,
113 AHCI_QUIRK_BADPMP }, 113 AHCI_QUIRK_BADPMP },
114 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_12, 114 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_AHCI_12,
115 AHCI_QUIRK_BADPMP }, 115 AHCI_QUIRK_BADPMP },
116 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_1, 116 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_1,
117 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 117 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
118 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_2, 118 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_2,
119 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 119 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
120 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_3, 120 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_3,
121 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 121 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
122 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_4, 122 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_4,
123 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 123 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
124 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5, 124 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_5,
125 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 125 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
126 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_6, 126 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_6,
127 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 127 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
128 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_7, 128 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_7,
129 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 129 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
130 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_8, 130 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_8,
131 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 131 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
132 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_9, 132 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_9,
133 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 133 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
134 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_10, 134 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_10,
135 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 135 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
136 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_11, 136 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_11,
137 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 137 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
138 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_12, 138 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_AHCI_12,
139 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 139 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
140 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_1, 140 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_1,
141 AHCI_QUIRK_BADPMP }, 141 AHCI_QUIRK_BADPMP },
142 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_2, 142 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_2,
143 AHCI_QUIRK_BADPMP }, 143 AHCI_QUIRK_BADPMP },
144 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_3, 144 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_3,
145 AHCI_QUIRK_BADPMP }, 145 AHCI_QUIRK_BADPMP },
146 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_4, 146 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_4,
147 AHCI_QUIRK_BADPMP }, 147 AHCI_QUIRK_BADPMP },
148 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_5, 148 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_5,
149 AHCI_QUIRK_BADPMP }, 149 AHCI_QUIRK_BADPMP },
150 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_6, 150 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_6,
151 AHCI_QUIRK_BADPMP }, 151 AHCI_QUIRK_BADPMP },
152 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_7, 152 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_7,
153 AHCI_QUIRK_BADPMP }, 153 AHCI_QUIRK_BADPMP },
154 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_8, 154 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_8,
155 AHCI_QUIRK_BADPMP }, 155 AHCI_QUIRK_BADPMP },
156 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_9, 156 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_9,
157 AHCI_QUIRK_BADPMP }, 157 AHCI_QUIRK_BADPMP },
158 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_10, 158 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_10,
159 AHCI_QUIRK_BADPMP }, 159 AHCI_QUIRK_BADPMP },
160 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_11, 160 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_11,
161 AHCI_QUIRK_BADPMP }, 161 AHCI_QUIRK_BADPMP },
162 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_12, 162 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_AHCI_12,
163 AHCI_QUIRK_BADPMP }, 163 AHCI_QUIRK_BADPMP },
164 { PCI_VENDOR_ALI, PCI_PRODUCT_ALI_M5288, 164 { PCI_VENDOR_ALI, PCI_PRODUCT_ALI_M5288,
165 AHCI_PCI_QUIRK_FORCE }, 165 AHCI_PCI_QUIRK_FORCE },
166 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121, 166 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6121,
167 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP }, 167 AHCI_PCI_QUIRK_FORCE | AHCI_QUIRK_BADPMP },
168 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145, 168 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_88SE6145,
169 AHCI_QUIRK_BADPMP }, 169 AHCI_QUIRK_BADPMP },
170 { PCI_VENDOR_MARVELL2, PCI_PRODUCT_MARVELL2_88SE91XX, 170 { PCI_VENDOR_MARVELL2, PCI_PRODUCT_MARVELL2_88SE91XX,
171 AHCI_PCI_QUIRK_FORCE }, 171 AHCI_PCI_QUIRK_FORCE },
172 /* ATI SB600 AHCI 64-bit DMA only works on some boards/BIOSes */ 172 /* ATI SB600 AHCI 64-bit DMA only works on some boards/BIOSes */
173 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA_1, 173 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB600_SATA_1,
174 AHCI_PCI_QUIRK_BAD64 | AHCI_QUIRK_BADPMPRESET }, 174 AHCI_PCI_QUIRK_BAD64 | AHCI_QUIRK_BADPMPRESET },
175 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_AHCI, 175 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_AHCI,
176 AHCI_QUIRK_BADPMPRESET }, 176 AHCI_QUIRK_BADPMPRESET },
177 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_RAID, 177 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_RAID,
178 AHCI_QUIRK_BADPMPRESET }, 178 AHCI_QUIRK_BADPMPRESET },
179 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_RAID5, 179 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_RAID5,
180 AHCI_QUIRK_BADPMPRESET }, 180 AHCI_QUIRK_BADPMPRESET },
181 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_AHCI2, 181 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_AHCI2,
182 AHCI_QUIRK_BADPMPRESET }, 182 AHCI_QUIRK_BADPMPRESET },
183 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_STORAGE, 183 { PCI_VENDOR_ATI, PCI_PRODUCT_ATI_SB700_SATA_STORAGE,
184 AHCI_QUIRK_BADPMPRESET }, 184 AHCI_QUIRK_BADPMPRESET },
185 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8237R_SATA, 185 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8237R_SATA,
186 AHCI_QUIRK_BADPMP }, 186 AHCI_QUIRK_BADPMP },
187 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA, 187 { PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT8251_SATA,
188 AHCI_QUIRK_BADPMP }, 188 AHCI_QUIRK_BADPMP },
189 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_01, 189 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_01,
190 AHCI_PCI_QUIRK_FORCE }, 190 AHCI_PCI_QUIRK_FORCE },
191 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_02, 191 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_02,
192 AHCI_PCI_QUIRK_FORCE }, 192 AHCI_PCI_QUIRK_FORCE },
193 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_11, 193 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_11,
194 AHCI_PCI_QUIRK_FORCE }, 194 AHCI_PCI_QUIRK_FORCE },
195 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_12, 195 { PCI_VENDOR_ASMEDIA, PCI_PRODUCT_ASMEDIA_ASM1061_12,
196 AHCI_PCI_QUIRK_FORCE }, 196 AHCI_PCI_QUIRK_FORCE },
197}; 197};
198 198
199struct ahci_pci_softc { 199struct ahci_pci_softc {
200 struct ahci_softc ah_sc; 200 struct ahci_softc ah_sc;
201 pci_chipset_tag_t sc_pc; 201 pci_chipset_tag_t sc_pc;
202 pcitag_t sc_pcitag; 202 pcitag_t sc_pcitag;
203 void * sc_ih; 203 void * sc_ih;
204}; 204};
205 205
206static int ahci_pci_has_quirk(pci_vendor_id_t, pci_product_id_t); 206static int ahci_pci_has_quirk(pci_vendor_id_t, pci_product_id_t);
207static int ahci_pci_match(device_t, cfdata_t, void *); 207static int ahci_pci_match(device_t, cfdata_t, void *);
208static void ahci_pci_attach(device_t, device_t, void *); 208static void ahci_pci_attach(device_t, device_t, void *);
209static int ahci_pci_detach(device_t, int); 209static int ahci_pci_detach(device_t, int);
210static bool ahci_pci_resume(device_t, const pmf_qual_t *); 210static bool ahci_pci_resume(device_t, const pmf_qual_t *);
211 211
212 212
213CFATTACH_DECL_NEW(ahcisata_pci, sizeof(struct ahci_pci_softc), 213CFATTACH_DECL_NEW(ahcisata_pci, sizeof(struct ahci_pci_softc),
214 ahci_pci_match, ahci_pci_attach, ahci_pci_detach, NULL); 214 ahci_pci_match, ahci_pci_attach, ahci_pci_detach, NULL);
215 215
216static int 216static int
217ahci_pci_has_quirk(pci_vendor_id_t vendor, pci_product_id_t product) 217ahci_pci_has_quirk(pci_vendor_id_t vendor, pci_product_id_t product)
218{ 218{
219 int i; 219 int i;
220 220
221 for (i = 0; i < __arraycount(ahci_pci_quirks); i++) 221 for (i = 0; i < __arraycount(ahci_pci_quirks); i++)
222 if (vendor == ahci_pci_quirks[i].vendor && 222 if (vendor == ahci_pci_quirks[i].vendor &&
223 product == ahci_pci_quirks[i].product) 223 product == ahci_pci_quirks[i].product)
224 return ahci_pci_quirks[i].quirks; 224 return ahci_pci_quirks[i].quirks;
225 return 0; 225 return 0;
226} 226}
227 227
228static int 228static int
229ahci_pci_match(device_t parent, cfdata_t match, void *aux) 229ahci_pci_match(device_t parent, cfdata_t match, void *aux)
230{ 230{
231 struct pci_attach_args *pa = aux; 231 struct pci_attach_args *pa = aux;
232 bus_space_tag_t regt; 232 bus_space_tag_t regt;
233 bus_space_handle_t regh; 233 bus_space_handle_t regh;
234 bus_size_t size; 234 bus_size_t size;
235 int ret = 0; 235 int ret = 0;
236 bool force; 236 bool force;
237 237
238 force = ((ahci_pci_has_quirk( PCI_VENDOR(pa->pa_id), 238 force = ((ahci_pci_has_quirk( PCI_VENDOR(pa->pa_id),
239 PCI_PRODUCT(pa->pa_id)) & AHCI_PCI_QUIRK_FORCE) != 0); 239 PCI_PRODUCT(pa->pa_id)) & AHCI_PCI_QUIRK_FORCE) != 0);
240 240
241 /* if wrong class and not forced by quirks, don't match */ 241 /* if wrong class and not forced by quirks, don't match */
242 if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_MASS_STORAGE || 242 if ((PCI_CLASS(pa->pa_class) != PCI_CLASS_MASS_STORAGE ||
243 ((PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_SATA || 243 ((PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_SATA ||
244 PCI_INTERFACE(pa->pa_class) != PCI_INTERFACE_SATA_AHCI) && 244 PCI_INTERFACE(pa->pa_class) != PCI_INTERFACE_SATA_AHCI) &&
245 PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_RAID)) && 245 PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_RAID)) &&
246 (force == false)) 246 (force == false))
247 return 0; 247 return 0;
248 248
249 if (pci_mapreg_map(pa, AHCI_PCI_ABAR, 249 if (pci_mapreg_map(pa, AHCI_PCI_ABAR,
250 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 250 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
251 &regt, &regh, NULL, &size) != 0) 251 &regt, &regh, NULL, &size) != 0)
252 return 0; 252 return 0;
253 253
254 if ((PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_SATA && 254 if ((PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_SATA &&
255 PCI_INTERFACE(pa->pa_class) == PCI_INTERFACE_SATA_AHCI) || 255 PCI_INTERFACE(pa->pa_class) == PCI_INTERFACE_SATA_AHCI) ||
256 (bus_space_read_4(regt, regh, AHCI_GHC) & AHCI_GHC_AE) || 256 (bus_space_read_4(regt, regh, AHCI_GHC) & AHCI_GHC_AE) ||
257 (force == true)) 257 (force == true))
258 ret = 3; 258 ret = 3;
259 259
260 bus_space_unmap(regt, regh, size); 260 bus_space_unmap(regt, regh, size);
261 return ret; 261 return ret;
262} 262}
263 263
264static void 264static void
265ahci_pci_attach(device_t parent, device_t self, void *aux) 265ahci_pci_attach(device_t parent, device_t self, void *aux)
266{ 266{
267 struct pci_attach_args *pa = aux; 267 struct pci_attach_args *pa = aux;
268 struct ahci_pci_softc *psc = device_private(self); 268 struct ahci_pci_softc *psc = device_private(self);
269 struct ahci_softc *sc = &psc->ah_sc; 269 struct ahci_softc *sc = &psc->ah_sc;
270 const char *intrstr; 270 const char *intrstr;
271 bool ahci_cap_64bit; 271 bool ahci_cap_64bit;
272 bool ahci_bad_64bit; 272 bool ahci_bad_64bit;
273 pci_intr_handle_t intrhandle; 273 pci_intr_handle_t intrhandle;
274 char intrbuf[PCI_INTRSTR_LEN]; 274 char intrbuf[PCI_INTRSTR_LEN];
275 275
276 sc->sc_atac.atac_dev = self; 276 sc->sc_atac.atac_dev = self;
277 277
278 if (pci_mapreg_map(pa, AHCI_PCI_ABAR, 278 if (pci_mapreg_map(pa, AHCI_PCI_ABAR,
279 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 279 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
280 &sc->sc_ahcit, &sc->sc_ahcih, NULL, &sc->sc_ahcis) != 0) { 280 &sc->sc_ahcit, &sc->sc_ahcih, NULL, &sc->sc_ahcis) != 0) {
281 aprint_error_dev(self, "can't map ahci registers\n"); 281 aprint_error_dev(self, "can't map ahci registers\n");
282 return; 282 return;
283 } 283 }
284 psc->sc_pc = pa->pa_pc; 284 psc->sc_pc = pa->pa_pc;
285 psc->sc_pcitag = pa->pa_tag; 285 psc->sc_pcitag = pa->pa_tag;
286 286
287 pci_aprint_devinfo(pa, "AHCI disk controller"); 287 pci_aprint_devinfo(pa, "AHCI disk controller");
288  288
289 if (pci_intr_map(pa, &intrhandle) != 0) { 289 if (pci_intr_map(pa, &intrhandle) != 0) {
290 aprint_error_dev(self, "couldn't map interrupt\n"); 290 aprint_error_dev(self, "couldn't map interrupt\n");
291 return; 291 return;
292 } 292 }
293 intrstr = pci_intr_string(pa->pa_pc, intrhandle, 293 intrstr = pci_intr_string(pa->pa_pc, intrhandle,
294 intrbuf, sizeof(intrbuf)); 294 intrbuf, sizeof(intrbuf));
295 psc->sc_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, ahci_intr, sc); 295 psc->sc_ih = pci_intr_establish_xname(pa->pa_pc, intrhandle, IPL_BIO,
 296 ahci_intr, sc, device_xname(sc->sc_atac.atac_dev));
296 if (psc->sc_ih == NULL) { 297 if (psc->sc_ih == NULL) {
297 aprint_error_dev(self, "couldn't establish interrupt\n"); 298 aprint_error_dev(self, "couldn't establish interrupt\n");
298 return; 299 return;
299 } 300 }
300 aprint_normal_dev(self, "interrupting at %s\n", intrstr); 301 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
301 302
302 sc->sc_dmat = pa->pa_dmat; 303 sc->sc_dmat = pa->pa_dmat;
303 304
304 sc->sc_ahci_quirks = ahci_pci_has_quirk(PCI_VENDOR(pa->pa_id), 305 sc->sc_ahci_quirks = ahci_pci_has_quirk(PCI_VENDOR(pa->pa_id),
305 PCI_PRODUCT(pa->pa_id)); 306 PCI_PRODUCT(pa->pa_id));
306 307
307 ahci_cap_64bit = (AHCI_READ(sc, AHCI_CAP) & AHCI_CAP_64BIT) != 0; 308 ahci_cap_64bit = (AHCI_READ(sc, AHCI_CAP) & AHCI_CAP_64BIT) != 0;
308 ahci_bad_64bit = ((sc->sc_ahci_quirks & AHCI_PCI_QUIRK_BAD64) != 0); 309 ahci_bad_64bit = ((sc->sc_ahci_quirks & AHCI_PCI_QUIRK_BAD64) != 0);
309 310
310 if (pci_dma64_available(pa) && ahci_cap_64bit) { 311 if (pci_dma64_available(pa) && ahci_cap_64bit) {
311 if (!ahci_bad_64bit) 312 if (!ahci_bad_64bit)
312 sc->sc_dmat = pa->pa_dmat64; 313 sc->sc_dmat = pa->pa_dmat64;
313 aprint_verbose_dev(self, "64-bit DMA%s\n", 314 aprint_verbose_dev(self, "64-bit DMA%s\n",
314 (sc->sc_dmat == pa->pa_dmat) ? " unavailable" : ""); 315 (sc->sc_dmat == pa->pa_dmat) ? " unavailable" : "");
315 } 316 }
316 317
317 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID) { 318 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID) {
318 AHCIDEBUG_PRINT(("%s: RAID mode\n", AHCINAME(sc)), DEBUG_PROBE); 319 AHCIDEBUG_PRINT(("%s: RAID mode\n", AHCINAME(sc)), DEBUG_PROBE);
319 sc->sc_atac_capflags = ATAC_CAP_RAID; 320 sc->sc_atac_capflags = ATAC_CAP_RAID;
320 } else { 321 } else {
321 AHCIDEBUG_PRINT(("%s: SATA mode\n", AHCINAME(sc)), DEBUG_PROBE); 322 AHCIDEBUG_PRINT(("%s: SATA mode\n", AHCINAME(sc)), DEBUG_PROBE);
322 } 323 }
323 324
324 ahci_attach(sc); 325 ahci_attach(sc);
325 326
326 if (!pmf_device_register(self, NULL, ahci_pci_resume)) 327 if (!pmf_device_register(self, NULL, ahci_pci_resume))
327 aprint_error_dev(self, "couldn't establish power handler\n"); 328 aprint_error_dev(self, "couldn't establish power handler\n");
328} 329}
329 330
330static int 331static int
331ahci_pci_detach(device_t dv, int flags) 332ahci_pci_detach(device_t dv, int flags)
332{ 333{
333 struct ahci_pci_softc *psc; 334 struct ahci_pci_softc *psc;
334 struct ahci_softc *sc; 335 struct ahci_softc *sc;
335 int rv; 336 int rv;
336 337
337 psc = device_private(dv); 338 psc = device_private(dv);
338 sc = &psc->ah_sc; 339 sc = &psc->ah_sc;
339 340
340 if ((rv = ahci_detach(sc, flags))) 341 if ((rv = ahci_detach(sc, flags)))
341 return rv; 342 return rv;
342 343
343 pmf_device_deregister(dv); 344 pmf_device_deregister(dv);
344 345
345 if (psc->sc_ih != NULL) 346 if (psc->sc_ih != NULL)
346 pci_intr_disestablish(psc->sc_pc, psc->sc_ih); 347 pci_intr_disestablish(psc->sc_pc, psc->sc_ih);
347 348
348 bus_space_unmap(sc->sc_ahcit, sc->sc_ahcih, sc->sc_ahcis); 349 bus_space_unmap(sc->sc_ahcit, sc->sc_ahcih, sc->sc_ahcis);
349 350
350 return 0; 351 return 0;
351} 352}
352 353
353static bool 354static bool
354ahci_pci_resume(device_t dv, const pmf_qual_t *qual) 355ahci_pci_resume(device_t dv, const pmf_qual_t *qual)
355{ 356{
356 struct ahci_pci_softc *psc = device_private(dv); 357 struct ahci_pci_softc *psc = device_private(dv);
357 struct ahci_softc *sc = &psc->ah_sc; 358 struct ahci_softc *sc = &psc->ah_sc;
358 int s; 359 int s;
359 360
360 s = splbio(); 361 s = splbio();
361 ahci_resume(sc); 362 ahci_resume(sc);
362 splx(s); 363 splx(s);
363 364
364 return true; 365 return true;
365} 366}

cvs diff -r1.40 -r1.41 src/sys/dev/pci/bha_pci.c (switch to unified diff)

--- src/sys/dev/pci/bha_pci.c 2014/10/18 08:33:28 1.40
+++ src/sys/dev/pci/bha_pci.c 2016/10/13 17:11:09 1.41
@@ -1,152 +1,153 @@ @@ -1,152 +1,153 @@
1/* $NetBSD: bha_pci.c,v 1.40 2014/10/18 08:33:28 snj Exp $ */ 1/* $NetBSD: bha_pci.c,v 1.41 2016/10/13 17:11:09 jdolecek Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum. 8 * by Charles M. Hannum.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: bha_pci.c,v 1.40 2014/10/18 08:33:28 snj Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: bha_pci.c,v 1.41 2016/10/13 17:11:09 jdolecek Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/device.h> 37#include <sys/device.h>
38 38
39#include <sys/bus.h> 39#include <sys/bus.h>
40#include <sys/intr.h> 40#include <sys/intr.h>
41 41
42#include <dev/scsipi/scsipi_all.h> 42#include <dev/scsipi/scsipi_all.h>
43#include <dev/scsipi/scsiconf.h> 43#include <dev/scsipi/scsiconf.h>
44 44
45#include <dev/pci/pcivar.h> 45#include <dev/pci/pcivar.h>
46#include <dev/pci/pcidevs.h> 46#include <dev/pci/pcidevs.h>
47 47
48#include <dev/ic/bhareg.h> 48#include <dev/ic/bhareg.h>
49#include <dev/ic/bhavar.h> 49#include <dev/ic/bhavar.h>
50 50
51#define PCI_CBIO 0x10 51#define PCI_CBIO 0x10
52 52
53/* 53/*
54 * Check the slots looking for a board we recognise 54 * Check the slots looking for a board we recognise
55 * If we find one, note its address (slot) and call 55 * If we find one, note its address (slot) and call
56 * the actual probe routine to check it out. 56 * the actual probe routine to check it out.
57 */ 57 */
58static int 58static int
59bha_pci_match(device_t parent, cfdata_t match, void *aux) 59bha_pci_match(device_t parent, cfdata_t match, void *aux)
60{ 60{
61 struct pci_attach_args *pa = aux; 61 struct pci_attach_args *pa = aux;
62 bus_space_tag_t iot; 62 bus_space_tag_t iot;
63 bus_space_handle_t ioh; 63 bus_space_handle_t ioh;
64 bus_size_t iosize; 64 bus_size_t iosize;
65 int rv; 65 int rv;
66 66
67 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BUSLOGIC) 67 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BUSLOGIC)
68 return (0); 68 return (0);
69 69
70 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BUSLOGIC_MULTIMASTER_NC && 70 if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BUSLOGIC_MULTIMASTER_NC &&
71 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BUSLOGIC_MULTIMASTER) 71 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BUSLOGIC_MULTIMASTER)
72 return (0); 72 return (0);
73 73
74 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0, &iot, &ioh, 74 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0, &iot, &ioh,
75 NULL, &iosize)) 75 NULL, &iosize))
76 return (0); 76 return (0);
77 77
78 rv = bha_find(iot, ioh); 78 rv = bha_find(iot, ioh);
79 79
80 bus_space_unmap(iot, ioh, iosize); 80 bus_space_unmap(iot, ioh, iosize);
81 81
82 return (rv); 82 return (rv);
83} 83}
84 84
85/* 85/*
86 * Attach all the sub-devices we can find 86 * Attach all the sub-devices we can find
87 */ 87 */
88static void 88static void
89bha_pci_attach(device_t parent, device_t self, void *aux) 89bha_pci_attach(device_t parent, device_t self, void *aux)
90{ 90{
91 struct pci_attach_args *pa = aux; 91 struct pci_attach_args *pa = aux;
92 struct bha_softc *sc = device_private(self); 92 struct bha_softc *sc = device_private(self);
93 bus_space_tag_t iot; 93 bus_space_tag_t iot;
94 bus_space_handle_t ioh; 94 bus_space_handle_t ioh;
95 pci_chipset_tag_t pc = pa->pa_pc; 95 pci_chipset_tag_t pc = pa->pa_pc;
96 pci_intr_handle_t ih; 96 pci_intr_handle_t ih;
97 pcireg_t csr; 97 pcireg_t csr;
98 const char *model, *intrstr; 98 const char *model, *intrstr;
99 char intrbuf[PCI_INTRSTR_LEN]; 99 char intrbuf[PCI_INTRSTR_LEN];
100 100
101 sc->sc_dev = self; 101 sc->sc_dev = self;
102 102
103 aprint_naive(": SCSI controller\n"); 103 aprint_naive(": SCSI controller\n");
104 104
105 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BUSLOGIC_MULTIMASTER_NC) 105 if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BUSLOGIC_MULTIMASTER_NC)
106 model = "BusLogic 9xxC SCSI"; 106 model = "BusLogic 9xxC SCSI";
107 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BUSLOGIC_MULTIMASTER) 107 else if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BUSLOGIC_MULTIMASTER)
108 model = "BusLogic 9xxC SCSI"; 108 model = "BusLogic 9xxC SCSI";
109 else 109 else
110 model = "unknown model!"; 110 model = "unknown model!";
111 aprint_normal(": %s\n", model); 111 aprint_normal(": %s\n", model);
112 112
113 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0, &iot, &ioh, 113 if (pci_mapreg_map(pa, PCI_CBIO, PCI_MAPREG_TYPE_IO, 0, &iot, &ioh,
114 NULL, NULL)) { 114 NULL, NULL)) {
115 aprint_error_dev(sc->sc_dev, "unable to map device registers\n"); 115 aprint_error_dev(sc->sc_dev, "unable to map device registers\n");
116 return; 116 return;
117 } 117 }
118 118
119 sc->sc_iot = iot; 119 sc->sc_iot = iot;
120 sc->sc_ioh = ioh; 120 sc->sc_ioh = ioh;
121 sc->sc_dmat = pa->pa_dmat; 121 sc->sc_dmat = pa->pa_dmat;
122 if (!bha_find(iot, ioh)) 122 if (!bha_find(iot, ioh))
123 panic("bha_pci_attach: bha_find failed"); 123 panic("bha_pci_attach: bha_find failed");
124 124
125 sc->sc_dmaflags = 0; 125 sc->sc_dmaflags = 0;
126 126
127 csr = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 127 csr = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
128 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 128 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
129 csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE); 129 csr | PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE);
130 130
131 if (pci_intr_map(pa, &ih)) { 131 if (pci_intr_map(pa, &ih)) {
132 aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n"); 132 aprint_error_dev(sc->sc_dev, "couldn't map interrupt\n");
133 return; 133 return;
134 } 134 }
135 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 135 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
136 sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, bha_intr, sc); 136 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_BIO, bha_intr, sc,
 137 device_xname(sc->sc_dev));
137 if (sc->sc_ih == NULL) { 138 if (sc->sc_ih == NULL) {
138 aprint_error_dev(sc->sc_dev, "couldn't establish interrupt"); 139 aprint_error_dev(sc->sc_dev, "couldn't establish interrupt");
139 if (intrstr != NULL) 140 if (intrstr != NULL)
140 aprint_error(" at %s", intrstr); 141 aprint_error(" at %s", intrstr);
141 aprint_error("\n"); 142 aprint_error("\n");
142 return; 143 return;
143 } 144 }
144 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 145 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
145 146
146 bha_attach(sc); 147 bha_attach(sc);
147 148
148 bha_disable_isacompat(sc); 149 bha_disable_isacompat(sc);
149} 150}
150 151
151CFATTACH_DECL_NEW(bha_pci, sizeof(struct bha_softc), 152CFATTACH_DECL_NEW(bha_pci, sizeof(struct bha_softc),
152 bha_pci_match, bha_pci_attach, NULL, NULL); 153 bha_pci_match, bha_pci_attach, NULL, NULL);

cvs diff -r1.61 -r1.62 src/sys/dev/pci/pciide_common.c (switch to unified diff)

--- src/sys/dev/pci/pciide_common.c 2016/07/07 06:55:41 1.61
+++ src/sys/dev/pci/pciide_common.c 2016/10/13 17:11:09 1.62
@@ -1,1160 +1,1161 @@ @@ -1,1160 +1,1161 @@
1/* $NetBSD: pciide_common.c,v 1.61 2016/07/07 06:55:41 msaitoh Exp $ */ 1/* $NetBSD: pciide_common.c,v 1.62 2016/10/13 17:11:09 jdolecek Exp $ */
2 2
3 3
4/* 4/*
5 * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer. 5 * Copyright (c) 1999, 2000, 2001, 2003 Manuel Bouyer.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * 26 *
27 */ 27 */
28 28
29 29
30/* 30/*
31 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 31 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
32 * 32 *
33 * Redistribution and use in source and binary forms, with or without 33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions 34 * modification, are permitted provided that the following conditions
35 * are met: 35 * are met:
36 * 1. Redistributions of source code must retain the above copyright 36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer. 37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright 38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the 39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution. 40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software 41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement: 42 * must display the following acknowledgement:
43 * This product includes software developed by Christopher G. Demetriou 43 * This product includes software developed by Christopher G. Demetriou
44 * for the NetBSD Project. 44 * for the NetBSD Project.
45 * 4. The name of the author may not be used to endorse or promote products 45 * 4. The name of the author may not be used to endorse or promote products
46 * derived from this software without specific prior written permission 46 * derived from this software without specific prior written permission
47 * 47 *
48 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
51 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
57 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 */ 58 */
59 59
60/* 60/*
61 * PCI IDE controller driver. 61 * PCI IDE controller driver.
62 * 62 *
63 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 63 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
64 * sys/dev/pci/ppb.c, revision 1.16). 64 * sys/dev/pci/ppb.c, revision 1.16).
65 * 65 *
66 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 66 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
67 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 67 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
68 * 5/16/94" from the PCI SIG. 68 * 5/16/94" from the PCI SIG.
69 * 69 *
70 */ 70 */
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.61 2016/07/07 06:55:41 msaitoh Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: pciide_common.c,v 1.62 2016/10/13 17:11:09 jdolecek Exp $");
74 74
75#include <sys/param.h> 75#include <sys/param.h>
76#include <sys/malloc.h> 76#include <sys/malloc.h>
77 77
78#include <dev/pci/pcireg.h> 78#include <dev/pci/pcireg.h>
79#include <dev/pci/pcivar.h> 79#include <dev/pci/pcivar.h>
80#include <dev/pci/pcidevs.h> 80#include <dev/pci/pcidevs.h>
81#include <dev/pci/pciidereg.h> 81#include <dev/pci/pciidereg.h>
82#include <dev/pci/pciidevar.h> 82#include <dev/pci/pciidevar.h>
83 83
84#include <dev/ic/wdcreg.h> 84#include <dev/ic/wdcreg.h>
85 85
86#ifdef ATADEBUG 86#ifdef ATADEBUG
87int atadebug_pciide_mask = 0; 87int atadebug_pciide_mask = 0;
88#endif 88#endif
89 89
90#if NATA_DMA 90#if NATA_DMA
91static const char dmaerrfmt[] = 91static const char dmaerrfmt[] =
92 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n"; 92 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
93#endif 93#endif
94 94
95/* Default product description for devices not known from this controller */ 95/* Default product description for devices not known from this controller */
96const struct pciide_product_desc default_product_desc = { 96const struct pciide_product_desc default_product_desc = {
97 0, 97 0,
98 0, 98 0,
99 "Generic PCI IDE controller", 99 "Generic PCI IDE controller",
100 default_chip_map, 100 default_chip_map,
101}; 101};
102 102
103const struct pciide_product_desc * 103const struct pciide_product_desc *
104pciide_lookup_product(pcireg_t id, const struct pciide_product_desc *pp) 104pciide_lookup_product(pcireg_t id, const struct pciide_product_desc *pp)
105{ 105{
106 for (; pp->chip_map != NULL; pp++) 106 for (; pp->chip_map != NULL; pp++)
107 if (PCI_PRODUCT(id) == pp->ide_product) 107 if (PCI_PRODUCT(id) == pp->ide_product)
108 break; 108 break;
109 109
110 if (pp->chip_map == NULL) 110 if (pp->chip_map == NULL)
111 return NULL; 111 return NULL;
112 return pp; 112 return pp;
113} 113}
114 114
115void 115void
116pciide_common_attach(struct pciide_softc *sc, const struct pci_attach_args *pa, 116pciide_common_attach(struct pciide_softc *sc, const struct pci_attach_args *pa,
117 const struct pciide_product_desc *pp) 117 const struct pciide_product_desc *pp)
118{ 118{
119 pci_chipset_tag_t pc = pa->pa_pc; 119 pci_chipset_tag_t pc = pa->pa_pc;
120 pcitag_t tag = pa->pa_tag; 120 pcitag_t tag = pa->pa_tag;
121#if NATA_DMA 121#if NATA_DMA
122 pcireg_t csr; 122 pcireg_t csr;
123#endif 123#endif
124 const char *displaydev = NULL; 124 const char *displaydev = NULL;
125 int dontprint = 0; 125 int dontprint = 0;
126 126
127 sc->sc_pci_id = pa->pa_id; 127 sc->sc_pci_id = pa->pa_id;
128 if (pp == NULL) { 128 if (pp == NULL) {
129 /* should only happen for generic pciide devices */ 129 /* should only happen for generic pciide devices */
130 sc->sc_pp = &default_product_desc; 130 sc->sc_pp = &default_product_desc;
131 } else { 131 } else {
132 sc->sc_pp = pp; 132 sc->sc_pp = pp;
133 /* if ide_name == NULL, printf is done in chip-specific map */ 133 /* if ide_name == NULL, printf is done in chip-specific map */
134 if (pp->ide_name) 134 if (pp->ide_name)
135 displaydev = pp->ide_name; 135 displaydev = pp->ide_name;
136 else 136 else
137 dontprint = 1; 137 dontprint = 1;
138 } 138 }
139 139
140 if (dontprint) { 140 if (dontprint) {
141 aprint_naive("disk controller\n"); 141 aprint_naive("disk controller\n");
142 aprint_normal("\n"); /* ??? */ 142 aprint_normal("\n"); /* ??? */
143 } else 143 } else
144 pci_aprint_devinfo_fancy(pa, "disk controller", displaydev, 1); 144 pci_aprint_devinfo_fancy(pa, "disk controller", displaydev, 1);
145 145
146 sc->sc_pc = pa->pa_pc; 146 sc->sc_pc = pa->pa_pc;
147 sc->sc_tag = pa->pa_tag; 147 sc->sc_tag = pa->pa_tag;
148 148
149#if NATA_DMA 149#if NATA_DMA
150 /* Set up DMA defaults; these might be adjusted by chip_map. */ 150 /* Set up DMA defaults; these might be adjusted by chip_map. */
151 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 151 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
152 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 152 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
153#endif 153#endif
154 154
155#ifdef ATADEBUG 155#ifdef ATADEBUG
156 if (atadebug_pciide_mask & DEBUG_PROBE) 156 if (atadebug_pciide_mask & DEBUG_PROBE)
157 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL); 157 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
158#endif 158#endif
159 sc->sc_pp->chip_map(sc, pa); 159 sc->sc_pp->chip_map(sc, pa);
160 160
161#if NATA_DMA 161#if NATA_DMA
162 if (sc->sc_dma_ok) { 162 if (sc->sc_dma_ok) {
163 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 163 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
164 csr |= PCI_COMMAND_MASTER_ENABLE; 164 csr |= PCI_COMMAND_MASTER_ENABLE;
165 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 165 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
166 } 166 }
167#endif 167#endif
168 ATADEBUG_PRINT(("pciide: command/status register=%x\n", 168 ATADEBUG_PRINT(("pciide: command/status register=%x\n",
169 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 169 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
170} 170}
171 171
172int 172int
173pciide_common_detach(struct pciide_softc *sc, int flags) 173pciide_common_detach(struct pciide_softc *sc, int flags)
174{ 174{
175 struct pciide_channel *cp; 175 struct pciide_channel *cp;
176 struct ata_channel *wdc_cp; 176 struct ata_channel *wdc_cp;
177 struct wdc_regs *wdr; 177 struct wdc_regs *wdr;
178 int channel, drive; 178 int channel, drive;
179 int rv; 179 int rv;
180 180
181 rv = wdcdetach(sc->sc_wdcdev.sc_atac.atac_dev, flags); 181 rv = wdcdetach(sc->sc_wdcdev.sc_atac.atac_dev, flags);
182 if (rv) 182 if (rv)
183 return rv; 183 return rv;
184 184
185 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 185 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
186 channel++) { 186 channel++) {
187 cp = &sc->pciide_channels[channel]; 187 cp = &sc->pciide_channels[channel];
188 wdc_cp = &cp->ata_channel; 188 wdc_cp = &cp->ata_channel;
189 wdr = CHAN_TO_WDC_REGS(wdc_cp); 189 wdr = CHAN_TO_WDC_REGS(wdc_cp);
190 190
191 if (wdc_cp->ch_flags & ATACH_DISABLED) 191 if (wdc_cp->ch_flags & ATACH_DISABLED)
192 continue; 192 continue;
193 193
194 if (wdr->cmd_ios != 0) 194 if (wdr->cmd_ios != 0)
195 bus_space_unmap(wdr->cmd_iot, 195 bus_space_unmap(wdr->cmd_iot,
196 wdr->cmd_baseioh, wdr->cmd_ios); 196 wdr->cmd_baseioh, wdr->cmd_ios);
197 if (cp->compat != 0) { 197 if (cp->compat != 0) {
198 if (wdr->ctl_ios != 0) 198 if (wdr->ctl_ios != 0)
199 bus_space_unmap(wdr->ctl_iot, 199 bus_space_unmap(wdr->ctl_iot,
200 wdr->ctl_ioh, wdr->ctl_ios); 200 wdr->ctl_ioh, wdr->ctl_ios);
201 } else { 201 } else {
202 if (cp->ctl_ios != 0) 202 if (cp->ctl_ios != 0)
203 bus_space_unmap(wdr->ctl_iot, 203 bus_space_unmap(wdr->ctl_iot,
204 cp->ctl_baseioh, cp->ctl_ios); 204 cp->ctl_baseioh, cp->ctl_ios);
205 } 205 }
206 206
207 for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) { 207 for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) {
208#if NATA_DMA 208#if NATA_DMA
209 pciide_dma_table_teardown(sc, channel, drive); 209 pciide_dma_table_teardown(sc, channel, drive);
210#endif 210#endif
211 } 211 }
212 212
213 free(cp->ata_channel.ch_queue, M_DEVBUF); 213 free(cp->ata_channel.ch_queue, M_DEVBUF);
214 cp->ata_channel.atabus = NULL; 214 cp->ata_channel.atabus = NULL;
215 } 215 }
216 216
217#if NATA_DMA 217#if NATA_DMA
218 if (sc->sc_dma_ios != 0) 218 if (sc->sc_dma_ios != 0)
219 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_ios); 219 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_ios);
220 if (sc->sc_ba5_ss != 0) 220 if (sc->sc_ba5_ss != 0)
221 bus_space_unmap(sc->sc_ba5_st, sc->sc_ba5_sh, sc->sc_ba5_ss); 221 bus_space_unmap(sc->sc_ba5_st, sc->sc_ba5_sh, sc->sc_ba5_ss);
222#endif 222#endif
223 223
224 return 0; 224 return 0;
225} 225}
226 226
227int 227int
228pciide_detach(device_t self, int flags) 228pciide_detach(device_t self, int flags)
229{ 229{
230 struct pciide_softc *sc = device_private(self); 230 struct pciide_softc *sc = device_private(self);
231 struct pciide_channel *cp; 231 struct pciide_channel *cp;
232 int channel; 232 int channel;
233#ifndef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH 233#ifndef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH
234 bool has_compat_chan; 234 bool has_compat_chan;
235 235
236 has_compat_chan = false; 236 has_compat_chan = false;
237 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 237 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
238 channel++) { 238 channel++) {
239 cp = &sc->pciide_channels[channel]; 239 cp = &sc->pciide_channels[channel];
240 if (cp->compat != 0) { 240 if (cp->compat != 0) {
241 has_compat_chan = true; 241 has_compat_chan = true;
242 } 242 }
243 } 243 }
244 244
245 if (has_compat_chan != false) 245 if (has_compat_chan != false)
246 return EBUSY; 246 return EBUSY;
247#endif 247#endif
248 248
249 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 249 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
250 channel++) { 250 channel++) {
251 cp = &sc->pciide_channels[channel]; 251 cp = &sc->pciide_channels[channel];
252 if (cp->compat != 0) 252 if (cp->compat != 0)
253 if (cp->ih != NULL) { 253 if (cp->ih != NULL) {
254 pciide_unmap_compat_intr(sc->sc_pc, cp, channel); 254 pciide_unmap_compat_intr(sc->sc_pc, cp, channel);
255 cp->ih = NULL; 255 cp->ih = NULL;
256 } 256 }
257 } 257 }
258 258
259 if (sc->sc_pci_ih != NULL) { 259 if (sc->sc_pci_ih != NULL) {
260 pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 260 pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih);
261 sc->sc_pci_ih = NULL; 261 sc->sc_pci_ih = NULL;
262 } 262 }
263 263
264 return pciide_common_detach(sc, flags); 264 return pciide_common_detach(sc, flags);
265} 265}
266 266
267/* tell whether the chip is enabled or not */ 267/* tell whether the chip is enabled or not */
268int 268int
269pciide_chipen(struct pciide_softc *sc, const struct pci_attach_args *pa) 269pciide_chipen(struct pciide_softc *sc, const struct pci_attach_args *pa)
270{ 270{
271 pcireg_t csr; 271 pcireg_t csr;
272 272
273 if ((pa->pa_flags & PCI_FLAGS_IO_OKAY) == 0) { 273 if ((pa->pa_flags & PCI_FLAGS_IO_OKAY) == 0) {
274 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, 274 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
275 "I/O access disabled at bridge\n"); 275 "I/O access disabled at bridge\n");
276 return 0; 276 return 0;
277 } 277 }
278 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); 278 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
279 if ((csr & PCI_COMMAND_IO_ENABLE) == 0) { 279 if ((csr & PCI_COMMAND_IO_ENABLE) == 0) {
280 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, 280 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
281 "I/O access disabled at device\n"); 281 "I/O access disabled at device\n");
282 return 0; 282 return 0;
283 } 283 }
284 return 1; 284 return 1;
285} 285}
286 286
287void 287void
288pciide_mapregs_compat(const struct pci_attach_args *pa, 288pciide_mapregs_compat(const struct pci_attach_args *pa,
289 struct pciide_channel *cp, int compatchan) 289 struct pciide_channel *cp, int compatchan)
290{ 290{
291 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 291 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
292 struct ata_channel *wdc_cp = &cp->ata_channel; 292 struct ata_channel *wdc_cp = &cp->ata_channel;
293 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp); 293 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
294 int i; 294 int i;
295 295
296 cp->compat = 1; 296 cp->compat = 1;
297 297
298 wdr->cmd_iot = pa->pa_iot; 298 wdr->cmd_iot = pa->pa_iot;
299 if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 299 if (bus_space_map(wdr->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
300 PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) { 300 PCIIDE_COMPAT_CMD_SIZE, 0, &wdr->cmd_baseioh) != 0) {
301 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 301 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
302 "couldn't map %s channel cmd regs\n", cp->name); 302 "couldn't map %s channel cmd regs\n", cp->name);
303 goto bad; 303 goto bad;
304 } 304 }
305 wdr->cmd_ios = PCIIDE_COMPAT_CMD_SIZE; 305 wdr->cmd_ios = PCIIDE_COMPAT_CMD_SIZE;
306 306
307 wdr->ctl_iot = pa->pa_iot; 307 wdr->ctl_iot = pa->pa_iot;
308 if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 308 if (bus_space_map(wdr->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
309 PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) { 309 PCIIDE_COMPAT_CTL_SIZE, 0, &wdr->ctl_ioh) != 0) {
310 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 310 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
311 "couldn't map %s channel ctl regs\n", cp->name); 311 "couldn't map %s channel ctl regs\n", cp->name);
312 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); 312 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios);
313 goto bad; 313 goto bad;
314 } 314 }
315 wdr->ctl_ios = PCIIDE_COMPAT_CTL_SIZE; 315 wdr->ctl_ios = PCIIDE_COMPAT_CTL_SIZE;
316 316
317 for (i = 0; i < WDC_NREG; i++) { 317 for (i = 0; i < WDC_NREG; i++) {
318 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, 318 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
319 i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) { 319 i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
320 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 320 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
321 "couldn't subregion %s channel cmd regs\n", 321 "couldn't subregion %s channel cmd regs\n",
322 cp->name); 322 cp->name);
323 goto bad; 323 goto bad;
324 } 324 }
325 } 325 }
326 wdc_init_shadow_regs(wdc_cp); 326 wdc_init_shadow_regs(wdc_cp);
327 wdr->data32iot = wdr->cmd_iot; 327 wdr->data32iot = wdr->cmd_iot;
328 wdr->data32ioh = wdr->cmd_iohs[0]; 328 wdr->data32ioh = wdr->cmd_iohs[0];
329 return; 329 return;
330 330
331bad: 331bad:
332 cp->ata_channel.ch_flags |= ATACH_DISABLED; 332 cp->ata_channel.ch_flags |= ATACH_DISABLED;
333 return; 333 return;
334} 334}
335 335
336void 336void
337pciide_mapregs_native(const struct pci_attach_args *pa, 337pciide_mapregs_native(const struct pci_attach_args *pa,
338 struct pciide_channel *cp, int (*pci_intr)(void *)) 338 struct pciide_channel *cp, int (*pci_intr)(void *))
339{ 339{
340 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 340 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
341 struct ata_channel *wdc_cp = &cp->ata_channel; 341 struct ata_channel *wdc_cp = &cp->ata_channel;
342 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp); 342 struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
343 const char *intrstr; 343 const char *intrstr;
344 pci_intr_handle_t intrhandle; 344 pci_intr_handle_t intrhandle;
345 int i; 345 int i;
346 char intrbuf[PCI_INTRSTR_LEN]; 346 char intrbuf[PCI_INTRSTR_LEN];
347 347
348 cp->compat = 0; 348 cp->compat = 0;
349 349
350 if (sc->sc_pci_ih == NULL) { 350 if (sc->sc_pci_ih == NULL) {
351 if (pci_intr_map(pa, &intrhandle) != 0) { 351 if (pci_intr_map(pa, &intrhandle) != 0) {
352 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 352 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
353 "couldn't map native-PCI interrupt\n"); 353 "couldn't map native-PCI interrupt\n");
354 goto bad; 354 goto bad;
355 } 355 }
356 intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf, sizeof(intrbuf)); 356 intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf, sizeof(intrbuf));
357 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 357 sc->sc_pci_ih = pci_intr_establish_xname(pa->pa_pc,
358 intrhandle, IPL_BIO, pci_intr, sc); 358 intrhandle, IPL_BIO, pci_intr, sc,
 359 device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
359 if (sc->sc_pci_ih != NULL) { 360 if (sc->sc_pci_ih != NULL) {
360 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, 361 aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
361 "using %s for native-PCI interrupt\n", 362 "using %s for native-PCI interrupt\n",
362 intrstr ? intrstr : "unknown interrupt"); 363 intrstr ? intrstr : "unknown interrupt");
363 } else { 364 } else {
364 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 365 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
365 "couldn't establish native-PCI interrupt"); 366 "couldn't establish native-PCI interrupt");
366 if (intrstr != NULL) 367 if (intrstr != NULL)
367 aprint_error(" at %s", intrstr); 368 aprint_error(" at %s", intrstr);
368 aprint_error("\n"); 369 aprint_error("\n");
369 goto bad; 370 goto bad;
370 } 371 }
371 } 372 }
372 cp->ih = sc->sc_pci_ih; 373 cp->ih = sc->sc_pci_ih;
373 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel), 374 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->ch_channel),
374 PCI_MAPREG_TYPE_IO, 0, 375 PCI_MAPREG_TYPE_IO, 0,
375 &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, &wdr->cmd_ios) != 0) { 376 &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, &wdr->cmd_ios) != 0) {
376 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 377 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
377 "couldn't map %s channel cmd regs\n", cp->name); 378 "couldn't map %s channel cmd regs\n", cp->name);
378 goto bad; 379 goto bad;
379 } 380 }
380 381
381 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel), 382 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->ch_channel),
382 PCI_MAPREG_TYPE_IO, 0, 383 PCI_MAPREG_TYPE_IO, 0,
383 &wdr->ctl_iot, &cp->ctl_baseioh, NULL, &cp->ctl_ios) != 0) { 384 &wdr->ctl_iot, &cp->ctl_baseioh, NULL, &cp->ctl_ios) != 0) {
384 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 385 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
385 "couldn't map %s channel ctl regs\n", cp->name); 386 "couldn't map %s channel ctl regs\n", cp->name);
386 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); 387 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios);
387 goto bad; 388 goto bad;
388 } 389 }
389 /* 390 /*
390 * In native mode, 4 bytes of I/O space are mapped for the control 391 * In native mode, 4 bytes of I/O space are mapped for the control
391 * register, the control register is at offset 2. Pass the generic 392 * register, the control register is at offset 2. Pass the generic
392 * code a handle for only one byte at the right offset. 393 * code a handle for only one byte at the right offset.
393 */ 394 */
394 if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1, 395 if (bus_space_subregion(wdr->ctl_iot, cp->ctl_baseioh, 2, 1,
395 &wdr->ctl_ioh) != 0) { 396 &wdr->ctl_ioh) != 0) {
396 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 397 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
397 "unable to subregion %s channel ctl regs\n", cp->name); 398 "unable to subregion %s channel ctl regs\n", cp->name);
398 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios); 399 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, wdr->cmd_ios);
399 bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, cp->ctl_ios); 400 bus_space_unmap(wdr->cmd_iot, cp->ctl_baseioh, cp->ctl_ios);
400 goto bad; 401 goto bad;
401 } 402 }
402 403
403 for (i = 0; i < WDC_NREG; i++) { 404 for (i = 0; i < WDC_NREG; i++) {
404 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, 405 if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
405 i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) { 406 i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
406 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 407 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
407 "couldn't subregion %s channel cmd regs\n", 408 "couldn't subregion %s channel cmd regs\n",
408 cp->name); 409 cp->name);
409 goto bad; 410 goto bad;
410 } 411 }
411 } 412 }
412 wdc_init_shadow_regs(wdc_cp); 413 wdc_init_shadow_regs(wdc_cp);
413 wdr->data32iot = wdr->cmd_iot; 414 wdr->data32iot = wdr->cmd_iot;
414 wdr->data32ioh = wdr->cmd_iohs[0]; 415 wdr->data32ioh = wdr->cmd_iohs[0];
415 return; 416 return;
416 417
417bad: 418bad:
418 cp->ata_channel.ch_flags |= ATACH_DISABLED; 419 cp->ata_channel.ch_flags |= ATACH_DISABLED;
419 return; 420 return;
420} 421}
421 422
#if NATA_DMA
/*
 * Probe and map the PCI IDE bus-master DMA registers and, on success,
 * wire the DMA callbacks into the wdc driver.  Afterwards sc->sc_dma_ok
 * is the single source of truth: non-zero only if the controller
 * exposes DMA registers, they could be mapped, and DMA was not forced
 * off by the kernel configuration.
 */
void
pciide_mapreg_dma(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	pcireg_t maptype;
	bus_addr_t addr;
	struct pciide_channel *pc;
	int reg, chan;
	bus_size_t size;

	/*
	 * Map DMA registers
	 *
	 * Note that sc_dma_ok is the right variable to test to see if
	 * DMA can be done. If the interface doesn't support DMA,
	 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
	 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
	 * non-zero if the interface supports DMA and the registers
	 * could be mapped.
	 *
	 * XXX Note that despite the fact that the Bus Master IDE specs
	 * XXX say that "The bus master IDE function uses 16 bytes of IO
	 * XXX space," some controllers (at least the United
	 * XXX Microelectronics UM8886BF) place it in memory space.
	 */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCIIDE_REG_BUS_MASTER_DMA);

	switch (maptype) {
	case PCI_MAPREG_TYPE_IO:
		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
		    &addr, NULL, NULL) == 0);
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(
			    ", but unused (couldn't query registers)");
			break;
		}
		/*
		 * Some chips can only address their DMA registers in the
		 * low 64k of I/O space (IDE_16BIT_IOSPACE quirk).
		 */
		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
		    && addr >= 0x10000) {
			sc->sc_dma_ok = 0;
			aprint_verbose(
			    ", but unused (registers at unsafe address "
			    "%#lx)", (unsigned long)addr);
			break;
		}
		/* FALLTHROUGH */

	case PCI_MAPREG_MEM_TYPE_32BIT:
		sc->sc_dma_ok = (pci_mapreg_map(pa,
		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_ios)
		    == 0);
		sc->sc_dmat = pa->pa_dmat;
		if (sc->sc_dma_ok == 0) {
			aprint_verbose(", but unused (couldn't map registers)");
		} else {
			/* Hook the generic wdc DMA entry points. */
			sc->sc_wdcdev.dma_arg = sc;
			sc->sc_wdcdev.dma_init = pciide_dma_init;
			sc->sc_wdcdev.dma_start = pciide_dma_start;
			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
		}

		/* Honor a "no DMA" override from the kernel config file. */
		if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
		    PCIIDE_OPTIONS_NODMA) {
			aprint_verbose(
			    ", but unused (forced off by config file)");
			sc->sc_dma_ok = 0;
		}
		break;

	default:
		sc->sc_dma_ok = 0;
		aprint_verbose(
		    ", but unsupported register maptype (0x%x)", maptype);
	}

	if (sc->sc_dma_ok == 0)
		return;

	/*
	 * Set up the default handles for the DMA registers.
	 * Just reserve 32 bits for each handle, unless space
	 * doesn't permit it.
	 */
	for (chan = 0; chan < PCIIDE_NUM_CHANNELS; chan++) {
		pc = &sc->pciide_channels[chan];
		for (reg = 0; reg < IDEDMA_NREGS; reg++) {
			size = 4;
			/* Clamp so a handle never crosses into the
			 * next channel's register window. */
			if (size > (IDEDMA_SCH_OFFSET - reg))
				size = IDEDMA_SCH_OFFSET - reg;
			if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_SCH_OFFSET * chan + reg, size,
			    &pc->dma_iohs[reg]) != 0) {
				sc->sc_dma_ok = 0;
				aprint_verbose(", but can't subregion offset %d "
				    "size %lu", reg, (u_long)size);
				return;
			}
		}
	}
}
#endif	/* NATA_DMA */
525 526
526int 527int
527pciide_compat_intr(void *arg) 528pciide_compat_intr(void *arg)
528{ 529{
529 struct pciide_channel *cp = arg; 530 struct pciide_channel *cp = arg;
530 531
531#ifdef DIAGNOSTIC 532#ifdef DIAGNOSTIC
532 /* should only be called for a compat channel */ 533 /* should only be called for a compat channel */
533 if (cp->compat == 0) 534 if (cp->compat == 0)
534 panic("pciide compat intr called for non-compat chan %p", cp); 535 panic("pciide compat intr called for non-compat chan %p", cp);
535#endif 536#endif
536 return (wdcintr(&cp->ata_channel)); 537 return (wdcintr(&cp->ata_channel));
537} 538}
538 539
539int 540int
540pciide_pci_intr(void *arg) 541pciide_pci_intr(void *arg)
541{ 542{
542 struct pciide_softc *sc = arg; 543 struct pciide_softc *sc = arg;
543 struct pciide_channel *cp; 544 struct pciide_channel *cp;
544 struct ata_channel *wdc_cp; 545 struct ata_channel *wdc_cp;
545 int i, rv, crv; 546 int i, rv, crv;
546 547
547 rv = 0; 548 rv = 0;
548 for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) { 549 for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
549 cp = &sc->pciide_channels[i]; 550 cp = &sc->pciide_channels[i];
550 wdc_cp = &cp->ata_channel; 551 wdc_cp = &cp->ata_channel;
551 552
552 /* If a compat channel skip. */ 553 /* If a compat channel skip. */
553 if (cp->compat) 554 if (cp->compat)
554 continue; 555 continue;
555 /* if this channel not waiting for intr, skip */ 556 /* if this channel not waiting for intr, skip */
556 if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0) 557 if ((wdc_cp->ch_flags & ATACH_IRQ_WAIT) == 0)
557 continue; 558 continue;
558 559
559 crv = wdcintr(wdc_cp); 560 crv = wdcintr(wdc_cp);
560 if (crv == 0) 561 if (crv == 0)
561 ; /* leave rv alone */ 562 ; /* leave rv alone */
562 else if (crv == 1) 563 else if (crv == 1)
563 rv = 1; /* claim the intr */ 564 rv = 1; /* claim the intr */
564 else if (rv == 0) /* crv should be -1 in this case */ 565 else if (rv == 0) /* crv should be -1 in this case */
565 rv = crv; /* if we've done no better, take it */ 566 rv = crv; /* if we've done no better, take it */
566 } 567 }
567 return (rv); 568 return (rv);
568} 569}
569 570
#if NATA_DMA
/*
 * Per-channel DMA setup.  For each drive present on the channel, either
 * allocate its DMA descriptor table, or - when DMA is unavailable,
 * unwanted, or table setup fails - strip the DMA/UDMA capability flags
 * so the generic code falls back to PIO for that drive.
 */
void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive, s;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_drive_datas *drvp;

	KASSERT(cp->ata_channel.ch_ndrives != 0);

	for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) {
		drvp = &cp->ata_channel.ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			/* splbio() around the flag update - presumably
			 * drive_flags is also touched from interrupt
			 * context; confirmed pattern elsewhere in wdc. */
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
			splx(s);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->ata_channel.ch_channel,
		    drive) != 0) {
			/* Abort DMA setup */
			s = splbio();
			drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
			splx(s);
			continue;
		}
	}
}
604 605
/*
 * Number of descriptor-table entries needed for a MAXPHYS-sized
 * transfer: one per worst-case segment (capped at PAGE_SIZE), plus one
 * spare - presumably for a transfer that is not segment-aligned.
 */
#define NIDEDMA_TABLES(sc) \
	(MAXPHYS/(min((sc)->sc_dma_maxsegsz, PAGE_SIZE)) + 1)
607 608
608int 609int
609pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive) 610pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
610{ 611{
611 int error; 612 int error;
612 const bus_size_t dma_table_size = 613 const bus_size_t dma_table_size =
613 sizeof(struct idedma_table) * NIDEDMA_TABLES(sc); 614 sizeof(struct idedma_table) * NIDEDMA_TABLES(sc);
614 struct pciide_dma_maps *dma_maps = 615 struct pciide_dma_maps *dma_maps =
615 &sc->pciide_channels[channel].dma_maps[drive]; 616 &sc->pciide_channels[channel].dma_maps[drive];
616 617
617 /* If table was already allocated, just return */ 618 /* If table was already allocated, just return */
618 if (dma_maps->dma_table) 619 if (dma_maps->dma_table)
619 return 0; 620 return 0;
620 621
621 /* Allocate memory for the DMA tables and map it */ 622 /* Allocate memory for the DMA tables and map it */
622 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 623 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
623 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &dma_maps->dmamap_table_seg, 624 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &dma_maps->dmamap_table_seg,
624 1, &dma_maps->dmamap_table_nseg, BUS_DMA_NOWAIT)) != 0) { 625 1, &dma_maps->dmamap_table_nseg, BUS_DMA_NOWAIT)) != 0) {
625 aprint_error(dmaerrfmt, 626 aprint_error(dmaerrfmt,
626 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, 627 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
627 "allocate", drive, error); 628 "allocate", drive, error);
628 return error; 629 return error;
629 } 630 }
630 if ((error = bus_dmamem_map(sc->sc_dmat, &dma_maps->dmamap_table_seg, 631 if ((error = bus_dmamem_map(sc->sc_dmat, &dma_maps->dmamap_table_seg,
631 dma_maps->dmamap_table_nseg, dma_table_size, 632 dma_maps->dmamap_table_nseg, dma_table_size,
632 (void **)&dma_maps->dma_table, 633 (void **)&dma_maps->dma_table,
633 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 634 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
634 aprint_error(dmaerrfmt, 635 aprint_error(dmaerrfmt,
635 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, 636 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
636 "map", drive, error); 637 "map", drive, error);
637 return error; 638 return error;
638 } 639 }
639 ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, " 640 ATADEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
640 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size, 641 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
641 (unsigned long)dma_maps->dmamap_table_seg.ds_addr), DEBUG_PROBE); 642 (unsigned long)dma_maps->dmamap_table_seg.ds_addr), DEBUG_PROBE);
642 /* Create and load table DMA map for this disk */ 643 /* Create and load table DMA map for this disk */
643 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 644 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
644 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 645 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
645 &dma_maps->dmamap_table)) != 0) { 646 &dma_maps->dmamap_table)) != 0) {
646 aprint_error(dmaerrfmt, 647 aprint_error(dmaerrfmt,
647 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, 648 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
648 "create", drive, error); 649 "create", drive, error);
649 return error; 650 return error;
650 } 651 }
651 if ((error = bus_dmamap_load(sc->sc_dmat, 652 if ((error = bus_dmamap_load(sc->sc_dmat,
652 dma_maps->dmamap_table, 653 dma_maps->dmamap_table,
653 dma_maps->dma_table, 654 dma_maps->dma_table,
654 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 655 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
655 aprint_error(dmaerrfmt, 656 aprint_error(dmaerrfmt,
656 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, 657 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
657 "load", drive, error); 658 "load", drive, error);
658 return error; 659 return error;
659 } 660 }
660 ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 661 ATADEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
661 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr), 662 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
662 DEBUG_PROBE); 663 DEBUG_PROBE);
663 /* Create a xfer DMA map for this drive */ 664 /* Create a xfer DMA map for this drive */
664 if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS, 665 if ((error = bus_dmamap_create(sc->sc_dmat, MAXPHYS,
665 NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 666 NIDEDMA_TABLES(sc), sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
666 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 667 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
667 &dma_maps->dmamap_xfer)) != 0) { 668 &dma_maps->dmamap_xfer)) != 0) {
668 aprint_error(dmaerrfmt, 669 aprint_error(dmaerrfmt,
669 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel, 670 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
670 "create xfer", drive, error); 671 "create xfer", drive, error);
671 return error; 672 return error;
672 } 673 }
673 return 0; 674 return 0;
674} 675}
675 676
/*
 * Release everything pciide_dma_table_setup() created for one drive.
 * Safe to call when no table was ever set up (dma_table == NULL).
 */
void
pciide_dma_table_teardown(struct pciide_softc *sc, int channel, int drive)
{
	struct pciide_channel *cp;
	struct pciide_dma_maps *dma_maps;

	cp = &sc->pciide_channels[channel];
	dma_maps = &cp->dma_maps[drive];

	if (dma_maps->dma_table == NULL)
		return;

	/* Tear down in reverse order of setup. */
	bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_xfer);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_table);
	bus_dmamap_destroy(sc->sc_dmat, dma_maps->dmamap_table);
	bus_dmamem_unmap(sc->sc_dmat, dma_maps->dma_table,
	    sizeof(struct idedma_table) * NIDEDMA_TABLES(sc));
	bus_dmamem_free(sc->sc_dmat, &dma_maps->dmamap_table_seg,
	    dma_maps->dmamap_table_nseg);

	/* Mark torn down so a later setup call starts from scratch. */
	dma_maps->dma_table = NULL;

	return;
}
700 701
/*
 * Load the xfer DMA map for one data buffer and fill this drive's
 * bus-master descriptor table from the resulting segment list.
 * flags carries WDC_DMA_READ for device-to-memory transfers.
 * Returns 0 on success or the bus_dmamap_load() error code.
 */
int
pciide_dma_dmamap_setup(struct pciide_softc *sc, int channel, int drive,
    void *databuf, size_t datalen, int flags)
{
	int error, seg;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		aprint_error(dmaerrfmt,
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    "load xfer", drive, error);
		return error;
	}

	/* Sync the data buffer before the device touches it. */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		/* Descriptor fields are little-endian (htole32). */
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		ATADEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	/* Flag the final descriptor as end-of-table. */
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	/* Flush the descriptor table itself before the device reads it. */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

#ifdef DIAGNOSTIC
	/* The table's physical address must obey IDEDMA_TBL_ALIGN. */
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_dmamap_setup: addr 0x%lx "
		    "not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif
	/* remember flags (direction) for pciide_dma_finish() */
	dma_maps->dma_flags = flags;

	return 0;
}
770 771
/*
 * dma_init hook: load the data buffer into the xfer map, then program
 * the bus-master engine (clear latched status, set descriptor-table
 * address, set transfer direction).  The engine is not started here;
 * pciide_dma_start() sets the START bit.  Returns 0 or the error from
 * pciide_dma_dmamap_setup().
 */
int
pciide_dma_init(void *v, int channel, int drive, void *databuf, size_t datalen,
    int flags)
{
	struct pciide_softc *sc = v;
	int error;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	if ((error = pciide_dma_dmamap_setup(sc, channel, drive,
	    databuf, datalen, flags)) != 0)
		return error;
	/* Maps are ready. Start DMA function */
	/* Clear status bits (write back the bits we read) */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_TBL], 0,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd);
	return 0;
}
795 796
796void 797void
797pciide_dma_start(void *v, int channel, int drive) 798pciide_dma_start(void *v, int channel, int drive)
798{ 799{
799 struct pciide_softc *sc = v; 800 struct pciide_softc *sc = v;
800 struct pciide_channel *cp = &sc->pciide_channels[channel]; 801 struct pciide_channel *cp = &sc->pciide_channels[channel];
801 802
802 ATADEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 803 ATADEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
803 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0, 804 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
804 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0) 805 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
805 | IDEDMA_CMD_START); 806 | IDEDMA_CMD_START);
806} 807}
807 808
/*
 * dma_finish hook: stop the bus-master engine and collect status.
 * With force == WDC_DMAEND_END and no interrupt pending, the engine is
 * left running and WDC_DMAST_NOIRQ is returned; with
 * WDC_DMAEND_ABRT_QUIET error reporting is suppressed (deliberate
 * abort).  Returns a bitmask of WDC_DMAST_* conditions (0 = clean).
 */
int
pciide_dma_finish(void *v, int channel, int drive, int force)
{
	struct pciide_softc *sc = v;
	u_int8_t status;
	int error = 0;
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive];

	status = bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0);
	ATADEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
	    DEBUG_XFERS);

	if (force == WDC_DMAEND_END && (status & IDEDMA_CTL_INTR) == 0)
		return WDC_DMAST_NOIRQ;

	/* stop DMA channel */
	bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0,
	    bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CMD], 0)
	    & ~IDEDMA_CMD_START);

	/* Unload the map of the data buffer */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (dma_maps->dma_flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);

	/* Controller reported a DMA error. */
	if ((status & IDEDMA_CTL_ERR) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), channel,
		    drive, status);
		error |= WDC_DMAST_ERR;
	}

	/* Engine finished without raising its interrupt bit. */
	if ((status & IDEDMA_CTL_INTR) == 0 && force != WDC_DMAEND_ABRT_QUIET) {
		aprint_error("%s:%d:%d: bus-master DMA error: missing "
		    "interrupt, status=0x%x\n",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
		    channel, drive, status);
		error |= WDC_DMAST_NOIRQ;
	}

	/* Engine still active: fewer bytes moved than programmed. */
	if ((status & IDEDMA_CTL_ACT) != 0 && force != WDC_DMAEND_ABRT_QUIET) {
		/* data underrun, may be a valid condition for ATAPI */
		error |= WDC_DMAST_UNDER;
	}
	return error;
}
857 858
858void 859void
859pciide_irqack(struct ata_channel *chp) 860pciide_irqack(struct ata_channel *chp)
860{ 861{
861 struct pciide_channel *cp = CHAN_TO_PCHAN(chp); 862 struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
862 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp); 863 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
863 864
864 /* clear status bits in IDE DMA registers */ 865 /* clear status bits in IDE DMA registers */
865 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, 866 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
866 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0)); 867 bus_space_read_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0));
867} 868}
868#endif /* NATA_DMA */ 869#endif /* NATA_DMA */
869 870
870/* some common code used by several chip_map */ 871/* some common code used by several chip_map */
871int 872int
872pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 873pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface)
873{ 874{
874 struct pciide_channel *cp = &sc->pciide_channels[channel]; 875 struct pciide_channel *cp = &sc->pciide_channels[channel];
875 sc->wdc_chanarray[channel] = &cp->ata_channel; 876 sc->wdc_chanarray[channel] = &cp->ata_channel;
876 cp->name = PCIIDE_CHANNEL_NAME(channel); 877 cp->name = PCIIDE_CHANNEL_NAME(channel);
877 cp->ata_channel.ch_channel = channel; 878 cp->ata_channel.ch_channel = channel;
878 cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac; 879 cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
879 cp->ata_channel.ch_queue = 880 cp->ata_channel.ch_queue =
880 malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT|M_ZERO); 881 malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT|M_ZERO);
881 if (cp->ata_channel.ch_queue == NULL) { 882 if (cp->ata_channel.ch_queue == NULL) {
882 aprint_error("%s %s channel: " 883 aprint_error("%s %s channel: "
883 "can't allocate memory for command queue", 884 "can't allocate memory for command queue",
884 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name); 885 device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
885 return 0; 886 return 0;
886 } 887 }
887 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 888 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
888 "%s channel %s to %s mode\n", cp->name, 889 "%s channel %s to %s mode\n", cp->name,
889 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 890 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
890 "configured" : "wired", 891 "configured" : "wired",
891 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 892 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
892 "native-PCI" : "compatibility"); 893 "native-PCI" : "compatibility");
893 return 1; 894 return 1;
894} 895}
895 896
896/* some common code used by several chip channel_map */ 897/* some common code used by several chip channel_map */
897void 898void
898pciide_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp, 899pciide_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp,
899 pcireg_t interface, int (*pci_intr)(void *)) 900 pcireg_t interface, int (*pci_intr)(void *))
900{ 901{
901 struct ata_channel *wdc_cp = &cp->ata_channel; 902 struct ata_channel *wdc_cp = &cp->ata_channel;
902 903
903 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) 904 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel))
904 pciide_mapregs_native(pa, cp, pci_intr); 905 pciide_mapregs_native(pa, cp, pci_intr);
905 else { 906 else {
906 pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel); 907 pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel);
907 if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0) 908 if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
908 pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel); 909 pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel);
909 } 910 }
910 wdcattach(wdc_cp); 911 wdcattach(wdc_cp);
911} 912}
912 913
913/* 914/*
914 * generic code to map the compat intr. 915 * generic code to map the compat intr.
915 */ 916 */
916void 917void
917pciide_map_compat_intr(const struct pci_attach_args *pa, 918pciide_map_compat_intr(const struct pci_attach_args *pa,
918 struct pciide_channel *cp, int compatchan) 919 struct pciide_channel *cp, int compatchan)
919{ 920{
920 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 921 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
921 922
922#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 923#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
923 cp->ih = 924 cp->ih =
924 pciide_machdep_compat_intr_establish(sc->sc_wdcdev.sc_atac.atac_dev, 925 pciide_machdep_compat_intr_establish(sc->sc_wdcdev.sc_atac.atac_dev,
925 pa, compatchan, pciide_compat_intr, cp); 926 pa, compatchan, pciide_compat_intr, cp);
926 if (cp->ih == NULL) { 927 if (cp->ih == NULL) {
927#endif 928#endif
928 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 929 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
929 "no compatibility interrupt for use by %s " 930 "no compatibility interrupt for use by %s "
930 "channel\n", cp->name); 931 "channel\n", cp->name);
931 cp->ata_channel.ch_flags |= ATACH_DISABLED; 932 cp->ata_channel.ch_flags |= ATACH_DISABLED;
932#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH 933#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
933 } 934 }
934#endif 935#endif
935} 936}
936 937
937void 938void
938pciide_unmap_compat_intr(pci_chipset_tag_t pc, struct pciide_channel *cp, 939pciide_unmap_compat_intr(pci_chipset_tag_t pc, struct pciide_channel *cp,
939 int compatchan) 940 int compatchan)
940{ 941{
941#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH 942#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_DISESTABLISH
942 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel); 943 struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
943 944
944 pciide_machdep_compat_intr_disestablish(sc->sc_wdcdev.sc_atac.atac_dev, 945 pciide_machdep_compat_intr_disestablish(sc->sc_wdcdev.sc_atac.atac_dev,
945 sc->sc_pc, compatchan, cp->ih); 946 sc->sc_pc, compatchan, cp->ih);
946#endif 947#endif
947} 948}
948 949
949void 950void
950default_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa) 951default_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
951{ 952{
952 struct pciide_channel *cp; 953 struct pciide_channel *cp;
953 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 954 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
954 pcireg_t csr; 955 pcireg_t csr;
955 int channel; 956 int channel;
956#if NATA_DMA 957#if NATA_DMA
957 int drive; 958 int drive;
958 u_int8_t idedma_ctl; 959 u_int8_t idedma_ctl;
959#endif 960#endif
960 const char *failreason; 961 const char *failreason;
961 struct wdc_regs *wdr; 962 struct wdc_regs *wdr;
962 963
963 if (pciide_chipen(sc, pa) == 0) 964 if (pciide_chipen(sc, pa) == 0)
964 return; 965 return;
965 966
966 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 967 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
967#if NATA_DMA 968#if NATA_DMA
968 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 969 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
969 "bus-master DMA support present"); 970 "bus-master DMA support present");
970 if (sc->sc_pp == &default_product_desc && 971 if (sc->sc_pp == &default_product_desc &&
971 (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags & 972 (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
972 PCIIDE_OPTIONS_DMA) == 0) { 973 PCIIDE_OPTIONS_DMA) == 0) {
973 aprint_verbose(", but unused (no driver support)"); 974 aprint_verbose(", but unused (no driver support)");
974 sc->sc_dma_ok = 0; 975 sc->sc_dma_ok = 0;
975 } else { 976 } else {
976 pciide_mapreg_dma(sc, pa); 977 pciide_mapreg_dma(sc, pa);
977 if (sc->sc_dma_ok != 0) 978 if (sc->sc_dma_ok != 0)
978 aprint_verbose(", used without full driver " 979 aprint_verbose(", used without full driver "
979 "support"); 980 "support");
980 } 981 }
981#else 982#else
982 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 983 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
983 "bus-master DMA support present, but unused (no driver " 984 "bus-master DMA support present, but unused (no driver "
984 "support)"); 985 "support)");
985#endif /* NATA_DMA */ 986#endif /* NATA_DMA */
986 } else { 987 } else {
987 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, 988 aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
988 "hardware does not support DMA"); 989 "hardware does not support DMA");
989#if NATA_DMA 990#if NATA_DMA
990 sc->sc_dma_ok = 0; 991 sc->sc_dma_ok = 0;
991#endif 992#endif
992 } 993 }
993 aprint_verbose("\n"); 994 aprint_verbose("\n");
994#if NATA_DMA 995#if NATA_DMA
995 if (sc->sc_dma_ok) { 996 if (sc->sc_dma_ok) {
996 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA; 997 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
997 sc->sc_wdcdev.irqack = pciide_irqack; 998 sc->sc_wdcdev.irqack = pciide_irqack;
998 } 999 }
999#endif 1000#endif
1000 sc->sc_wdcdev.sc_atac.atac_pio_cap = 0; 1001 sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
1001#if NATA_DMA 1002#if NATA_DMA
1002 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0; 1003 sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
1003#endif 1004#endif
1004 1005
1005 sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray; 1006 sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
1006 sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS; 1007 sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
1007 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16; 1008 sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
1008 sc->sc_wdcdev.wdc_maxdrives = 2; 1009 sc->sc_wdcdev.wdc_maxdrives = 2;
1009 1010
1010 wdc_allocate_regs(&sc->sc_wdcdev); 1011 wdc_allocate_regs(&sc->sc_wdcdev);
1011 1012
1012 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 1013 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
1013 channel++) { 1014 channel++) {
1014 cp = &sc->pciide_channels[channel]; 1015 cp = &sc->pciide_channels[channel];
1015 if (pciide_chansetup(sc, channel, interface) == 0) 1016 if (pciide_chansetup(sc, channel, interface) == 0)
1016 continue; 1017 continue;
1017 wdr = CHAN_TO_WDC_REGS(&cp->ata_channel); 1018 wdr = CHAN_TO_WDC_REGS(&cp->ata_channel);
1018 if (interface & PCIIDE_INTERFACE_PCI(channel)) 1019 if (interface & PCIIDE_INTERFACE_PCI(channel))
1019 pciide_mapregs_native(pa, cp, pciide_pci_intr); 1020 pciide_mapregs_native(pa, cp, pciide_pci_intr);
1020 else 1021 else
1021 pciide_mapregs_compat(pa, cp, 1022 pciide_mapregs_compat(pa, cp,
1022 cp->ata_channel.ch_channel); 1023 cp->ata_channel.ch_channel);
1023 if (cp->ata_channel.ch_flags & ATACH_DISABLED) 1024 if (cp->ata_channel.ch_flags & ATACH_DISABLED)
1024 continue; 1025 continue;
1025 /* 1026 /*
1026 * Check to see if something appears to be there. 1027 * Check to see if something appears to be there.
1027 */ 1028 */
1028 failreason = NULL; 1029 failreason = NULL;
1029 /* 1030 /*
1030 * In native mode, always enable the controller. It's 1031 * In native mode, always enable the controller. It's
1031 * not possible to have an ISA board using the same address 1032 * not possible to have an ISA board using the same address
1032 * anyway. 1033 * anyway.
1033 */ 1034 */
1034 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1035 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1035 wdcattach(&cp->ata_channel); 1036 wdcattach(&cp->ata_channel);
1036 continue; 1037 continue;
1037 } 1038 }
1038 if (!wdcprobe(&cp->ata_channel)) { 1039 if (!wdcprobe(&cp->ata_channel)) {
1039 failreason = "not responding; disabled or no drives?"; 1040 failreason = "not responding; disabled or no drives?";
1040 goto next; 1041 goto next;
1041 } 1042 }
1042 /* 1043 /*
1043 * Now, make sure it's actually attributable to this PCI IDE 1044 * Now, make sure it's actually attributable to this PCI IDE
1044 * channel by trying to access the channel again while the 1045 * channel by trying to access the channel again while the
1045 * PCI IDE controller's I/O space is disabled. (If the 1046 * PCI IDE controller's I/O space is disabled. (If the
1046 * channel no longer appears to be there, it belongs to 1047 * channel no longer appears to be there, it belongs to
1047 * this controller.) YUCK! 1048 * this controller.) YUCK!
1048 */ 1049 */
1049 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1050 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1050 PCI_COMMAND_STATUS_REG); 1051 PCI_COMMAND_STATUS_REG);
1051 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1052 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1052 csr & ~PCI_COMMAND_IO_ENABLE); 1053 csr & ~PCI_COMMAND_IO_ENABLE);
1053 if (wdcprobe(&cp->ata_channel)) 1054 if (wdcprobe(&cp->ata_channel))
1054 failreason = "other hardware responding at addresses"; 1055 failreason = "other hardware responding at addresses";
1055 pci_conf_write(sc->sc_pc, sc->sc_tag, 1056 pci_conf_write(sc->sc_pc, sc->sc_tag,
1056 PCI_COMMAND_STATUS_REG, csr); 1057 PCI_COMMAND_STATUS_REG, csr);
1057next: 1058next:
1058 if (failreason) { 1059 if (failreason) {
1059 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, 1060 aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
1060 "%s channel ignored (%s)\n", cp->name, failreason); 1061 "%s channel ignored (%s)\n", cp->name, failreason);
1061 cp->ata_channel.ch_flags |= ATACH_DISABLED; 1062 cp->ata_channel.ch_flags |= ATACH_DISABLED;
1062 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 1063 bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
1063 wdr->cmd_ios); 1064 wdr->cmd_ios);
1064 bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh, 1065 bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh,
1065 wdr->ctl_ios); 1066 wdr->ctl_ios);
1066 } else { 1067 } else {
1067 pciide_map_compat_intr(pa, cp, 1068 pciide_map_compat_intr(pa, cp,
1068 cp->ata_channel.ch_channel); 1069 cp->ata_channel.ch_channel);
1069 wdcattach(&cp->ata_channel); 1070 wdcattach(&cp->ata_channel);
1070 } 1071 }
1071 } 1072 }
1072 1073
1073#if NATA_DMA 1074#if NATA_DMA
1074 if (sc->sc_dma_ok == 0) 1075 if (sc->sc_dma_ok == 0)
1075 return; 1076 return;
1076 1077
1077 /* Allocate DMA maps */ 1078 /* Allocate DMA maps */
1078 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; 1079 for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
1079 channel++) { 1080 channel++) {
1080 idedma_ctl = 0; 1081 idedma_ctl = 0;
1081 cp = &sc->pciide_channels[channel]; 1082 cp = &sc->pciide_channels[channel];
1082 for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) { 1083 for (drive = 0; drive < sc->sc_wdcdev.wdc_maxdrives; drive++) {
1083 /* 1084 /*
1084 * we have not probed the drives yet, allocate 1085 * we have not probed the drives yet, allocate
1085 * ressources for all of them. 1086 * ressources for all of them.
1086 */ 1087 */
1087 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1088 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1088 /* Abort DMA setup */ 1089 /* Abort DMA setup */
1089 aprint_error( 1090 aprint_error(
1090 "%s:%d:%d: can't allocate DMA maps, " 1091 "%s:%d:%d: can't allocate DMA maps, "
1091 "using PIO transfers\n", 1092 "using PIO transfers\n",
1092 device_xname( 1093 device_xname(
1093 sc->sc_wdcdev.sc_atac.atac_dev), 1094 sc->sc_wdcdev.sc_atac.atac_dev),
1094 channel, drive); 1095 channel, drive);
1095 sc->sc_dma_ok = 0; 1096 sc->sc_dma_ok = 0;
1096 sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA; 1097 sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
1097 sc->sc_wdcdev.irqack = NULL; 1098 sc->sc_wdcdev.irqack = NULL;
1098 break; 1099 break;
1099 } 1100 }
1100 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1101 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1101 } 1102 }
1102 if (idedma_ctl != 0) { 1103 if (idedma_ctl != 0) {
1103 /* Add software bits in status register */ 1104 /* Add software bits in status register */
1104 bus_space_write_1(sc->sc_dma_iot, 1105 bus_space_write_1(sc->sc_dma_iot,
1105 cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl); 1106 cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
1106 } 1107 }
1107 } 1108 }
1108#endif /* NATA_DMA */ 1109#endif /* NATA_DMA */
1109} 1110}
1110 1111
1111void 1112void
1112sata_setup_channel(struct ata_channel *chp) 1113sata_setup_channel(struct ata_channel *chp)
1113{ 1114{
1114#if NATA_DMA 1115#if NATA_DMA
1115 struct ata_drive_datas *drvp; 1116 struct ata_drive_datas *drvp;
1116 int drive; 1117 int drive;
1117#if NATA_UDMA 1118#if NATA_UDMA
1118 int s; 1119 int s;
1119#endif 1120#endif
1120 u_int32_t idedma_ctl; 1121 u_int32_t idedma_ctl;
1121 struct pciide_channel *cp = CHAN_TO_PCHAN(chp); 1122 struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
1122 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp); 1123 struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
1123 1124
1124 /* setup DMA if needed */ 1125 /* setup DMA if needed */
1125 pciide_channel_dma_setup(cp); 1126 pciide_channel_dma_setup(cp);
1126 1127
1127 idedma_ctl = 0; 1128 idedma_ctl = 0;
1128 1129
1129 KASSERT(cp->ata_channel.ch_ndrives != 0); 1130 KASSERT(cp->ata_channel.ch_ndrives != 0);
1130 for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) { 1131 for (drive = 0; drive < cp->ata_channel.ch_ndrives; drive++) {
1131 drvp = &chp->ch_drive[drive]; 1132 drvp = &chp->ch_drive[drive];
1132 /* If no drive, skip */ 1133 /* If no drive, skip */
1133 if (drvp->drive_type == ATA_DRIVET_NONE) 1134 if (drvp->drive_type == ATA_DRIVET_NONE)
1134 continue; 1135 continue;
1135#if NATA_UDMA 1136#if NATA_UDMA
1136 if (drvp->drive_flags & ATA_DRIVE_UDMA) { 1137 if (drvp->drive_flags & ATA_DRIVE_UDMA) {
1137 /* use Ultra/DMA */ 1138 /* use Ultra/DMA */
1138 s = splbio(); 1139 s = splbio();
1139 drvp->drive_flags &= ~ATA_DRIVE_DMA; 1140 drvp->drive_flags &= ~ATA_DRIVE_DMA;
1140 splx(s); 1141 splx(s);
1141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1142 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1142 } else 1143 } else
1143#endif /* NATA_UDMA */ 1144#endif /* NATA_UDMA */
1144 if (drvp->drive_flags & ATA_DRIVE_DMA) { 1145 if (drvp->drive_flags & ATA_DRIVE_DMA) {
1145 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1146 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1146 } 1147 }
1147 } 1148 }
1148 1149
1149 /* 1150 /*
1150 * Nothing to do to setup modes; it is meaningless in S-ATA 1151 * Nothing to do to setup modes; it is meaningless in S-ATA
1151 * (but many S-ATA drives still want to get the SET_FEATURE 1152 * (but many S-ATA drives still want to get the SET_FEATURE
1152 * command). 1153 * command).
1153 */ 1154 */
1154 if (idedma_ctl != 0) { 1155 if (idedma_ctl != 0) {
1155 /* Add software bits in status register */ 1156 /* Add software bits in status register */
1156 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0, 1157 bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
1157 idedma_ctl); 1158 idedma_ctl);
1158 } 1159 }
1159#endif /* NATA_DMA */ 1160#endif /* NATA_DMA */
1160} 1161}

cvs diff -r1.50 -r1.51 src/sys/dev/pci/piixpm.c (switch to unified diff)

--- src/sys/dev/pci/piixpm.c 2016/07/18 21:09:05 1.50
+++ src/sys/dev/pci/piixpm.c 2016/10/13 17:11:09 1.51
@@ -1,649 +1,649 @@ @@ -1,649 +1,649 @@
1/* $NetBSD: piixpm.c,v 1.50 2016/07/18 21:09:05 pgoyette Exp $ */ 1/* $NetBSD: piixpm.c,v 1.51 2016/10/13 17:11:09 jdolecek Exp $ */
2/* $OpenBSD: piixpm.c,v 1.20 2006/02/27 08:25:02 grange Exp $ */ 2/* $OpenBSD: piixpm.c,v 1.20 2006/02/27 08:25:02 grange Exp $ */
3 3
4/* 4/*
5 * Copyright (c) 2005, 2006 Alexander Yurchenko <grange@openbsd.org> 5 * Copyright (c) 2005, 2006 Alexander Yurchenko <grange@openbsd.org>
6 * 6 *
7 * Permission to use, copy, modify, and distribute this software for any 7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above 8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies. 9 * copyright notice and this permission notice appear in all copies.
10 * 10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */ 18 */
19 19
20/* 20/*
21 * Intel PIIX and compatible Power Management controller driver. 21 * Intel PIIX and compatible Power Management controller driver.
22 */ 22 */
23 23
24#include <sys/cdefs.h> 24#include <sys/cdefs.h>
25__KERNEL_RCSID(0, "$NetBSD: piixpm.c,v 1.50 2016/07/18 21:09:05 pgoyette Exp $"); 25__KERNEL_RCSID(0, "$NetBSD: piixpm.c,v 1.51 2016/10/13 17:11:09 jdolecek Exp $");
26 26
27#include <sys/param.h> 27#include <sys/param.h>
28#include <sys/systm.h> 28#include <sys/systm.h>
29#include <sys/device.h> 29#include <sys/device.h>
30#include <sys/kernel.h> 30#include <sys/kernel.h>
31#include <sys/mutex.h> 31#include <sys/mutex.h>
32#include <sys/proc.h> 32#include <sys/proc.h>
33 33
34#include <sys/bus.h> 34#include <sys/bus.h>
35 35
36#include <dev/pci/pcidevs.h> 36#include <dev/pci/pcidevs.h>
37#include <dev/pci/pcireg.h> 37#include <dev/pci/pcireg.h>
38#include <dev/pci/pcivar.h> 38#include <dev/pci/pcivar.h>
39 39
40#include <dev/pci/piixpmreg.h> 40#include <dev/pci/piixpmreg.h>
41 41
42#include <dev/i2c/i2cvar.h> 42#include <dev/i2c/i2cvar.h>
43 43
44#include <dev/ic/acpipmtimer.h> 44#include <dev/ic/acpipmtimer.h>
45 45
46#ifdef PIIXPM_DEBUG 46#ifdef PIIXPM_DEBUG
47#define DPRINTF(x) printf x 47#define DPRINTF(x) printf x
48#else 48#else
49#define DPRINTF(x) 49#define DPRINTF(x)
50#endif 50#endif
51 51
52#define PIIXPM_IS_CSB5(id) \ 52#define PIIXPM_IS_CSB5(id) \
53 (PCI_VENDOR((id)) == PCI_VENDOR_SERVERWORKS && \ 53 (PCI_VENDOR((id)) == PCI_VENDOR_SERVERWORKS && \
54 PCI_PRODUCT((id)) == PCI_PRODUCT_SERVERWORKS_CSB5) 54 PCI_PRODUCT((id)) == PCI_PRODUCT_SERVERWORKS_CSB5)
55#define PIIXPM_DELAY 200 55#define PIIXPM_DELAY 200
56#define PIIXPM_TIMEOUT 1 56#define PIIXPM_TIMEOUT 1
57 57
58struct piixpm_smbus { 58struct piixpm_smbus {
59 int sda; 59 int sda;
60 struct piixpm_softc *softc; 60 struct piixpm_softc *softc;
61}; 61};
62 62
63struct piixpm_softc { 63struct piixpm_softc {
64 device_t sc_dev; 64 device_t sc_dev;
65 65
66 bus_space_tag_t sc_iot; 66 bus_space_tag_t sc_iot;
67#define sc_pm_iot sc_iot 67#define sc_pm_iot sc_iot
68#define sc_smb_iot sc_iot 68#define sc_smb_iot sc_iot
69 bus_space_handle_t sc_pm_ioh; 69 bus_space_handle_t sc_pm_ioh;
70 bus_space_handle_t sc_sb800_ioh; 70 bus_space_handle_t sc_sb800_ioh;
71 bus_space_handle_t sc_smb_ioh; 71 bus_space_handle_t sc_smb_ioh;
72 void * sc_smb_ih; 72 void * sc_smb_ih;
73 int sc_poll; 73 int sc_poll;
74 74
75 pci_chipset_tag_t sc_pc; 75 pci_chipset_tag_t sc_pc;
76 pcitag_t sc_pcitag; 76 pcitag_t sc_pcitag;
77 pcireg_t sc_id; 77 pcireg_t sc_id;
78 78
79 int sc_numbusses; 79 int sc_numbusses;
80 device_t sc_i2c_device[4]; 80 device_t sc_i2c_device[4];
81 struct piixpm_smbus sc_busses[4]; 81 struct piixpm_smbus sc_busses[4];
82 struct i2c_controller sc_i2c_tags[4]; 82 struct i2c_controller sc_i2c_tags[4];
83 83
84 kmutex_t sc_i2c_mutex; 84 kmutex_t sc_i2c_mutex;
85 struct { 85 struct {
86 i2c_op_t op; 86 i2c_op_t op;
87 void * buf; 87 void * buf;
88 size_t len; 88 size_t len;
89 int flags; 89 int flags;
90 volatile int error; 90 volatile int error;
91 } sc_i2c_xfer; 91 } sc_i2c_xfer;
92 92
93 pcireg_t sc_devact[2]; 93 pcireg_t sc_devact[2];
94}; 94};
95 95
96static int piixpm_match(device_t, cfdata_t, void *); 96static int piixpm_match(device_t, cfdata_t, void *);
97static void piixpm_attach(device_t, device_t, void *); 97static void piixpm_attach(device_t, device_t, void *);
98static int piixpm_rescan(device_t, const char *, const int *); 98static int piixpm_rescan(device_t, const char *, const int *);
99static void piixpm_chdet(device_t, device_t); 99static void piixpm_chdet(device_t, device_t);
100 100
101static bool piixpm_suspend(device_t, const pmf_qual_t *); 101static bool piixpm_suspend(device_t, const pmf_qual_t *);
102static bool piixpm_resume(device_t, const pmf_qual_t *); 102static bool piixpm_resume(device_t, const pmf_qual_t *);
103 103
104static int piixpm_sb800_init(struct piixpm_softc *); 104static int piixpm_sb800_init(struct piixpm_softc *);
105static void piixpm_csb5_reset(void *); 105static void piixpm_csb5_reset(void *);
106static int piixpm_i2c_acquire_bus(void *, int); 106static int piixpm_i2c_acquire_bus(void *, int);
107static void piixpm_i2c_release_bus(void *, int); 107static void piixpm_i2c_release_bus(void *, int);
108static int piixpm_i2c_exec(void *, i2c_op_t, i2c_addr_t, const void *, 108static int piixpm_i2c_exec(void *, i2c_op_t, i2c_addr_t, const void *,
109 size_t, void *, size_t, int); 109 size_t, void *, size_t, int);
110 110
111static int piixpm_intr(void *); 111static int piixpm_intr(void *);
112 112
113CFATTACH_DECL3_NEW(piixpm, sizeof(struct piixpm_softc), 113CFATTACH_DECL3_NEW(piixpm, sizeof(struct piixpm_softc),
114 piixpm_match, piixpm_attach, NULL, NULL, piixpm_rescan, piixpm_chdet, 0); 114 piixpm_match, piixpm_attach, NULL, NULL, piixpm_rescan, piixpm_chdet, 0);
115 115
116static int 116static int
117piixpm_match(device_t parent, cfdata_t match, void *aux) 117piixpm_match(device_t parent, cfdata_t match, void *aux)
118{ 118{
119 struct pci_attach_args *pa; 119 struct pci_attach_args *pa;
120 120
121 pa = (struct pci_attach_args *)aux; 121 pa = (struct pci_attach_args *)aux;
122 switch (PCI_VENDOR(pa->pa_id)) { 122 switch (PCI_VENDOR(pa->pa_id)) {
123 case PCI_VENDOR_INTEL: 123 case PCI_VENDOR_INTEL:
124 switch (PCI_PRODUCT(pa->pa_id)) { 124 switch (PCI_PRODUCT(pa->pa_id)) {
125 case PCI_PRODUCT_INTEL_82371AB_PMC: 125 case PCI_PRODUCT_INTEL_82371AB_PMC:
126 case PCI_PRODUCT_INTEL_82440MX_PMC: 126 case PCI_PRODUCT_INTEL_82440MX_PMC:
127 return 1; 127 return 1;
128 } 128 }
129 break; 129 break;
130 case PCI_VENDOR_ATI: 130 case PCI_VENDOR_ATI:
131 switch (PCI_PRODUCT(pa->pa_id)) { 131 switch (PCI_PRODUCT(pa->pa_id)) {
132 case PCI_PRODUCT_ATI_SB200_SMB: 132 case PCI_PRODUCT_ATI_SB200_SMB:
133 case PCI_PRODUCT_ATI_SB300_SMB: 133 case PCI_PRODUCT_ATI_SB300_SMB:
134 case PCI_PRODUCT_ATI_SB400_SMB: 134 case PCI_PRODUCT_ATI_SB400_SMB:
135 case PCI_PRODUCT_ATI_SB600_SMB: /* matches SB600/SB700/SB800 */ 135 case PCI_PRODUCT_ATI_SB600_SMB: /* matches SB600/SB700/SB800 */
136 return 1; 136 return 1;
137 } 137 }
138 break; 138 break;
139 case PCI_VENDOR_SERVERWORKS: 139 case PCI_VENDOR_SERVERWORKS:
140 switch (PCI_PRODUCT(pa->pa_id)) { 140 switch (PCI_PRODUCT(pa->pa_id)) {
141 case PCI_PRODUCT_SERVERWORKS_OSB4: 141 case PCI_PRODUCT_SERVERWORKS_OSB4:
142 case PCI_PRODUCT_SERVERWORKS_CSB5: 142 case PCI_PRODUCT_SERVERWORKS_CSB5:
143 case PCI_PRODUCT_SERVERWORKS_CSB6: 143 case PCI_PRODUCT_SERVERWORKS_CSB6:
144 case PCI_PRODUCT_SERVERWORKS_HT1000SB: 144 case PCI_PRODUCT_SERVERWORKS_HT1000SB:
145 return 1; 145 return 1;
146 } 146 }
147 break; 147 break;
148 case PCI_VENDOR_AMD: 148 case PCI_VENDOR_AMD:
149 switch (PCI_PRODUCT(pa->pa_id)) { 149 switch (PCI_PRODUCT(pa->pa_id)) {
150 case PCI_PRODUCT_AMD_HUDSON_SMB: 150 case PCI_PRODUCT_AMD_HUDSON_SMB:
151 return 1; 151 return 1;
152 } 152 }
153 break; 153 break;
154 } 154 }
155 155
156 return 0; 156 return 0;
157} 157}
158 158
159static void 159static void
160piixpm_attach(device_t parent, device_t self, void *aux) 160piixpm_attach(device_t parent, device_t self, void *aux)
161{ 161{
162 struct piixpm_softc *sc = device_private(self); 162 struct piixpm_softc *sc = device_private(self);
163 struct pci_attach_args *pa = aux; 163 struct pci_attach_args *pa = aux;
164 pcireg_t base, conf; 164 pcireg_t base, conf;
165 pcireg_t pmmisc; 165 pcireg_t pmmisc;
166 pci_intr_handle_t ih; 166 pci_intr_handle_t ih;
167 const char *intrstr = NULL; 167 const char *intrstr = NULL;
168 int i, flags; 168 int i, flags;
169 char intrbuf[PCI_INTRSTR_LEN]; 169 char intrbuf[PCI_INTRSTR_LEN];
170 170
171 sc->sc_dev = self; 171 sc->sc_dev = self;
172 sc->sc_iot = pa->pa_iot; 172 sc->sc_iot = pa->pa_iot;
173 sc->sc_id = pa->pa_id; 173 sc->sc_id = pa->pa_id;
174 sc->sc_pc = pa->pa_pc; 174 sc->sc_pc = pa->pa_pc;
175 sc->sc_pcitag = pa->pa_tag; 175 sc->sc_pcitag = pa->pa_tag;
176 sc->sc_numbusses = 1; 176 sc->sc_numbusses = 1;
177 177
178 pci_aprint_devinfo(pa, NULL); 178 pci_aprint_devinfo(pa, NULL);
179 179
180 if (!pmf_device_register(self, piixpm_suspend, piixpm_resume)) 180 if (!pmf_device_register(self, piixpm_suspend, piixpm_resume))
181 aprint_error_dev(self, "couldn't establish power handler\n"); 181 aprint_error_dev(self, "couldn't establish power handler\n");
182 182
183 /* Read configuration */ 183 /* Read configuration */
184 conf = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_HOSTC); 184 conf = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_HOSTC);
185 DPRINTF(("%s: conf 0x%x\n", device_xname(self), conf)); 185 DPRINTF(("%s: conf 0x%x\n", device_xname(self), conf));
186 186
187 if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) || 187 if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) ||
188 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82371AB_PMC)) 188 (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82371AB_PMC))
189 goto nopowermanagement; 189 goto nopowermanagement;
190 190
191 /* check whether I/O access to PM regs is enabled */ 191 /* check whether I/O access to PM regs is enabled */
192 pmmisc = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PMREGMISC); 192 pmmisc = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PMREGMISC);
193 if (!(pmmisc & 1)) 193 if (!(pmmisc & 1))
194 goto nopowermanagement; 194 goto nopowermanagement;
195 195
196 /* Map I/O space */ 196 /* Map I/O space */
197 base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PM_BASE); 197 base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PM_BASE);
198 if (bus_space_map(sc->sc_pm_iot, PCI_MAPREG_IO_ADDR(base), 198 if (bus_space_map(sc->sc_pm_iot, PCI_MAPREG_IO_ADDR(base),
199 PIIX_PM_SIZE, 0, &sc->sc_pm_ioh)) { 199 PIIX_PM_SIZE, 0, &sc->sc_pm_ioh)) {
200 aprint_error_dev(self, 200 aprint_error_dev(self,
201 "can't map power management I/O space\n"); 201 "can't map power management I/O space\n");
202 goto nopowermanagement; 202 goto nopowermanagement;
203 } 203 }
204 204
205 /* 205 /*
206 * Revision 0 and 1 are PIIX4, 2 is PIIX4E, 3 is PIIX4M. 206 * Revision 0 and 1 are PIIX4, 2 is PIIX4E, 3 is PIIX4M.
207 * PIIX4 and PIIX4E have a bug in the timer latch, see Errata #20 207 * PIIX4 and PIIX4E have a bug in the timer latch, see Errata #20
208 * in the "Specification update" (document #297738). 208 * in the "Specification update" (document #297738).
209 */ 209 */
210 acpipmtimer_attach(self, sc->sc_pm_iot, sc->sc_pm_ioh, 210 acpipmtimer_attach(self, sc->sc_pm_iot, sc->sc_pm_ioh,
211 PIIX_PM_PMTMR, 211 PIIX_PM_PMTMR,
212 (PCI_REVISION(pa->pa_class) < 3) ? ACPIPMT_BADLATCH : 0 ); 212 (PCI_REVISION(pa->pa_class) < 3) ? ACPIPMT_BADLATCH : 0 );
213 213
214nopowermanagement: 214nopowermanagement:
215 215
216 /* SB800 rev 0x40+ and AMD HUDSON need special initialization */ 216 /* SB800 rev 0x40+ and AMD HUDSON need special initialization */
217 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD && 217 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_AMD &&
218 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_HUDSON_SMB) { 218 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_AMD_HUDSON_SMB) {
219 if (piixpm_sb800_init(sc) == 0) { 219 if (piixpm_sb800_init(sc) == 0) {
220 goto attach_i2c; 220 goto attach_i2c;
221 } 221 }
222 aprint_normal_dev(self, "SMBus initialization failed\n"); 222 aprint_normal_dev(self, "SMBus initialization failed\n");
223 return; 223 return;
224 } 224 }
225 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATI && 225 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATI &&
226 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATI_SB600_SMB && 226 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATI_SB600_SMB &&
227 PCI_REVISION(pa->pa_class) >= 0x40) { 227 PCI_REVISION(pa->pa_class) >= 0x40) {
228 if (piixpm_sb800_init(sc) == 0) { 228 if (piixpm_sb800_init(sc) == 0) {
229 sc->sc_numbusses = 4; 229 sc->sc_numbusses = 4;
230 goto attach_i2c; 230 goto attach_i2c;
231 } 231 }
232 aprint_normal_dev(self, "SMBus initialization failed\n"); 232 aprint_normal_dev(self, "SMBus initialization failed\n");
233 return; 233 return;
234 } 234 }
235 235
236 if ((conf & PIIX_SMB_HOSTC_HSTEN) == 0) { 236 if ((conf & PIIX_SMB_HOSTC_HSTEN) == 0) {
237 aprint_normal_dev(self, "SMBus disabled\n"); 237 aprint_normal_dev(self, "SMBus disabled\n");
238 return; 238 return;
239 } 239 }
240 240
241 /* Map I/O space */ 241 /* Map I/O space */
242 base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_BASE) & 0xffff; 242 base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_BASE) & 0xffff;
243 if (bus_space_map(sc->sc_smb_iot, PCI_MAPREG_IO_ADDR(base), 243 if (bus_space_map(sc->sc_smb_iot, PCI_MAPREG_IO_ADDR(base),
244 PIIX_SMB_SIZE, 0, &sc->sc_smb_ioh)) { 244 PIIX_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
245 aprint_error_dev(self, "can't map smbus I/O space\n"); 245 aprint_error_dev(self, "can't map smbus I/O space\n");
246 return; 246 return;
247 } 247 }
248 248
249 sc->sc_poll = 1; 249 sc->sc_poll = 1;
250 aprint_normal_dev(self, ""); 250 aprint_normal_dev(self, "");
251 if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_SMI) { 251 if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_SMI) {
252 /* No PCI IRQ */ 252 /* No PCI IRQ */
253 aprint_normal("interrupting at SMI, "); 253 aprint_normal("interrupting at SMI, ");
254 } else if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_IRQ) { 254 } else if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_IRQ) {
255 /* Install interrupt handler */ 255 /* Install interrupt handler */
256 if (pci_intr_map(pa, &ih) == 0) { 256 if (pci_intr_map(pa, &ih) == 0) {
257 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf, 257 intrstr = pci_intr_string(pa->pa_pc, ih, intrbuf,
258 sizeof(intrbuf)); 258 sizeof(intrbuf));
259 sc->sc_smb_ih = pci_intr_establish(pa->pa_pc, ih, 259 sc->sc_smb_ih = pci_intr_establish_xname(pa->pa_pc, ih,
260 IPL_BIO, piixpm_intr, sc); 260 IPL_BIO, piixpm_intr, sc, device_xname(sc->sc_dev));
261 if (sc->sc_smb_ih != NULL) { 261 if (sc->sc_smb_ih != NULL) {
262 aprint_normal("interrupting at %s", intrstr); 262 aprint_normal("interrupting at %s", intrstr);
263 sc->sc_poll = 0; 263 sc->sc_poll = 0;
264 } 264 }
265 } 265 }
266 } 266 }
267 if (sc->sc_poll) 267 if (sc->sc_poll)
268 aprint_normal("polling"); 268 aprint_normal("polling");
269 269
270 aprint_normal("\n"); 270 aprint_normal("\n");
271 271
272attach_i2c: 272attach_i2c:
273 for (i = 0; i < sc->sc_numbusses; i++) 273 for (i = 0; i < sc->sc_numbusses; i++)
274 sc->sc_i2c_device[i] = NULL; 274 sc->sc_i2c_device[i] = NULL;
275 275
276 flags = 0; 276 flags = 0;
277 mutex_init(&sc->sc_i2c_mutex, MUTEX_DEFAULT, IPL_NONE); 277 mutex_init(&sc->sc_i2c_mutex, MUTEX_DEFAULT, IPL_NONE);
278 piixpm_rescan(self, "i2cbus", &flags); 278 piixpm_rescan(self, "i2cbus", &flags);
279} 279}
280 280
281static int 281static int
282piixpm_rescan(device_t self, const char *ifattr, const int *flags) 282piixpm_rescan(device_t self, const char *ifattr, const int *flags)
283{ 283{
284 struct piixpm_softc *sc = device_private(self); 284 struct piixpm_softc *sc = device_private(self);
285 struct i2cbus_attach_args iba; 285 struct i2cbus_attach_args iba;
286 int i; 286 int i;
287 287
288 if (!ifattr_match(ifattr, "i2cbus")) 288 if (!ifattr_match(ifattr, "i2cbus"))
289 return 0; 289 return 0;
290 290
291 /* Attach I2C bus */ 291 /* Attach I2C bus */
292 292
293 for (i = 0; i < sc->sc_numbusses; i++) { 293 for (i = 0; i < sc->sc_numbusses; i++) {
294 if (sc->sc_i2c_device[i]) 294 if (sc->sc_i2c_device[i])
295 continue; 295 continue;
296 sc->sc_busses[i].sda = i; 296 sc->sc_busses[i].sda = i;
297 sc->sc_busses[i].softc = sc; 297 sc->sc_busses[i].softc = sc;
298 sc->sc_i2c_tags[i].ic_cookie = &sc->sc_busses[i]; 298 sc->sc_i2c_tags[i].ic_cookie = &sc->sc_busses[i];
299 sc->sc_i2c_tags[i].ic_acquire_bus = piixpm_i2c_acquire_bus; 299 sc->sc_i2c_tags[i].ic_acquire_bus = piixpm_i2c_acquire_bus;
300 sc->sc_i2c_tags[i].ic_release_bus = piixpm_i2c_release_bus; 300 sc->sc_i2c_tags[i].ic_release_bus = piixpm_i2c_release_bus;
301 sc->sc_i2c_tags[i].ic_exec = piixpm_i2c_exec; 301 sc->sc_i2c_tags[i].ic_exec = piixpm_i2c_exec;
302 memset(&iba, 0, sizeof(iba)); 302 memset(&iba, 0, sizeof(iba));
303 iba.iba_type = I2C_TYPE_SMBUS; 303 iba.iba_type = I2C_TYPE_SMBUS;
304 iba.iba_tag = &sc->sc_i2c_tags[i]; 304 iba.iba_tag = &sc->sc_i2c_tags[i];
305 sc->sc_i2c_device[i] = config_found_ia(self, ifattr, &iba, 305 sc->sc_i2c_device[i] = config_found_ia(self, ifattr, &iba,
306 iicbus_print); 306 iicbus_print);
307 } 307 }
308 308
309 return 0; 309 return 0;
310} 310}
311 311
312static void 312static void
313piixpm_chdet(device_t self, device_t child) 313piixpm_chdet(device_t self, device_t child)
314{ 314{
315 struct piixpm_softc *sc = device_private(self); 315 struct piixpm_softc *sc = device_private(self);
316 int i; 316 int i;
317 317
318 for (i = 0; i < sc->sc_numbusses; i++) { 318 for (i = 0; i < sc->sc_numbusses; i++) {
319 if (sc->sc_i2c_device[i] == child) { 319 if (sc->sc_i2c_device[i] == child) {
320 sc->sc_i2c_device[i] = NULL; 320 sc->sc_i2c_device[i] = NULL;
321 break; 321 break;
322 } 322 }
323 } 323 }
324} 324}
325 325
326 326
327static bool 327static bool
328piixpm_suspend(device_t dv, const pmf_qual_t *qual) 328piixpm_suspend(device_t dv, const pmf_qual_t *qual)
329{ 329{
330 struct piixpm_softc *sc = device_private(dv); 330 struct piixpm_softc *sc = device_private(dv);
331 331
332 sc->sc_devact[0] = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 332 sc->sc_devact[0] = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
333 PIIX_DEVACTA); 333 PIIX_DEVACTA);
334 sc->sc_devact[1] = pci_conf_read(sc->sc_pc, sc->sc_pcitag, 334 sc->sc_devact[1] = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
335 PIIX_DEVACTB); 335 PIIX_DEVACTB);
336 336
337 return true; 337 return true;
338} 338}
339 339
340static bool 340static bool
341piixpm_resume(device_t dv, const pmf_qual_t *qual) 341piixpm_resume(device_t dv, const pmf_qual_t *qual)
342{ 342{
343 struct piixpm_softc *sc = device_private(dv); 343 struct piixpm_softc *sc = device_private(dv);
344 344
345 pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_DEVACTA, 345 pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_DEVACTA,
346 sc->sc_devact[0]); 346 sc->sc_devact[0]);
347 pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_DEVACTB, 347 pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_DEVACTB,
348 sc->sc_devact[1]); 348 sc->sc_devact[1]);
349 349
350 return true; 350 return true;
351} 351}
352 352
353/* 353/*
354 * Extract SMBus base address from SB800 Power Management (PM) registers. 354 * Extract SMBus base address from SB800 Power Management (PM) registers.
355 * The PM registers can be accessed either through indirect I/O (CD6/CD7) or 355 * The PM registers can be accessed either through indirect I/O (CD6/CD7) or
356 * direct mapping if AcpiMMioDecodeEn is enabled. Since this function is only 356 * direct mapping if AcpiMMioDecodeEn is enabled. Since this function is only
357 * called once it uses indirect I/O for simplicity. 357 * called once it uses indirect I/O for simplicity.
358 */ 358 */
static int
piixpm_sb800_init(struct piixpm_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh;	/* indirect I/O handle */
	uint16_t val, base_addr;

	/*
	 * Map the CD6/CD7 indirect I/O pair and read the 16-bit
	 * SMBUS0EN register one byte at a time (write index, read data).
	 */
	/* Fetch SMB base address */
	if (bus_space_map(iot,
	    PIIXPM_INDIRECTIO_BASE, PIIXPM_INDIRECTIO_SIZE, 0, &ioh)) {
		device_printf(sc->sc_dev, "couldn't map indirect I/O space\n");
		return EBUSY;
	}
	bus_space_write_1(iot, ioh, PIIXPM_INDIRECTIO_INDEX,
	    SB800_PM_SMBUS0EN_LO);
	val = bus_space_read_1(iot, ioh, PIIXPM_INDIRECTIO_DATA);
	bus_space_write_1(iot, ioh, PIIXPM_INDIRECTIO_INDEX,
	    SB800_PM_SMBUS0EN_HI);
	val |= bus_space_read_1(iot, ioh, PIIXPM_INDIRECTIO_DATA) << 8;
	/*
	 * Keep the indirect I/O handle around: the acquire/release bus
	 * hooks use it to switch the active SDA channel (SMBUS0SEL).
	 * NOTE(review): ioh is not unmapped on the failure returns below;
	 * attach aborts in that case, so the mapping is leaked — confirm
	 * whether bus_space_unmap() should be added on those paths.
	 */
	sc->sc_sb800_ioh = ioh;

	/* SMBus host must be enabled by firmware, otherwise give up. */
	if ((val & SB800_PM_SMBUS0EN_ENABLE) == 0)
		return ENOENT;

	base_addr = val & SB800_PM_SMBUS0EN_BADDR;

	aprint_debug_dev(sc->sc_dev, "SMBus @ 0x%04x\n", base_addr);

	/* Select SMBus port 0 decoding (SMBUS0SEL). */
	bus_space_write_1(iot, ioh, PIIXPM_INDIRECTIO_INDEX,
	    SB800_PM_SMBUS0SELEN);
	bus_space_write_1(iot, ioh, PIIXPM_INDIRECTIO_DATA, 1); /* SMBUS0SEL */

	if (bus_space_map(iot, PCI_MAPREG_IO_ADDR(base_addr),
	    PIIX_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
		aprint_error_dev(sc->sc_dev, "can't map smbus I/O space\n");
		return EBUSY;
	}
	/* SB800/Hudson path is always polled; no interrupt is set up. */
	aprint_normal_dev(sc->sc_dev, "polling (SB800)\n");
	sc->sc_poll = 1;

	return 0;
}
401 401
static void
piixpm_csb5_reset(void *arg)
{
	struct piixpm_softc *sc = arg;
	pcireg_t base, hostc, pmbase;

	/*
	 * Hard-reset the CSB5 SMBus host (called after a transfer
	 * timeout, see piixpm_i2c_exec()).  The reset clobbers the
	 * SMBus BAR and host configuration, so save them first and
	 * restore them afterwards.
	 */
	base = pci_conf_read(sc->sc_pc, sc->sc_pcitag, PIIX_SMB_BASE);
	hostc = pci_conf_read(sc->sc_pc, sc->sc_pcitag, PIIX_SMB_HOSTC);

	/* Pulse the reset bit in the PM base register: set, then clear. */
	pmbase = pci_conf_read(sc->sc_pc, sc->sc_pcitag, PIIX_PM_BASE);
	pmbase |= PIIX_PM_BASE_CSB5_RESET;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_PM_BASE, pmbase);
	pmbase &= ~PIIX_PM_BASE_CSB5_RESET;
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_PM_BASE, pmbase);

	pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_SMB_BASE, base);
	pci_conf_write(sc->sc_pc, sc->sc_pcitag, PIIX_SMB_HOSTC, hostc);

	/*
	 * Give the host time to settle.  NOTE(review): the wait channel
	 * is &sc (a stack address), which nothing ever wakeup()s — the
	 * wakeup(sc) in piixpm_intr() uses sc itself — so this sleep
	 * always runs to the hz/2 timeout.  Presumably intentional as a
	 * pure delay; confirm, or use kpause() to make the intent clear.
	 */
	(void) tsleep(&sc, PRIBIO, "csb5reset", hz/2);
}
422 422
static int
piixpm_i2c_acquire_bus(void *cookie, int flags)
{
	struct piixpm_smbus *smbus = cookie;
	struct piixpm_softc *sc = smbus->softc;

	/*
	 * Serialize access to the (single) SMBus host controller.
	 * During autoconfiguration (cold) we must not sleep, so the
	 * mutex is skipped; attachment is single-threaded then.
	 */
	if (!cold)
		mutex_enter(&sc->sc_i2c_mutex);

	if (smbus->sda > 0)	/* SB800 */
	{
		/*
		 * SB800-family hosts multiplex several SDA channels on
		 * one controller; select ours via the indirect I/O
		 * SMBUS0SEL register (channel number in bits 2:1, hence
		 * the << 1).  Channel 0 needs no select and skips this.
		 */
		bus_space_write_1(sc->sc_iot, sc->sc_sb800_ioh,
		    PIIXPM_INDIRECTIO_INDEX, SB800_PM_SMBUS0SEL);
		bus_space_write_1(sc->sc_iot, sc->sc_sb800_ioh,
		    PIIXPM_INDIRECTIO_DATA, smbus->sda << 1);
	}

	return 0;
}
442 442
static void
piixpm_i2c_release_bus(void *cookie, int flags)
{
	struct piixpm_smbus *smbus = cookie;
	struct piixpm_softc *sc = smbus->softc;

	if (smbus->sda > 0)	/* SB800 */
	{
		/*
		 * HP Microserver hangs after reboot if not set to SDA0.
		 * Also add shutdown hook?
		 */
		/* Switch the channel mux back to SDA0 (see acquire_bus). */
		bus_space_write_1(sc->sc_iot, sc->sc_sb800_ioh,
		    PIIXPM_INDIRECTIO_INDEX, SB800_PM_SMBUS0SEL);
		bus_space_write_1(sc->sc_iot, sc->sc_sb800_ioh,
		    PIIXPM_INDIRECTIO_DATA, 0);
	}

	/* Pairs with the conditional mutex_enter() in acquire_bus. */
	if (!cold)
		mutex_exit(&sc->sc_i2c_mutex);
}
464 464
465static int 465static int
466piixpm_i2c_exec(void *cookie, i2c_op_t op, i2c_addr_t addr, 466piixpm_i2c_exec(void *cookie, i2c_op_t op, i2c_addr_t addr,
467 const void *cmdbuf, size_t cmdlen, void *buf, size_t len, int flags) 467 const void *cmdbuf, size_t cmdlen, void *buf, size_t len, int flags)
468{ 468{
469 struct piixpm_smbus *smbus = cookie; 469 struct piixpm_smbus *smbus = cookie;
470 struct piixpm_softc *sc = smbus->softc; 470 struct piixpm_softc *sc = smbus->softc;
471 const u_int8_t *b; 471 const u_int8_t *b;
472 u_int8_t ctl = 0, st; 472 u_int8_t ctl = 0, st;
473 int retries; 473 int retries;
474 474
475 DPRINTF(("%s: exec: op %d, addr 0x%x, cmdlen %zu, len %zu, flags 0x%x\n", 475 DPRINTF(("%s: exec: op %d, addr 0x%x, cmdlen %zu, len %zu, flags 0x%x\n",
476 device_xname(sc->sc_dev), op, addr, cmdlen, len, flags)); 476 device_xname(sc->sc_dev), op, addr, cmdlen, len, flags));
477 477
478 /* Clear status bits */ 478 /* Clear status bits */
479 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, 479 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS,
480 PIIX_SMB_HS_INTR | PIIX_SMB_HS_DEVERR | 480 PIIX_SMB_HS_INTR | PIIX_SMB_HS_DEVERR |
481 PIIX_SMB_HS_BUSERR | PIIX_SMB_HS_FAILED); 481 PIIX_SMB_HS_BUSERR | PIIX_SMB_HS_FAILED);
482 bus_space_barrier(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, 1, 482 bus_space_barrier(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, 1,
483 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 483 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
484 484
485 /* Wait for bus to be idle */ 485 /* Wait for bus to be idle */
486 for (retries = 100; retries > 0; retries--) { 486 for (retries = 100; retries > 0; retries--) {
487 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, 487 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh,
488 PIIX_SMB_HS); 488 PIIX_SMB_HS);
489 if (!(st & PIIX_SMB_HS_BUSY)) 489 if (!(st & PIIX_SMB_HS_BUSY))
490 break; 490 break;
491 DELAY(PIIXPM_DELAY); 491 DELAY(PIIXPM_DELAY);
492 } 492 }
493 DPRINTF(("%s: exec: st 0x%d\n", device_xname(sc->sc_dev), st & 0xff)); 493 DPRINTF(("%s: exec: st 0x%d\n", device_xname(sc->sc_dev), st & 0xff));
494 if (st & PIIX_SMB_HS_BUSY) 494 if (st & PIIX_SMB_HS_BUSY)
495 return (1); 495 return (1);
496 496
497 if (cold || sc->sc_poll) 497 if (cold || sc->sc_poll)
498 flags |= I2C_F_POLL; 498 flags |= I2C_F_POLL;
499 499
500 if (!I2C_OP_STOP_P(op) || cmdlen > 1 || len > 2 || 500 if (!I2C_OP_STOP_P(op) || cmdlen > 1 || len > 2 ||
501 (cmdlen == 0 && len > 1)) 501 (cmdlen == 0 && len > 1))
502 return (1); 502 return (1);
503 503
504 /* Setup transfer */ 504 /* Setup transfer */
505 sc->sc_i2c_xfer.op = op; 505 sc->sc_i2c_xfer.op = op;
506 sc->sc_i2c_xfer.buf = buf; 506 sc->sc_i2c_xfer.buf = buf;
507 sc->sc_i2c_xfer.len = len; 507 sc->sc_i2c_xfer.len = len;
508 sc->sc_i2c_xfer.flags = flags; 508 sc->sc_i2c_xfer.flags = flags;
509 sc->sc_i2c_xfer.error = 0; 509 sc->sc_i2c_xfer.error = 0;
510 510
511 /* Set slave address and transfer direction */ 511 /* Set slave address and transfer direction */
512 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_TXSLVA, 512 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_TXSLVA,
513 PIIX_SMB_TXSLVA_ADDR(addr) | 513 PIIX_SMB_TXSLVA_ADDR(addr) |
514 (I2C_OP_READ_P(op) ? PIIX_SMB_TXSLVA_READ : 0)); 514 (I2C_OP_READ_P(op) ? PIIX_SMB_TXSLVA_READ : 0));
515 515
516 b = cmdbuf; 516 b = cmdbuf;
517 if (cmdlen > 0) 517 if (cmdlen > 0)
518 /* Set command byte */ 518 /* Set command byte */
519 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, 519 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
520 PIIX_SMB_HCMD, b[0]); 520 PIIX_SMB_HCMD, b[0]);
521 521
522 if (I2C_OP_WRITE_P(op)) { 522 if (I2C_OP_WRITE_P(op)) {
523 /* Write data */ 523 /* Write data */
524 b = buf; 524 b = buf;
525 if (cmdlen == 0 && len == 1) 525 if (cmdlen == 0 && len == 1)
526 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, 526 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
527 PIIX_SMB_HCMD, b[0]); 527 PIIX_SMB_HCMD, b[0]);
528 else if (len > 0) 528 else if (len > 0)
529 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, 529 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
530 PIIX_SMB_HD0, b[0]); 530 PIIX_SMB_HD0, b[0]);
531 if (len > 1) 531 if (len > 1)
532 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, 532 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh,
533 PIIX_SMB_HD1, b[1]); 533 PIIX_SMB_HD1, b[1]);
534 } 534 }
535 535
536 /* Set SMBus command */ 536 /* Set SMBus command */
537 if (cmdlen == 0) { 537 if (cmdlen == 0) {
538 if (len == 0) 538 if (len == 0)
539 ctl = PIIX_SMB_HC_CMD_QUICK; 539 ctl = PIIX_SMB_HC_CMD_QUICK;
540 else 540 else
541 ctl = PIIX_SMB_HC_CMD_BYTE; 541 ctl = PIIX_SMB_HC_CMD_BYTE;
542 } else if (len == 1) 542 } else if (len == 1)
543 ctl = PIIX_SMB_HC_CMD_BDATA; 543 ctl = PIIX_SMB_HC_CMD_BDATA;
544 else if (len == 2) 544 else if (len == 2)
545 ctl = PIIX_SMB_HC_CMD_WDATA; 545 ctl = PIIX_SMB_HC_CMD_WDATA;
546 546
547 if ((flags & I2C_F_POLL) == 0) 547 if ((flags & I2C_F_POLL) == 0)
548 ctl |= PIIX_SMB_HC_INTREN; 548 ctl |= PIIX_SMB_HC_INTREN;
549 549
550 /* Start transaction */ 550 /* Start transaction */
551 ctl |= PIIX_SMB_HC_START; 551 ctl |= PIIX_SMB_HC_START;
552 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HC, ctl); 552 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HC, ctl);
553 553
554 if (flags & I2C_F_POLL) { 554 if (flags & I2C_F_POLL) {
555 /* Poll for completion */ 555 /* Poll for completion */
556 if (PIIXPM_IS_CSB5(sc->sc_id)) 556 if (PIIXPM_IS_CSB5(sc->sc_id))
557 DELAY(2*PIIXPM_DELAY); 557 DELAY(2*PIIXPM_DELAY);
558 else 558 else
559 DELAY(PIIXPM_DELAY); 559 DELAY(PIIXPM_DELAY);
560 for (retries = 1000; retries > 0; retries--) { 560 for (retries = 1000; retries > 0; retries--) {
561 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, 561 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh,
562 PIIX_SMB_HS); 562 PIIX_SMB_HS);
563 if ((st & PIIX_SMB_HS_BUSY) == 0) 563 if ((st & PIIX_SMB_HS_BUSY) == 0)
564 break; 564 break;
565 DELAY(PIIXPM_DELAY); 565 DELAY(PIIXPM_DELAY);
566 } 566 }
567 if (st & PIIX_SMB_HS_BUSY) 567 if (st & PIIX_SMB_HS_BUSY)
568 goto timeout; 568 goto timeout;
569 piixpm_intr(sc); 569 piixpm_intr(sc);
570 } else { 570 } else {
571 /* Wait for interrupt */ 571 /* Wait for interrupt */
572 if (tsleep(sc, PRIBIO, "iicexec", PIIXPM_TIMEOUT * hz)) 572 if (tsleep(sc, PRIBIO, "iicexec", PIIXPM_TIMEOUT * hz))
573 goto timeout; 573 goto timeout;
574 } 574 }
575 575
576 if (sc->sc_i2c_xfer.error) 576 if (sc->sc_i2c_xfer.error)
577 return (1); 577 return (1);
578 578
579 return (0); 579 return (0);
580 580
581timeout: 581timeout:
582 /* 582 /*
583 * Transfer timeout. Kill the transaction and clear status bits. 583 * Transfer timeout. Kill the transaction and clear status bits.
584 */ 584 */
585 aprint_error_dev(sc->sc_dev, "timeout, status 0x%x\n", st); 585 aprint_error_dev(sc->sc_dev, "timeout, status 0x%x\n", st);
586 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HC, 586 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HC,
587 PIIX_SMB_HC_KILL); 587 PIIX_SMB_HC_KILL);
588 DELAY(PIIXPM_DELAY); 588 DELAY(PIIXPM_DELAY);
589 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS); 589 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS);
590 if ((st & PIIX_SMB_HS_FAILED) == 0) 590 if ((st & PIIX_SMB_HS_FAILED) == 0)
591 aprint_error_dev(sc->sc_dev, 591 aprint_error_dev(sc->sc_dev,
592 "transaction abort failed, status 0x%x\n", st); 592 "transaction abort failed, status 0x%x\n", st);
593 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, st); 593 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, st);
594 /* 594 /*
595 * CSB5 needs hard reset to unlock the smbus after timeout. 595 * CSB5 needs hard reset to unlock the smbus after timeout.
596 */ 596 */
597 if (PIIXPM_IS_CSB5(sc->sc_id)) 597 if (PIIXPM_IS_CSB5(sc->sc_id))
598 piixpm_csb5_reset(sc); 598 piixpm_csb5_reset(sc);
599 return (1); 599 return (1);
600} 600}
601 601
602static int 602static int
603piixpm_intr(void *arg) 603piixpm_intr(void *arg)
604{ 604{
605 struct piixpm_softc *sc = arg; 605 struct piixpm_softc *sc = arg;
606 u_int8_t st; 606 u_int8_t st;
607 u_int8_t *b; 607 u_int8_t *b;
608 size_t len; 608 size_t len;
609 609
610 /* Read status */ 610 /* Read status */
611 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS); 611 st = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS);
612 if ((st & PIIX_SMB_HS_BUSY) != 0 || (st & (PIIX_SMB_HS_INTR | 612 if ((st & PIIX_SMB_HS_BUSY) != 0 || (st & (PIIX_SMB_HS_INTR |
613 PIIX_SMB_HS_DEVERR | PIIX_SMB_HS_BUSERR | 613 PIIX_SMB_HS_DEVERR | PIIX_SMB_HS_BUSERR |
614 PIIX_SMB_HS_FAILED)) == 0) 614 PIIX_SMB_HS_FAILED)) == 0)
615 /* Interrupt was not for us */ 615 /* Interrupt was not for us */
616 return (0); 616 return (0);
617 617
618 DPRINTF(("%s: intr st 0x%d\n", device_xname(sc->sc_dev), st & 0xff)); 618 DPRINTF(("%s: intr st 0x%d\n", device_xname(sc->sc_dev), st & 0xff));
619 619
620 /* Clear status bits */ 620 /* Clear status bits */
621 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, st); 621 bus_space_write_1(sc->sc_smb_iot, sc->sc_smb_ioh, PIIX_SMB_HS, st);
622 622
623 /* Check for errors */ 623 /* Check for errors */
624 if (st & (PIIX_SMB_HS_DEVERR | PIIX_SMB_HS_BUSERR | 624 if (st & (PIIX_SMB_HS_DEVERR | PIIX_SMB_HS_BUSERR |
625 PIIX_SMB_HS_FAILED)) { 625 PIIX_SMB_HS_FAILED)) {
626 sc->sc_i2c_xfer.error = 1; 626 sc->sc_i2c_xfer.error = 1;
627 goto done; 627 goto done;
628 } 628 }
629 629
630 if (st & PIIX_SMB_HS_INTR) { 630 if (st & PIIX_SMB_HS_INTR) {
631 if (I2C_OP_WRITE_P(sc->sc_i2c_xfer.op)) 631 if (I2C_OP_WRITE_P(sc->sc_i2c_xfer.op))
632 goto done; 632 goto done;
633 633
634 /* Read data */ 634 /* Read data */
635 b = sc->sc_i2c_xfer.buf; 635 b = sc->sc_i2c_xfer.buf;
636 len = sc->sc_i2c_xfer.len; 636 len = sc->sc_i2c_xfer.len;
637 if (len > 0) 637 if (len > 0)
638 b[0] = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, 638 b[0] = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh,
639 PIIX_SMB_HD0); 639 PIIX_SMB_HD0);
640 if (len > 1) 640 if (len > 1)
641 b[1] = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh, 641 b[1] = bus_space_read_1(sc->sc_smb_iot, sc->sc_smb_ioh,
642 PIIX_SMB_HD1); 642 PIIX_SMB_HD1);
643 } 643 }
644 644
645done: 645done:
646 if ((sc->sc_i2c_xfer.flags & I2C_F_POLL) == 0) 646 if ((sc->sc_i2c_xfer.flags & I2C_F_POLL) == 0)
647 wakeup(sc); 647 wakeup(sc);
648 return (1); 648 return (1);
649} 649}