| @@ -1,753 +1,749 @@ | | | @@ -1,753 +1,749 @@ |
1 | /* $NetBSD: acpi_cpu_cstate.c,v 1.15 2010/08/08 16:58:42 jruoho Exp $ */ | | 1 | /* $NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi> | | 4 | * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi> |
5 | * All rights reserved. | | 5 | * All rights reserved. |
6 | * | | 6 | * |
7 | * Redistribution and use in source and binary forms, with or without | | 7 | * Redistribution and use in source and binary forms, with or without |
8 | * modification, are permitted provided that the following conditions | | 8 | * modification, are permitted provided that the following conditions |
9 | * are met: | | 9 | * are met: |
10 | * | | 10 | * |
11 | * 1. Redistributions of source code must retain the above copyright | | 11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | | 12 | * notice, this list of conditions and the following disclaimer. |
13 | * 2. Redistributions in binary form must reproduce the above copyright | | 13 | * 2. Redistributions in binary form must reproduce the above copyright |
14 | * notice, this list of conditions and the following disclaimer in the | | 14 | * notice, this list of conditions and the following disclaimer in the |
15 | * documentation and/or other materials provided with the distribution. | | 15 | * documentation and/or other materials provided with the distribution. |
16 | * | | 16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | | 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | | 20 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 | * SUCH DAMAGE. | | 27 | * SUCH DAMAGE. |
28 | */ | | 28 | */ |
29 | #include <sys/cdefs.h> | | 29 | #include <sys/cdefs.h> |
30 | __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.15 2010/08/08 16:58:42 jruoho Exp $"); | | 30 | __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $"); |
31 | | | 31 | |
32 | #include <sys/param.h> | | 32 | #include <sys/param.h> |
33 | #include <sys/cpu.h> | | 33 | #include <sys/cpu.h> |
34 | #include <sys/device.h> | | 34 | #include <sys/device.h> |
35 | #include <sys/kernel.h> | | 35 | #include <sys/kernel.h> |
36 | #include <sys/once.h> | | 36 | #include <sys/once.h> |
37 | #include <sys/mutex.h> | | 37 | #include <sys/mutex.h> |
38 | #include <sys/timetc.h> | | 38 | #include <sys/timetc.h> |
39 | | | 39 | |
40 | #include <dev/pci/pcivar.h> | | 40 | #include <dev/pci/pcivar.h> |
41 | #include <dev/pci/pcidevs.h> | | 41 | #include <dev/pci/pcidevs.h> |
42 | | | 42 | |
43 | #include <dev/acpi/acpireg.h> | | 43 | #include <dev/acpi/acpireg.h> |
44 | #include <dev/acpi/acpivar.h> | | 44 | #include <dev/acpi/acpivar.h> |
45 | #include <dev/acpi/acpi_cpu.h> | | 45 | #include <dev/acpi/acpi_cpu.h> |
46 | #include <dev/acpi/acpi_timer.h> | | 46 | #include <dev/acpi/acpi_timer.h> |
47 | | | 47 | |
48 | #include <machine/acpi_machdep.h> | | 48 | #include <machine/acpi_machdep.h> |
49 | | | 49 | |
50 | #define _COMPONENT ACPI_BUS_COMPONENT | | 50 | #define _COMPONENT ACPI_BUS_COMPONENT |
51 | ACPI_MODULE_NAME ("acpi_cpu_cstate") | | 51 | ACPI_MODULE_NAME ("acpi_cpu_cstate") |
52 | | | 52 | |
53 | static void acpicpu_cstate_attach_print(struct acpicpu_softc *); | | 53 | static void acpicpu_cstate_attach_print(struct acpicpu_softc *); |
54 | static ACPI_STATUS acpicpu_cstate_cst(struct acpicpu_softc *); | | 54 | static ACPI_STATUS acpicpu_cstate_cst(struct acpicpu_softc *); |
55 | static ACPI_STATUS acpicpu_cstate_cst_add(struct acpicpu_softc *, | | 55 | static ACPI_STATUS acpicpu_cstate_cst_add(struct acpicpu_softc *, |
56 | ACPI_OBJECT *); | | 56 | ACPI_OBJECT *); |
57 | static void acpicpu_cstate_cst_bios(void); | | 57 | static void acpicpu_cstate_cst_bios(void); |
58 | static void acpicpu_cstate_fadt(struct acpicpu_softc *); | | 58 | static void acpicpu_cstate_fadt(struct acpicpu_softc *); |
59 | static void acpicpu_cstate_quirks(struct acpicpu_softc *); | | 59 | static void acpicpu_cstate_quirks(struct acpicpu_softc *); |
60 | static int acpicpu_cstate_quirks_piix4(struct pci_attach_args *); | | 60 | static int acpicpu_cstate_quirks_piix4(struct pci_attach_args *); |
61 | static int acpicpu_cstate_latency(struct acpicpu_softc *); | | 61 | static int acpicpu_cstate_latency(struct acpicpu_softc *); |
62 | static bool acpicpu_cstate_bm_check(void); | | 62 | static bool acpicpu_cstate_bm_check(void); |
63 | static void acpicpu_cstate_idle_enter(struct acpicpu_softc *,int); | | 63 | static void acpicpu_cstate_idle_enter(struct acpicpu_softc *,int); |
64 | | | 64 | |
65 | extern struct acpicpu_softc **acpicpu_sc; | | 65 | extern struct acpicpu_softc **acpicpu_sc; |
66 | | | 66 | |
67 | /* | | 67 | /* |
68 | * XXX: The local APIC timer (as well as TSC) is typically | | 68 | * XXX: The local APIC timer (as well as TSC) is typically |
69 | * stopped in C3. For now, we cannot but disable C3. | | 69 | * stopped in C3. For now, we cannot but disable C3. |
70 | */ | | 70 | */ |
71 | #ifdef ACPICPU_ENABLE_C3 | | 71 | #ifdef ACPICPU_ENABLE_C3 |
72 | static int cs_state_max = ACPI_STATE_C3; | | 72 | static int cs_state_max = ACPI_STATE_C3; |
73 | #else | | 73 | #else |
74 | static int cs_state_max = ACPI_STATE_C2; | | 74 | static int cs_state_max = ACPI_STATE_C2; |
75 | #endif | | 75 | #endif |
76 | | | 76 | |
77 | void | | 77 | void |
78 | acpicpu_cstate_attach(device_t self) | | 78 | acpicpu_cstate_attach(device_t self) |
79 | { | | 79 | { |
80 | struct acpicpu_softc *sc = device_private(self); | | 80 | struct acpicpu_softc *sc = device_private(self); |
81 | ACPI_STATUS rv; | | 81 | ACPI_STATUS rv; |
82 | | | 82 | |
83 | /* | | 83 | /* |
84 | * Either use the preferred _CST or resort to FADT. | | 84 | * Either use the preferred _CST or resort to FADT. |
85 | */ | | 85 | */ |
86 | rv = acpicpu_cstate_cst(sc); | | 86 | rv = acpicpu_cstate_cst(sc); |
87 | | | 87 | |
88 | switch (rv) { | | 88 | switch (rv) { |
89 | | | 89 | |
90 | case AE_OK: | | 90 | case AE_OK: |
91 | sc->sc_flags |= ACPICPU_FLAG_C_CST; | | 91 | sc->sc_flags |= ACPICPU_FLAG_C_CST; |
92 | acpicpu_cstate_cst_bios(); | | 92 | acpicpu_cstate_cst_bios(); |
93 | break; | | 93 | break; |
94 | | | 94 | |
95 | default: | | 95 | default: |
96 | sc->sc_flags |= ACPICPU_FLAG_C_FADT; | | 96 | sc->sc_flags |= ACPICPU_FLAG_C_FADT; |
97 | acpicpu_cstate_fadt(sc); | | 97 | acpicpu_cstate_fadt(sc); |
98 | break; | | 98 | break; |
99 | } | | 99 | } |
100 | | | 100 | |
101 | acpicpu_cstate_quirks(sc); | | 101 | acpicpu_cstate_quirks(sc); |
102 | acpicpu_cstate_attach_print(sc); | | 102 | acpicpu_cstate_attach_print(sc); |
103 | } | | 103 | } |
104 | | | 104 | |
105 | void | | 105 | void |
106 | acpicpu_cstate_attach_print(struct acpicpu_softc *sc) | | 106 | acpicpu_cstate_attach_print(struct acpicpu_softc *sc) |
107 | { | | 107 | { |
108 | struct acpicpu_cstate *cs; | | 108 | struct acpicpu_cstate *cs; |
109 | const char *str; | | 109 | const char *str; |
110 | int i; | | 110 | int i; |
111 | | | 111 | |
112 | for (i = 0; i < ACPI_C_STATE_COUNT; i++) { | | 112 | for (i = 0; i < ACPI_C_STATE_COUNT; i++) { |
113 | | | 113 | |
114 | cs = &sc->sc_cstate[i]; | | 114 | cs = &sc->sc_cstate[i]; |
115 | | | 115 | |
116 | if (cs->cs_method == 0) | | 116 | if (cs->cs_method == 0) |
117 | continue; | | 117 | continue; |
118 | | | 118 | |
119 | switch (cs->cs_method) { | | 119 | switch (cs->cs_method) { |
120 | | | 120 | |
121 | case ACPICPU_C_STATE_HALT: | | 121 | case ACPICPU_C_STATE_HALT: |
122 | str = "HALT"; | | 122 | str = "HALT"; |
123 | break; | | 123 | break; |
124 | | | 124 | |
125 | case ACPICPU_C_STATE_FFH: | | 125 | case ACPICPU_C_STATE_FFH: |
126 | str = "FFH"; | | 126 | str = "FFH"; |
127 | break; | | 127 | break; |
128 | | | 128 | |
129 | case ACPICPU_C_STATE_SYSIO: | | 129 | case ACPICPU_C_STATE_SYSIO: |
130 | str = "SYSIO"; | | 130 | str = "SYSIO"; |
131 | break; | | 131 | break; |
132 | | | 132 | |
133 | default: | | 133 | default: |
134 | panic("NOTREACHED"); | | 134 | panic("NOTREACHED"); |
135 | } | | 135 | } |
136 | | | 136 | |
137 | aprint_verbose_dev(sc->sc_dev, "C%d: %5s, " | | 137 | aprint_debug_dev(sc->sc_dev, "C%d: %5s, " |
138 | "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n", | | 138 | "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n", |
139 | i, str, cs->cs_latency, cs->cs_power, | | 139 | i, str, cs->cs_latency, cs->cs_power, |
140 | (uint32_t)cs->cs_addr, cs->cs_flags); | | 140 | (uint32_t)cs->cs_addr, cs->cs_flags); |
141 | } | | 141 | } |
142 | } | | 142 | } |
143 | | | 143 | |
144 | int | | 144 | int |
145 | acpicpu_cstate_detach(device_t self) | | 145 | acpicpu_cstate_detach(device_t self) |
146 | { | | 146 | { |
147 | struct acpicpu_softc *sc = device_private(self); | | 147 | struct acpicpu_softc *sc = device_private(self); |
148 | static ONCE_DECL(once_detach); | | 148 | static ONCE_DECL(once_detach); |
149 | int rv; | | 149 | int rv; |
150 | | | 150 | |
151 | rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop); | | 151 | rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop); |
152 | | | 152 | |
153 | if (rv != 0) | | 153 | if (rv != 0) |
154 | return rv; | | 154 | return rv; |
155 | | | 155 | |
156 | sc->sc_flags &= ~ACPICPU_FLAG_C; | | 156 | sc->sc_flags &= ~ACPICPU_FLAG_C; |
157 | | | 157 | |
158 | return 0; | | 158 | return 0; |
159 | } | | 159 | } |
160 | | | 160 | |
161 | int | | 161 | int |
162 | acpicpu_cstate_start(device_t self) | | 162 | acpicpu_cstate_start(device_t self) |
163 | { | | 163 | { |
164 | struct acpicpu_softc *sc = device_private(self); | | 164 | struct acpicpu_softc *sc = device_private(self); |
165 | static ONCE_DECL(once_start); | | 165 | static ONCE_DECL(once_start); |
166 | static ONCE_DECL(once_save); | | 166 | static ONCE_DECL(once_save); |
167 | int rv; | | 167 | int rv; |
168 | | | 168 | |
169 | /* | | 169 | /* |
170 | * Save the existing idle-mechanism and claim the idle_loop(9). | | 170 | * Save the existing idle-mechanism and claim the idle_loop(9). |
171 | * This should be called after all ACPI CPUs have been attached. | | 171 | * This should be called after all ACPI CPUs have been attached. |
172 | */ | | 172 | */ |
173 | rv = RUN_ONCE(&once_save, acpicpu_md_idle_init); | | 173 | rv = RUN_ONCE(&once_save, acpicpu_md_idle_init); |
174 | | | 174 | |
175 | if (rv != 0) | | 175 | if (rv != 0) |
176 | return rv; | | 176 | return rv; |
177 | | | 177 | |
178 | rv = RUN_ONCE(&once_start, acpicpu_md_idle_start); | | 178 | rv = RUN_ONCE(&once_start, acpicpu_md_idle_start); |
179 | | | 179 | |
180 | if (rv == 0) | | 180 | if (rv == 0) |
181 | sc->sc_flags |= ACPICPU_FLAG_C; | | 181 | sc->sc_flags |= ACPICPU_FLAG_C; |
182 | | | 182 | |
183 | return rv; | | 183 | return rv; |
184 | } | | 184 | } |
185 | | | 185 | |
186 | bool | | 186 | bool |
187 | acpicpu_cstate_suspend(device_t self) | | 187 | acpicpu_cstate_suspend(device_t self) |
188 | { | | 188 | { |
189 | | | 189 | |
190 | return true; | | 190 | return true; |
191 | } | | 191 | } |
192 | | | 192 | |
193 | bool | | 193 | bool |
194 | acpicpu_cstate_resume(device_t self) | | 194 | acpicpu_cstate_resume(device_t self) |
195 | { | | 195 | { |
196 | static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback; | | 196 | static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback; |
197 | struct acpicpu_softc *sc = device_private(self); | | 197 | struct acpicpu_softc *sc = device_private(self); |
198 | | | 198 | |
199 | if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0) | | 199 | if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0) |
200 | (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev); | | 200 | (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev); |
201 | | | 201 | |
202 | return true; | | 202 | return true; |
203 | } | | 203 | } |
204 | | | 204 | |
205 | void | | 205 | void |
206 | acpicpu_cstate_callback(void *aux) | | 206 | acpicpu_cstate_callback(void *aux) |
207 | { | | 207 | { |
208 | struct acpicpu_softc *sc; | | 208 | struct acpicpu_softc *sc; |
209 | device_t self = aux; | | 209 | device_t self = aux; |
210 | | | 210 | |
211 | sc = device_private(self); | | 211 | sc = device_private(self); |
212 | | | 212 | |
213 | if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) { | | 213 | if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) { |
214 | KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0); | | 214 | KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0); |
215 | return; | | 215 | return; |
216 | } | | 216 | } |
217 | | | 217 | |
218 | mutex_enter(&sc->sc_mtx); | | 218 | mutex_enter(&sc->sc_mtx); |
219 | (void)acpicpu_cstate_cst(sc); | | 219 | (void)acpicpu_cstate_cst(sc); |
220 | mutex_exit(&sc->sc_mtx); | | 220 | mutex_exit(&sc->sc_mtx); |
221 | } | | 221 | } |
222 | | | 222 | |
223 | static ACPI_STATUS | | 223 | static ACPI_STATUS |
224 | acpicpu_cstate_cst(struct acpicpu_softc *sc) | | 224 | acpicpu_cstate_cst(struct acpicpu_softc *sc) |
225 | { | | 225 | { |
226 | ACPI_OBJECT *elm, *obj; | | 226 | ACPI_OBJECT *elm, *obj; |
227 | ACPI_BUFFER buf; | | 227 | ACPI_BUFFER buf; |
228 | ACPI_STATUS rv; | | 228 | ACPI_STATUS rv; |
229 | uint32_t i, n; | | 229 | uint32_t i, n; |
230 | uint8_t count; | | 230 | uint8_t count; |
231 | | | 231 | |
232 | rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf); | | 232 | rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf); |
233 | | | 233 | |
234 | if (ACPI_FAILURE(rv)) | | 234 | if (ACPI_FAILURE(rv)) |
235 | return rv; | | 235 | return rv; |
236 | | | 236 | |
237 | obj = buf.Pointer; | | 237 | obj = buf.Pointer; |
238 | | | 238 | |
239 | if (obj->Type != ACPI_TYPE_PACKAGE) { | | 239 | if (obj->Type != ACPI_TYPE_PACKAGE) { |
240 | rv = AE_TYPE; | | 240 | rv = AE_TYPE; |
241 | goto out; | | 241 | goto out; |
242 | } | | 242 | } |
243 | | | 243 | |
244 | if (obj->Package.Count < 2) { | | 244 | if (obj->Package.Count < 2) { |
245 | rv = AE_LIMIT; | | 245 | rv = AE_LIMIT; |
246 | goto out; | | 246 | goto out; |
247 | } | | 247 | } |
248 | | | 248 | |
249 | elm = obj->Package.Elements; | | 249 | elm = obj->Package.Elements; |
250 | | | 250 | |
251 | if (elm[0].Type != ACPI_TYPE_INTEGER) { | | 251 | if (elm[0].Type != ACPI_TYPE_INTEGER) { |
252 | rv = AE_TYPE; | | 252 | rv = AE_TYPE; |
253 | goto out; | | 253 | goto out; |
254 | } | | 254 | } |
255 | | | 255 | |
256 | n = elm[0].Integer.Value; | | 256 | n = elm[0].Integer.Value; |
257 | | | 257 | |
258 | if (n != obj->Package.Count - 1) { | | 258 | if (n != obj->Package.Count - 1) { |
259 | rv = AE_BAD_VALUE; | | 259 | rv = AE_BAD_VALUE; |
260 | goto out; | | 260 | goto out; |
261 | } | | 261 | } |
262 | | | 262 | |
263 | if (n > ACPI_C_STATES_MAX) { | | 263 | if (n > ACPI_C_STATES_MAX) { |
264 | rv = AE_LIMIT; | | 264 | rv = AE_LIMIT; |
265 | goto out; | | 265 | goto out; |
266 | } | | 266 | } |
267 | | | 267 | |
268 | (void)memset(sc->sc_cstate, 0, | | 268 | (void)memset(sc->sc_cstate, 0, |
269 | sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT); | | 269 | sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT); |
270 | | | 270 | |
271 | CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1); | | 271 | CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1); |
272 | CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3); | | 272 | CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3); |
273 | | | 273 | |
274 | for (count = 0, i = 1; i <= n; i++) { | | 274 | for (count = 0, i = 1; i <= n; i++) { |
275 | | | 275 | |
276 | elm = &obj->Package.Elements[i]; | | 276 | elm = &obj->Package.Elements[i]; |
277 | rv = acpicpu_cstate_cst_add(sc, elm); | | 277 | rv = acpicpu_cstate_cst_add(sc, elm); |
278 | | | 278 | |
279 | if (ACPI_SUCCESS(rv)) | | 279 | if (ACPI_SUCCESS(rv)) |
280 | count++; | | 280 | count++; |
281 | } | | 281 | } |
282 | | | 282 | |
283 | rv = (count != 0) ? AE_OK : AE_NOT_EXIST; | | 283 | rv = (count != 0) ? AE_OK : AE_NOT_EXIST; |
284 | | | 284 | |
285 | out: | | 285 | out: |
286 | if (buf.Pointer != NULL) | | 286 | if (buf.Pointer != NULL) |
287 | ACPI_FREE(buf.Pointer); | | 287 | ACPI_FREE(buf.Pointer); |
288 | | | 288 | |
289 | if (ACPI_FAILURE(rv)) | | | |
290 | aprint_error_dev(sc->sc_dev, "failed to evaluate " | | | |
291 | "_CST: %s\n", AcpiFormatException(rv)); | | | |
292 | | | | |
293 | return rv; | | 289 | return rv; |
294 | } | | 290 | } |
295 | | | 291 | |
296 | static ACPI_STATUS | | 292 | static ACPI_STATUS |
297 | acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm) | | 293 | acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm) |
298 | { | | 294 | { |
299 | const struct acpicpu_object *ao = &sc->sc_object; | | 295 | const struct acpicpu_object *ao = &sc->sc_object; |
300 | struct acpicpu_cstate *cs = sc->sc_cstate; | | 296 | struct acpicpu_cstate *cs = sc->sc_cstate; |
301 | struct acpicpu_cstate state; | | 297 | struct acpicpu_cstate state; |
302 | struct acpicpu_reg *reg; | | 298 | struct acpicpu_reg *reg; |
303 | ACPI_STATUS rv = AE_OK; | | 299 | ACPI_STATUS rv = AE_OK; |
304 | ACPI_OBJECT *obj; | | 300 | ACPI_OBJECT *obj; |
305 | uint32_t type; | | 301 | uint32_t type; |
306 | | | 302 | |
307 | (void)memset(&state, 0, sizeof(*cs)); | | 303 | (void)memset(&state, 0, sizeof(*cs)); |
308 | | | 304 | |
309 | state.cs_flags = ACPICPU_FLAG_C_BM_STS; | | 305 | state.cs_flags = ACPICPU_FLAG_C_BM_STS; |
310 | | | 306 | |
311 | if (elm->Type != ACPI_TYPE_PACKAGE) { | | 307 | if (elm->Type != ACPI_TYPE_PACKAGE) { |
312 | rv = AE_TYPE; | | 308 | rv = AE_TYPE; |
313 | goto out; | | 309 | goto out; |
314 | } | | 310 | } |
315 | | | 311 | |
316 | if (elm->Package.Count != 4) { | | 312 | if (elm->Package.Count != 4) { |
317 | rv = AE_LIMIT; | | 313 | rv = AE_LIMIT; |
318 | goto out; | | 314 | goto out; |
319 | } | | 315 | } |
320 | | | 316 | |
321 | /* | | 317 | /* |
322 | * Type. | | 318 | * Type. |
323 | */ | | 319 | */ |
324 | obj = &elm->Package.Elements[1]; | | 320 | obj = &elm->Package.Elements[1]; |
325 | | | 321 | |
326 | if (obj->Type != ACPI_TYPE_INTEGER) { | | 322 | if (obj->Type != ACPI_TYPE_INTEGER) { |
327 | rv = AE_TYPE; | | 323 | rv = AE_TYPE; |
328 | goto out; | | 324 | goto out; |
329 | } | | 325 | } |
330 | | | 326 | |
331 | type = obj->Integer.Value; | | 327 | type = obj->Integer.Value; |
332 | | | 328 | |
333 | if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) { | | 329 | if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) { |
334 | rv = AE_TYPE; | | 330 | rv = AE_TYPE; |
335 | goto out; | | 331 | goto out; |
336 | } | | 332 | } |
337 | | | 333 | |
338 | /* | | 334 | /* |
339 | * Latency. | | 335 | * Latency. |
340 | */ | | 336 | */ |
341 | obj = &elm->Package.Elements[2]; | | 337 | obj = &elm->Package.Elements[2]; |
342 | | | 338 | |
343 | if (obj->Type != ACPI_TYPE_INTEGER) { | | 339 | if (obj->Type != ACPI_TYPE_INTEGER) { |
344 | rv = AE_TYPE; | | 340 | rv = AE_TYPE; |
345 | goto out; | | 341 | goto out; |
346 | } | | 342 | } |
347 | | | 343 | |
348 | state.cs_latency = obj->Integer.Value; | | 344 | state.cs_latency = obj->Integer.Value; |
349 | | | 345 | |
350 | /* | | 346 | /* |
351 | * Power. | | 347 | * Power. |
352 | */ | | 348 | */ |
353 | obj = &elm->Package.Elements[3]; | | 349 | obj = &elm->Package.Elements[3]; |
354 | | | 350 | |
355 | if (obj->Type != ACPI_TYPE_INTEGER) { | | 351 | if (obj->Type != ACPI_TYPE_INTEGER) { |
356 | rv = AE_TYPE; | | 352 | rv = AE_TYPE; |
357 | goto out; | | 353 | goto out; |
358 | } | | 354 | } |
359 | | | 355 | |
360 | state.cs_power = obj->Integer.Value; | | 356 | state.cs_power = obj->Integer.Value; |
361 | | | 357 | |
362 | /* | | 358 | /* |
363 | * Register. | | 359 | * Register. |
364 | */ | | 360 | */ |
365 | obj = &elm->Package.Elements[0]; | | 361 | obj = &elm->Package.Elements[0]; |
366 | | | 362 | |
367 | if (obj->Type != ACPI_TYPE_BUFFER) { | | 363 | if (obj->Type != ACPI_TYPE_BUFFER) { |
368 | rv = AE_TYPE; | | 364 | rv = AE_TYPE; |
369 | goto out; | | 365 | goto out; |
370 | } | | 366 | } |
371 | | | 367 | |
372 | CTASSERT(sizeof(struct acpicpu_reg) == 15); | | 368 | CTASSERT(sizeof(struct acpicpu_reg) == 15); |
373 | | | 369 | |
374 | if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) { | | 370 | if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) { |
375 | rv = AE_LIMIT; | | 371 | rv = AE_LIMIT; |
376 | goto out; | | 372 | goto out; |
377 | } | | 373 | } |
378 | | | 374 | |
379 | reg = (struct acpicpu_reg *)obj->Buffer.Pointer; | | 375 | reg = (struct acpicpu_reg *)obj->Buffer.Pointer; |
380 | | | 376 | |
381 | switch (reg->reg_spaceid) { | | 377 | switch (reg->reg_spaceid) { |
382 | | | 378 | |
383 | case ACPI_ADR_SPACE_SYSTEM_IO: | | 379 | case ACPI_ADR_SPACE_SYSTEM_IO: |
384 | state.cs_method = ACPICPU_C_STATE_SYSIO; | | 380 | state.cs_method = ACPICPU_C_STATE_SYSIO; |
385 | | | 381 | |
386 | if (reg->reg_addr == 0) { | | 382 | if (reg->reg_addr == 0) { |
387 | rv = AE_AML_ILLEGAL_ADDRESS; | | 383 | rv = AE_AML_ILLEGAL_ADDRESS; |
388 | goto out; | | 384 | goto out; |
389 | } | | 385 | } |
390 | | | 386 | |
391 | if (reg->reg_bitwidth != 8) { | | 387 | if (reg->reg_bitwidth != 8) { |
392 | rv = AE_AML_BAD_RESOURCE_LENGTH; | | 388 | rv = AE_AML_BAD_RESOURCE_LENGTH; |
393 | goto out; | | 389 | goto out; |
394 | } | | 390 | } |
395 | | | 391 | |
396 | /* | | 392 | /* |
397 | * Check only that the address is in the mapped space. | | 393 | * Check only that the address is in the mapped space. |
398 | * Systems are allowed to change it when operating | | 394 | * Systems are allowed to change it when operating |
399 | * with _CST (see ACPI 4.0, pp. 94-95). For instance, | | 395 | * with _CST (see ACPI 4.0, pp. 94-95). For instance, |
400 | * the offset of P_LVL3 may change depending on whether | | 396 | * the offset of P_LVL3 may change depending on whether |
401 | * acpiacad(4) is connected or disconnected. | | 397 | * acpiacad(4) is connected or disconnected. |
402 | */ | | 398 | */ |
403 | if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) { | | 399 | if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) { |
404 | rv = AE_BAD_ADDRESS; | | 400 | rv = AE_BAD_ADDRESS; |
405 | goto out; | | 401 | goto out; |
406 | } | | 402 | } |
407 | | | 403 | |
408 | state.cs_addr = reg->reg_addr; | | 404 | state.cs_addr = reg->reg_addr; |
409 | break; | | 405 | break; |
410 | | | 406 | |
411 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | | 407 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
412 | state.cs_method = ACPICPU_C_STATE_FFH; | | 408 | state.cs_method = ACPICPU_C_STATE_FFH; |
413 | | | 409 | |
414 | switch (type) { | | 410 | switch (type) { |
415 | | | 411 | |
416 | case ACPI_STATE_C1: | | 412 | case ACPI_STATE_C1: |
417 | | | 413 | |
418 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) | | 414 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) |
419 | state.cs_method = ACPICPU_C_STATE_HALT; | | 415 | state.cs_method = ACPICPU_C_STATE_HALT; |
420 | | | 416 | |
421 | break; | | 417 | break; |
422 | | | 418 | |
423 | default: | | 419 | default: |
424 | | | 420 | |
425 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) { | | 421 | if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) { |
426 | rv = AE_AML_BAD_RESOURCE_VALUE; | | 422 | rv = AE_SUPPORT; |
427 | goto out; | | 423 | goto out; |
428 | } | | 424 | } |
429 | } | | 425 | } |
430 | | | 426 | |
431 | if (sc->sc_cap != 0) { | | 427 | if (sc->sc_cap != 0) { |
432 | | | 428 | |
433 | /* | | 429 | /* |
434 | * The _CST FFH GAS encoding may contain | | 430 | * The _CST FFH GAS encoding may contain |
435 | * additional hints on Intel processors. | | 431 | * additional hints on Intel processors. |
436 | * Use these to determine whether we can | | 432 | * Use these to determine whether we can |
437 | * avoid the bus master activity check. | | 433 | * avoid the bus master activity check. |
438 | */ | | 434 | */ |
439 | if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0) | | 435 | if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0) |
440 | state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS; | | 436 | state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS; |
441 | } | | 437 | } |
442 | | | 438 | |
443 | break; | | 439 | break; |
444 | | | 440 | |
445 | default: | | 441 | default: |
446 | rv = AE_AML_INVALID_SPACE_ID; | | 442 | rv = AE_AML_INVALID_SPACE_ID; |
447 | goto out; | | 443 | goto out; |
448 | } | | 444 | } |
449 | | | 445 | |
450 | if (cs[type].cs_method != 0) { | | 446 | if (cs[type].cs_method != 0) { |
451 | rv = AE_ALREADY_EXISTS; | | 447 | rv = AE_ALREADY_EXISTS; |
452 | goto out; | | 448 | goto out; |
453 | } | | 449 | } |
454 | | | 450 | |
455 | cs[type].cs_addr = state.cs_addr; | | 451 | cs[type].cs_addr = state.cs_addr; |
456 | cs[type].cs_power = state.cs_power; | | 452 | cs[type].cs_power = state.cs_power; |
457 | cs[type].cs_flags = state.cs_flags; | | 453 | cs[type].cs_flags = state.cs_flags; |
458 | cs[type].cs_method = state.cs_method; | | 454 | cs[type].cs_method = state.cs_method; |
459 | cs[type].cs_latency = state.cs_latency; | | 455 | cs[type].cs_latency = state.cs_latency; |
460 | | | 456 | |
461 | out: | | 457 | out: |
462 | if (ACPI_FAILURE(rv)) | | 458 | if (ACPI_FAILURE(rv)) |
463 | aprint_verbose_dev(sc->sc_dev, | | 459 | aprint_debug_dev(sc->sc_dev, "invalid " |
464 | "invalid _CST: %s\n", AcpiFormatException(rv)); | | 460 | "_CST: %s\n", AcpiFormatException(rv)); |
465 | | | 461 | |
466 | return rv; | | 462 | return rv; |
467 | } | | 463 | } |
468 | | | 464 | |
469 | static void | | 465 | static void |
470 | acpicpu_cstate_cst_bios(void) | | 466 | acpicpu_cstate_cst_bios(void) |
471 | { | | 467 | { |
472 | const uint8_t val = AcpiGbl_FADT.CstControl; | | 468 | const uint8_t val = AcpiGbl_FADT.CstControl; |
473 | const uint32_t addr = AcpiGbl_FADT.SmiCommand; | | 469 | const uint32_t addr = AcpiGbl_FADT.SmiCommand; |
474 | | | 470 | |
475 | if (addr == 0) | | 471 | if (addr == 0) |
476 | return; | | 472 | return; |
477 | | | 473 | |
478 | (void)AcpiOsWritePort(addr, val, 8); | | 474 | (void)AcpiOsWritePort(addr, val, 8); |
479 | } | | 475 | } |
480 | | | 476 | |
481 | static void | | 477 | static void |
482 | acpicpu_cstate_fadt(struct acpicpu_softc *sc) | | 478 | acpicpu_cstate_fadt(struct acpicpu_softc *sc) |
483 | { | | 479 | { |
484 | struct acpicpu_cstate *cs = sc->sc_cstate; | | 480 | struct acpicpu_cstate *cs = sc->sc_cstate; |
485 | | | 481 | |
486 | (void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT); | | 482 | (void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT); |
487 | | | 483 | |
488 | /* | | 484 | /* |
489 | * All x86 processors should support C1 (a.k.a. HALT). | | 485 | * All x86 processors should support C1 (a.k.a. HALT). |
490 | */ | | 486 | */ |
491 | if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0) | | 487 | if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0) |
492 | cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT; | | 488 | cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT; |
493 | | | 489 | |
494 | if ((acpicpu_md_cpus_running() > 1) && | | 490 | if ((acpicpu_md_cpus_running() > 1) && |
495 | (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0) | | 491 | (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0) |
496 | return; | | 492 | return; |
497 | | | 493 | |
498 | cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO; | | 494 | cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO; |
499 | cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO; | | 495 | cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO; |
500 | | | 496 | |
501 | cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency; | | 497 | cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency; |
502 | cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency; | | 498 | cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency; |
503 | | | 499 | |
504 | cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4; | | 500 | cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4; |
505 | cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5; | | 501 | cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5; |
506 | | | 502 | |
507 | /* | | 503 | /* |
508 | * The P_BLK length should always be 6. If it | | 504 | * The P_BLK length should always be 6. If it |
509 | * is not, reduce functionality accordingly. | | 505 | * is not, reduce functionality accordingly. |
510 | * Sanity check also FADT's latency levels. | | 506 | * Sanity check also FADT's latency levels. |
511 | */ | | 507 | */ |
512 | if (sc->sc_object.ao_pblklen < 5) | | 508 | if (sc->sc_object.ao_pblklen < 5) |
513 | cs[ACPI_STATE_C2].cs_method = 0; | | 509 | cs[ACPI_STATE_C2].cs_method = 0; |
514 | | | 510 | |
515 | if (sc->sc_object.ao_pblklen < 6) | | 511 | if (sc->sc_object.ao_pblklen < 6) |
516 | cs[ACPI_STATE_C3].cs_method = 0; | | 512 | cs[ACPI_STATE_C3].cs_method = 0; |
517 | | | 513 | |
518 | CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100); | | 514 | CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100); |
519 | CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000); | | 515 | CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000); |
520 | | | 516 | |
521 | if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX) | | 517 | if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX) |
522 | cs[ACPI_STATE_C2].cs_method = 0; | | 518 | cs[ACPI_STATE_C2].cs_method = 0; |
523 | | | 519 | |
524 | if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX) | | 520 | if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX) |
525 | cs[ACPI_STATE_C3].cs_method = 0; | | 521 | cs[ACPI_STATE_C3].cs_method = 0; |
526 | } | | 522 | } |
527 | | | 523 | |
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
	struct pci_attach_args pa;

	/*
	 * Apply FADT- and hardware-specific quirks that restrict the
	 * usable C-states, recorded as flag bits in sc->sc_flags.
	 *
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
	else {
		/*
		 * Disable C3 entirely if WBINVD is not present.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
			sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		else {
			/*
			 * If WBINVD is present and functioning properly,
			 * flush all processor caches before entering C3.
			 *
			 * NOTE(review): the branch below appears inverted
			 * relative to this comment -- C3 is disabled when
			 * ACPI_FADT_WBINVD_FLUSH *is* set, and the bus
			 * master flag is cleared when it is not. Verify
			 * against ACPI 4.0 section 5.2.9.3 (FADT flags).
			 */
			if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
				sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
			else
				sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		}
	}

	/*
	 * There are several erratums for PIIX4.
	 * (Disable C3 when a PIIX4-class bridge is present.)
	 */
	if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_NOC3;

	/* With C3 quirked out, zero the method so the state is never used. */
	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
}
569 | | | 565 | |
570 | static int | | 566 | static int |
571 | acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa) | | 567 | acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa) |
572 | { | | 568 | { |
573 | | | 569 | |
574 | if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) | | 570 | if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) |
575 | return 0; | | 571 | return 0; |
576 | | | 572 | |
577 | if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA || | | 573 | if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA || |
578 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC) | | 574 | PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC) |
579 | return 1; | | 575 | return 1; |
580 | | | 576 | |
581 | return 0; | | 577 | return 0; |
582 | } | | 578 | } |
583 | | | 579 | |
584 | static int | | 580 | static int |
585 | acpicpu_cstate_latency(struct acpicpu_softc *sc) | | 581 | acpicpu_cstate_latency(struct acpicpu_softc *sc) |
586 | { | | 582 | { |
587 | static const uint32_t cs_factor = 3; | | 583 | static const uint32_t cs_factor = 3; |
588 | struct acpicpu_cstate *cs; | | 584 | struct acpicpu_cstate *cs; |
589 | int i; | | 585 | int i; |
590 | | | 586 | |
591 | for (i = cs_state_max; i > 0; i--) { | | 587 | for (i = cs_state_max; i > 0; i--) { |
592 | | | 588 | |
593 | cs = &sc->sc_cstate[i]; | | 589 | cs = &sc->sc_cstate[i]; |
594 | | | 590 | |
595 | if (__predict_false(cs->cs_method == 0)) | | 591 | if (__predict_false(cs->cs_method == 0)) |
596 | continue; | | 592 | continue; |
597 | | | 593 | |
598 | /* | | 594 | /* |
599 | * Choose a state if we have previously slept | | 595 | * Choose a state if we have previously slept |
600 | * longer than the worst case latency of the | | 596 | * longer than the worst case latency of the |
601 | * state times an arbitrary multiplier. | | 597 | * state times an arbitrary multiplier. |
602 | */ | | 598 | */ |
603 | if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor) | | 599 | if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor) |
604 | return i; | | 600 | return i; |
605 | } | | 601 | } |
606 | | | 602 | |
607 | return ACPI_STATE_C1; | | 603 | return ACPI_STATE_C1; |
608 | } | | 604 | } |
609 | | | 605 | |
610 | /* | | 606 | /* |
611 | * The main idle loop. | | 607 | * The main idle loop. |
612 | */ | | 608 | */ |
/*
 * The main idle loop.
 *
 * Called from the MD cpu_idle() hook. Selects a C-state based on the
 * previous sleep duration and enters it, falling back to a plain HALT
 * whenever the softc is unavailable or C-states are not enabled.
 * Interrupts are disabled here and re-enabled by the entry path
 * (acpicpu_cstate_idle_enter / acpicpu_md_idle_enter).
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	/* Bail out early if a reschedule is already pending. */
	if (__predict_false(ci->ci_want_resched) != 0)
		return;

	acpi_md_OsDisableInterrupt();

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);
	KASSERT(ci->ci_ilevel == IPL_NONE);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto halt;

	/* Not past cold boot yet: C-state data may be incomplete. */
	if (__predict_false(sc->sc_cold != false))
		goto halt;

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_C) == 0))
		goto halt;

	/*
	 * The mutex is only probed: failure to take it means the
	 * softc is busy (e.g. being reconfigured), so just halt.
	 */
	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto halt;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		/* Demote one level if bus master activity was seen. */
		if (acpicpu_cstate_bm_check() != false)
			state--;

		/* The demoted state may be disabled; fall back to C1. */
		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	/* C1 and C2 need none of the C3 cache/arbitration dance below. */
	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

halt:
	/* Fallback: plain HALT in C1; re-enables interrupts. */
	acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
}
706 | | | 702 | |
707 | static void | | 703 | static void |
708 | acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state) | | 704 | acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state) |
709 | { | | 705 | { |
710 | struct acpicpu_cstate *cs = &sc->sc_cstate[state]; | | 706 | struct acpicpu_cstate *cs = &sc->sc_cstate[state]; |
711 | uint32_t end, start, val; | | 707 | uint32_t end, start, val; |
712 | | | 708 | |
713 | start = acpitimer_read_safe(NULL); | | 709 | start = acpitimer_read_safe(NULL); |
714 | | | 710 | |
715 | switch (cs->cs_method) { | | 711 | switch (cs->cs_method) { |
716 | | | 712 | |
717 | case ACPICPU_C_STATE_FFH: | | 713 | case ACPICPU_C_STATE_FFH: |
718 | case ACPICPU_C_STATE_HALT: | | 714 | case ACPICPU_C_STATE_HALT: |
719 | acpicpu_md_idle_enter(cs->cs_method, state); | | 715 | acpicpu_md_idle_enter(cs->cs_method, state); |
720 | break; | | 716 | break; |
721 | | | 717 | |
722 | case ACPICPU_C_STATE_SYSIO: | | 718 | case ACPICPU_C_STATE_SYSIO: |
723 | (void)AcpiOsReadPort(cs->cs_addr, &val, 8); | | 719 | (void)AcpiOsReadPort(cs->cs_addr, &val, 8); |
724 | break; | | 720 | break; |
725 | | | 721 | |
726 | default: | | 722 | default: |
727 | acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1); | | 723 | acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1); |
728 | break; | | 724 | break; |
729 | } | | 725 | } |
730 | | | 726 | |
731 | cs->cs_stat++; | | 727 | cs->cs_stat++; |
732 | | | 728 | |
733 | end = acpitimer_read_safe(NULL); | | 729 | end = acpitimer_read_safe(NULL); |
734 | sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000; | | 730 | sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000; |
735 | | | 731 | |
736 | acpi_md_OsEnableInterrupt(); | | 732 | acpi_md_OsEnableInterrupt(); |
737 | } | | 733 | } |
738 | | | 734 | |
739 | static bool | | 735 | static bool |
740 | acpicpu_cstate_bm_check(void) | | 736 | acpicpu_cstate_bm_check(void) |
741 | { | | 737 | { |
742 | uint32_t val = 0; | | 738 | uint32_t val = 0; |
743 | ACPI_STATUS rv; | | 739 | ACPI_STATUS rv; |
744 | | | 740 | |
745 | rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val); | | 741 | rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val); |
746 | | | 742 | |
747 | if (ACPI_FAILURE(rv) || val == 0) | | 743 | if (ACPI_FAILURE(rv) || val == 0) |
748 | return false; | | 744 | return false; |
749 | | | 745 | |
750 | (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); | | 746 | (void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); |
751 | | | 747 | |
752 | return true; | | 748 | return true; |
753 | } | | 749 | } |