Mon Aug 9 05:00:24 2010 UTC
Downgrade the currently supported maximum C-state to C1. There appear to be
timer-related interrupt issues also in C2. With C1 it is guaranteed that
acpicpu(4) will not cause any slowdowns due to a stalled local APIC timer.


(jruoho)
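
To make the effect of the cap concrete, below is a minimal stand-alone sketch, not the driver code itself, of the selection heuristic in acpicpu_cstate_latency() from the diff: states deeper than cs_state_max are never considered, and a state is chosen only when the previous sleep exceeded its worst-case latency times cs_factor. The latency table and the sleep lengths are made-up values for illustration; the real driver additionally skips states whose method is unset.

#include <stdio.h>

#define STATE_C1	1
#define STATE_C2	2
#define STATE_C3	3

static const unsigned cs_factor = 3;	/* same multiplier as in the driver */

/* Worst-case exit latencies in microseconds (illustrative values). */
static const unsigned latency[] = { 0, 1, 100, 1000 };

static int
pick_state(int cs_state_max, unsigned slept_us)
{
	int i;

	/* Walk from the deepest permitted state towards C1. */
	for (i = cs_state_max; i > 0; i--) {
		if (slept_us > latency[i] * cs_factor)
			return i;
	}

	return STATE_C1;
}

int
main(void)
{
	/* With the old cap a long sleep could select C2... */
	printf("cap C2, slept 1000 us -> C%d\n", pick_state(STATE_C2, 1000));

	/* ...with the new cap C1 is the only outcome. */
	printf("cap C1, slept 1000 us -> C%d\n", pick_state(STATE_C1, 1000));

	return 0;
}
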
diff -r1.16 -r1.17 src/sys/dev/acpi/acpi_cpu_cstate.c

--- src/sys/dev/acpi/acpi_cpu_cstate.c 2010/08/08 18:25:06 1.16
+++ src/sys/dev/acpi/acpi_cpu_cstate.c 2010/08/09 05:00:24 1.17
@@ -1,749 +1,751 @@
-/*	$NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $ */
+/*	$NetBSD: acpi_cpu_cstate.c,v 1.17 2010/08/09 05:00:24 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.17 2010/08/09 05:00:24 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/device.h>
 #include <sys/kernel.h>
 #include <sys/once.h>
 #include <sys/mutex.h>
 #include <sys/timetc.h>
 
 #include <dev/pci/pcivar.h>
 #include <dev/pci/pcidevs.h>
 
 #include <dev/acpi/acpireg.h>
 #include <dev/acpi/acpivar.h>
 #include <dev/acpi/acpi_cpu.h>
 #include <dev/acpi/acpi_timer.h>
 
 #include <machine/acpi_machdep.h>
 
 #define _COMPONENT	ACPI_BUS_COMPONENT
 ACPI_MODULE_NAME	("acpi_cpu_cstate")
 
 static void	acpicpu_cstate_attach_print(struct acpicpu_softc *);
 static ACPI_STATUS	acpicpu_cstate_cst(struct acpicpu_softc *);
 static ACPI_STATUS	acpicpu_cstate_cst_add(struct acpicpu_softc *,
 			    ACPI_OBJECT *);
 static void	acpicpu_cstate_cst_bios(void);
 static void	acpicpu_cstate_fadt(struct acpicpu_softc *);
 static void	acpicpu_cstate_quirks(struct acpicpu_softc *);
 static int	acpicpu_cstate_quirks_piix4(struct pci_attach_args *);
 static int	acpicpu_cstate_latency(struct acpicpu_softc *);
 static bool	acpicpu_cstate_bm_check(void);
 static void	acpicpu_cstate_idle_enter(struct acpicpu_softc *,int);
 
 extern struct acpicpu_softc **acpicpu_sc;
 
 /*
- * XXX: The local APIC timer (as well as TSC) is typically
- *	stopped in C3. For now, we cannot but disable C3.
+ * XXX: The local APIC timer (as well as TSC) is typically stopped in C3.
+ *	For now, we cannot but disable C3. But there appears to be timer-
+ *	related interrupt issues also in C2. The only entirely safe option
+ *	at the moment is to use C1.
  */
 #ifdef ACPICPU_ENABLE_C3
 static int cs_state_max = ACPI_STATE_C3;
 #else
-static int cs_state_max = ACPI_STATE_C2;
+static int cs_state_max = ACPI_STATE_C1;
 #endif
 
 void
 acpicpu_cstate_attach(device_t self)
 {
 	struct acpicpu_softc *sc = device_private(self);
 	ACPI_STATUS rv;
 
 	/*
 	 * Either use the preferred _CST or resort to FADT.
 	 */
 	rv = acpicpu_cstate_cst(sc);
 
 	switch (rv) {
 
 	case AE_OK:
 		sc->sc_flags |= ACPICPU_FLAG_C_CST;
 		acpicpu_cstate_cst_bios();
 		break;
 
 	default:
 		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
 		acpicpu_cstate_fadt(sc);
 		break;
 	}
 
 	acpicpu_cstate_quirks(sc);
 	acpicpu_cstate_attach_print(sc);
 }
 
 void
 acpicpu_cstate_attach_print(struct acpicpu_softc *sc)
 {
 	struct acpicpu_cstate *cs;
 	const char *str;
 	int i;
 
 	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {
 
 		cs = &sc->sc_cstate[i];
 
 		if (cs->cs_method == 0)
 			continue;
 
 		switch (cs->cs_method) {
 
 		case ACPICPU_C_STATE_HALT:
 			str = "HALT";
 			break;
 
 		case ACPICPU_C_STATE_FFH:
 			str = "FFH";
 			break;
 
 		case ACPICPU_C_STATE_SYSIO:
 			str = "SYSIO";
 			break;
 
 		default:
 			panic("NOTREACHED");
 		}
 
 		aprint_debug_dev(sc->sc_dev, "C%d: %5s, "
 		    "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n",
 		    i, str, cs->cs_latency, cs->cs_power,
 		    (uint32_t)cs->cs_addr, cs->cs_flags);
 	}
 }
 
 int
 acpicpu_cstate_detach(device_t self)
 {
 	struct acpicpu_softc *sc = device_private(self);
 	static ONCE_DECL(once_detach);
 	int rv;
 
 	rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop);
 
 	if (rv != 0)
 		return rv;
 
 	sc->sc_flags &= ~ACPICPU_FLAG_C;
 
 	return 0;
 }
 
 int
 acpicpu_cstate_start(device_t self)
 {
 	struct acpicpu_softc *sc = device_private(self);
 	static ONCE_DECL(once_start);
 	static ONCE_DECL(once_save);
 	int rv;
 
 	/*
 	 * Save the existing idle-mechanism and claim the idle_loop(9).
 	 * This should be called after all ACPI CPUs have been attached.
 	 */
 	rv = RUN_ONCE(&once_save, acpicpu_md_idle_init);
 
 	if (rv != 0)
 		return rv;
 
 	rv = RUN_ONCE(&once_start, acpicpu_md_idle_start);
 
 	if (rv == 0)
 		sc->sc_flags |= ACPICPU_FLAG_C;
 
 	return rv;
 }
 
 bool
 acpicpu_cstate_suspend(device_t self)
 {
 
 	return true;
 }
 
 bool
 acpicpu_cstate_resume(device_t self)
 {
 	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
 	struct acpicpu_softc *sc = device_private(self);
 
 	if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0)
 		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
 
 	return true;
 }
 
 void
 acpicpu_cstate_callback(void *aux)
 {
 	struct acpicpu_softc *sc;
 	device_t self = aux;
 
 	sc = device_private(self);
 
 	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) {
 		KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0);
 		return;
 	}
 
 	mutex_enter(&sc->sc_mtx);
 	(void)acpicpu_cstate_cst(sc);
 	mutex_exit(&sc->sc_mtx);
 }
 
 static ACPI_STATUS
 acpicpu_cstate_cst(struct acpicpu_softc *sc)
 {
 	ACPI_OBJECT *elm, *obj;
 	ACPI_BUFFER buf;
 	ACPI_STATUS rv;
 	uint32_t i, n;
 	uint8_t count;
 
 	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);
 
 	if (ACPI_FAILURE(rv))
 		return rv;
 
 	obj = buf.Pointer;
 
 	if (obj->Type != ACPI_TYPE_PACKAGE) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	if (obj->Package.Count < 2) {
 		rv = AE_LIMIT;
 		goto out;
 	}
 
 	elm = obj->Package.Elements;
 
 	if (elm[0].Type != ACPI_TYPE_INTEGER) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	n = elm[0].Integer.Value;
 
 	if (n != obj->Package.Count - 1) {
 		rv = AE_BAD_VALUE;
 		goto out;
 	}
 
 	if (n > ACPI_C_STATES_MAX) {
 		rv = AE_LIMIT;
 		goto out;
 	}
 
 	(void)memset(sc->sc_cstate, 0,
 	    sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT);
 
 	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
 	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);
 
 	for (count = 0, i = 1; i <= n; i++) {
 
 		elm = &obj->Package.Elements[i];
 		rv = acpicpu_cstate_cst_add(sc, elm);
 
 		if (ACPI_SUCCESS(rv))
 			count++;
 	}
 
 	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;
 
 out:
 	if (buf.Pointer != NULL)
 		ACPI_FREE(buf.Pointer);
 
 	return rv;
 }
 
 static ACPI_STATUS
 acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
 {
 	const struct acpicpu_object *ao = &sc->sc_object;
 	struct acpicpu_cstate *cs = sc->sc_cstate;
 	struct acpicpu_cstate state;
 	struct acpicpu_reg *reg;
 	ACPI_STATUS rv = AE_OK;
 	ACPI_OBJECT *obj;
 	uint32_t type;
 
 	(void)memset(&state, 0, sizeof(*cs));
 
 	state.cs_flags = ACPICPU_FLAG_C_BM_STS;
 
 	if (elm->Type != ACPI_TYPE_PACKAGE) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	if (elm->Package.Count != 4) {
 		rv = AE_LIMIT;
 		goto out;
 	}
 
 	/*
 	 * Type.
 	 */
 	obj = &elm->Package.Elements[1];
 
 	if (obj->Type != ACPI_TYPE_INTEGER) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	type = obj->Integer.Value;
 
 	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	/*
 	 * Latency.
 	 */
 	obj = &elm->Package.Elements[2];
 
 	if (obj->Type != ACPI_TYPE_INTEGER) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	state.cs_latency = obj->Integer.Value;
 
 	/*
 	 * Power.
 	 */
 	obj = &elm->Package.Elements[3];
 
 	if (obj->Type != ACPI_TYPE_INTEGER) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	state.cs_power = obj->Integer.Value;
 
 	/*
 	 * Register.
 	 */
 	obj = &elm->Package.Elements[0];
 
 	if (obj->Type != ACPI_TYPE_BUFFER) {
 		rv = AE_TYPE;
 		goto out;
 	}
 
 	CTASSERT(sizeof(struct acpicpu_reg) == 15);
 
 	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
 		rv = AE_LIMIT;
 		goto out;
 	}
 
 	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;
 
 	switch (reg->reg_spaceid) {
 
 	case ACPI_ADR_SPACE_SYSTEM_IO:
 		state.cs_method = ACPICPU_C_STATE_SYSIO;
 
 		if (reg->reg_addr == 0) {
 			rv = AE_AML_ILLEGAL_ADDRESS;
 			goto out;
 		}
 
 		if (reg->reg_bitwidth != 8) {
 			rv = AE_AML_BAD_RESOURCE_LENGTH;
 			goto out;
 		}
 
 		/*
 		 * Check only that the address is in the mapped space.
 		 * Systems are allowed to change it when operating
 		 * with _CST (see ACPI 4.0, pp. 94-95). For instance,
 		 * the offset of P_LVL3 may change depending on whether
 		 * acpiacad(4) is connected or disconnected.
 		 */
 		if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) {
 			rv = AE_BAD_ADDRESS;
 			goto out;
 		}
 
 		state.cs_addr = reg->reg_addr;
 		break;
 
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 		state.cs_method = ACPICPU_C_STATE_FFH;
 
 		switch (type) {
 
 		case ACPI_STATE_C1:
 
 			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
 				state.cs_method = ACPICPU_C_STATE_HALT;
 
 			break;
 
 		default:
 
 			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
 				rv = AE_SUPPORT;
 				goto out;
 			}
 		}
 
 		if (sc->sc_cap != 0) {
 
 			/*
 			 * The _CST FFH GAS encoding may contain
 			 * additional hints on Intel processors.
 			 * Use these to determine whether we can
 			 * avoid the bus master activity check.
 			 */
 			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
 				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
 		}
 
 		break;
 
 	default:
 		rv = AE_AML_INVALID_SPACE_ID;
 		goto out;
 	}
 
 	if (cs[type].cs_method != 0) {
 		rv = AE_ALREADY_EXISTS;
 		goto out;
 	}
 
 	cs[type].cs_addr = state.cs_addr;
 	cs[type].cs_power = state.cs_power;
 	cs[type].cs_flags = state.cs_flags;
 	cs[type].cs_method = state.cs_method;
 	cs[type].cs_latency = state.cs_latency;
 
 out:
 	if (ACPI_FAILURE(rv))
 		aprint_debug_dev(sc->sc_dev, "invalid "
 		    "_CST: %s\n", AcpiFormatException(rv));
 
 	return rv;
 }
 
 static void
 acpicpu_cstate_cst_bios(void)
 {
 	const uint8_t val = AcpiGbl_FADT.CstControl;
 	const uint32_t addr = AcpiGbl_FADT.SmiCommand;
 
 	if (addr == 0)
 		return;
 
 	(void)AcpiOsWritePort(addr, val, 8);
 }
 
 static void
 acpicpu_cstate_fadt(struct acpicpu_softc *sc)
 {
 	struct acpicpu_cstate *cs = sc->sc_cstate;
 
 	(void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT);
 
 	/*
 	 * All x86 processors should support C1 (a.k.a. HALT).
 	 */
 	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
 		cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;
 
 	if ((acpicpu_md_cpus_running() > 1) &&
 	    (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
 		return;
 
 	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
 	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;
 
 	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
 	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;
 
 	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
 	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;
 
 	/*
 	 * The P_BLK length should always be 6. If it
 	 * is not, reduce functionality accordingly.
 	 * Sanity check also FADT's latency levels.
 	 */
 	if (sc->sc_object.ao_pblklen < 5)
 		cs[ACPI_STATE_C2].cs_method = 0;
 
 	if (sc->sc_object.ao_pblklen < 6)
 		cs[ACPI_STATE_C3].cs_method = 0;
 
 	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
 	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);
 
 	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
 		cs[ACPI_STATE_C2].cs_method = 0;
 
 	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
 		cs[ACPI_STATE_C3].cs_method = 0;
 }
 
 static void
 acpicpu_cstate_quirks(struct acpicpu_softc *sc)
 {
 	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
 	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
 	struct pci_attach_args pa;
 
 	/*
 	 * Check bus master arbitration. If ARB_DIS
 	 * is not available, processor caches must be
 	 * flushed before C3 (ACPI 4.0, section 8.2).
 	 */
 	if (reg != 0 && len != 0)
 		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
 	else {
 		/*
 		 * Disable C3 entirely if WBINVD is not present.
 		 */
 		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
 			sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
 		else {
 			/*
 			 * If WBINVD is present and functioning properly,
 			 * flush all processor caches before entering C3.
 			 */
 			if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
 				sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
 			else
 				sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
 		}
 	}
 
 	/*
 	 * There are several erratums for PIIX4.
 	 */
 	if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
 		sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
 
 	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
 		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
 }
 
 static int
 acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa)
 {
 
 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
 		return 0;
 
 	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
 		return 1;
 
 	return 0;
 }
 
 static int
 acpicpu_cstate_latency(struct acpicpu_softc *sc)
 {
 	static const uint32_t cs_factor = 3;
 	struct acpicpu_cstate *cs;
 	int i;
 
 	for (i = cs_state_max; i > 0; i--) {
 
 		cs = &sc->sc_cstate[i];
 
 		if (__predict_false(cs->cs_method == 0))
 			continue;
 
 		/*
 		 * Choose a state if we have previously slept
 		 * longer than the worst case latency of the
 		 * state times an arbitrary multiplier.
 		 */
 		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
 			return i;
 	}
 
 	return ACPI_STATE_C1;
 }
 
 /*
  * The main idle loop.
  */
 void
 acpicpu_cstate_idle(void)
 {
 	struct cpu_info *ci = curcpu();
 	struct acpicpu_softc *sc;
 	int state;
 
 	if (__predict_false(ci->ci_want_resched) != 0)
 		return;
 
 	acpi_md_OsDisableInterrupt();
 
 	KASSERT(acpicpu_sc != NULL);
 	KASSERT(ci->ci_acpiid < maxcpus);
 	KASSERT(ci->ci_ilevel == IPL_NONE);
 
 	sc = acpicpu_sc[ci->ci_acpiid];
 
 	if (__predict_false(sc == NULL))
 		goto halt;
 
 	if (__predict_false(sc->sc_cold != false))
 		goto halt;
 
 	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_C) == 0))
 		goto halt;
 
 	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
 		goto halt;
 
 	mutex_exit(&sc->sc_mtx);
 	state = acpicpu_cstate_latency(sc);
 
 	/*
 	 * Check for bus master activity. Note that particularly usb(4)
 	 * causes high activity, which may prevent the use of C3 states.
 	 */
 	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {
 
 		if (acpicpu_cstate_bm_check() != false)
 			state--;
 
 		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
 			state = ACPI_STATE_C1;
 	}
 
 	KASSERT(state != ACPI_STATE_C0);
 
 	if (state != ACPI_STATE_C3) {
 		acpicpu_cstate_idle_enter(sc, state);
 		return;
 	}
 
 	/*
 	 * On all recent (Intel) CPUs caches are shared
 	 * by CPUs and bus master control is required to
 	 * keep these coherent while in C3. Flushing the
 	 * CPU caches is only the last resort.
 	 */
 	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
 		ACPI_FLUSH_CPU_CACHE();
 
 	/*
 	 * Allow the bus master to request that any given
 	 * CPU should return immediately to C0 from C3.
 	 */
 	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
 		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);
 
 	/*
 	 * It may be necessary to disable bus master arbitration
 	 * to ensure that bus master cycles do not occur while
 	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
 	 */
 	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
 		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);
 
 	acpicpu_cstate_idle_enter(sc, state);
 
 	/*
 	 * Disable bus master wake and re-enable the arbiter.
 	 */
 	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
 		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);
 
 	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
 		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
 
 	return;
 
 halt:
 	acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
 }
 
 static void
 acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
 {
 	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
 	uint32_t end, start, val;
 
 	start = acpitimer_read_safe(NULL);
 
 	switch (cs->cs_method) {
 
 	case ACPICPU_C_STATE_FFH:
 	case ACPICPU_C_STATE_HALT:
 		acpicpu_md_idle_enter(cs->cs_method, state);
 		break;
 
 	case ACPICPU_C_STATE_SYSIO:
 		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
 		break;
 
 	default:
 		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
 		break;
 	}
 
 	cs->cs_stat++;
 
 	end = acpitimer_read_safe(NULL);
 	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;
 
 	acpi_md_OsEnableInterrupt();
 }
 
 static bool
 acpicpu_cstate_bm_check(void)
 {
 	uint32_t val = 0;
 	ACPI_STATUS rv;
 
 	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);
 
 	if (ACPI_FAILURE(rv) || val == 0)
 		return false;
 
 	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);
 
 	return true;
 }
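
As a closing note on the idle path above: when the selected state carries the BM_STS flag, observed bus-master activity demotes it by one level, with a final fall-back to C1 if the demoted state has no method. Below is a hedged stand-alone sketch of just that demotion rule, not the driver itself; bm_active() is a stub standing in for the AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, ...) check, and the table values are invented.

#include <stdbool.h>
#include <stdio.h>

#define ACPI_STATE_C1	1
#define FLAG_C_BM_STS	0x01

struct cstate {
	int	cs_method;	/* 0 means the state is unusable */
	int	cs_flags;
};

/* Stub for the bus-master status check; pretend usb(4) keeps it busy. */
static bool
bm_active(void)
{
	return true;
}

static int
demote(const struct cstate *cs, int state)
{
	if ((cs[state].cs_flags & FLAG_C_BM_STS) != 0) {
		if (bm_active() != false)
			state--;
		if (cs[state].cs_method == 0)
			state = ACPI_STATE_C1;
	}

	return state;
}

int
main(void)
{
	/* C0 unused, C1 usable, C2 unusable, C3 usable but BM-sensitive. */
	const struct cstate cs[4] = {
		{ 0, 0 }, { 1, 0 }, { 0, 0 }, { 1, FLAG_C_BM_STS },
	};

	/* C3 requested, bus master busy, C2 unusable: fall back to C1. */
	printf("C%d\n", demote(cs, 3));

	return 0;
}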