Sun Aug 8 18:25:06 2010 UTC
Improve error and debug messages.


(jruoho)
diff -r1.15 -r1.16 src/sys/dev/acpi/acpi_cpu_cstate.c
diff -r1.3 -r1.4 src/sys/dev/acpi/acpi_cpu_pstate.c

cvs diff -r1.15 -r1.16 src/sys/dev/acpi/acpi_cpu_cstate.c

--- src/sys/dev/acpi/acpi_cpu_cstate.c 2010/08/08 16:58:42 1.15
+++ src/sys/dev/acpi/acpi_cpu_cstate.c 2010/08/08 18:25:06 1.16
@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_cstate.c,v 1.15 2010/08/08 16:58:42 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.15 2010/08/08 16:58:42 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -134,7 +134,7 @@
 			panic("NOTREACHED");
 		}
 
-		aprint_verbose_dev(sc->sc_dev, "C%d: %5s, "
+		aprint_debug_dev(sc->sc_dev, "C%d: %5s, "
 		    "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n",
 		    i, str, cs->cs_latency, cs->cs_power,
 		    (uint32_t)cs->cs_addr, cs->cs_flags);
@@ -286,10 +286,6 @@
 	if (buf.Pointer != NULL)
 		ACPI_FREE(buf.Pointer);
 
-	if (ACPI_FAILURE(rv))
-		aprint_error_dev(sc->sc_dev, "failed to evaluate "
-		    "_CST: %s\n", AcpiFormatException(rv));
-
 	return rv;
 }
 
@@ -423,7 +419,7 @@
 		default:
 
 			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
-				rv = AE_AML_BAD_RESOURCE_VALUE;
+				rv = AE_SUPPORT;
 				goto out;
 			}
 		}
@@ -458,10 +454,10 @@
 	cs[type].cs_method = state.cs_method;
 	cs[type].cs_latency = state.cs_latency;
 
 out:
 	if (ACPI_FAILURE(rv))
-		aprint_verbose_dev(sc->sc_dev,
-		    "invalid _CST: %s\n", AcpiFormatException(rv));
+		aprint_debug_dev(sc->sc_dev, "invalid "
+		    "_CST: %s\n", AcpiFormatException(rv));
 
 	return rv;
 }
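
The changes to this file demote the per-C-state attach table from aprint_verbose_dev(9) to aprint_debug_dev(9) and drop the error printout after a failed _CST evaluation (the caller, acpicpu_cstate_attach(), simply falls back to the FADT in that case). For reference, a minimal and purely illustrative sketch of the three autoconf(9) print levels involved; the helper name and message strings are invented for the example, only the aprint_*_dev(9) calls are real API:

    /* Illustrative sketch only, not code from this commit. */
    static void
    acpicpu_print_levels_sketch(device_t self)
    {

        /* Always printed, and counted as an attach-time error. */
        aprint_error_dev(self, "failed to evaluate _PCT\n");

        /* Printed only on a verbose boot (AB_VERBOSE, boot -v). */
        aprint_verbose_dev(self, "using FADT C-state information\n");

        /* Printed only on a debugging boot (AB_DEBUG, boot -x). */
        aprint_debug_dev(self, "C2: lat 100 us, pow 500 mW\n");
    }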

cvs diff -r1.3 -r1.4 src/sys/dev/acpi/acpi_cpu_pstate.c

--- src/sys/dev/acpi/acpi_cpu_pstate.c 2010/08/08 18:10:34 1.3
+++ src/sys/dev/acpi/acpi_cpu_pstate.c 2010/08/08 18:25:06 1.4
@@ -1,661 +1,661 @@ @@ -1,661 +1,661 @@
1/* $NetBSD: acpi_cpu_pstate.c,v 1.3 2010/08/08 18:10:34 jruoho Exp $ */ 1/* $NetBSD: acpi_cpu_pstate.c,v 1.4 2010/08/08 18:25:06 jruoho Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi> 4 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 10 *
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. 27 * SUCH DAMAGE.
28 */ 28 */
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.3 2010/08/08 18:10:34 jruoho Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.4 2010/08/08 18:25:06 jruoho Exp $");
31 31
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/kmem.h> 33#include <sys/kmem.h>
34#include <sys/once.h> 34#include <sys/once.h>
35 35
36#include <dev/acpi/acpireg.h> 36#include <dev/acpi/acpireg.h>
37#include <dev/acpi/acpivar.h> 37#include <dev/acpi/acpivar.h>
38#include <dev/acpi/acpi_cpu.h> 38#include <dev/acpi/acpi_cpu.h>
39 39
40#define _COMPONENT ACPI_BUS_COMPONENT 40#define _COMPONENT ACPI_BUS_COMPONENT
41ACPI_MODULE_NAME ("acpi_cpu_pstate") 41ACPI_MODULE_NAME ("acpi_cpu_pstate")
42 42
43static void acpicpu_pstate_attach_print(struct acpicpu_softc *); 43static void acpicpu_pstate_attach_print(struct acpicpu_softc *);
44static ACPI_STATUS acpicpu_pstate_pss(struct acpicpu_softc *sc); 44static ACPI_STATUS acpicpu_pstate_pss(struct acpicpu_softc *sc);
45static ACPI_STATUS acpicpu_pstate_pss_add(struct acpicpu_pstate *, 45static ACPI_STATUS acpicpu_pstate_pss_add(struct acpicpu_pstate *,
46 ACPI_OBJECT *); 46 ACPI_OBJECT *);
47static ACPI_STATUS acpicpu_pstate_pct(struct acpicpu_softc *); 47static ACPI_STATUS acpicpu_pstate_pct(struct acpicpu_softc *);
48static int acpicpu_pstate_max(struct acpicpu_softc *); 48static int acpicpu_pstate_max(struct acpicpu_softc *);
49static void acpicpu_pstate_change(struct acpicpu_softc *); 49static void acpicpu_pstate_change(struct acpicpu_softc *);
50static void acpicpu_pstate_bios(void); 50static void acpicpu_pstate_bios(void);
51 51
52void 52void
53acpicpu_pstate_attach(device_t self) 53acpicpu_pstate_attach(device_t self)
54{ 54{
55 struct acpicpu_softc *sc = device_private(self); 55 struct acpicpu_softc *sc = device_private(self);
56 const char *str; 56 const char *str;
57 ACPI_STATUS rv; 57 ACPI_STATUS rv;
58 58
59 rv = acpicpu_pstate_pss(sc); 59 rv = acpicpu_pstate_pss(sc);
60 60
61 if (ACPI_FAILURE(rv)) { 61 if (ACPI_FAILURE(rv)) {
62 str = "_PSS"; 62 str = "_PSS";
63 goto fail; 63 goto fail;
64 } 64 }
65 65
66 rv = acpicpu_pstate_pct(sc); 66 rv = acpicpu_pstate_pct(sc);
67 67
68 if (rv == AE_SUPPORT) { 68 if (rv == AE_SUPPORT) {
69 aprint_error_dev(sc->sc_dev, "CPU not supported\n"); 69 aprint_error_dev(sc->sc_dev, "CPU not supported\n");
70 return; 70 return;
71 } 71 }
72 72
73 if (ACPI_FAILURE(rv)) { 73 if (ACPI_FAILURE(rv)) {
74 str = "_PCT"; 74 str = "_PCT";
75 goto fail; 75 goto fail;
76 } 76 }
77 77
78 rv = acpicpu_pstate_max(sc); 78 rv = acpicpu_pstate_max(sc);
79 79
80 if (rv == 0) 80 if (rv == 0)
81 sc->sc_flags |= ACPICPU_FLAG_P_PPC; 81 sc->sc_flags |= ACPICPU_FLAG_P_PPC;
82 82
83 sc->sc_flags |= ACPICPU_FLAG_P; 83 sc->sc_flags |= ACPICPU_FLAG_P;
84 84
85 acpicpu_pstate_bios(); 85 acpicpu_pstate_bios();
86 acpicpu_pstate_attach_print(sc); 86 acpicpu_pstate_attach_print(sc);
87 87
88 return; 88 return;
89 89
90fail: 90fail:
91 aprint_error_dev(sc->sc_dev, "failed to evaluate " 91 aprint_error_dev(sc->sc_dev, "failed to evaluate "
92 "%s: %s\n", str, AcpiFormatException(rv)); 92 "%s: %s\n", str, AcpiFormatException(rv));
93} 93}
94 94
95static void 95static void
96acpicpu_pstate_attach_print(struct acpicpu_softc *sc) 96acpicpu_pstate_attach_print(struct acpicpu_softc *sc)
97{ 97{
98 const uint8_t method = sc->sc_pstate_control.reg_spaceid; 98 const uint8_t method = sc->sc_pstate_control.reg_spaceid;
99 struct acpicpu_pstate *ps; 99 struct acpicpu_pstate *ps;
100 const char *str; 100 const char *str;
101 uint32_t i; 101 uint32_t i;
102 102
103 str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "SYSIO"; 103 str = (method != ACPI_ADR_SPACE_SYSTEM_IO) ? "FFH" : "SYSIO";
104 104
105 for (i = 0; i < sc->sc_pstate_count; i++) { 105 for (i = 0; i < sc->sc_pstate_count; i++) {
106 106
107 ps = &sc->sc_pstate[i]; 107 ps = &sc->sc_pstate[i];
108 108
109 if (ps->ps_freq == 0) 109 if (ps->ps_freq == 0)
110 continue; 110 continue;
111 111
112 aprint_debug_dev(sc->sc_dev, "P%d: %5s, " 112 aprint_debug_dev(sc->sc_dev, "P%d: %5s, "
113 "lat %3u us, pow %5u mW, %4u MHz\n", 113 "lat %3u us, pow %5u mW, %4u MHz\n",
114 i, str, ps->ps_latency, ps->ps_power, ps->ps_freq); 114 i, str, ps->ps_latency, ps->ps_power, ps->ps_freq);
115 } 115 }
116} 116}
117 117
118int 118int
119acpicpu_pstate_detach(device_t self) 119acpicpu_pstate_detach(device_t self)
120{ 120{
121 struct acpicpu_softc *sc = device_private(self); 121 struct acpicpu_softc *sc = device_private(self);
122 static ONCE_DECL(once_detach); 122 static ONCE_DECL(once_detach);
123 size_t size; 123 size_t size;
124 int rv; 124 int rv;
125 125
126 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) 126 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
127 return 0; 127 return 0;
128 128
129 rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop); 129 rv = RUN_ONCE(&once_detach, acpicpu_md_pstate_stop);
130 130
131 if (rv != 0) 131 if (rv != 0)
132 return rv; 132 return rv;
133 133
134 size = sc->sc_pstate_count * sizeof(*sc->sc_pstate); 134 size = sc->sc_pstate_count * sizeof(*sc->sc_pstate);
135 135
136 if (sc->sc_pstate != NULL) 136 if (sc->sc_pstate != NULL)
137 kmem_free(sc->sc_pstate, size); 137 kmem_free(sc->sc_pstate, size);
138 138
139 sc->sc_flags &= ~ACPICPU_FLAG_P; 139 sc->sc_flags &= ~ACPICPU_FLAG_P;
140 140
141 return 0; 141 return 0;
142} 142}
143 143
144int 144int
145acpicpu_pstate_start(device_t self) 145acpicpu_pstate_start(device_t self)
146{ 146{
147 struct acpicpu_softc *sc = device_private(self); 147 struct acpicpu_softc *sc = device_private(self);
148 static ONCE_DECL(once_start); 148 static ONCE_DECL(once_start);
149 149
150 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) 150 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
151 return 0; 151 return 0;
152 152
153 return RUN_ONCE(&once_start, acpicpu_md_pstate_start); 153 return RUN_ONCE(&once_start, acpicpu_md_pstate_start);
154} 154}
155 155
156bool 156bool
157acpicpu_pstate_suspend(device_t self) 157acpicpu_pstate_suspend(device_t self)
158{ 158{
159 159
160 return true; 160 return true;
161} 161}
162 162
163bool 163bool
164acpicpu_pstate_resume(device_t self) 164acpicpu_pstate_resume(device_t self)
165{ 165{
166 static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_pstate_callback; 166 static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_pstate_callback;
167 struct acpicpu_softc *sc = device_private(self); 167 struct acpicpu_softc *sc = device_private(self);
168 168
169 KASSERT((sc->sc_flags & ACPICPU_FLAG_P) != 0); 169 KASSERT((sc->sc_flags & ACPICPU_FLAG_P) != 0);
170 170
171 if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) != 0) 171 if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) != 0)
172 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev); 172 (void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);
173 173
174 return true; 174 return true;
175} 175}
176 176
177void 177void
178acpicpu_pstate_callback(void *aux) 178acpicpu_pstate_callback(void *aux)
179{ 179{
180 struct acpicpu_softc *sc; 180 struct acpicpu_softc *sc;
181 device_t self = aux; 181 device_t self = aux;
182 uint32_t old, new; 182 uint32_t old, new;
183 183
184 sc = device_private(self); 184 sc = device_private(self);
185 185
186 if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) == 0) 186 if ((sc->sc_flags & ACPICPU_FLAG_P_PPC) == 0)
187 return; 187 return;
188 188
189 mutex_enter(&sc->sc_mtx); 189 mutex_enter(&sc->sc_mtx);
190 190
191 old = sc->sc_pstate_max; 191 old = sc->sc_pstate_max;
192 acpicpu_pstate_change(sc); 192 acpicpu_pstate_change(sc);
193 new = sc->sc_pstate_max; 193 new = sc->sc_pstate_max;
194 194
195 mutex_exit(&sc->sc_mtx); 195 mutex_exit(&sc->sc_mtx);
196 196
197#if 0 197#if 0
198 if (old != new) { 198 if (old != new) {
199 199
200 /* 200 /*
201 * If the maximum changed, proactively 201 * If the maximum changed, proactively
202 * raise or lower the target frequency. 202 * raise or lower the target frequency.
203 */ 203 */
204 acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq); 204 acpicpu_pstate_set(sc, sc->sc_pstate[new].ps_freq);
205 205
206 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "frequency changed from " 206 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "frequency changed from "
207 "%u MHz to %u MHz\n", sc->sc_pstate[old].ps_freq, 207 "%u MHz to %u MHz\n", sc->sc_pstate[old].ps_freq,
208 sc->sc_pstate[sc->sc_pstate_max].ps_freq)); 208 sc->sc_pstate[sc->sc_pstate_max].ps_freq));
209 } 209 }
210#endif 210#endif
211} 211}
212 212
213ACPI_STATUS 213ACPI_STATUS
214acpicpu_pstate_pss(struct acpicpu_softc *sc) 214acpicpu_pstate_pss(struct acpicpu_softc *sc)
215{ 215{
216 struct acpicpu_pstate *ps; 216 struct acpicpu_pstate *ps;
217 ACPI_OBJECT *obj; 217 ACPI_OBJECT *obj;
218 ACPI_BUFFER buf; 218 ACPI_BUFFER buf;
219 ACPI_STATUS rv; 219 ACPI_STATUS rv;
220 uint32_t count; 220 uint32_t count;
221 uint32_t i, j; 221 uint32_t i, j;
222 222
223 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf); 223 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PSS", &buf);
224 224
225 if (ACPI_FAILURE(rv)) 225 if (ACPI_FAILURE(rv))
226 return rv; 226 return rv;
227 227
228 obj = buf.Pointer; 228 obj = buf.Pointer;
229 229
230 if (obj->Type != ACPI_TYPE_PACKAGE) { 230 if (obj->Type != ACPI_TYPE_PACKAGE) {
231 rv = AE_TYPE; 231 rv = AE_TYPE;
232 goto out; 232 goto out;
233 } 233 }
234 234
235 sc->sc_pstate_count = obj->Package.Count; 235 sc->sc_pstate_count = obj->Package.Count;
236 236
237 if (sc->sc_pstate_count == 0) { 237 if (sc->sc_pstate_count == 0) {
238 rv = AE_NOT_EXIST; 238 rv = AE_NOT_EXIST;
239 goto out; 239 goto out;
240 } 240 }
241 241
242 if (sc->sc_pstate_count > 0xFF) { 242 if (sc->sc_pstate_count > 0xFF) {
243 rv = AE_LIMIT; 243 rv = AE_LIMIT;
244 goto out; 244 goto out;
245 } 245 }
246 246
247 sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count * 247 sc->sc_pstate = kmem_zalloc(sc->sc_pstate_count *
248 sizeof(struct acpicpu_pstate), KM_SLEEP); 248 sizeof(struct acpicpu_pstate), KM_SLEEP);
249 249
250 if (sc->sc_pstate == NULL) { 250 if (sc->sc_pstate == NULL) {
251 rv = AE_NO_MEMORY; 251 rv = AE_NO_MEMORY;
252 goto out; 252 goto out;
253 } 253 }
254 254
255 for (count = i = 0; i < sc->sc_pstate_count; i++) { 255 for (count = i = 0; i < sc->sc_pstate_count; i++) {
256 256
257 ps = &sc->sc_pstate[i]; 257 ps = &sc->sc_pstate[i];
258 rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]); 258 rv = acpicpu_pstate_pss_add(ps, &obj->Package.Elements[i]);
259 259
260 if (ACPI_FAILURE(rv)) 260 if (ACPI_FAILURE(rv))
261 continue; 261 continue;
262 262
263 for (j = 0; j < i; j++) { 263 for (j = 0; j < i; j++) {
264 264
265 if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) { 265 if (ps->ps_freq >= sc->sc_pstate[j].ps_freq) {
266 ps->ps_freq = 0; 266 ps->ps_freq = 0;
267 break; 267 break;
268 } 268 }
269 } 269 }
270 270
271 if (ps->ps_freq != 0) 271 if (ps->ps_freq != 0)
272 count++; 272 count++;
273 } 273 }
274 274
275 rv = (count != 0) ? AE_OK : AE_NOT_EXIST; 275 rv = (count != 0) ? AE_OK : AE_NOT_EXIST;
276 276
277out: 277out:
278 if (buf.Pointer != NULL) 278 if (buf.Pointer != NULL)
279 ACPI_FREE(buf.Pointer); 279 ACPI_FREE(buf.Pointer);
280 280
281 return rv; 281 return rv;
282} 282}
283 283
284static ACPI_STATUS 284static ACPI_STATUS
285acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj) 285acpicpu_pstate_pss_add(struct acpicpu_pstate *ps, ACPI_OBJECT *obj)
286{ 286{
287 ACPI_OBJECT *elm; 287 ACPI_OBJECT *elm;
288 uint32_t val[6]; 288 uint32_t val[6];
289 uint32_t *p; 289 uint32_t *p;
290 int i; 290 int i;
291 291
292 if (obj->Type != ACPI_TYPE_PACKAGE) 292 if (obj->Type != ACPI_TYPE_PACKAGE)
293 return AE_TYPE; 293 return AE_TYPE;
294 294
295 if (obj->Package.Count != 6) 295 if (obj->Package.Count != 6)
296 return AE_BAD_DATA; 296 return AE_BAD_DATA;
297 297
298 elm = obj->Package.Elements; 298 elm = obj->Package.Elements;
299 299
300 for (i = 0; i < 6; i++) { 300 for (i = 0; i < 6; i++) {
301 301
302 if (elm[i].Type != ACPI_TYPE_INTEGER) 302 if (elm[i].Type != ACPI_TYPE_INTEGER)
303 return AE_TYPE; 303 return AE_TYPE;
304 304
305 if (elm[i].Integer.Value > UINT32_MAX) 305 if (elm[i].Integer.Value > UINT32_MAX)
306 return AE_AML_NUMERIC_OVERFLOW; 306 return AE_AML_NUMERIC_OVERFLOW;
307 307
308 val[i] = elm[i].Integer.Value; 308 val[i] = elm[i].Integer.Value;
309 } 309 }
310 310
311 if (val[0] == 0 || val[0] >= 0xFFFF) 311 if (val[0] == 0 || val[0] >= 0xFFFF)
312 return AE_BAD_DECIMAL_CONSTANT; 312 return AE_BAD_DECIMAL_CONSTANT;
313 313
314 CTASSERT(sizeof(val) == sizeof(struct acpicpu_pstate) - 314 CTASSERT(sizeof(val) == sizeof(struct acpicpu_pstate) -
315 offsetof(struct acpicpu_pstate, ps_freq)); 315 offsetof(struct acpicpu_pstate, ps_freq));
316 316
317 p = &ps->ps_freq; 317 p = &ps->ps_freq;
318 318
319 for (i = 0; i < 6; i++, p++) 319 for (i = 0; i < 6; i++, p++)
320 *p = val[i]; 320 *p = val[i];
321 321
322 /* 322 /*
323 * The latency is typically around 10 usec 323 * The latency is typically around 10 usec
324 * on Intel CPUs. Use that as the minimum. 324 * on Intel CPUs. Use that as the minimum.
325 */ 325 */
326 if (ps->ps_latency < 10) 326 if (ps->ps_latency < 10)
327 ps->ps_latency = 10; 327 ps->ps_latency = 10;
328 328
329 return AE_OK; 329 return AE_OK;
330} 330}
331 331
332ACPI_STATUS 332ACPI_STATUS
333acpicpu_pstate_pct(struct acpicpu_softc *sc) 333acpicpu_pstate_pct(struct acpicpu_softc *sc)
334{ 334{
335 static const size_t size = sizeof(struct acpicpu_reg); 335 static const size_t size = sizeof(struct acpicpu_reg);
336 struct acpicpu_reg *reg[2]; 336 struct acpicpu_reg *reg[2];
337 ACPI_OBJECT *elm, *obj; 337 ACPI_OBJECT *elm, *obj;
338 ACPI_BUFFER buf; 338 ACPI_BUFFER buf;
339 ACPI_STATUS rv; 339 ACPI_STATUS rv;
340 uint8_t width; 340 uint8_t width;
341 int i; 341 int i;
342 342
343 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf); 343 rv = acpi_eval_struct(sc->sc_node->ad_handle, "_PCT", &buf);
344 344
345 if (ACPI_FAILURE(rv)) 345 if (ACPI_FAILURE(rv))
346 return rv; 346 return rv;
347 347
348 obj = buf.Pointer; 348 obj = buf.Pointer;
349 349
350 if (obj->Type != ACPI_TYPE_PACKAGE) { 350 if (obj->Type != ACPI_TYPE_PACKAGE) {
351 rv = AE_TYPE; 351 rv = AE_TYPE;
352 goto out; 352 goto out;
353 } 353 }
354 354
355 if (obj->Package.Count != 2) { 355 if (obj->Package.Count != 2) {
356 rv = AE_LIMIT; 356 rv = AE_LIMIT;
357 goto out; 357 goto out;
358 } 358 }
359 359
360 for (i = 0; i < 2; i++) { 360 for (i = 0; i < 2; i++) {
361 361
362 elm = &obj->Package.Elements[i]; 362 elm = &obj->Package.Elements[i];
363 363
364 if (elm->Type != ACPI_TYPE_BUFFER) { 364 if (elm->Type != ACPI_TYPE_BUFFER) {
365 rv = AE_TYPE; 365 rv = AE_TYPE;
366 goto out; 366 goto out;
367 } 367 }
368 368
369 if (size > elm->Buffer.Length) { 369 if (size > elm->Buffer.Length) {
370 rv = AE_AML_BAD_RESOURCE_LENGTH; 370 rv = AE_AML_BAD_RESOURCE_LENGTH;
371 goto out; 371 goto out;
372 } 372 }
373 373
374 reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer; 374 reg[i] = (struct acpicpu_reg *)elm->Buffer.Pointer;
375 375
376 switch (reg[i]->reg_spaceid) { 376 switch (reg[i]->reg_spaceid) {
377 377
378 case ACPI_ADR_SPACE_SYSTEM_IO: 378 case ACPI_ADR_SPACE_SYSTEM_IO:
379 379
380 if (reg[i]->reg_addr == 0) { 380 if (reg[i]->reg_addr == 0) {
381 rv = AE_AML_ILLEGAL_ADDRESS; 381 rv = AE_AML_ILLEGAL_ADDRESS;
382 goto out; 382 goto out;
383 } 383 }
384 384
385 width = reg[i]->reg_bitwidth; 385 width = reg[i]->reg_bitwidth;
386 386
387 if (width != 8 && width != 16 && width != 32) { 387 if (width != 8 && width != 16 && width != 32) {
388 rv = AE_SUPPORT; 388 rv = AE_AML_BAD_RESOURCE_VALUE;
389 goto out; 389 goto out;
390 } 390 }
391 391
392 break; 392 break;
393 393
394 case ACPI_ADR_SPACE_FIXED_HARDWARE: 394 case ACPI_ADR_SPACE_FIXED_HARDWARE:
395 395
396 if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) { 396 if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) == 0) {
397 rv = AE_AML_BAD_RESOURCE_VALUE; 397 rv = AE_SUPPORT;
398 goto out; 398 goto out;
399 } 399 }
400 400
401 break; 401 break;
402 402
403 default: 403 default:
404 rv = AE_AML_INVALID_SPACE_ID; 404 rv = AE_AML_INVALID_SPACE_ID;
405 goto out; 405 goto out;
406 } 406 }
407 } 407 }
408 408
409 if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) { 409 if (reg[0]->reg_spaceid != reg[1]->reg_spaceid) {
410 rv = AE_AML_INVALID_SPACE_ID; 410 rv = AE_AML_INVALID_SPACE_ID;
411 goto out; 411 goto out;
412 } 412 }
413 413
414 (void)memcpy(&sc->sc_pstate_control, reg[0], size); /* PERF_CTRL */ 414 (void)memcpy(&sc->sc_pstate_control, reg[0], size); /* PERF_CTRL */
415 (void)memcpy(&sc->sc_pstate_status, reg[1], size); /* PERF_STATUS */ 415 (void)memcpy(&sc->sc_pstate_status, reg[1], size); /* PERF_STATUS */
416 416
417out: 417out:
418 if (buf.Pointer != NULL) 418 if (buf.Pointer != NULL)
419 ACPI_FREE(buf.Pointer); 419 ACPI_FREE(buf.Pointer);
420 420
421 return rv; 421 return rv;
422} 422}
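
As background for the checks above: _PCT is expected to return a package of exactly two buffers, PERF_CTRL followed by PERF_STATUS, each carrying a register descriptor (space ID, bit width, address), and both registers must live in the same address space. A rough, self-contained sketch of those sanity rules, using a hypothetical reg_desc type rather than the ACPICA structures (the FFH capability check is omitted here):

#include <stdint.h>
#include <stdio.h>

#define SPACE_SYSTEM_IO		0x01	/* ACPI_ADR_SPACE_SYSTEM_IO */
#define SPACE_FIXED_HARDWARE	0x7f	/* ACPI_ADR_SPACE_FIXED_HARDWARE */

/* Hypothetical stand-in for struct acpicpu_reg. */
struct reg_desc {
	uint8_t	 spaceid;
	uint8_t	 bitwidth;
	uint64_t addr;
};

/*
 * Apply the same sanity rules as acpicpu_pstate_pct(): a known space ID,
 * a non-zero I/O address, an 8/16/32-bit width, and matching space IDs.
 * Returns non-zero if the pair looks usable.
 */
static int
pct_ok(const struct reg_desc *ctrl, const struct reg_desc *status)
{
	const struct reg_desc *reg[2] = { ctrl, status };
	int i;

	for (i = 0; i < 2; i++) {

		if (reg[i]->spaceid == SPACE_FIXED_HARDWARE)
			continue;

		if (reg[i]->spaceid != SPACE_SYSTEM_IO)
			return 0;

		if (reg[i]->addr == 0)
			return 0;

		if (reg[i]->bitwidth != 8 && reg[i]->bitwidth != 16 &&
		    reg[i]->bitwidth != 32)
			return 0;
	}

	return ctrl->spaceid == status->spaceid;
}

int
main(void)
{
	struct reg_desc ctrl   = { SPACE_SYSTEM_IO, 16, 0xb2 };
	struct reg_desc status = { SPACE_SYSTEM_IO, 16, 0xb3 };

	printf("pct_ok -> %d\n", pct_ok(&ctrl, &status));
	return 0;
}
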
423 423
424static int 424static int
425acpicpu_pstate_max(struct acpicpu_softc *sc) 425acpicpu_pstate_max(struct acpicpu_softc *sc)
426{ 426{
427 ACPI_INTEGER val; 427 ACPI_INTEGER val;
428 ACPI_STATUS rv; 428 ACPI_STATUS rv;
429 429
430 /* 430 /*
431 * Evaluate the currently highest P-state that can be used. 431 * Evaluate the currently highest P-state that can be used.
432 * If available, we can use either this state or any lower 432 * If available, we can use either this state or any lower
433 * power (i.e. higher numbered) state from the _PSS object. 433 * power (i.e. higher numbered) state from the _PSS object.
434 */ 434 */
435 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val); 435 rv = acpi_eval_integer(sc->sc_node->ad_handle, "_PPC", &val);
436 436
437 sc->sc_pstate_max = 0; 437 sc->sc_pstate_max = 0;
438 438
439 if (ACPI_FAILURE(rv)) 439 if (ACPI_FAILURE(rv))
440 return 1; 440 return 1;
441 441
442 	if (val >= (uint64_t)sc->sc_pstate_count) 442 	if (val >= (uint64_t)sc->sc_pstate_count)
443 return 1; 443 return 1;
444 444
445 if (sc->sc_pstate[val].ps_freq == 0) 445 if (sc->sc_pstate[val].ps_freq == 0)
446 return 1; 446 return 1;
447 447
448 sc->sc_pstate_max = val; /* XXX: sysctl(8) knob? */ 448 sc->sc_pstate_max = val; /* XXX: sysctl(8) knob? */
449 449
450 return 0; 450 return 0;
451} 451}
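
A note on the _PPC convention relied on above: the returned value is an index into the _PSS-derived table, where index 0 is the highest-performance state and larger indices are lower-power states, so the limit state and everything numbered above it remain usable. A small user-space sketch of that selection, with a hypothetical freq_mhz[] table standing in for sc_pstate[]:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical P-state table; index 0 = highest performance. */
static const uint32_t freq_mhz[] = { 2400, 2000, 1600, 800 };
static const uint32_t nstates = sizeof(freq_mhz) / sizeof(freq_mhz[0]);

/* Return the highest usable frequency given a _PPC-style limit index. */
static uint32_t
max_usable_freq(uint64_t ppc)
{
	uint32_t i;

	if (ppc >= nstates)	/* invalid limit: fall back to P0 */
		ppc = 0;

	for (i = (uint32_t)ppc; i < nstates; i++) {
		if (freq_mhz[i] != 0)
			return freq_mhz[i];
	}

	return 0;
}

int
main(void)
{
	/* With _PPC = 1, P0 (2400 MHz) is off limits; P1 is the ceiling. */
	printf("limit 1 -> %u MHz\n", max_usable_freq(1));
	return 0;
}
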
452 452
453static void 453static void
454acpicpu_pstate_change(struct acpicpu_softc *sc) 454acpicpu_pstate_change(struct acpicpu_softc *sc)
455{ 455{
456 ACPI_OBJECT_LIST arg; 456 ACPI_OBJECT_LIST arg;
457 ACPI_OBJECT obj[2]; 457 ACPI_OBJECT obj[2];
458 458
459 arg.Count = 2; 459 arg.Count = 2;
460 arg.Pointer = obj; 460 arg.Pointer = obj;
461 461
462 obj[0].Type = ACPI_TYPE_INTEGER; 462 obj[0].Type = ACPI_TYPE_INTEGER;
463 obj[1].Type = ACPI_TYPE_INTEGER; 463 obj[1].Type = ACPI_TYPE_INTEGER;
464 464
465 obj[0].Integer.Value = ACPICPU_P_NOTIFY; 465 obj[0].Integer.Value = ACPICPU_P_NOTIFY;
466 obj[1].Integer.Value = acpicpu_pstate_max(sc); 466 obj[1].Integer.Value = acpicpu_pstate_max(sc);
467 467
468 (void)AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL); 468 (void)AcpiEvaluateObject(sc->sc_node->ad_handle, "_OST", &arg, NULL);
469} 469}
470 470
471static void 471static void
472acpicpu_pstate_bios(void) 472acpicpu_pstate_bios(void)
473{ 473{
474 const uint8_t val = AcpiGbl_FADT.PstateControl; 474 const uint8_t val = AcpiGbl_FADT.PstateControl;
475 const uint32_t addr = AcpiGbl_FADT.SmiCommand; 475 const uint32_t addr = AcpiGbl_FADT.SmiCommand;
476 476
477 if (addr == 0) 477 if (addr == 0)
478 return; 478 return;
479 479
480 (void)AcpiOsWritePort(addr, val, 8); 480 (void)AcpiOsWritePort(addr, val, 8);
481} 481}
482 482
483int 483int
484acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq) 484acpicpu_pstate_get(struct acpicpu_softc *sc, uint32_t *freq)
485{ 485{
486 const uint8_t method = sc->sc_pstate_control.reg_spaceid; 486 const uint8_t method = sc->sc_pstate_control.reg_spaceid;
487 struct acpicpu_pstate *ps = NULL; 487 struct acpicpu_pstate *ps = NULL;
488 uint32_t i, val = 0; 488 uint32_t i, val = 0;
489 uint64_t addr; 489 uint64_t addr;
490 uint8_t width; 490 uint8_t width;
491 int rv; 491 int rv;
492 492
493 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) { 493 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
494 rv = ENODEV; 494 rv = ENODEV;
495 goto fail; 495 goto fail;
496 } 496 }
497 497
498 if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) { 498 if (sc->sc_pstate_current != ACPICPU_P_STATE_UNKNOWN) {
499 *freq = sc->sc_pstate_current; 499 *freq = sc->sc_pstate_current;
500 return 0; 500 return 0;
501 } 501 }
502 502
503 switch (method) { 503 switch (method) {
504 504
505 case ACPI_ADR_SPACE_FIXED_HARDWARE: 505 case ACPI_ADR_SPACE_FIXED_HARDWARE:
506 506
507 rv = acpicpu_md_pstate_get(sc, freq); 507 rv = acpicpu_md_pstate_get(sc, freq);
508 508
509 if (rv != 0) 509 if (rv != 0)
510 goto fail; 510 goto fail;
511 511
512 break; 512 break;
513 513
514 case ACPI_ADR_SPACE_SYSTEM_IO: 514 case ACPI_ADR_SPACE_SYSTEM_IO:
515 515
516 addr = sc->sc_pstate_status.reg_addr; 516 addr = sc->sc_pstate_status.reg_addr;
517 width = sc->sc_pstate_status.reg_bitwidth; 517 width = sc->sc_pstate_status.reg_bitwidth;
518 518
519 (void)AcpiOsReadPort(addr, &val, width); 519 (void)AcpiOsReadPort(addr, &val, width);
520 520
521 if (val == 0) { 521 if (val == 0) {
522 rv = EIO; 522 rv = EIO;
523 goto fail; 523 goto fail;
524 } 524 }
525 525
526 mutex_enter(&sc->sc_mtx); 526 mutex_enter(&sc->sc_mtx);
527 527
528 for (i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) { 528 for (i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) {
529 529
530 if (sc->sc_pstate[i].ps_freq == 0) 530 if (sc->sc_pstate[i].ps_freq == 0)
531 continue; 531 continue;
532 532
533 if (val == sc->sc_pstate[i].ps_status) { 533 if (val == sc->sc_pstate[i].ps_status) {
534 ps = &sc->sc_pstate[i]; 534 ps = &sc->sc_pstate[i];
535 break; 535 break;
536 } 536 }
537 } 537 }
538 538
539 mutex_exit(&sc->sc_mtx); 539 mutex_exit(&sc->sc_mtx);
540 540
541 if (ps == NULL) { 541 if (ps == NULL) {
542 rv = EIO; 542 rv = EIO;
543 goto fail; 543 goto fail;
544 } 544 }
545 545
546 *freq = ps->ps_freq; 546 *freq = ps->ps_freq;
547 break; 547 break;
548 548
549 default: 549 default:
550 rv = ENOTTY; 550 rv = ENOTTY;
551 goto fail; 551 goto fail;
552 } 552 }
553 553
554 sc->sc_pstate_current = *freq; 554 sc->sc_pstate_current = *freq;
555 555
556 return 0; 556 return 0;
557 557
558fail: 558fail:
559 aprint_error_dev(sc->sc_dev, "failed " 559 aprint_error_dev(sc->sc_dev, "failed "
560 "to get frequency (err %d)\n", rv); 560 "to get frequency (err %d)\n", rv);
561 561
562 *freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN; 562 *freq = sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
563 563
564 return rv; 564 return rv;
565} 565}
566 566
567int 567int
568acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq) 568acpicpu_pstate_set(struct acpicpu_softc *sc, uint32_t freq)
569{ 569{
570 const uint8_t method = sc->sc_pstate_control.reg_spaceid; 570 const uint8_t method = sc->sc_pstate_control.reg_spaceid;
571 struct acpicpu_pstate *ps = NULL; 571 struct acpicpu_pstate *ps = NULL;
572 uint32_t i, val; 572 uint32_t i, val;
573 uint64_t addr; 573 uint64_t addr;
574 uint8_t width; 574 uint8_t width;
575 int rv; 575 int rv;
576 576
577 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) { 577 if ((sc->sc_flags & ACPICPU_FLAG_P) == 0) {
578 rv = ENODEV; 578 rv = ENODEV;
579 goto fail; 579 goto fail;
580 } 580 }
581 581
582 mutex_enter(&sc->sc_mtx); 582 mutex_enter(&sc->sc_mtx);
583 583
584 for (i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) { 584 for (i = sc->sc_pstate_max; i < sc->sc_pstate_count; i++) {
585 585
586 if (sc->sc_pstate[i].ps_freq == 0) 586 if (sc->sc_pstate[i].ps_freq == 0)
587 continue; 587 continue;
588 588
589 if (sc->sc_pstate[i].ps_freq == freq) { 589 if (sc->sc_pstate[i].ps_freq == freq) {
590 ps = &sc->sc_pstate[i]; 590 ps = &sc->sc_pstate[i];
591 break; 591 break;
592 } 592 }
593 } 593 }
594 594
595 mutex_exit(&sc->sc_mtx); 595 mutex_exit(&sc->sc_mtx);
596 596
597 if (ps == NULL) { 597 if (ps == NULL) {
598 rv = EINVAL; 598 rv = EINVAL;
599 goto fail; 599 goto fail;
600 } 600 }
601 601
602 switch (method) { 602 switch (method) {
603 603
604 case ACPI_ADR_SPACE_FIXED_HARDWARE: 604 case ACPI_ADR_SPACE_FIXED_HARDWARE:
605 605
606 rv = acpicpu_md_pstate_set(ps); 606 rv = acpicpu_md_pstate_set(ps);
607 607
608 if (rv != 0) 608 if (rv != 0)
609 goto fail; 609 goto fail;
610 610
611 break; 611 break;
612 612
613 case ACPI_ADR_SPACE_SYSTEM_IO: 613 case ACPI_ADR_SPACE_SYSTEM_IO:
614 614
615 addr = sc->sc_pstate_control.reg_addr; 615 addr = sc->sc_pstate_control.reg_addr;
616 width = sc->sc_pstate_control.reg_bitwidth; 616 width = sc->sc_pstate_control.reg_bitwidth;
617 617
618 (void)AcpiOsWritePort(addr, ps->ps_control, width); 618 (void)AcpiOsWritePort(addr, ps->ps_control, width);
619 619
620 addr = sc->sc_pstate_status.reg_addr; 620 addr = sc->sc_pstate_status.reg_addr;
621 width = sc->sc_pstate_status.reg_bitwidth; 621 width = sc->sc_pstate_status.reg_bitwidth;
622 622
623 /* 623 /*
624 * Some systems take longer to respond 624 * Some systems take longer to respond
625 * than the reported worst-case latency. 625 * than the reported worst-case latency.
626 */ 626 */
627 for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) { 627 for (i = val = 0; i < ACPICPU_P_STATE_RETRY; i++) {
628 628
629 (void)AcpiOsReadPort(addr, &val, width); 629 (void)AcpiOsReadPort(addr, &val, width);
630 630
631 if (val == ps->ps_status) 631 if (val == ps->ps_status)
632 break; 632 break;
633 633
634 DELAY(ps->ps_latency); 634 DELAY(ps->ps_latency);
635 } 635 }
636 636
637 if (i == ACPICPU_P_STATE_RETRY) { 637 if (i == ACPICPU_P_STATE_RETRY) {
638 rv = EAGAIN; 638 rv = EAGAIN;
639 goto fail; 639 goto fail;
640 } 640 }
641 641
642 break; 642 break;
643 643
644 default: 644 default:
645 rv = ENOTTY; 645 rv = ENOTTY;
646 goto fail; 646 goto fail;
647 } 647 }
648 648
649 ps->ps_stat++; 649 ps->ps_stat++;
650 sc->sc_pstate_current = freq; 650 sc->sc_pstate_current = freq;
651 651
652 return 0; 652 return 0;
653 653
654fail: 654fail:
655 aprint_error_dev(sc->sc_dev, "failed to set " 655 aprint_error_dev(sc->sc_dev, "failed to set "
656 "frequency to %u (err %d)\n", freq, rv); 656 "frequency to %u (err %d)\n", freq, rv);
657 657
658 sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN; 658 sc->sc_pstate_current = ACPICPU_P_STATE_UNKNOWN;
659 659
660 return rv; 660 return rv;
661} 661}
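
The status poll at the end of the SYSTEM_IO branch above is a bounded retry: re-read the status register up to ACPICPU_P_STATE_RETRY times, waiting the state's transition latency between attempts, and fail with EAGAIN if the expected value never shows up. A rough user-space sketch of the same pattern, with a hypothetical read_status() standing in for the port read and usleep() standing in for DELAY():

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>

#define RETRY_MAX	100	/* stand-in for ACPICPU_P_STATE_RETRY */

/* Hypothetical status source; a real driver reads an I/O port here. */
static uint32_t
read_status(void)
{
	static uint32_t calls;

	return (++calls < 3) ? 0 : 0x2a;	/* settles after a few reads */
}

/* Poll until the expected status appears or the retry budget runs out. */
static int
wait_for_status(uint32_t expected, useconds_t latency_us)
{
	uint32_t i;

	for (i = 0; i < RETRY_MAX; i++) {

		if (read_status() == expected)
			return 0;

		usleep(latency_us);	/* analogous to DELAY(ps_latency) */
	}

	return EAGAIN;
}

int
main(void)
{
	printf("wait_for_status -> %d\n", wait_for_status(0x2a, 10));
	return 0;
}
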