Sun Oct 6 11:28:24 2019 UTC
Change the sysctl nodes to be named after the first CPU in the DVFS domain.

  old: machdep.cpu.frequency.*, machdep.cpufreqdt4.frequency.*
  new: machdep.cpufreq.cpu0.*, machdep.cpufreq.cpu4.*
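
  For example, on a board with two DVFS domains whose first CPUs are cpu0 and
  cpu4, the tree now looks like this (frequency values are made up for
  illustration; the per-domain leaves created by the driver are target,
  current and available):

    machdep.cpufreq.cpu0.target = 1200
    machdep.cpufreq.cpu0.current = 1200
    machdep.cpufreq.cpu0.available = 408 600 816 1008 1200
    machdep.cpufreq.cpu4.target = 1800
    machdep.cpufreq.cpu4.current = 1800
    machdep.cpufreq.cpu4.available = 408 600 816 1200 1800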


(jmcneill)
cvs diff -r1.8 -r1.9 src/sys/dev/fdt/cpufreq_dt.c

--- src/sys/dev/fdt/cpufreq_dt.c 2019/05/21 22:15:26 1.8
+++ src/sys/dev/fdt/cpufreq_dt.c 2019/10/06 11:28:24 1.9
@@ -1,497 +1,515 @@
-/* $NetBSD: cpufreq_dt.c,v 1.8 2019/05/21 22:15:26 jmcneill Exp $ */
+/* $NetBSD: cpufreq_dt.c,v 1.9 2019/10/06 11:28:24 jmcneill Exp $ */
 
 /*-
  * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.8 2019/05/21 22:15:26 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.9 2019/10/06 11:28:24 jmcneill Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/device.h>
 #include <sys/kmem.h>
 #include <sys/bus.h>
 #include <sys/atomic.h>
 #include <sys/xcall.h>
 #include <sys/sysctl.h>
 #include <sys/queue.h>
 #include <sys/once.h>
+#include <sys/cpu.h>
 
 #include <dev/fdt/fdtvar.h>
 
 struct cpufreq_dt_table {
 	int phandle;
 	TAILQ_ENTRY(cpufreq_dt_table) next;
 };
 
 static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
     TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
 static kmutex_t cpufreq_dt_tables_lock;
 
 struct cpufreq_dt_opp {
 	u_int freq_khz;
 	u_int voltage_uv;
 	u_int latency_ns;
 };
 
 struct cpufreq_dt_softc {
 	device_t sc_dev;
 	int sc_phandle;
 	struct clk *sc_clk;
 	struct fdtbus_regulator *sc_supply;
 
 	struct cpufreq_dt_opp *sc_opp;
 	ssize_t sc_nopp;
 
 	u_int sc_freq_target;
 	bool sc_freq_throttle;
 
 	u_int sc_busy;
 
 	char *sc_freq_available;
 	int sc_node_target;
 	int sc_node_current;
 	int sc_node_available;
 
 	struct cpufreq_dt_table sc_table;
 };
 
 static void
 cpufreq_dt_change_cb(void *arg1, void *arg2)
 {
 #if notyet
 	struct cpu_info *ci = curcpu();
 	ci->ci_data.cpu_cc_freq = cpufreq_get_rate() * 1000000;
 #endif
 }
 
 static int
 cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
 {
 	struct cpufreq_dt_opp *opp = NULL;
 	u_int old_rate, new_rate, old_uv, new_uv;
 	uint64_t xc;
 	int error;
 	ssize_t n;
 
 	for (n = 0; n < sc->sc_nopp; n++)
 		if (sc->sc_opp[n].freq_khz == freq_khz) {
 			opp = &sc->sc_opp[n];
 			break;
 		}
 	if (opp == NULL)
 		return EINVAL;
 
 	old_rate = clk_get_rate(sc->sc_clk);
 	new_rate = freq_khz * 1000;
 	new_uv = opp->voltage_uv;
 
 	if (old_rate == new_rate)
 		return 0;
 
 	if (sc->sc_supply != NULL) {
 		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
 		if (error != 0)
 			return error;
 
 		if (new_uv > old_uv) {
 			error = fdtbus_regulator_set_voltage(sc->sc_supply,
 			    new_uv, new_uv);
 			if (error != 0)
 				return error;
 		}
 	}
 
 	error = clk_set_rate(sc->sc_clk, new_rate);
 	if (error != 0)
 		return error;
 
 	const u_int latency_us = howmany(opp->latency_ns, 1000);
 	if (latency_us > 0)
 		delay(latency_us);
 
 	if (sc->sc_supply != NULL) {
 		if (new_uv < old_uv) {
 			error = fdtbus_regulator_set_voltage(sc->sc_supply,
 			    new_uv, new_uv);
 			if (error != 0)
 				return error;
 		}
 	}
 
 	if (error == 0) {
 		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
 		xc_wait(xc);
 
 		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
 	}
 
 	return 0;
 }
 
 static void
 cpufreq_dt_throttle_enable(device_t dev)
 {
 	struct cpufreq_dt_softc * const sc = device_private(dev);
 
 	if (sc->sc_freq_throttle)
 		return;
 
 	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;
 
 	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
 		kpause("throttle", false, 1, NULL);
 
 	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
 		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
 		    freq_khz / 1000, freq_khz % 1000);
 		sc->sc_freq_throttle = true;
 		if (sc->sc_freq_target == 0)
 			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
 	}
 
 	atomic_dec_uint(&sc->sc_busy);
 }
 
 static void
 cpufreq_dt_throttle_disable(device_t dev)
 {
 	struct cpufreq_dt_softc * const sc = device_private(dev);
 
 	if (!sc->sc_freq_throttle)
 		return;
 
 	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
 		kpause("throttle", false, 1, NULL);
 
 	const u_int freq_khz = sc->sc_freq_target * 1000;
 
 	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
 		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
 		    freq_khz / 1000, freq_khz % 1000);
 		sc->sc_freq_throttle = false;
 	}
 
 	atomic_dec_uint(&sc->sc_busy);
 }
 
 static int
 cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
 {
 	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
 	struct sysctlnode node;
 	u_int fq, oldfq = 0;
 	int error, n;
 
 	node = *rnode;
 	node.sysctl_data = &fq;
 
 	if (rnode->sysctl_num == sc->sc_node_target) {
 		if (sc->sc_freq_target == 0)
 			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
 		fq = sc->sc_freq_target;
 	} else
 		fq = clk_get_rate(sc->sc_clk) / 1000000;
 
 	if (rnode->sysctl_num == sc->sc_node_target)
 		oldfq = fq;
 
 	if (sc->sc_freq_target == 0)
 		sc->sc_freq_target = fq;
 
 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
 	if (error || newp == NULL)
 		return error;
 
 	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
 		return 0;
 
 	for (n = 0; n < sc->sc_nopp; n++)
 		if (sc->sc_opp[n].freq_khz / 1000 == fq)
 			break;
 	if (n == sc->sc_nopp)
 		return EINVAL;
 
 	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
 		return EBUSY;
 
 	sc->sc_freq_target = fq;
 
 	if (sc->sc_freq_throttle)
 		error = 0;
 	else
 		error = cpufreq_dt_set_rate(sc, fq * 1000);
 
 	atomic_dec_uint(&sc->sc_busy);
 
 	return error;
 }
 
+static struct cpu_info *
+cpufreq_dt_cpu_lookup(cpuid_t mpidr)
+{
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+
+	for (CPU_INFO_FOREACH(cii, ci)) {
+		if (ci->ci_cpuid == mpidr)
+			return ci;
+	}
+
+	return NULL;
+}
+
 static void
 cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
 {
-	const struct sysctlnode *node, *cpunode, *freqnode;
+	const struct sysctlnode *node, *cpunode;
 	struct sysctllog *cpufreq_log = NULL;
-	const char *cpunodename;
+	struct cpu_info *ci;
+	uint64_t mpidr;
 	int error, i;
 
+	if (fdtbus_get_reg(sc->sc_phandle, 0, &mpidr, 0) != 0)
+		return;
+
+	ci = cpufreq_dt_cpu_lookup(mpidr);
+	if (ci == NULL)
+		return;
+
 	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
 	for (i = 0; i < sc->sc_nopp; i++) {
 		char buf[6];
 		snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
 		strcat(sc->sc_freq_available, buf);
 	}
 
-	if (device_unit(sc->sc_dev) == 0)
-		cpunodename = "cpu";
-	else
-		cpunodename = device_xname(sc->sc_dev);
-
 	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
 	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
-	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
-	    0, CTLTYPE_NODE, cpunodename, NULL,
+	error = sysctl_createv(&cpufreq_log, 0, &node, &node,
+	    0, CTLTYPE_NODE, "cpufreq", NULL,
 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
-	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &freqnode,
-	    0, CTLTYPE_NODE, "frequency", NULL,
+	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
+	    0, CTLTYPE_NODE, cpu_name(ci), NULL,
 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
 
-	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
+	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
 	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
 	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
 	    CTL_CREATE, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
 	sc->sc_node_target = node->sysctl_num;
 
-	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
+	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
 	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
 	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
 	    CTL_CREATE, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
 	sc->sc_node_current = node->sysctl_num;
 
-	error = sysctl_createv(&cpufreq_log, 0, &freqnode, &node,
+	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
 	    0, CTLTYPE_STRING, "available", NULL,
 	    NULL, 0, sc->sc_freq_available, 0,
 	    CTL_CREATE, CTL_EOL);
 	if (error)
 		goto sysctl_failed;
 	sc->sc_node_available = node->sysctl_num;
 
 	return;
 
 sysctl_failed:
 	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
 	sysctl_teardown(&cpufreq_log);
 }
 
 static int
 cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
 {
 	const int phandle = sc->sc_phandle;
 	const u_int *opp;
 	int len, i;
 
 	opp = fdtbus_get_prop(phandle, "operating-points", &len);
 	if (len < 8)
 		return ENXIO;
 
 	sc->sc_nopp = len / 8;
 	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
 	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
 		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
 		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
 	}
 
 	return 0;
 }
 
 static int
 cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
 {
 	const int phandle = sc->sc_phandle;
 	struct cpufreq_dt_table *table;
 	const u_int *opp_uv;
 	uint64_t opp_hz;
 	int opp_node, len, i;
 
 	const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
 	if (opp_table < 0)
 		return ENOENT;
 
 	/* If the table is shared, only setup a single instance */
 	if (of_hasprop(opp_table, "opp-shared")) {
 		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
 			if (table->phandle == opp_table)
 				return EEXIST;
 		sc->sc_table.phandle = opp_table;
 		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
 	}
 
 	for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
 		if (fdtbus_status_okay(opp_node))
 			sc->sc_nopp++;
 	}
 
 	if (sc->sc_nopp == 0)
 		return EINVAL;
 
 	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
 	for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node), i++) {
 		if (!fdtbus_status_okay(opp_node))
 			continue;
 		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
 			return EINVAL;
 		opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
 		if (opp_uv == NULL || len < 1)
 			return EINVAL;
 		/* Table is in reverse order */
 		const int index = sc->sc_nopp - i - 1;
 		sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
 		sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
 		of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[index].latency_ns);
 	}
 
 	return 0;
 }
 
 static int
 cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
 {
 	const int phandle = sc->sc_phandle;
 	int error, i;
 
 	if (of_hasprop(phandle, "cpu-supply")) {
 		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
 		if (sc->sc_supply == NULL) {
 			aprint_error_dev(sc->sc_dev,
 			    "couldn't acquire cpu-supply\n");
 			return ENXIO;
 		}
 	}
 	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
 	if (sc->sc_clk == NULL) {
 		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
 		return ENXIO;
 	}
 
 	mutex_enter(&cpufreq_dt_tables_lock);
 	if (of_hasprop(phandle, "operating-points"))
 		error = cpufreq_dt_parse_opp(sc);
 	else if (of_hasprop(phandle, "operating-points-v2"))
 		error = cpufreq_dt_parse_opp_v2(sc);
 	else
 		error = EINVAL;
 	mutex_exit(&cpufreq_dt_tables_lock);
 
 	if (error) {
 		if (error != EEXIST)
 			aprint_error_dev(sc->sc_dev,
 			    "couldn't parse operating points: %d\n", error);
 		return error;
 	}
 
 	for (i = 0; i < sc->sc_nopp; i++) {
 		aprint_verbose_dev(sc->sc_dev, "%u.%03u MHz, %u uV\n",
 		    sc->sc_opp[i].freq_khz / 1000,
 		    sc->sc_opp[i].freq_khz % 1000,
 		    sc->sc_opp[i].voltage_uv);
 	}
 
 	return 0;
 }
 
 static int
 cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
 {
 	struct fdt_attach_args * const faa = aux;
 	const int phandle = faa->faa_phandle;
 	bus_addr_t addr;
 
 	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
 		return 0;
 
 	if (!of_hasprop(phandle, "clocks"))
 		return 0;
 
 	if (!of_hasprop(phandle, "operating-points") &&
 	    !of_hasprop(phandle, "operating-points-v2"))
 		return 0;
 
 	return 1;
 }
 
 static void
 cpufreq_dt_init(device_t self)
 {
 	struct cpufreq_dt_softc * const sc = device_private(self);
 	int error;
 
 	if ((error = cpufreq_dt_parse(sc)) != 0)
 		return;
 
 	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE, cpufreq_dt_throttle_enable, true);
 	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE, cpufreq_dt_throttle_disable, true);
 
 	cpufreq_dt_init_sysctl(sc);
 }
 
 static int
 cpufreq_dt_lock_init(void)
 {
 	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
 	return 0;
 }
 
 static void
 cpufreq_dt_attach(device_t parent, device_t self, void *aux)
 {
 	static ONCE_DECL(locks);
 	struct cpufreq_dt_softc * const sc = device_private(self);
 	struct fdt_attach_args * const faa = aux;
 
 	RUN_ONCE(&locks, cpufreq_dt_lock_init);
 
 	sc->sc_dev = self;
 	sc->sc_phandle = faa->faa_phandle;
 
 	aprint_naive("\n");
 	aprint_normal("\n");
 
 	config_interrupts(self, cpufreq_dt_init);
 }
 
 CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
     cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);