| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $ */ | | 1 | /* $NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1992, 1993 | | 4 | * Copyright (c) 1992, 1993 |
5 | * The Regents of the University of California. All rights reserved. | | 5 | * The Regents of the University of California. All rights reserved. |
6 | * Copyright (c) 1994 Gordon W. Ross | | 6 | * Copyright (c) 1994 Gordon W. Ross |
7 | * Copyright (c) 1993 Adam Glass | | 7 | * Copyright (c) 1993 Adam Glass |
8 | * Copyright (c) 1996 Paul Kranenburg | | 8 | * Copyright (c) 1996 Paul Kranenburg |
9 | * Copyright (c) 1996 | | 9 | * Copyright (c) 1996 |
10 | * The President and Fellows of Harvard College. All rights reserved. | | 10 | * The President and Fellows of Harvard College. All rights reserved. |
11 | * | | 11 | * |
12 | * This software was developed by the Computer Systems Engineering group | | 12 | * This software was developed by the Computer Systems Engineering group |
13 | * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and | | 13 | * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and |
14 | * contributed to Berkeley. | | 14 | * contributed to Berkeley. |
| @@ -50,108 +50,153 @@ | | | @@ -50,108 +50,153 @@ |
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
51 | * SUCH DAMAGE. | | 51 | * SUCH DAMAGE. |
52 | * | | 52 | * |
53 | * @(#)clock.c 8.1 (Berkeley) 6/11/93 | | 53 | * @(#)clock.c 8.1 (Berkeley) 6/11/93 |
54 | */ | | 54 | */ |
55 | | | 55 | |
56 | /* | | 56 | /* |
57 | * Kernel clocks provided by "timer" device. The hardclock is provided by | | 57 | * Kernel clocks provided by "timer" device. The hardclock is provided by |
58 | * the timer register (aka system counter). The statclock is provided by | | 58 | * the timer register (aka system counter). The statclock is provided by |
59 | * per CPU counter register(s) (aka processor counter(s)). | | 59 | * per CPU counter register(s) (aka processor counter(s)). |
60 | */ | | 60 | */ |
61 | | | 61 | |
62 | #include <sys/cdefs.h> | | 62 | #include <sys/cdefs.h> |
63 | __KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $"); | | 63 | __KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $"); |
64 | | | 64 | |
65 | #include <sys/param.h> | | 65 | #include <sys/param.h> |
66 | #include <sys/kernel.h> | | 66 | #include <sys/kernel.h> |
67 | #include <sys/device.h> | | 67 | #include <sys/device.h> |
68 | #include <sys/systm.h> | | 68 | #include <sys/systm.h> |
69 | #include <sys/timetc.h> | | 69 | #include <sys/timetc.h> |
70 | | | 70 | |
71 | #include <machine/autoconf.h> | | 71 | #include <machine/autoconf.h> |
72 | #include <sys/bus.h> | | 72 | #include <sys/bus.h> |
73 | | | 73 | |
74 | #include <sparc/sparc/timerreg.h> | | 74 | #include <sparc/sparc/timerreg.h> |
75 | #include <sparc/sparc/timervar.h> | | 75 | #include <sparc/sparc/timervar.h> |
76 | | | 76 | |
77 | static struct intrhand level10; | | 77 | static struct intrhand level10; |
78 | static struct intrhand level14; | | 78 | static struct intrhand level14; |
79 | | | 79 | |
80 | static u_int timer_get_timecount(struct timecounter *); | | 80 | static u_int timer_get_timecount(struct timecounter *); |
81 | | | 81 | |
82 | /* | | 82 | /* |
83 | * timecounter local state | | 83 | * timecounter local state |
84 | */ | | 84 | */ |
85 | static struct counter { | | 85 | static struct counter { |
86 | volatile u_int *cntreg; /* counter register */ | | 86 | __cpu_simple_lock_t lock; /* protects access to offset, reg, last* */ |
| | | 87 | volatile u_int *cntreg; /* counter register to read */ |
87 | u_int limit; /* limit we count up to */ | | 88 | u_int limit; /* limit we count up to */ |
88 | u_int offset; /* accumulated offset due to wraps */ | | 89 | u_int offset; /* accumulated offset due to wraps */ |
89 | u_int shift; /* scaling for valid bits */ | | 90 | u_int shift; /* scaling for valid bits */ |
90 | u_int mask; /* valid bit mask */ | | 91 | u_int mask; /* valid bit mask */ |
91 | } cntr; | | 92 | u_int lastcnt; /* the last* values are used to notice */ |
| | | 93 | u_int lastres; /* and fix up cases where it would appear */ |
| | | 94 | u_int lastoffset; /* time went backwards. */ |
| | | 95 | } cntr __aligned(CACHE_LINE_SIZE); |
92 | | | 96 | |
93 | /* | | 97 | /* |
94 | * define timecounter | | 98 | * define timecounter |
95 | */ | | 99 | */ |
96 | | | 100 | |
97 | static struct timecounter counter_timecounter = { | | 101 | static struct timecounter counter_timecounter = { |
98 | timer_get_timecount, /* get_timecount */ | | 102 | .tc_get_timecount = timer_get_timecount, |
99 | 0, /* no poll_pps */ | | 103 | .tc_poll_pps = NULL, |
100 | ~0u, /* counter_mask */ | | 104 | .tc_counter_mask = ~0u, |
101 | 0, /* frequency - set at initialisation */ | | 105 | .tc_frequency = 0, |
102 | "timer-counter", /* name */ | | 106 | .tc_name = "timer-counter", |
103 | 100, /* quality */ | | 107 | .tc_quality = 100, |
104 | &cntr /* private reference */ | | 108 | .tc_priv = &cntr, |
105 | }; | | 109 | }; |
106 | | | 110 | |
107 | /* | | 111 | /* |
108 | * timer_get_timecount provides the current counter value | | 112 | * timer_get_timecount provides the current counter value |
109 | */ | | 113 | */ |
| | | 114 | __attribute__((__optimize__("Os"))) |
110 | static u_int | | 115 | static u_int |
111 | timer_get_timecount(struct timecounter *tc) | | 116 | timer_get_timecount(struct timecounter *tc) |
112 | { | | 117 | { |
113 | struct counter *ctr = (struct counter *)tc->tc_priv; | | 118 | u_int cnt, res, fixup, offset; |
114 | | | | |
115 | u_int c, res, r; | | | |
116 | int s; | | 119 | int s; |
117 | | | 120 | |
118 | | | 121 | /* |
| | | 122 | * We use splhigh/__cpu_simple_lock here as we don't want |
| | | 123 | * any mutex or lockdebug overhead. The lock protects |
| | | 124 | * the members of cntr that are written here, working |
| | | 125 | * around the various minor races that could otherwise |
| | | 126 | * be observed. |
| | | 127 | */ |
119 | s = splhigh(); | | 128 | s = splhigh(); |
120 | | | 129 | |
121 | res = c = *ctr->cntreg; | | 130 | __cpu_simple_lock(&cntr.lock); |
| | | 131 | res = cnt = *cntr.cntreg; |
122 | | | 132 | |
123 | res &= ~TMR_LIMIT; | | 133 | res &= ~TMR_LIMIT; |
| | | 134 | offset = cntr.offset; |
124 | | | 135 | |
125 | if (c != res) { | | 136 | /* |
126 | r = ctr->limit; | | 137 | * There are 3 cases here: |
| | | 138 | * - limit reached, interrupt not yet processed. |
| | | 139 | * - count reset but offset the same, race between handling |
| | | 140 | * the interrupt and tickle_tc() updating the offset. |
| | | 141 | * - normal case. |
| | | 142 | * |
| | | 143 | * For the first two cases, add the limit so that we avoid |
| | | 144 | * time going backwards. |
| | | 145 | */ |
| | | 146 | if (cnt != res) { |
| | | 147 | fixup = cntr.limit; |
| | | 148 | } else if (res < cntr.lastcnt && offset == cntr.lastoffset) { |
| | | 149 | fixup = cntr.limit; |
127 | } else { | | 150 | } else { |
128 | r = 0; | | 151 | fixup = 0; |
129 | } | | 152 | } |
| | | 153 | |
| | | 154 | cntr.lastcnt = res; |
| | | 155 | cntr.lastoffset = offset; |
130 | | | 156 | |
131 | res >>= ctr->shift; | | 157 | res >>= cntr.shift; |
132 | res &= ctr->mask; | | 158 | res &= cntr.mask; |
133 | | | 159 | |
134 | res += r + ctr->offset; | | 160 | res += fixup + offset; |
135 | | | 161 | |
| | | 162 | /* |
| | | 163 | * This handles early-boot cases where the counter resets twice |
| | | 164 | * before the offset is updated, and we have a stupid check to |
| | | 165 | * ensure overflow hasn't happened. |
| | | 166 | */ |
| | | 167 | if (res < cntr.lastres && res > (TMR_MASK+1) << 3) |
| | | 168 | res = cntr.lastres + 1; |
| | | 169 | |
| | | 170 | cntr.lastres = res; |
| | | 171 | |
| | | 172 | __cpu_simple_unlock(&cntr.lock); |
136 | splx(s); | | 173 | splx(s); |
137 | | | 174 | |
138 | return res; | | 175 | return res; |
139 | } | | 176 | } |
140 | | | 177 | |
141 | void | | 178 | void |
142 | tickle_tc(void) | | 179 | tickle_tc(void) |
143 | { | | 180 | { |
| | | 181 | |
144 | if (timecounter->tc_get_timecount == timer_get_timecount) { | | 182 | if (timecounter->tc_get_timecount == timer_get_timecount) { |
| | | 183 | /* |
| | | 184 | * This could be protected by cntr.lock/splhigh but the update |
| | | 185 | * happens at IPL10 already and, as a 32-bit store, it should |
| | | 186 | * never be seen as a partial update, so skip it here. Taking |
| | | 187 | * the lock would also slow down the offset update itself, |
| | | 188 | * making the races above more likely to need the workaround. |
| | | 189 | */ |
145 | cntr.offset += cntr.limit; | | 190 | cntr.offset += cntr.limit; |
146 | } | | 191 | } |
147 | } | | 192 | } |
148 | | | 193 | |
149 | /* | | 194 | /* |
150 | * sun4/sun4c/sun4m common timer attach code | | 195 | * sun4/sun4c/sun4m common timer attach code |
151 | */ | | 196 | */ |
152 | void | | 197 | void |
153 | timerattach(volatile int *cntreg, volatile int *limreg) | | 198 | timerattach(volatile int *cntreg, volatile int *limreg) |
154 | { | | 199 | { |
155 | u_int prec = 0, t0; | | 200 | u_int prec = 0, t0; |
156 | void (*sched_intr_fn)(void *); | | 201 | void (*sched_intr_fn)(void *); |
157 | | | 202 | |
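
The three-case fixup in timer_get_timecount() above is subtle, so here is a minimal userland model of the same logic, for reference. It is an illustrative sketch only: the names (model, model_read, M_LIMIT) and the decimal limit are assumptions, the shift/mask scaling and the locking are omitted, and this is not the sparc register layout or the kernel code.

#include <stdint.h>
#include <stdio.h>

#define M_LIMIT 0x80000000u		/* assumed "limit reached" flag */

struct model {
	uint32_t limit;			/* counts per period */
	uint32_t offset;		/* accumulated full periods */
	uint32_t lastcnt;		/* previous raw count */
	uint32_t lastoffset;		/* offset seen at previous read */
};

/* Turn one raw counter sample into a monotonically increasing count. */
static uint32_t
model_read(struct model *m, uint32_t raw)
{
	uint32_t res = raw & ~M_LIMIT;
	uint32_t fixup = 0;

	if (raw & M_LIMIT) {
		/* case 1: limit reached, wrap interrupt not yet taken */
		fixup = m->limit;
	} else if (res < m->lastcnt && m->offset == m->lastoffset) {
		/* case 2: counter reset, offset not yet advanced */
		fixup = m->limit;
	}
	/* case 3: normal read, fixup stays 0 */
	m->lastcnt = res;
	m->lastoffset = m->offset;
	return res + fixup + m->offset;
}

int
main(void)
{
	struct model m = { .limit = 1000 };

	printf("%u\n", model_read(&m, 990));	/* 990 */
	printf("%u\n", model_read(&m, 5));	/* 1005: case 2 applied */
	m.offset += m.limit;			/* the tickle_tc() step */
	printf("%u\n", model_read(&m, 20));	/* 1020 */
	return 0;
}

Without case 2, the second read would return 5, i.e. time would appear to run backwards across the wrap until tickle_tc() bumped the offset.
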
| @@ -179,26 +224,28 @@ timerattach(volatile int *cntreg, volati | | | @@ -179,26 +224,28 @@ timerattach(volatile int *cntreg, volati |
179 | | | 224 | |
180 | t0 = (t0 >> TMR_SHIFT) & TMR_MASK; | | 225 | t0 = (t0 >> TMR_SHIFT) & TMR_MASK; |
181 | t1 = (t1 >> TMR_SHIFT) & TMR_MASK; | | 226 | t1 = (t1 >> TMR_SHIFT) & TMR_MASK; |
182 | | | 227 | |
183 | if (t1 >= t0 + 100) | | 228 | if (t1 >= t0 + 100) |
184 | break; | | 229 | break; |
185 | } | | 230 | } |
186 | | | 231 | |
187 | /* find lowest active bit */ | | 232 | /* find lowest active bit */ |
188 | for (t0 = 0; t0 < TMR_SHIFT; t0++) | | 233 | for (t0 = 0; t0 < TMR_SHIFT; t0++) |
189 | if ((1 << t0) & prec) | | 234 | if ((1 << t0) & prec) |
190 | break; | | 235 | break; |
191 | | | 236 | |
| | | 237 | __cpu_simple_lock_init(&cntr.lock); |
| | | 238 | |
192 | cntr.shift = t0; | | 239 | cntr.shift = t0; |
193 | cntr.mask = (1 << (31-t0))-1; | | 240 | cntr.mask = (1 << (31-t0))-1; |
194 | counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1); | | 241 | counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1); |
195 | | | 242 | |
196 | printf(": delay constant %d, frequency = %" PRIu64 " Hz\n", | | 243 | printf(": delay constant %d, frequency = %" PRIu64 " Hz\n", |
197 | timerblurb, counter_timecounter.tc_frequency); | | 244 | timerblurb, counter_timecounter.tc_frequency); |
198 | | | 245 | |
199 | #if defined(SUN4) || defined(SUN4C) | | 246 | #if defined(SUN4) || defined(SUN4C) |
200 | if (CPU_ISSUN4 || CPU_ISSUN4C) { | | 247 | if (CPU_ISSUN4 || CPU_ISSUN4C) { |
201 | timer_init = timer_init_4; | | 248 | timer_init = timer_init_4; |
202 | sched_intr_fn = schedintr; | | 249 | sched_intr_fn = schedintr; |
203 | level10.ih_fun = clockintr_4; | | 250 | level10.ih_fun = clockintr_4; |
204 | level14.ih_fun = statintr_4; | | 251 | level14.ih_fun = statintr_4; |
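
A worked example of the setup above, with an assumed probe result: if the lowest counting bit is found at t0 == TMR_SHIFT, then cntr.shift = TMR_SHIFT, cntr.mask = (1 << (31 - TMR_SHIFT)) - 1 keeps exactly the bits that remain valid after the shift, and tc_frequency = 1000000 * (TMR_SHIFT - TMR_SHIFT + 1) = 1 MHz, i.e. the shifted counter advances once per microsecond. One extra active low-order bit (t0 == TMR_SHIFT - 1) gives 1000000 * 2 = 2 MHz.
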
| @@ -209,38 +256,42 @@ timerattach(volatile int *cntreg, volati | | | @@ -209,38 +256,42 @@ timerattach(volatile int *cntreg, volati |
209 | if (CPU_ISSUN4M) { | | 256 | if (CPU_ISSUN4M) { |
210 | timer_init = timer_init_4m; | | 257 | timer_init = timer_init_4m; |
211 | #if defined(MULTIPROCESSOR) | | 258 | #if defined(MULTIPROCESSOR) |
212 | if (sparc_ncpus > 1) | | 259 | if (sparc_ncpus > 1) |
213 | sched_intr_fn = schedintr_4m; | | 260 | sched_intr_fn = schedintr_4m; |
214 | else | | 261 | else |
215 | #endif | | 262 | #endif |
216 | sched_intr_fn = schedintr; | | 263 | sched_intr_fn = schedintr; |
217 | level10.ih_fun = clockintr_4m; | | 264 | level10.ih_fun = clockintr_4m; |
218 | level14.ih_fun = statintr_4m; | | 265 | level14.ih_fun = statintr_4m; |
219 | cntr.limit = tmr_ustolim4m(tick); | | 266 | cntr.limit = tmr_ustolim4m(tick); |
220 | } | | 267 | } |
221 | #endif | | 268 | #endif |
| | | 269 | |
222 | /* link interrupt handlers */ | | 270 | /* link interrupt handlers */ |
223 | intr_establish(10, 0, &level10, NULL, true); | | 271 | intr_establish(10, 0, &level10, NULL, true); |
224 | intr_establish(14, 0, &level14, NULL, true); | | 272 | intr_establish(14, 0, &level14, NULL, true); |
225 | | | 273 | |
226 | /* Establish a soft interrupt at a lower level for schedclock */ | | 274 | /* Establish a soft interrupt at a lower level for schedclock */ |
227 | sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL); | | 275 | sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL); |
228 | if (sched_cookie == NULL) | | 276 | if (sched_cookie == NULL) |
229 | panic("timerattach: cannot establish schedintr"); | | 277 | panic("timerattach: cannot establish schedintr"); |
230 | | | 278 | |
231 | cntr.cntreg = cntreg; | | 279 | cntr.cntreg = cntreg; |
232 | cntr.limit >>= cntr.shift; | | 280 | cntr.limit >>= cntr.shift; |
233 | | | 281 | |
| | | 282 | /* start at non-zero, so that cntr.lastoffset is less */ |
| | | 283 | cntr.offset = cntr.limit; |
| | | 284 | |
234 | tc_init(&counter_timecounter); | | 285 | tc_init(&counter_timecounter); |
235 | } | | 286 | } |
236 | | | 287 | |
237 | /* | | 288 | /* |
238 | * Both sun4 and sun4m can attach a timer on obio. | | 289 | * Both sun4 and sun4m can attach a timer on obio. |
239 | * The sun4m OPENPROM calls the timer the "counter". | | 290 | * The sun4m OPENPROM calls the timer the "counter". |
240 | * The sun4 timer must be probed. | | 291 | * The sun4 timer must be probed. |
241 | */ | | 292 | */ |
242 | static int | | 293 | static int |
243 | timermatch_obio(device_t parent, cfdata_t cf, void *aux) | | 294 | timermatch_obio(device_t parent, cfdata_t cf, void *aux) |
244 | { | | 295 | { |
245 | #if defined(SUN4) || defined(SUN4M) | | 296 | #if defined(SUN4) || defined(SUN4M) |
246 | union obio_attach_args *uoba = aux; | | 297 | union obio_attach_args *uoba = aux; |
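
For context on why the monotonicity fixups matter: the timecounter(9) framework turns successive tc_get_timecount() samples into elapsed ticks as, roughly, a masked difference against a latched reference sample. A simplified model follows; this is an assumption-level sketch of the framework's behaviour, not its actual code.

#include <stdint.h>

/*
 * Elapsed ticks between a latched reference sample and the current
 * sample, modulo the counter width. A backwards step of even 1 in
 * the current sample shows up as a delta of nearly counter_mask,
 * which is why timer_get_timecount() must never run backwards.
 */
static uint32_t
tc_delta_model(uint32_t now, uint32_t ref, uint32_t counter_mask)
{
	return (now - ref) & counter_mask;
}
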