@@ -1,4 +1,4 @@
-/* $NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $ */
+/* $NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -83,56 +83,93 @@
* timecounter local state
*/
static struct counter {
- volatile u_int *cntreg; /* counter register */
+ __cpu_simple_lock_t lock; /* protects access to offset, reg, last* */
+ volatile u_int *cntreg; /* counter register to read */
u_int limit; /* limit we count up to */
u_int offset; /* accumulated offset due to wraps */
u_int shift; /* scaling for valid bits */
u_int mask; /* valid bit mask */
-} cntr;
+ u_int lastcnt; /* the last* values are used to notice */
+ u_int lastres; /* and fix up cases where it would appear */
+ u_int lastoffset; /* time went backwards. */
+} cntr __aligned(CACHE_LINE_SIZE);
/*
* define timecounter
*/
static struct timecounter counter_timecounter = {
- timer_get_timecount, /* get_timecount */
- 0, /* no poll_pps */
- ~0u, /* counter_mask */
- 0, /* frequency - set at initialisation */
- "timer-counter", /* name */
- 100, /* quality */
- &cntr /* private reference */
+ .tc_get_timecount = timer_get_timecount,
+ .tc_poll_pps = NULL,
+ .tc_counter_mask = ~0u,
+ .tc_frequency = 0,
+ .tc_name = "timer-counter",
+ .tc_quality = 100,
+ .tc_priv = &cntr,
};
/*
* timer_get_timecount provides the current counter value
*/
+__attribute__((__optimize__("Os")))
static u_int
timer_get_timecount(struct timecounter *tc)
{
- struct counter *ctr = (struct counter *)tc->tc_priv;
-
- u_int c, res, r;
+ u_int cnt, res, fixup, offset;
int s;
-
+ /*
+ * We use splhigh/__cpu_simple_lock here as we don't want
+	 * any mutex or lockdebug overhead. The lock protects the
+	 * members of cntr that are written here so that the various
+	 * minor races can be observed and worked around.
+ */
s = splhigh();
- res = c = *ctr->cntreg;
+ __cpu_simple_lock(&cntr.lock);
+ res = cnt = *cntr.cntreg;
res &= ~TMR_LIMIT;
+ offset = cntr.offset;
- if (c != res) {
- r = ctr->limit;
+ /*
+ * There are 3 cases here:
+ * - limit reached, interrupt not yet processed.
+ * - count reset but offset the same, race between handling
+ * the interrupt and tickle_tc() updating the offset.
+ * - normal case.
+ *
+ * For the first two cases, add the limit so that we avoid
+ * time going backwards.
+ */
+ if (cnt != res) {
+ fixup = cntr.limit;
+ } else if (res < cntr.lastcnt && offset == cntr.lastoffset) {
+ fixup = cntr.limit;
} else {
- r = 0;
+ fixup = 0;
}
+
+ cntr.lastcnt = res;
+ cntr.lastoffset = offset;
- res >>= ctr->shift;
- res &= ctr->mask;
+ res >>= cntr.shift;
+ res &= cntr.mask;
- res += r + ctr->offset;
+ res += fixup + offset;
+ /*
+ * This handles early-boot cases where the counter resets twice
+	 * before the offset is updated; a coarse check ensures
+	 * overflow hasn't happened.
+ */
+ if (res < cntr.lastres && res > (TMR_MASK+1) << 3)
+ res = cntr.lastres + 1;
+
+ cntr.lastres = res;
+
+ __cpu_simple_unlock(&cntr.lock);
splx(s);
return res;
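
To make the three cases concrete, here is a minimal user-space model of the wrap-fixup logic above. It is a sketch only: single reader, no lock, and the cntr.shift/cntr.mask scaling dropped; every sim_* name is an invention of this model, not kernel API.

#include <stdio.h>

#define SIM_LIMIT_BIT	0x80000000u	/* models TMR_LIMIT */
#define SIM_LIMIT	(1u << 20)	/* count at which the timer wraps */

static unsigned int sim_offset;		/* models cntr.offset */
static unsigned int sim_lastcnt;	/* models cntr.lastcnt */
static unsigned int sim_lastoffset;	/* models cntr.lastoffset */
static unsigned int sim_lastres;	/* models cntr.lastres */

/* Models timer_get_timecount(); "raw" stands in for *cntr.cntreg. */
static unsigned int
sim_timecount(unsigned int raw)
{
	unsigned int res = raw & ~SIM_LIMIT_BIT;
	unsigned int fixup = 0;

	if (raw & SIM_LIMIT_BIT)
		fixup = SIM_LIMIT;	/* case 1: interrupt still pending */
	else if (res < sim_lastcnt && sim_offset == sim_lastoffset)
		fixup = SIM_LIMIT;	/* case 2: offset not yet updated */

	sim_lastcnt = res;
	sim_lastoffset = sim_offset;
	res += fixup + sim_offset;

	/* Models the early-boot lastres clamp. */
	if (res < sim_lastres && res > (SIM_LIMIT << 3))
		res = sim_lastres + 1;
	sim_lastres = res;
	return res;
}

int
main(void)
{
	unsigned int t1, t2, t3;

	t1 = sim_timecount(100);			/* normal read */
	t2 = sim_timecount(5 | SIM_LIMIT_BIT);		/* wrapped, intr pending */
	sim_offset += SIM_LIMIT;			/* models tickle_tc() */
	t3 = sim_timecount(7);				/* after the update */
	printf("%u %u %u\n", t1, t2, t3);
	return (t1 < t2 && t2 < t3) ? 0 : 1;
}

The three reads produce strictly increasing values, which is exactly the property the fixup and the lastres clamp exist to preserve.
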
@@ -141,7 +178,15 @@
void
tickle_tc(void)
{
+
if (timecounter->tc_get_timecount == timer_get_timecount) {
+ /*
+ * This could be protected by cntr.lock/splhigh but the update
+ * happens at IPL10 already and as a 32 bit value it should
+	 * never be seen as a partial update, so skip it here. Taking
+	 * the lock here would also slow down the offset update, making
+ * one of the cases above more likely to need the workaround.
+ */
cntr.offset += cntr.limit;
}
}
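
The comment relies on naturally aligned 32-bit stores being single-copy atomic on SPARC: a concurrent reader of cntr.offset sees either the old or the new value, never a torn one. A hypothetical restatement of that guarantee with C11 atomics, for illustration only (the kernel uses a plain store, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdio.h>

#define SIM_LIMIT	(1u << 20)

static _Atomic unsigned int sim_offset;	/* models cntr.offset */

/* Single writer, called from the level-10 handler in the kernel. */
static void
sim_tickle_tc(void)
{
	atomic_fetch_add_explicit(&sim_offset, SIM_LIMIT,
	    memory_order_relaxed);
}

int
main(void)
{
	sim_tickle_tc();
	/* A reader can observe only whole 32-bit values. */
	printf("offset = %u\n",
	    atomic_load_explicit(&sim_offset, memory_order_relaxed));
	return 0;
}
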
@@ -189,6 +234,8 @@
if ((1 << t0) & prec)
break;
+ __cpu_simple_lock_init(&cntr.lock);
+
cntr.shift = t0;
cntr.mask = (1 << (31-t0))-1;
counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1);
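
The loop above probes for the lowest usable counter bit; in practice t0 ends up as 10 (sun4/sun4c, microseconds at bit 10) or 9 (sun4m, half-microseconds at bit 9). A throwaway sketch of the derived mask and frequency, assuming TMR_SHIFT is 10 as in timerreg.h:

#include <stdio.h>

#define TMR_SHIFT	10	/* assumed, from timerreg.h */

int
main(void)
{
	int t0;

	for (t0 = 9; t0 <= 10; t0++) {
		unsigned int mask = (1u << (31 - t0)) - 1;
		unsigned long freq = 1000000UL * (TMR_SHIFT - t0 + 1);

		/* t0 = 10: mask 0x1fffff, 1 MHz; t0 = 9: mask 0x3fffff, 2 MHz */
		printf("t0=%d mask=%#x freq=%lu Hz\n", t0, mask, freq);
	}
	return 0;
}
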
@@ -219,6 +266,7 @@
cntr.limit = tmr_ustolim4m(tick);
}
#endif
+
/* link interrupt handlers */
intr_establish(10, 0, &level10, NULL, true);
intr_establish(14, 0, &level14, NULL, true);
@@ -230,6 +278,9 @@
cntr.cntreg = cntreg;
cntr.limit >>= cntr.shift;
+
+	/* start at non-zero, so that cntr.lastoffset (initially 0) is less */
+ cntr.offset = cntr.limit;
tc_init(&counter_timecounter);
}
@@ -1,4 +1,4 @@
-/* $NetBSD: timer_sun4m.c,v 1.28 2011/09/01 08:43:24 martin Exp $ */
+/* $NetBSD: timer_sun4m.c,v 1.28.8.1 2018/03/13 16:48:05 snj Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -58,7 +58,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.28 2011/09/01 08:43:24 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.28.8.1 2018/03/13 16:48:05 snj Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -74,7 +74,7 @@
#include <sparc/sparc/timerreg.h>
#include <sparc/sparc/timervar.h>
-struct timer_4m *timerreg4m;
+static struct timer_4m *timerreg4m;
#define counterreg4m cpuinfo.counterreg_4m
/*
@@ -142,9 +142,13 @@
*/
if (cold)
return 0;
+
kpreempt_disable();
- /* read the limit register to clear the interrupt */
+
+ /* Read the limit register to clear the interrupt. */
*((volatile int *)&timerreg4m->t_limit);
+
+ /* Update the timecounter offset. */
tickle_tc();
hardclock((struct clockframe *)cap);
kpreempt_enable();
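
The dereference of t_limit is the usual read-to-ack idiom for hardware that clears its pending-interrupt state as a side effect of a register read. A standalone sketch of the idiom, in which struct sim_timer and its layout are inventions of this example rather than the real sun4m register map:

struct sim_timer {
	volatile unsigned int t_counter;
	volatile unsigned int t_limit;	/* reading this clears the intr */
};

static void
sim_ack(struct sim_timer *t)
{
	/* Only the side effect of the volatile read matters; the value
	 * is discarded.  The volatile qualifier keeps the compiler
	 * from eliding the load. */
	(void)t->t_limit;
}

int
main(void)
{
	struct sim_timer t = { 0, 0 };

	sim_ack(&t);
	return 0;
}
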
@@ -1,4 +1,4 @@
-/* $NetBSD: timerreg.h,v 1.9 2005/11/16 03:00:23 uwe Exp $ */
+/* $NetBSD: timerreg.h,v 1.9.118.1 2018/03/13 16:48:05 snj Exp $ */
/*
* Copyright (c) 1992, 1993
@@ -120,9 +120,12 @@
*/
#define tmr_ustolim(n) (((n) + 1) << TMR_SHIFT)
-/*efine TMR_SHIFT4M 9 -* shift to obtain microseconds */
-/*efine tmr_ustolim4m(n) (((2*(n)) + 1) << TMR_SHIFT4M)*/
+#define TMR_SHIFT4M 9 /* shift to obtain microseconds */
+#if 1
+#define tmr_ustolim4m(n) (((2*(n)) + 1) << TMR_SHIFT4M)
+#else
#define tmr_ustolim4m(n) ((n) << TMR_SHIFT)
+#endif
/* The number of microseconds represented by a counter register value */
#define tmr_cnttous(c) ((((c) >> TMR_SHIFT) & TMR_MASK) - 1)
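
As a worked example of these macros for a 100 Hz clock (tick = 10000 microseconds), assuming TMR_SHIFT is 10 and TMR_MASK is 0x1fffff, their values earlier in this header:

#include <stdio.h>

#define TMR_SHIFT	10		/* assumed, per this header */
#define TMR_MASK	0x1fffff	/* assumed: 21 bits of microseconds */
#define TMR_SHIFT4M	9

#define tmr_ustolim(n)		(((n) + 1) << TMR_SHIFT)
#define tmr_ustolim4m(n)	(((2*(n)) + 1) << TMR_SHIFT4M)
#define tmr_cnttous(c)		((((c) >> TMR_SHIFT) & TMR_MASK) - 1)

int
main(void)
{
	unsigned int tick = 10000;	/* 100 Hz */

	/* sun4/sun4c: (10000 + 1) << 10 == 10241024 */
	printf("tmr_ustolim(%u)   = %u\n", tick, tmr_ustolim(tick));
	/* sun4m counts half-microseconds: (2*10000 + 1) << 9 == 10240512 */
	printf("tmr_ustolim4m(%u) = %u\n", tick, tmr_ustolim4m(tick));
	/* round trip back to microseconds: yields 10000 again */
	printf("tmr_cnttous(...)     = %u\n", tmr_cnttous(tmr_ustolim(tick)));
	return 0;
}
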