Tue Mar 13 16:48:05 2018 UTC
Pull up following revision(s) (requested by mrg in ticket #1519):
	sys/arch/sparc/sparc/timer_sun4m.c: 1.31
	sys/arch/sparc/sparc/timer.c: 1.33 1.34
	sys/arch/sparc/sparc/timerreg.h: 1.10
fix time goes backwards problems on sparc.
there are a few things here:
- there's a race between reading the limit register (which clears
  the interrupt and the limit bit) and increasing the latest offset.
  this can happen easily if an interrupt comes between the read and
  the call to tickle_tc() that increases the offset (i observed this
  actually happening.)
- in early boot, sometimes the counter can cycle twice before the
  tickle happens.
to handle these issues, add two workarounds:
- if the limit bit isn't set, but the counter value is less than
  the previous value, and the offset hasn't changed, use the same
  fixup as if the limit bit was set.  this handles the first case
  above.
- add a hard workaround that never allows returning a smaller
  value (except during 32 bit overflow): if the result is less than
  the last result, add fixups until it no longer is (or until it
  would overflow.)
the first workaround fixes general run-time issues, and the second
fixes issues only seen during boot.
also expand some comments in timer_sun4m.c and re-enable the sun4m
sub-microsecond tmr_ustolim4m() support (but it's always called with
at least 'tick' microseconds, so the end result is the same.)
fix hang at 4 billion microseconds (1h12 or so), and simplify part of the
previous fix.


(snj)
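
For orientation before the side-by-side diffs: the two workarounds described
above amount to roughly the following logic in timer_get_timecount().  This is
a simplified standalone sketch, not the committed code -- the TMR_MASK value
and the field layout are my reading of timerreg.h, and the real function also
runs under splhigh() and a __cpu_simple_lock (the cntr structure is made
cache-line aligned so the extra last* fields stay cheap to touch).

#include <stdint.h>

#define TMR_LIMIT	0x80000000u	/* limit-reached bit, as in timerreg.h */
#define TMR_MASK	0x1fffffu	/* 21-bit counter field (assumed value) */

struct counter_sketch {
	volatile uint32_t *cntreg;	/* hardware counter register */
	uint32_t limit;			/* counter period, already scaled */
	uint32_t offset;		/* bumped by tickle_tc() at each wrap */
	uint32_t shift, mask;		/* scaling for the valid bits */
	uint32_t lastcnt, lastres, lastoffset;
};

static uint32_t
timecount_sketch(struct counter_sketch *c)
{
	uint32_t cnt, res, fixup, offset;

	res = cnt = *c->cntreg;
	res &= ~TMR_LIMIT;
	offset = c->offset;

	if (cnt != res) {
		/* limit bit set: counter wrapped, interrupt not yet handled */
		fixup = c->limit;
	} else if (res < c->lastcnt && offset == c->lastoffset) {
		/* counter restarted but offset unchanged: we raced with the
		   interrupt handler before tickle_tc() bumped the offset */
		fixup = c->limit;
	} else {
		fixup = 0;
	}
	c->lastcnt = res;
	c->lastoffset = offset;

	res = (res >> c->shift) & c->mask;
	res += fixup + offset;

	/* hard workaround: never return a smaller value than last time,
	   unless the 32-bit result has legitimately overflowed */
	if (res < c->lastres && res > (TMR_MASK + 1) << 3)
		res = c->lastres + 1;
	c->lastres = res;

	return res;
}
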
diff -r1.29 -r1.29.8.1 src/sys/arch/sparc/sparc/timer.c
diff -r1.28 -r1.28.8.1 src/sys/arch/sparc/sparc/timer_sun4m.c
diff -r1.9 -r1.9.118.1 src/sys/arch/sparc/sparc/timerreg.h

cvs diff -r1.29 -r1.29.8.1 src/sys/arch/sparc/sparc/timer.c

--- src/sys/arch/sparc/sparc/timer.c 2011/07/17 23:18:23 1.29
+++ src/sys/arch/sparc/sparc/timer.c 2018/03/13 16:48:05 1.29.8.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $ */ 1/* $NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 1994 Gordon W. Ross 6 * Copyright (c) 1994 Gordon W. Ross
7 * Copyright (c) 1993 Adam Glass 7 * Copyright (c) 1993 Adam Glass
8 * Copyright (c) 1996 Paul Kranenburg 8 * Copyright (c) 1996 Paul Kranenburg
9 * Copyright (c) 1996 9 * Copyright (c) 1996
10 * The President and Fellows of Harvard College. All rights reserved. 10 * The President and Fellows of Harvard College. All rights reserved.
11 * 11 *
12 * This software was developed by the Computer Systems Engineering group 12 * This software was developed by the Computer Systems Engineering group
13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
14 * contributed to Berkeley. 14 * contributed to Berkeley.
@@ -50,108 +50,153 @@ @@ -50,108 +50,153 @@
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE. 51 * SUCH DAMAGE.
52 * 52 *
53 * @(#)clock.c 8.1 (Berkeley) 6/11/93 53 * @(#)clock.c 8.1 (Berkeley) 6/11/93
54 */ 54 */
55 55
56/* 56/*
57 * Kernel clocks provided by "timer" device. The hardclock is provided by 57 * Kernel clocks provided by "timer" device. The hardclock is provided by
58 * the timer register (aka system counter). The statclock is provided by 58 * the timer register (aka system counter). The statclock is provided by
59 * per CPU counter register(s) (aka processor counter(s)). 59 * per CPU counter register(s) (aka processor counter(s)).
60 */ 60 */
61 61
62#include <sys/cdefs.h> 62#include <sys/cdefs.h>
63__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29 2011/07/17 23:18:23 mrg Exp $"); 63__KERNEL_RCSID(0, "$NetBSD: timer.c,v 1.29.8.1 2018/03/13 16:48:05 snj Exp $");
64 64
65#include <sys/param.h> 65#include <sys/param.h>
66#include <sys/kernel.h> 66#include <sys/kernel.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/systm.h> 68#include <sys/systm.h>
69#include <sys/timetc.h> 69#include <sys/timetc.h>
70 70
71#include <machine/autoconf.h> 71#include <machine/autoconf.h>
72#include <sys/bus.h> 72#include <sys/bus.h>
73 73
74#include <sparc/sparc/timerreg.h> 74#include <sparc/sparc/timerreg.h>
75#include <sparc/sparc/timervar.h> 75#include <sparc/sparc/timervar.h>
76 76
77static struct intrhand level10; 77static struct intrhand level10;
78static struct intrhand level14; 78static struct intrhand level14;
79 79
80static u_int timer_get_timecount(struct timecounter *); 80static u_int timer_get_timecount(struct timecounter *);
81 81
82/* 82/*
83 * timecounter local state 83 * timecounter local state
84 */ 84 */
85static struct counter { 85static struct counter {
86 volatile u_int *cntreg; /* counter register */ 86 __cpu_simple_lock_t lock; /* protects access to offset, reg, last* */
 87 volatile u_int *cntreg; /* counter register to read */
87 u_int limit; /* limit we count up to */ 88 u_int limit; /* limit we count up to */
88 u_int offset; /* accumulated offet due to wraps */ 89 u_int offset; /* accumulated offet due to wraps */
89 u_int shift; /* scaling for valid bits */ 90 u_int shift; /* scaling for valid bits */
90 u_int mask; /* valid bit mask */ 91 u_int mask; /* valid bit mask */
91} cntr; 92 u_int lastcnt; /* the last* values are used to notice */
 93 u_int lastres; /* and fix up cases where it would appear */
 94 u_int lastoffset; /* time went backwards. */
 95} cntr __aligned(CACHE_LINE_SIZE);
92 96
93/* 97/*
94 * define timecounter 98 * define timecounter
95 */ 99 */
96 100
97static struct timecounter counter_timecounter = { 101static struct timecounter counter_timecounter = {
98 timer_get_timecount, /* get_timecount */ 102 .tc_get_timecount = timer_get_timecount,
99 0, /* no poll_pps */ 103 .tc_poll_pps = NULL,
100 ~0u, /* counter_mask */ 104 .tc_counter_mask = ~0u,
101 0, /* frequency - set at initialisation */ 105 .tc_frequency = 0,
102 "timer-counter", /* name */ 106 .tc_name = "timer-counter",
103 100, /* quality */ 107 .tc_quality = 100,
104 &cntr /* private reference */ 108 .tc_priv = &cntr,
105}; 109};
106 110
107/* 111/*
108 * timer_get_timecount provide current counter value 112 * timer_get_timecount provide current counter value
109 */ 113 */
 114__attribute__((__optimize__("Os")))
110static u_int 115static u_int
111timer_get_timecount(struct timecounter *tc) 116timer_get_timecount(struct timecounter *tc)
112{ 117{
113 struct counter *ctr = (struct counter *)tc->tc_priv; 118 u_int cnt, res, fixup, offset;
114 
115 u_int c, res, r; 
116 int s; 119 int s;
117 120
118 121 /*
 122 * We use splhigh/__cpu_simple_lock here as we don't want
 123 * any mutex or lockdebug overhead. The lock protects a
 124 * bunch of the members of cntr that are written here to
 125 * deal with the various minor races to be observed and
 126 * worked around.
 127 */
119 s = splhigh(); 128 s = splhigh();
120 129
121 res = c = *ctr->cntreg; 130 __cpu_simple_lock(&cntr.lock);
 131 res = cnt = *cntr.cntreg;
122 132
123 res &= ~TMR_LIMIT; 133 res &= ~TMR_LIMIT;
 134 offset = cntr.offset;
124 135
125 if (c != res) { 136 /*
126 r = ctr->limit; 137 * There are 3 cases here:
 138 * - limit reached, interrupt not yet processed.
 139 * - count reset but offset the same, race between handling
 140 * the interrupt and tickle_tc() updating the offset.
 141 * - normal case.
 142 *
 143 * For the first two cases, add the limit so that we avoid
 144 * time going backwards.
 145 */
 146 if (cnt != res) {
 147 fixup = cntr.limit;
 148 } else if (res < cntr.lastcnt && offset == cntr.lastoffset) {
 149 fixup = cntr.limit;
127 } else { 150 } else {
128 r = 0; 151 fixup = 0;
129 } 152 }
 153
 154 cntr.lastcnt = res;
 155 cntr.lastoffset = offset;
130  156
131 res >>= ctr->shift; 157 res >>= cntr.shift;
132 res &= ctr->mask; 158 res &= cntr.mask;
133 159
134 res += r + ctr->offset; 160 res += fixup + offset;
135 161
 162 /*
 163 * This handles early-boot cases where the counter resets twice
 164 * before the offset is updated, and we have a stupid check to
 165 * ensure overflow hasn't happened.
 166 */
 167 if (res < cntr.lastres && res > (TMR_MASK+1) << 3)
 168 res = cntr.lastres + 1;
 169
 170 cntr.lastres = res;
 171
 172 __cpu_simple_unlock(&cntr.lock);
136 splx(s); 173 splx(s);
137 174
138 return res; 175 return res;
139} 176}
140 177
141void 178void
142tickle_tc(void) 179tickle_tc(void)
143{ 180{
 181
144 if (timecounter->tc_get_timecount == timer_get_timecount) { 182 if (timecounter->tc_get_timecount == timer_get_timecount) {
 183 /*
 184 * This could be protected by cntr.lock/splhigh but the update
 185 * happens at IPL10 already and as a 32 bit value it should
 186 * never be seen as a partial update, so skip it here. This
 187 * also probably slows down the actual offset update, making
 188 * one of the cases above more likely to need the workaround.
 189 */
145 cntr.offset += cntr.limit; 190 cntr.offset += cntr.limit;
146 } 191 }
147} 192}
148 193
149/* 194/*
150 * sun4/sun4c/sun4m common timer attach code 195 * sun4/sun4c/sun4m common timer attach code
151 */ 196 */
152void 197void
153timerattach(volatile int *cntreg, volatile int *limreg) 198timerattach(volatile int *cntreg, volatile int *limreg)
154{ 199{
155 u_int prec = 0, t0; 200 u_int prec = 0, t0;
156 void (*sched_intr_fn)(void *); 201 void (*sched_intr_fn)(void *);
157 202
@@ -179,26 +224,28 @@ timerattach(volatile int *cntreg, volati @@ -179,26 +224,28 @@ timerattach(volatile int *cntreg, volati
179 224
180 t0 = (t0 >> TMR_SHIFT) & TMR_MASK; 225 t0 = (t0 >> TMR_SHIFT) & TMR_MASK;
181 t1 = (t1 >> TMR_SHIFT) & TMR_MASK; 226 t1 = (t1 >> TMR_SHIFT) & TMR_MASK;
182 227
183 if (t1 >= t0 + 100) 228 if (t1 >= t0 + 100)
184 break; 229 break;
185 } 230 }
186 231
187 /* find lowest active bit */ 232 /* find lowest active bit */
188 for (t0 = 0; t0 < TMR_SHIFT; t0++) 233 for (t0 = 0; t0 < TMR_SHIFT; t0++)
189 if ((1 << t0) & prec) 234 if ((1 << t0) & prec)
190 break; 235 break;
191 236
 237 __cpu_simple_lock_init(&cntr.lock);
 238
192 cntr.shift = t0; 239 cntr.shift = t0;
193 cntr.mask = (1 << (31-t0))-1; 240 cntr.mask = (1 << (31-t0))-1;
194 counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1); 241 counter_timecounter.tc_frequency = 1000000 * (TMR_SHIFT - t0 + 1);
195  242
196 printf(": delay constant %d, frequency = %" PRIu64 " Hz\n", 243 printf(": delay constant %d, frequency = %" PRIu64 " Hz\n",
197 timerblurb, counter_timecounter.tc_frequency); 244 timerblurb, counter_timecounter.tc_frequency);
198 245
199#if defined(SUN4) || defined(SUN4C) 246#if defined(SUN4) || defined(SUN4C)
200 if (CPU_ISSUN4 || CPU_ISSUN4C) { 247 if (CPU_ISSUN4 || CPU_ISSUN4C) {
201 timer_init = timer_init_4; 248 timer_init = timer_init_4;
202 sched_intr_fn = schedintr; 249 sched_intr_fn = schedintr;
203 level10.ih_fun = clockintr_4; 250 level10.ih_fun = clockintr_4;
204 level14.ih_fun = statintr_4; 251 level14.ih_fun = statintr_4;
@@ -209,38 +256,42 @@ timerattach(volatile int *cntreg, volati @@ -209,38 +256,42 @@ timerattach(volatile int *cntreg, volati
209 if (CPU_ISSUN4M) { 256 if (CPU_ISSUN4M) {
210 timer_init = timer_init_4m; 257 timer_init = timer_init_4m;
211#if defined(MULTIPROCESSOR) 258#if defined(MULTIPROCESSOR)
212 if (sparc_ncpus > 1) 259 if (sparc_ncpus > 1)
213 sched_intr_fn = schedintr_4m; 260 sched_intr_fn = schedintr_4m;
214 else 261 else
215#endif 262#endif
216 sched_intr_fn = schedintr; 263 sched_intr_fn = schedintr;
217 level10.ih_fun = clockintr_4m; 264 level10.ih_fun = clockintr_4m;
218 level14.ih_fun = statintr_4m; 265 level14.ih_fun = statintr_4m;
219 cntr.limit = tmr_ustolim4m(tick); 266 cntr.limit = tmr_ustolim4m(tick);
220 } 267 }
221#endif 268#endif
 269
222 /* link interrupt handlers */ 270 /* link interrupt handlers */
223 intr_establish(10, 0, &level10, NULL, true); 271 intr_establish(10, 0, &level10, NULL, true);
224 intr_establish(14, 0, &level14, NULL, true); 272 intr_establish(14, 0, &level14, NULL, true);
225 273
226 /* Establish a soft interrupt at a lower level for schedclock */ 274 /* Establish a soft interrupt at a lower level for schedclock */
227 sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL); 275 sched_cookie = sparc_softintr_establish(IPL_SCHED, sched_intr_fn, NULL);
228 if (sched_cookie == NULL) 276 if (sched_cookie == NULL)
229 panic("timerattach: cannot establish schedintr"); 277 panic("timerattach: cannot establish schedintr");
230 278
231 cntr.cntreg = cntreg; 279 cntr.cntreg = cntreg;
232 cntr.limit >>= cntr.shift; 280 cntr.limit >>= cntr.shift;
233 281
 282 /* start at non-zero, so that cntr.oldoffset is less */
 283 cntr.offset = cntr.limit;
 284
234 tc_init(&counter_timecounter); 285 tc_init(&counter_timecounter);
235} 286}
236 287
237/* 288/*
238 * Both sun4 and sun4m can attach a timer on obio. 289 * Both sun4 and sun4m can attach a timer on obio.
239 * The sun4m OPENPROM calls the timer the "counter". 290 * The sun4m OPENPROM calls the timer the "counter".
240 * The sun4 timer must be probed. 291 * The sun4 timer must be probed.
241 */ 292 */
242static int 293static int
243timermatch_obio(device_t parent, cfdata_t cf, void *aux) 294timermatch_obio(device_t parent, cfdata_t cf, void *aux)
244{ 295{
245#if defined(SUN4) || defined(SUN4M) 296#if defined(SUN4) || defined(SUN4M)
246 union obio_attach_args *uoba = aux; 297 union obio_attach_args *uoba = aux;

cvs diff -r1.28 -r1.28.8.1 src/sys/arch/sparc/sparc/timer_sun4m.c

--- src/sys/arch/sparc/sparc/timer_sun4m.c 2011/09/01 08:43:24 1.28
+++ src/sys/arch/sparc/sparc/timer_sun4m.c 2018/03/13 16:48:05 1.28.8.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: timer_sun4m.c,v 1.28 2011/09/01 08:43:24 martin Exp $ */ 1/* $NetBSD: timer_sun4m.c,v 1.28.8.1 2018/03/13 16:48:05 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 1994 Gordon W. Ross 6 * Copyright (c) 1994 Gordon W. Ross
7 * Copyright (c) 1993 Adam Glass 7 * Copyright (c) 1993 Adam Glass
8 * Copyright (c) 1996 Paul Kranenburg 8 * Copyright (c) 1996 Paul Kranenburg
9 * Copyright (c) 1996 9 * Copyright (c) 1996
10 * The President and Fellows of Harvard College. All rights reserved. 10 * The President and Fellows of Harvard College. All rights reserved.
11 * 11 *
12 * This software was developed by the Computer Systems Engineering group 12 * This software was developed by the Computer Systems Engineering group
13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
14 * contributed to Berkeley. 14 * contributed to Berkeley.
@@ -48,43 +48,43 @@ @@ -48,43 +48,43 @@
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE. 51 * SUCH DAMAGE.
52 * 52 *
53 * @(#)clock.c 8.1 (Berkeley) 6/11/93 53 * @(#)clock.c 8.1 (Berkeley) 6/11/93
54 */ 54 */
55 55
56/* 56/*
57 * Sun4m timer support. 57 * Sun4m timer support.
58 */ 58 */
59 59
60#include <sys/cdefs.h> 60#include <sys/cdefs.h>
61__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.28 2011/09/01 08:43:24 martin Exp $"); 61__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.28.8.1 2018/03/13 16:48:05 snj Exp $");
62 62
63#include <sys/param.h> 63#include <sys/param.h>
64#include <sys/kernel.h> 64#include <sys/kernel.h>
65#include <sys/device.h> 65#include <sys/device.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/cpu.h> 67#include <sys/cpu.h>
68 68
69#include <machine/autoconf.h> 69#include <machine/autoconf.h>
70#include <sys/bus.h> 70#include <sys/bus.h>
71 71
72#include <sparc/sparc/vaddrs.h> 72#include <sparc/sparc/vaddrs.h>
73#include <sparc/sparc/cpuvar.h> 73#include <sparc/sparc/cpuvar.h>
74#include <sparc/sparc/timerreg.h> 74#include <sparc/sparc/timerreg.h>
75#include <sparc/sparc/timervar.h> 75#include <sparc/sparc/timervar.h>
76 76
77struct timer_4m *timerreg4m; 77static struct timer_4m *timerreg4m;
78#define counterreg4m cpuinfo.counterreg_4m 78#define counterreg4m cpuinfo.counterreg_4m
79 79
80/* 80/*
81 * Set up the real-time and statistics clocks. 81 * Set up the real-time and statistics clocks.
82 * Leave stathz 0 only if no alternative timer is available. 82 * Leave stathz 0 only if no alternative timer is available.
83 * 83 *
84 * The frequencies of these clocks must be an even number of microseconds. 84 * The frequencies of these clocks must be an even number of microseconds.
85 */ 85 */
86void 86void
87timer_init_4m(void) 87timer_init_4m(void)
88{ 88{
89 struct cpu_info *cpi; 89 struct cpu_info *cpi;
90 int n; 90 int n;
@@ -132,29 +132,33 @@ clockintr_4m(void *cap) @@ -132,29 +132,33 @@ clockintr_4m(void *cap)
132 /* 132 /*
133 * XXX this needs to be fixed in a more general way 133 * XXX this needs to be fixed in a more general way
134 * problem is that the kernel enables interrupts and THEN 134 * problem is that the kernel enables interrupts and THEN
135 * sets up clocks. In between there's an opportunity to catch 135 * sets up clocks. In between there's an opportunity to catch
136 * a timer interrupt - if we call hardclock() at that point we'll 136 * a timer interrupt - if we call hardclock() at that point we'll
137 * panic 137 * panic
138 * so for now just bail when cold 138 * so for now just bail when cold
139 * 139 *
140 * For MP, we defer calling hardclock() to the schedintr so 140 * For MP, we defer calling hardclock() to the schedintr so
141 * that we call it on all cpus. 141 * that we call it on all cpus.
142 */ 142 */
143 if (cold) 143 if (cold)
144 return 0; 144 return 0;
 145
145 kpreempt_disable(); 146 kpreempt_disable();
146 /* read the limit register to clear the interrupt */ 147
 148 /* Read the limit register to clear the interrupt. */
147 *((volatile int *)&timerreg4m->t_limit); 149 *((volatile int *)&timerreg4m->t_limit);
 150
 151 /* Update the timecounter offset. */
148 tickle_tc(); 152 tickle_tc();
149 hardclock((struct clockframe *)cap); 153 hardclock((struct clockframe *)cap);
150 kpreempt_enable(); 154 kpreempt_enable();
151 return (1); 155 return (1);
152} 156}
153 157
154/* 158/*
155 * Level 14 (stat clock) interrupts from processor counter. 159 * Level 14 (stat clock) interrupts from processor counter.
156 */ 160 */
157int 161int
158statintr_4m(void *cap) 162statintr_4m(void *cap)
159{ 163{
160 struct clockframe *frame = cap; 164 struct clockframe *frame = cap;

cvs diff -r1.9 -r1.9.118.1 src/sys/arch/sparc/sparc/timerreg.h

--- src/sys/arch/sparc/sparc/timerreg.h 2005/11/16 03:00:23 1.9
+++ src/sys/arch/sparc/sparc/timerreg.h 2018/03/13 16:48:05 1.9.118.1
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: timerreg.h,v 1.9 2005/11/16 03:00:23 uwe Exp $ */ 1/* $NetBSD: timerreg.h,v 1.9.118.1 2018/03/13 16:48:05 snj Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This software was developed by the Computer Systems Engineering group 7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley. 9 * contributed to Berkeley.
10 * 10 *
11 * All advertising materials mentioning features or use of this software 11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement: 12 * must display the following acknowledgement:
13 * This product includes software developed by the University of 13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory. 14 * California, Lawrence Berkeley Laboratory.
@@ -110,19 +110,22 @@ struct counter_4m { /* counter that int @@ -110,19 +110,22 @@ struct counter_4m { /* counter that int
110 110
111/* 111/*
112 * Compute a limit that causes the timer to fire every n microseconds. 112 * Compute a limit that causes the timer to fire every n microseconds.
113 * The Sun4c requires that the timer register be initialized for n+1 113 * The Sun4c requires that the timer register be initialized for n+1
114 * microseconds, while the Sun4m requires it be initialized for n. Thus 114 * microseconds, while the Sun4m requires it be initialized for n. Thus
115 * the two versions of this function. 115 * the two versions of this function.
116 * 116 *
117 * Note that the manual for the chipset used in the Sun4m suggests that 117 * Note that the manual for the chipset used in the Sun4m suggests that
118 * the timer be set at n+0.5 microseconds; in practice, this produces 118 * the timer be set at n+0.5 microseconds; in practice, this produces
119 * a 50 ppm clock skew, which means that the 0.5 should not be there... 119 * a 50 ppm clock skew, which means that the 0.5 should not be there...
120 */ 120 */
121#define tmr_ustolim(n) (((n) + 1) << TMR_SHIFT) 121#define tmr_ustolim(n) (((n) + 1) << TMR_SHIFT)
122 122
123/*efine TMR_SHIFT4M 9 -* shift to obtain microseconds */ 123#define TMR_SHIFT4M 9 /* shift to obtain microseconds */
124/*efine tmr_ustolim4m(n) (((2*(n)) + 1) << TMR_SHIFT4M)*/ 124#if 1
 125#define tmr_ustolim4m(n) (((2*(n)) + 1) << TMR_SHIFT4M)
 126#else
125#define tmr_ustolim4m(n) ((n) << TMR_SHIFT) 127#define tmr_ustolim4m(n) ((n) << TMR_SHIFT)
 128#endif
126 129
127/* The number of microseconds represented by a counter register value */ 130/* The number of microseconds represented by a counter register value */
128#define tmr_cnttous(c) ((((c) >> TMR_SHIFT) & TMR_MASK) - 1) 131#define tmr_cnttous(c) ((((c) >> TMR_SHIFT) & TMR_MASK) - 1)
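
As a quick arithmetic check on the re-enabled tmr_ustolim4m(): assuming
TMR_SHIFT is 10 (one microsecond per count at bit 10) and hz is 100 so that
tick is 10000 microseconds -- both assumptions on my part, not stated in this
diff -- the two forms of the macro differ only by the half-microsecond bit:

#include <assert.h>

#define TMR_SHIFT	10	/* assumption: value not shown in this diff */
#define TMR_SHIFT4M	9	/* as defined in the header above */

/* whole-microsecond form previously in use */
static_assert((10000 << TMR_SHIFT) == 10240000, "whole-us form");
/* re-enabled sub-microsecond form */
static_assert(((2 * 10000 + 1) << TMR_SHIFT4M) == 10240512, "half-us form");

The difference, 512 raw counts (1 << 9), is the extra half microsecond per
10 ms tick -- the same 0.5 us that the comment above quantifies as a 50 ppm
skew.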