Sat May 23 17:08:05 2009 UTC ()
- Add lwp_pctr(), get an LWP's preemption/ctxsw counter.
- Fix a preemption bug in CURCPU_IDLE_P() that can lead to a bogus
  assertion failure on DEBUG kernels.
- Fix MP/preemption races with timecounter detachment.


(ad)
diff -r1.147 -r1.148 src/sys/kern/kern_lock.c
diff -r1.38 -r1.39 src/sys/kern/kern_tc.c
diff -r1.117 -r1.118 src/sys/sys/lwp.h

cvs diff -r1.147 -r1.148 src/sys/kern/kern_lock.c (expand / switch to unified diff)

--- src/sys/kern/kern_lock.c 2008/11/12 12:36:16 1.147
+++ src/sys/kern/kern_lock.c 2009/05/23 17:08:04 1.148
@@ -1,17 +1,17 @@ @@ -1,17 +1,17 @@
1/* $NetBSD: kern_lock.c,v 1.147 2008/11/12 12:36:16 ad Exp $ */ 1/* $NetBSD: kern_lock.c,v 1.148 2009/05/23 17:08:04 ad Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran. 9 * NASA Ames Research Center, and by Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer. 15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright 16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the 17 * notice, this list of conditions and the following disclaimer in the
@@ -21,69 +21,81 @@ @@ -21,69 +21,81 @@
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.147 2008/11/12 12:36:16 ad Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.148 2009/05/23 17:08:04 ad Exp $");
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/proc.h> 37#include <sys/proc.h>
38#include <sys/lock.h> 38#include <sys/lock.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/kernel.h> 40#include <sys/kernel.h>
41#include <sys/lockdebug.h> 41#include <sys/lockdebug.h>
42#include <sys/cpu.h> 42#include <sys/cpu.h>
43#include <sys/syslog.h> 43#include <sys/syslog.h>
44#include <sys/atomic.h> 44#include <sys/atomic.h>
 45#include <sys/lwp.h>
45 46
46#include <machine/stdarg.h> 47#include <machine/stdarg.h>
47#include <machine/lock.h> 48#include <machine/lock.h>
48 49
49#include <dev/lockstat.h> 50#include <dev/lockstat.h>
50 51
51#define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0) 52#define RETURN_ADDRESS (uintptr_t)__builtin_return_address(0)
52 53
53bool kernel_lock_dodebug; 54bool kernel_lock_dodebug;
54 55
55__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)] 56__cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
56 __aligned(CACHE_LINE_SIZE); 57 __aligned(CACHE_LINE_SIZE);
57 58
58void 59void
59assert_sleepable(void) 60assert_sleepable(void)
60{ 61{
61 const char *reason; 62 const char *reason;
 63 uint64_t pctr;
 64 bool idle;
62 65
63 if (panicstr != NULL) { 66 if (panicstr != NULL) {
64 return; 67 return;
65 } 68 }
66 69
67 LOCKDEBUG_BARRIER(kernel_lock, 1); 70 LOCKDEBUG_BARRIER(kernel_lock, 1);
68 71
 72 /*
 73 * Avoid disabling/re-enabling preemption here since this
 73 * routine may be called in delicate situations.
 75 */
 76 do {
 77 pctr = lwp_pctr();
 78 idle = CURCPU_IDLE_P();
 79 } while (pctr != lwp_pctr());
 80
69 reason = NULL; 81 reason = NULL;
70 if (CURCPU_IDLE_P() && !cold) { 82 if (idle && !cold) {
71 reason = "idle"; 83 reason = "idle";
72 } 84 }
73 if (cpu_intr_p()) { 85 if (cpu_intr_p()) {
74 reason = "interrupt"; 86 reason = "interrupt";
75 } 87 }
76 if ((curlwp->l_pflag & LP_INTR) != 0) { 88 if (cpu_softintr_p()) {
77 reason = "softint"; 89 reason = "softint";
78 } 90 }
79 91
80 if (reason) { 92 if (reason) {
81 panic("%s: %s caller=%p", __func__, reason, 93 panic("%s: %s caller=%p", __func__, reason,
82 (void *)RETURN_ADDRESS); 94 (void *)RETURN_ADDRESS);
83 } 95 }
84} 96}
85 97
86/* 98/*
87 * Functions for manipulating the kernel_lock. We put them here 99 * Functions for manipulating the kernel_lock. We put them here
88 * so that they show up in profiles. 100 * so that they show up in profiles.
89 */ 101 */

cvs diff -r1.38 -r1.39 src/sys/kern/kern_tc.c (expand / switch to unified diff)

--- src/sys/kern/kern_tc.c 2009/01/11 02:45:52 1.38
+++ src/sys/kern/kern_tc.c 2009/05/23 17:08:04 1.39
@@ -1,19 +1,22 @@ @@ -1,19 +1,22 @@
1/* $NetBSD: kern_tc.c,v 1.38 2009/01/11 02:45:52 christos Exp $ */ 1/* $NetBSD: kern_tc.c,v 1.39 2009/05/23 17:08:04 ad Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
 7 * This code is derived from software contributed to The NetBSD Foundation
 8 * by Andrew Doran.
 9 *
7 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
9 * are met: 12 * are met:
10 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
15 * 18 *
16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
@@ -27,43 +30,44 @@ @@ -27,43 +30,44 @@
27 */ 30 */
28 31
29/*- 32/*-
30 * ---------------------------------------------------------------------------- 33 * ----------------------------------------------------------------------------
31 * "THE BEER-WARE LICENSE" (Revision 42): 34 * "THE BEER-WARE LICENSE" (Revision 42):
32 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 35 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
33 * can do whatever you want with this stuff. If we meet some day, and you think 36 * can do whatever you want with this stuff. If we meet some day, and you think
34 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 37 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
35 * --------------------------------------------------------------------------- 38 * ---------------------------------------------------------------------------
36 */ 39 */
37 40
38#include <sys/cdefs.h> 41#include <sys/cdefs.h>
39/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */ 42/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
40__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.38 2009/01/11 02:45:52 christos Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.39 2009/05/23 17:08:04 ad Exp $");
41 44
42#include "opt_ntp.h" 45#include "opt_ntp.h"
43 46
44#include <sys/param.h> 47#include <sys/param.h>
45#include <sys/kernel.h> 48#include <sys/kernel.h>
46#include <sys/reboot.h> /* XXX just to get AB_VERBOSE */ 49#include <sys/reboot.h> /* XXX just to get AB_VERBOSE */
47#include <sys/sysctl.h> 50#include <sys/sysctl.h>
48#include <sys/syslog.h> 51#include <sys/syslog.h>
49#include <sys/systm.h> 52#include <sys/systm.h>
50#include <sys/timepps.h> 53#include <sys/timepps.h>
51#include <sys/timetc.h> 54#include <sys/timetc.h>
52#include <sys/timex.h> 55#include <sys/timex.h>
53#include <sys/evcnt.h> 56#include <sys/evcnt.h>
54#include <sys/kauth.h> 57#include <sys/kauth.h>
55#include <sys/mutex.h> 58#include <sys/mutex.h>
56#include <sys/atomic.h> 59#include <sys/atomic.h>
 60#include <sys/xcall.h>
57 61
58/* 62/*
59 * A large step happens on boot. This constant detects such steps. 63 * A large step happens on boot. This constant detects such steps.
60 * It is relatively small so that ntp_update_second gets called enough 64 * It is relatively small so that ntp_update_second gets called enough
61 * in the typical 'missed a couple of seconds' case, but doesn't loop 65 * in the typical 'missed a couple of seconds' case, but doesn't loop
62 * forever when the time step is large. 66 * forever when the time step is large.
63 */ 67 */
64#define LARGE_STEP 200 68#define LARGE_STEP 200
65 69
66/* 70/*
67 * Implement a dummy timecounter which we can use until we get a real one 71 * Implement a dummy timecounter which we can use until we get a real one
68 * in the air. This allows the console and other early stuff to use 72 * in the air. This allows the console and other early stuff to use
69 * time services. 73 * time services.
@@ -116,26 +120,27 @@ static struct timehands th0 = { @@ -116,26 +120,27 @@ static struct timehands th0 = {
116static struct timehands *volatile timehands = &th0; 120static struct timehands *volatile timehands = &th0;
117struct timecounter *timecounter = &dummy_timecounter; 121struct timecounter *timecounter = &dummy_timecounter;
118static struct timecounter *timecounters = &dummy_timecounter; 122static struct timecounter *timecounters = &dummy_timecounter;
119 123
120time_t time_second = 1; 124time_t time_second = 1;
121time_t time_uptime = 1; 125time_t time_uptime = 1;
122 126
123static struct bintime timebasebin; 127static struct bintime timebasebin;
124 128
125static int timestepwarnings; 129static int timestepwarnings;
126 130
127kmutex_t timecounter_lock; 131kmutex_t timecounter_lock;
128static u_int timecounter_mods; 132static u_int timecounter_mods;
 133static volatile int timecounter_removals = 1;
129static u_int timecounter_bad; 134static u_int timecounter_bad;
130 135
131#ifdef __FreeBSD__ 136#ifdef __FreeBSD__
132SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW, 137SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW,
133 &timestepwarnings, 0, ""); 138 &timestepwarnings, 0, "");
134#endif /* __FreeBSD__ */ 139#endif /* __FreeBSD__ */
135 140
136/* 141/*
137 * sysctl helper routine for kern.timercounter.hardware 142 * sysctl helper routine for kern.timercounter.hardware
138 */ 143 */
139static int 144static int
140sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS) 145sysctl_kern_timecounter_hardware(SYSCTLFN_ARGS)
141{ 146{
@@ -299,35 +304,69 @@ tc_delta(struct timehands *th) @@ -299,35 +304,69 @@ tc_delta(struct timehands *th)
299 th->th_offset_count) & tc->tc_counter_mask); 304 th->th_offset_count) & tc->tc_counter_mask);
300} 305}
301 306
302/* 307/*
303 * Functions for reading the time. We have to loop until we are sure that 308 * Functions for reading the time. We have to loop until we are sure that
304 * the timehands that we operated on was not updated under our feet. See 309 * the timehands that we operated on was not updated under our feet. See
305 * the comment in <sys/timevar.h> for a description of these 12 functions. 310 * the comment in <sys/timevar.h> for a description of these 12 functions.
306 */ 311 */
307 312
308void 313void
309binuptime(struct bintime *bt) 314binuptime(struct bintime *bt)
310{ 315{
311 struct timehands *th; 316 struct timehands *th;
312 u_int gen; 317 lwp_t *l;
 318 u_int lgen, gen;
313 319
314 TC_COUNT(nbinuptime); 320 TC_COUNT(nbinuptime);
 321
 322 /*
 323 * Provide exclusion against tc_detach().
 324 *
 325 * We record the number of timecounter removals before accessing
 326 * timecounter state. Note that the LWP can be using multiple
 327 * "generations" at once, due to interrupts (interrupted while in
 328 * this function). Hardware interrupts will borrow the interrupted
 329 * LWP's l_tcgen value for this purpose, and can themselves be
 330 * interrupted by higher priority interrupts. In this case we need
 331 * to ensure that the oldest generation in use is recorded.
 332 *
 333 * splsched() is too expensive to use, so we take care to structure
 334 * this code in such a way that it is not required. Likewise, we
 335 * do not disable preemption.
 336 *
 337 * Memory barriers are also too expensive to use for such a
 338 * performance critical function. The good news is that we do not
 339 * need memory barriers for this type of exclusion, as the thread
 340 * updating timecounter_removals will issue a broadcast cross call
 341 * before inspecting our l_tcgen value (this elides memory ordering
 342 * issues).
 343 */
 344 l = curlwp;
 345 lgen = l->l_tcgen;
 346 if (__predict_true(lgen == 0)) {
 347 l->l_tcgen = timecounter_removals;
 348 }
 349 __insn_barrier();
 350
315 do { 351 do {
316 th = timehands; 352 th = timehands;
317 gen = th->th_generation; 353 gen = th->th_generation;
318 *bt = th->th_offset; 354 *bt = th->th_offset;
319 bintime_addx(bt, th->th_scale * tc_delta(th)); 355 bintime_addx(bt, th->th_scale * tc_delta(th));
320 } while (gen == 0 || gen != th->th_generation); 356 } while (gen == 0 || gen != th->th_generation);
 357
 358 __insn_barrier();
 359 l->l_tcgen = lgen;
321} 360}
322 361
323void 362void
324nanouptime(struct timespec *tsp) 363nanouptime(struct timespec *tsp)
325{ 364{
326 struct bintime bt; 365 struct bintime bt;
327 366
328 TC_COUNT(nnanouptime); 367 TC_COUNT(nnanouptime);
329 binuptime(&bt); 368 binuptime(&bt);
330 bintime2timespec(&bt, tsp); 369 bintime2timespec(&bt, tsp);
331} 370}
332 371
333void 372void
@@ -533,47 +572,95 @@ tc_gonebad(struct timecounter *tc) @@ -533,47 +572,95 @@ tc_gonebad(struct timecounter *tc)
533 tc->tc_quality = -100; 572 tc->tc_quality = -100;
534 membar_producer(); 573 membar_producer();
535 atomic_inc_uint(&timecounter_bad); 574 atomic_inc_uint(&timecounter_bad);
536} 575}
537 576
538/* 577/*
539 * Stop using a timecounter and remove it from the timecounters list. 578 * Stop using a timecounter and remove it from the timecounters list.
540 */ 579 */
541int 580int
542tc_detach(struct timecounter *target) 581tc_detach(struct timecounter *target)
543{ 582{
544 struct timecounter *tc; 583 struct timecounter *tc;
545 struct timecounter **tcp = NULL; 584 struct timecounter **tcp = NULL;
546 int rc = 0; 585 int removals;
 586 uint64_t where;
 587 lwp_t *l;
547 588
 589 /* First, find the timecounter. */
548 mutex_spin_enter(&timecounter_lock); 590 mutex_spin_enter(&timecounter_lock);
549 for (tcp = &timecounters, tc = timecounters; 591 for (tcp = &timecounters, tc = timecounters;
550 tc != NULL; 592 tc != NULL;
551 tcp = &tc->tc_next, tc = tc->tc_next) { 593 tcp = &tc->tc_next, tc = tc->tc_next) {
552 if (tc == target) 594 if (tc == target)
553 break; 595 break;
554 } 596 }
555 if (tc == NULL) { 597 if (tc == NULL) {
556 rc = ESRCH; 598 mutex_spin_exit(&timecounter_lock);
557 } else { 599 return ESRCH;
558 *tcp = tc->tc_next; 600 }
559 if (timecounter == target) { 601
560 tc_pick(); 602 /* And now, remove it. */
561 tc_windup(); 603 *tcp = tc->tc_next;
562 } 604 if (timecounter == target) {
563 timecounter_mods++; 605 tc_pick();
 606 tc_windup();
564 } 607 }
 608 timecounter_mods++;
 609 removals = timecounter_removals++;
565 mutex_spin_exit(&timecounter_lock); 610 mutex_spin_exit(&timecounter_lock);
566 return rc; 611
 612 /*
 613 * We now have to determine if any threads in the system are still
 614 * making use of this timecounter.
 615 *
 616 * We issue a broadcast cross call to elide memory ordering issues,
 617 * then scan all LWPs in the system looking at each's timecounter
 618 * generation number. We need to see a value of zero (not actively
 619 * using a timecounter) or a value greater than our removal value.
 620 *
 621 * We may race with threads that read `timecounter_removals' and
 622 * then get preempted before updating `l_tcgen'. This is not
 623 * a problem, since it means that these threads have not yet started
 624 * accessing timecounter state. All we do need is one clean
 625 * snapshot of the system where every thread appears not to be using
 626 * old timecounter state.
 627 */
 628 for (;;) {
 629 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
 630 xc_wait(where);
 631
 632 mutex_enter(proc_lock);
 633 LIST_FOREACH(l, &alllwp, l_list) {
 634 if (l->l_tcgen == 0 || l->l_tcgen > removals) {
 635 /*
 636 * Not using timecounter or old timecounter
 637 * state at time of our xcall or later.
 638 */
 639 continue;
 640 }
 641 break;
 642 }
 643 mutex_exit(proc_lock);
 644
 645 /*
 646 * If the timecounter is still in use, wait at least 10ms
 647 * before retrying.
 648 */
 649 if (l == NULL) {
 650 return 0;
 651 }
 652 (void)kpause("tcdetach", false, mstohz(10), NULL);
 653 }
567} 654}
568 655
569/* Report the frequency of the current timecounter. */ 656/* Report the frequency of the current timecounter. */
570u_int64_t 657u_int64_t
571tc_getfrequency(void) 658tc_getfrequency(void)
572{ 659{
573 660
574 return (timehands->th_counter->tc_frequency); 661 return (timehands->th_counter->tc_frequency);
575} 662}
576 663
577/* 664/*
578 * Step our concept of UTC. This is done by modifying our estimate of 665 * Step our concept of UTC. This is done by modifying our estimate of
579 * when we booted. 666 * when we booted.

cvs diff -r1.117 -r1.118 src/sys/sys/lwp.h (expand / switch to unified diff)

--- src/sys/sys/lwp.h 2009/02/04 21:17:39 1.117
+++ src/sys/sys/lwp.h 2009/05/23 17:08:05 1.118
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: lwp.h,v 1.117 2009/02/04 21:17:39 ad Exp $ */ 1/* $NetBSD: lwp.h,v 1.118 2009/05/23 17:08:05 ad Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams and Andrew Doran. 8 * by Nathan J. Williams and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -163,26 +163,28 @@ struct lwp { @@ -163,26 +163,28 @@ struct lwp {
163 u_short l_exlocks; /* !: lockdebug: excl. locks held */ 163 u_short l_exlocks; /* !: lockdebug: excl. locks held */
164 u_short l_unused; /* !: unused */ 164 u_short l_unused; /* !: unused */
165 u_short l_blcnt; /* !: count of kernel_lock held */ 165 u_short l_blcnt; /* !: count of kernel_lock held */
166 int l_nopreempt; /* !: don't preempt me! */ 166 int l_nopreempt; /* !: don't preempt me! */
167 u_int l_dopreempt; /* s: kernel preemption pending */ 167 u_int l_dopreempt; /* s: kernel preemption pending */
168 int l_pflag; /* !: LWP private flags */ 168 int l_pflag; /* !: LWP private flags */
169 int l_dupfd; /* !: side return from cloning devs XXX */ 169 int l_dupfd; /* !: side return from cloning devs XXX */
170 const struct sysent * volatile l_sysent;/* !: currently active syscall */ 170 const struct sysent * volatile l_sysent;/* !: currently active syscall */
171 struct rusage l_ru; /* !: accounting information */ 171 struct rusage l_ru; /* !: accounting information */
172 uint64_t l_pfailtime; /* !: for kernel preemption */ 172 uint64_t l_pfailtime; /* !: for kernel preemption */
173 uintptr_t l_pfailaddr; /* !: for kernel preemption */ 173 uintptr_t l_pfailaddr; /* !: for kernel preemption */
174 uintptr_t l_pfaillock; /* !: for kernel preemption */ 174 uintptr_t l_pfaillock; /* !: for kernel preemption */
175 _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */ 175 _TAILQ_HEAD(,struct lockdebug,volatile) l_ld_locks;/* !: locks held by LWP */
 176 int l_tcgen; /* !: for timecounter removal */
 177 int l_unused2; /* !: for future use */
176 178
177 /* These are only used by 'options SYSCALL_TIMES' */ 179 /* These are only used by 'options SYSCALL_TIMES' */
178 uint32_t l_syscall_time; /* !: time epoch for current syscall */ 180 uint32_t l_syscall_time; /* !: time epoch for current syscall */
179 uint64_t *l_syscall_counter; /* !: counter for current process */ 181 uint64_t *l_syscall_counter; /* !: counter for current process */
180}; 182};
181 183
182#if !defined(USER_TO_UAREA) 184#if !defined(USER_TO_UAREA)
183#if !defined(UAREA_USER_OFFSET) 185#if !defined(UAREA_USER_OFFSET)
184#define UAREA_USER_OFFSET 0 186#define UAREA_USER_OFFSET 0
185#endif /* !defined(UAREA_USER_OFFSET) */ 187#endif /* !defined(UAREA_USER_OFFSET) */
186#define USER_TO_UAREA(user) ((vaddr_t)(user) - UAREA_USER_OFFSET) 188#define USER_TO_UAREA(user) ((vaddr_t)(user) - UAREA_USER_OFFSET)
187#define UAREA_TO_USER(uarea) ((struct user *)((uarea) + UAREA_USER_OFFSET)) 189#define UAREA_TO_USER(uarea) ((struct user *)((uarea) + UAREA_USER_OFFSET))
188#endif /* !defined(UAREA_TO_USER) */ 190#endif /* !defined(UAREA_TO_USER) */
@@ -296,26 +298,27 @@ void upcallret(lwp_t *); @@ -296,26 +298,27 @@ void upcallret(lwp_t *);
296void lwp_exit(lwp_t *) __dead; 298void lwp_exit(lwp_t *) __dead;
297void lwp_exit_switchaway(lwp_t *) __dead; 299void lwp_exit_switchaway(lwp_t *) __dead;
298int lwp_suspend(lwp_t *, lwp_t *); 300int lwp_suspend(lwp_t *, lwp_t *);
299int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *); 301int lwp_create1(lwp_t *, const void *, size_t, u_long, lwpid_t *);
300void lwp_update_creds(lwp_t *); 302void lwp_update_creds(lwp_t *);
301void lwp_migrate(lwp_t *, struct cpu_info *); 303void lwp_migrate(lwp_t *, struct cpu_info *);
302lwp_t *lwp_find2(pid_t, lwpid_t); 304lwp_t *lwp_find2(pid_t, lwpid_t);
303lwp_t *lwp_find(proc_t *, int); 305lwp_t *lwp_find(proc_t *, int);
304void lwp_userret(lwp_t *); 306void lwp_userret(lwp_t *);
305void lwp_need_userret(lwp_t *); 307void lwp_need_userret(lwp_t *);
306void lwp_free(lwp_t *, bool, bool); 308void lwp_free(lwp_t *, bool, bool);
307void lwp_sys_init(void); 309void lwp_sys_init(void);
308u_int lwp_unsleep(lwp_t *, bool); 310u_int lwp_unsleep(lwp_t *, bool);
 311uint64_t lwp_pctr(void);
309 312
310int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t); 313int lwp_specific_key_create(specificdata_key_t *, specificdata_dtor_t);
311void lwp_specific_key_delete(specificdata_key_t); 314void lwp_specific_key_delete(specificdata_key_t);
312void lwp_initspecific(lwp_t *); 315void lwp_initspecific(lwp_t *);
313void lwp_finispecific(lwp_t *); 316void lwp_finispecific(lwp_t *);
314void *lwp_getspecific(specificdata_key_t); 317void *lwp_getspecific(specificdata_key_t);
315#if defined(_LWP_API_PRIVATE) 318#if defined(_LWP_API_PRIVATE)
316void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t); 319void *_lwp_getspecific_by_lwp(lwp_t *, specificdata_key_t);
317#endif 320#endif
318void lwp_setspecific(specificdata_key_t, void *); 321void lwp_setspecific(specificdata_key_t, void *);
319 322
320/* Syscalls */ 323/* Syscalls */
321int lwp_park(struct timespec *, const void *); 324int lwp_park(struct timespec *, const void *);