Thu Aug 30 02:24:48 2012 UTC
KASSERT -> KASSERTMSG


(matt)
diff -r1.11 -r1.12 src/sys/kern/subr_pcu.c

cvs diff -r1.11 -r1.12 src/sys/kern/subr_pcu.c
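
The change replaces a bare KASSERT() with KASSERTMSG(), so a failed assertion panics with the offending pointer values instead of just the expression text. The sketch below is a minimal userland approximation of the two macros, written for illustration only; it is not the kernel definitions from <sys/systm.h>, where both macros are effectively no-ops unless the kernel is built with DIAGNOSTIC.

	#include <stdio.h>
	#include <stdlib.h>

	/* Userland stand-ins for the kernel macros, for illustration only. */
	#define KASSERT(e) \
		do { \
			if (!(e)) { \
				fprintf(stderr, "assertion \"%s\" failed\n", #e); \
				abort(); \
			} \
		} while (0)

	#define KASSERTMSG(e, fmt, ...) \
		do { \
			if (!(e)) { \
				fprintf(stderr, "assertion \"%s\" failed: " fmt "\n", \
				    #e, __VA_ARGS__); \
				abort(); \
			} \
		} while (0)

	int
	main(void)
	{
		void *l = (void *)0x1234, *curlwp = (void *)0x5678;

		/* Old form: only reports the failed expression. */
		/* KASSERT(l == curlwp); */

		/* New form: also reports the offending values. */
		KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);
		return 0;
	}

Running the sketch aborts with the message 'assertion "l == curlwp" failed: l 0x1234 != curlwp 0x5678', which is the extra diagnostic information this commit adds to the kernel assertion.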

--- src/sys/kern/subr_pcu.c 2012/04/18 13:43:13 1.11
+++ src/sys/kern/subr_pcu.c 2012/08/30 02:24:48 1.12
@@ -1,14 +1,14 @@
-/*	$NetBSD: subr_pcu.c,v 1.11 2012/04/18 13:43:13 yamt Exp $	*/
+/*	$NetBSD: subr_pcu.c,v 1.12 2012/08/30 02:24:48 matt Exp $	*/
 
 /*-
  * Copyright (c) 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Mindaugas Rasiukevicius.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -47,27 +47,27 @@
  * by the owner LWP. Therefore struct cpu_info::ci_pcu_curlwp[id]
  * may only be changed by current CPU, and lwp_t::l_pcu_cpu[id] may
  * only be unset by the CPU which has PCU state loaded.
  *
  * There is a race condition: LWP may have a PCU state on a remote CPU,
  * which it requests to be released via cross-call. At the same time,
  * other LWP on remote CPU might release existing PCU state and load
  * its own one. Cross-call may arrive after this and release different
  * PCU state than intended. In such case, such LWP would re-load its
  * PCU state again.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.11 2012/04/18 13:43:13 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.12 2012/08/30 02:24:48 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/lwp.h>
 #include <sys/pcu.h>
 #include <sys/xcall.h>
 
 #if PCU_UNIT_COUNT > 0
 
 static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, int);
 
 #define PCU_SAVE	0x01	/* Save PCU state to the LWP. */
 #define PCU_RELEASE	0x02	/* Release PCU state on the CPU. */
@@ -79,27 +79,27 @@ extern const pcu_ops_t * const pcu_ops_m
  * pcu_switchpoint: release PCU state if the LWP is being run on another CPU.
  *
  * On each context switches, called by mi_switch() with IPL_SCHED.
  * 'l' is an LWP which is just we switched to. (the new curlwp)
  */
 
 void
 pcu_switchpoint(lwp_t *l)
 {
 	const uint32_t pcu_inuse = l->l_pcu_used;
 	u_int id;
 	/* int s; */
 
-	KASSERT(l == curlwp);
+	KASSERTMSG(l == curlwp, "l %p != curlwp %p", l, curlwp);
 
 	if (__predict_true(pcu_inuse == 0)) {
 		/* PCUs are not in use. */
 		return;
 	}
 	/* commented out as we know we are already at IPL_SCHED */
 	/* s = splsoftclock(); */
 	for (id = 0; id < PCU_UNIT_COUNT; id++) {
 		if ((pcu_inuse & (1 << id)) == 0) {
 			continue;
 		}
 		struct cpu_info * const pcu_ci = l->l_pcu_cpu[id];
 		if (pcu_ci == NULL || pcu_ci == l->l_cpu) {