Sat Oct 3 01:30:25 2009 UTC
Move sched policy back to the subsystem: kern_synch.c now registers a kauth(9) process-scope listener that answers the KAUTH_PROCESS_SCHEDULER_* requests, moving those decisions out of the suser secmodel (see the companion change to secmodel_suser.c).


(elad)
diff -r1.267 -r1.268 src/sys/kern/kern_synch.c
diff -r1.17 -r1.18 src/sys/secmodel/suser/secmodel_suser.c

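With this revision the scheduler subsystem installs its own kauth(9) listener on the process scope: it allows same-uid get-param, allows same-uid set-param as long as the class is not changed to a real-time class and the priority is not raised, allows get-affinity for everyone, and defers set-affinity to the secmodel as a privileged decision. The sketch below shows what the caller side of such a check would typically look like in a scheduling syscall such as _sched_setparam(2); it is illustrative only, is not part of this commit, and the helper name check_sched_setparam() is made up for the example.

	/* Illustrative sketch, not part of this commit. */
	#include <sys/param.h>
	#include <sys/kauth.h>
	#include <sys/lwp.h>
	#include <sys/proc.h>

	static int
	check_sched_setparam(struct lwp *curl, struct proc *p, struct lwp *t,
	    int policy, pri_t priority)
	{

		/*
		 * Ask the kauth(9) process scope whether 'curl' may change
		 * 't's scheduling policy/priority.  The listener added to
		 * kern_synch.c in this revision allows the unprivileged
		 * same-uid case (no switch to a real-time class, no
		 * priority raise); a secmodel may still allow the rest.
		 */
		return kauth_authorize_process(curl->l_cred,
		    KAUTH_PROCESS_SCHEDULER_SETPARAM, p, t,
		    KAUTH_ARG(policy), KAUTH_ARG(priority));
	}

A caller treats a zero return as permission granted and anything else (typically EPERM) as denial.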
cvs diff -r1.267 -r1.268 src/sys/kern/kern_synch.c

--- src/sys/kern/kern_synch.c 2009/07/19 10:11:55 1.267
+++ src/sys/kern/kern_synch.c 2009/10/03 01:30:25 1.268
@@ -1,1159 +1,1214 @@
1/* $NetBSD: kern_synch.c,v 1.267 2009/07/19 10:11:55 yamt Exp $ */ 1/* $NetBSD: kern_synch.c,v 1.268 2009/10/03 01:30:25 elad Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009 4 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
10 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and 10 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
11 * Daniel Sieger. 11 * Daniel Sieger.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
15 * are met: 15 * are met:
16 * 1. Redistributions of source code must retain the above copyright 16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer. 17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright 18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the 19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution. 20 * documentation and/or other materials provided with the distribution.
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE. 32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35/*- 35/*-
36 * Copyright (c) 1982, 1986, 1990, 1991, 1993 36 * Copyright (c) 1982, 1986, 1990, 1991, 1993
37 * The Regents of the University of California. All rights reserved. 37 * The Regents of the University of California. All rights reserved.
38 * (c) UNIX System Laboratories, Inc. 38 * (c) UNIX System Laboratories, Inc.
39 * All or some portions of this file are derived from material licensed 39 * All or some portions of this file are derived from material licensed
40 * to the University of California by American Telephone and Telegraph 40 * to the University of California by American Telephone and Telegraph
41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
42 * the permission of UNIX System Laboratories, Inc. 42 * the permission of UNIX System Laboratories, Inc.
43 * 43 *
44 * Redistribution and use in source and binary forms, with or without 44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions 45 * modification, are permitted provided that the following conditions
46 * are met: 46 * are met:
47 * 1. Redistributions of source code must retain the above copyright 47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer. 48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright 49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the 50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution. 51 * documentation and/or other materials provided with the distribution.
52 * 3. Neither the name of the University nor the names of its contributors 52 * 3. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software 53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission. 54 * without specific prior written permission.
55 * 55 *
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE. 66 * SUCH DAMAGE.
67 * 67 *
68 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 68 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
69 */ 69 */
70 70
71#include <sys/cdefs.h> 71#include <sys/cdefs.h>
72__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.267 2009/07/19 10:11:55 yamt Exp $"); 72__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.268 2009/10/03 01:30:25 elad Exp $");
73 73
74#include "opt_kstack.h" 74#include "opt_kstack.h"
75#include "opt_perfctrs.h" 75#include "opt_perfctrs.h"
76#include "opt_sa.h" 76#include "opt_sa.h"
77 77
78#define __MUTEX_PRIVATE 78#define __MUTEX_PRIVATE
79 79
80#include <sys/param.h> 80#include <sys/param.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
82#include <sys/proc.h> 82#include <sys/proc.h>
83#include <sys/kernel.h> 83#include <sys/kernel.h>
84#if defined(PERFCTRS) 84#if defined(PERFCTRS)
85#include <sys/pmc.h> 85#include <sys/pmc.h>
86#endif 86#endif
87#include <sys/cpu.h> 87#include <sys/cpu.h>
88#include <sys/resourcevar.h> 88#include <sys/resourcevar.h>
89#include <sys/sched.h> 89#include <sys/sched.h>
90#include <sys/sa.h> 90#include <sys/sa.h>
91#include <sys/savar.h> 91#include <sys/savar.h>
92#include <sys/syscall_stats.h> 92#include <sys/syscall_stats.h>
93#include <sys/sleepq.h> 93#include <sys/sleepq.h>
94#include <sys/lockdebug.h> 94#include <sys/lockdebug.h>
95#include <sys/evcnt.h> 95#include <sys/evcnt.h>
96#include <sys/intr.h> 96#include <sys/intr.h>
97#include <sys/lwpctl.h> 97#include <sys/lwpctl.h>
98#include <sys/atomic.h> 98#include <sys/atomic.h>
99#include <sys/simplelock.h> 99#include <sys/simplelock.h>
 100#include <sys/kauth.h>
100 101
101#include <uvm/uvm_extern.h> 102#include <uvm/uvm_extern.h>
102 103
103#include <dev/lockstat.h> 104#include <dev/lockstat.h>
104 105
105static u_int sched_unsleep(struct lwp *, bool); 106static u_int sched_unsleep(struct lwp *, bool);
106static void sched_changepri(struct lwp *, pri_t); 107static void sched_changepri(struct lwp *, pri_t);
107static void sched_lendpri(struct lwp *, pri_t); 108static void sched_lendpri(struct lwp *, pri_t);
108static void resched_cpu(struct lwp *); 109static void resched_cpu(struct lwp *);
109 110
110syncobj_t sleep_syncobj = { 111syncobj_t sleep_syncobj = {
111 SOBJ_SLEEPQ_SORTED, 112 SOBJ_SLEEPQ_SORTED,
112 sleepq_unsleep, 113 sleepq_unsleep,
113 sleepq_changepri, 114 sleepq_changepri,
114 sleepq_lendpri, 115 sleepq_lendpri,
115 syncobj_noowner, 116 syncobj_noowner,
116}; 117};
117 118
118syncobj_t sched_syncobj = { 119syncobj_t sched_syncobj = {
119 SOBJ_SLEEPQ_SORTED, 120 SOBJ_SLEEPQ_SORTED,
120 sched_unsleep, 121 sched_unsleep,
121 sched_changepri, 122 sched_changepri,
122 sched_lendpri, 123 sched_lendpri,
123 syncobj_noowner, 124 syncobj_noowner,
124}; 125};
125 126
126callout_t sched_pstats_ch; 127callout_t sched_pstats_ch;
127unsigned sched_pstats_ticks; 128unsigned sched_pstats_ticks;
128kcondvar_t lbolt; /* once a second sleep address */ 129kcondvar_t lbolt; /* once a second sleep address */
129 130
 131kauth_listener_t sched_listener;
 132
130/* Preemption event counters */ 133/* Preemption event counters */
131static struct evcnt kpreempt_ev_crit; 134static struct evcnt kpreempt_ev_crit;
132static struct evcnt kpreempt_ev_klock; 135static struct evcnt kpreempt_ev_klock;
133static struct evcnt kpreempt_ev_immed; 136static struct evcnt kpreempt_ev_immed;
134 137
135/* 138/*
136 * During autoconfiguration or after a panic, a sleep will simply lower the 139 * During autoconfiguration or after a panic, a sleep will simply lower the
137 * priority briefly to allow interrupts, then return. The priority to be 140 * priority briefly to allow interrupts, then return. The priority to be
138 * used (safepri) is machine-dependent, thus this value is initialized and 141 * used (safepri) is machine-dependent, thus this value is initialized and
139 * maintained in the machine-dependent layers. This priority will typically 142 * maintained in the machine-dependent layers. This priority will typically
140 * be 0, or the lowest priority that is safe for use on the interrupt stack; 143 * be 0, or the lowest priority that is safe for use on the interrupt stack;
141 * it can be made higher to block network software interrupts after panics. 144 * it can be made higher to block network software interrupts after panics.
142 */ 145 */
143int safepri; 146int safepri;
144 147
 148static int
 149sched_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
 150 void *arg0, void *arg1, void *arg2, void *arg3)
 151{
 152 struct proc *p;
 153 int result;
 154
 155 result = KAUTH_RESULT_DEFER;
 156 p = arg0;
 157
 158 switch (action) {
 159 case KAUTH_PROCESS_SCHEDULER_GETPARAM:
 160 if (kauth_cred_uidmatch(cred, p->p_cred))
 161 result = KAUTH_RESULT_ALLOW;
 162 break;
 163
 164 case KAUTH_PROCESS_SCHEDULER_SETPARAM:
 165 if (kauth_cred_uidmatch(cred, p->p_cred)) {
 166 struct lwp *l;
 167 int policy;
 168 pri_t priority;
 169
 170 l = arg1;
 171 policy = (int)(unsigned long)arg2;
 172 priority = (pri_t)(unsigned long)arg3;
 173
 174 if ((policy == l->l_class ||
 175 (policy != SCHED_FIFO && policy != SCHED_RR)) &&
 176 priority <= l->l_priority)
 177 result = KAUTH_RESULT_ALLOW;
 178 }
 179
 180 break;
 181
 182 case KAUTH_PROCESS_SCHEDULER_GETAFFINITY:
 183 result = KAUTH_RESULT_ALLOW;
 184 break;
 185
 186 case KAUTH_PROCESS_SCHEDULER_SETAFFINITY:
 187 /* Privileged; we let the secmodel handle this. */
 188 break;
 189
 190 default:
 191 break;
 192 }
 193
 194 return result;
 195}
 196
145void 197void
146sched_init(void) 198sched_init(void)
147{ 199{
148 200
149 cv_init(&lbolt, "lbolt"); 201 cv_init(&lbolt, "lbolt");
150 callout_init(&sched_pstats_ch, CALLOUT_MPSAFE); 202 callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
151 callout_setfunc(&sched_pstats_ch, sched_pstats, NULL); 203 callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
152 204
153 evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL, 205 evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
154 "kpreempt", "defer: critical section"); 206 "kpreempt", "defer: critical section");
155 evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL, 207 evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
156 "kpreempt", "defer: kernel_lock"); 208 "kpreempt", "defer: kernel_lock");
157 evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL, 209 evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
158 "kpreempt", "immediate"); 210 "kpreempt", "immediate");
159 211
160 sched_pstats(NULL); 212 sched_pstats(NULL);
 213
 214 sched_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
 215 sched_listener_cb, NULL);
161} 216}
162 217
163/* 218/*
164 * OBSOLETE INTERFACE 219 * OBSOLETE INTERFACE
165 * 220 *
166 * General sleep call. Suspends the current LWP until a wakeup is 221 * General sleep call. Suspends the current LWP until a wakeup is
167 * performed on the specified identifier. The LWP will then be made 222 * performed on the specified identifier. The LWP will then be made
168 * runnable with the specified priority. Sleeps at most timo/hz seconds (0 223 * runnable with the specified priority. Sleeps at most timo/hz seconds (0
169 * means no timeout). If pri includes PCATCH flag, signals are checked 224 * means no timeout). If pri includes PCATCH flag, signals are checked
170 * before and after sleeping, else signals are not checked. Returns 0 if 225 * before and after sleeping, else signals are not checked. Returns 0 if
171 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a 226 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
172 * signal needs to be delivered, ERESTART is returned if the current system 227 * signal needs to be delivered, ERESTART is returned if the current system
173 * call should be restarted if possible, and EINTR is returned if the system 228 * call should be restarted if possible, and EINTR is returned if the system
174 * call should be interrupted by the signal (return EINTR). 229 * call should be interrupted by the signal (return EINTR).
175 * 230 *
176 * The interlock is held until we are on a sleep queue. The interlock will 231 * The interlock is held until we are on a sleep queue. The interlock will
177 * be locked before returning back to the caller unless the PNORELOCK flag 232 * be locked before returning back to the caller unless the PNORELOCK flag
178 * is specified, in which case the interlock will always be unlocked upon 233 * is specified, in which case the interlock will always be unlocked upon
179 * return. 234 * return.
180 */ 235 */
181int 236int
182ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo, 237ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
183 volatile struct simplelock *interlock) 238 volatile struct simplelock *interlock)
184{ 239{
185 struct lwp *l = curlwp; 240 struct lwp *l = curlwp;
186 sleepq_t *sq; 241 sleepq_t *sq;
187 kmutex_t *mp; 242 kmutex_t *mp;
188 int error; 243 int error;
189 244
190 KASSERT((l->l_pflag & LP_INTR) == 0); 245 KASSERT((l->l_pflag & LP_INTR) == 0);
191 246
192 if (sleepq_dontsleep(l)) { 247 if (sleepq_dontsleep(l)) {
193 (void)sleepq_abort(NULL, 0); 248 (void)sleepq_abort(NULL, 0);
194 if ((priority & PNORELOCK) != 0) 249 if ((priority & PNORELOCK) != 0)
195 simple_unlock(interlock); 250 simple_unlock(interlock);
196 return 0; 251 return 0;
197 } 252 }
198 253
199 l->l_kpriority = true; 254 l->l_kpriority = true;
200 sq = sleeptab_lookup(&sleeptab, ident, &mp); 255 sq = sleeptab_lookup(&sleeptab, ident, &mp);
201 sleepq_enter(sq, l, mp); 256 sleepq_enter(sq, l, mp);
202 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj); 257 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
203 258
204 if (interlock != NULL) { 259 if (interlock != NULL) {
205 KASSERT(simple_lock_held(interlock)); 260 KASSERT(simple_lock_held(interlock));
206 simple_unlock(interlock); 261 simple_unlock(interlock);
207 } 262 }
208 263
209 error = sleepq_block(timo, priority & PCATCH); 264 error = sleepq_block(timo, priority & PCATCH);
210 265
211 if (interlock != NULL && (priority & PNORELOCK) == 0) 266 if (interlock != NULL && (priority & PNORELOCK) == 0)
212 simple_lock(interlock); 267 simple_lock(interlock);
213  268
214 return error; 269 return error;
215} 270}
216 271
217int 272int
218mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo, 273mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
219 kmutex_t *mtx) 274 kmutex_t *mtx)
220{ 275{
221 struct lwp *l = curlwp; 276 struct lwp *l = curlwp;
222 sleepq_t *sq; 277 sleepq_t *sq;
223 kmutex_t *mp; 278 kmutex_t *mp;
224 int error; 279 int error;
225 280
226 KASSERT((l->l_pflag & LP_INTR) == 0); 281 KASSERT((l->l_pflag & LP_INTR) == 0);
227 282
228 if (sleepq_dontsleep(l)) { 283 if (sleepq_dontsleep(l)) {
229 (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0); 284 (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
230 return 0; 285 return 0;
231 } 286 }
232 287
233 l->l_kpriority = true; 288 l->l_kpriority = true;
234 sq = sleeptab_lookup(&sleeptab, ident, &mp); 289 sq = sleeptab_lookup(&sleeptab, ident, &mp);
235 sleepq_enter(sq, l, mp); 290 sleepq_enter(sq, l, mp);
236 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj); 291 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
237 mutex_exit(mtx); 292 mutex_exit(mtx);
238 error = sleepq_block(timo, priority & PCATCH); 293 error = sleepq_block(timo, priority & PCATCH);
239 294
240 if ((priority & PNORELOCK) == 0) 295 if ((priority & PNORELOCK) == 0)
241 mutex_enter(mtx); 296 mutex_enter(mtx);
242  297
243 return error; 298 return error;
244} 299}
245 300
246/* 301/*
247 * General sleep call for situations where a wake-up is not expected. 302 * General sleep call for situations where a wake-up is not expected.
248 */ 303 */
249int 304int
250kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx) 305kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
251{ 306{
252 struct lwp *l = curlwp; 307 struct lwp *l = curlwp;
253 kmutex_t *mp; 308 kmutex_t *mp;
254 sleepq_t *sq; 309 sleepq_t *sq;
255 int error; 310 int error;
256 311
257 if (sleepq_dontsleep(l)) 312 if (sleepq_dontsleep(l))
258 return sleepq_abort(NULL, 0); 313 return sleepq_abort(NULL, 0);
259 314
260 if (mtx != NULL) 315 if (mtx != NULL)
261 mutex_exit(mtx); 316 mutex_exit(mtx);
262 l->l_kpriority = true; 317 l->l_kpriority = true;
263 sq = sleeptab_lookup(&sleeptab, l, &mp); 318 sq = sleeptab_lookup(&sleeptab, l, &mp);
264 sleepq_enter(sq, l, mp); 319 sleepq_enter(sq, l, mp);
265 sleepq_enqueue(sq, l, wmesg, &sleep_syncobj); 320 sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
266 error = sleepq_block(timo, intr); 321 error = sleepq_block(timo, intr);
267 if (mtx != NULL) 322 if (mtx != NULL)
268 mutex_enter(mtx); 323 mutex_enter(mtx);
269 324
270 return error; 325 return error;
271} 326}
272 327
273#ifdef KERN_SA 328#ifdef KERN_SA
274/* 329/*
275 * sa_awaken: 330 * sa_awaken:
276 * 331 *
277 * We believe this lwp is an SA lwp. If it's yielding, 332 * We believe this lwp is an SA lwp. If it's yielding,
278 * let it know it needs to wake up. 333 * let it know it needs to wake up.
279 * 334 *
280 * We are called and exit with the lwp locked. We are 335 * We are called and exit with the lwp locked. We are
281 * called in the middle of wakeup operations, so we need 336 * called in the middle of wakeup operations, so we need
282 * to not touch the locks at all. 337 * to not touch the locks at all.
283 */ 338 */
284void 339void
285sa_awaken(struct lwp *l) 340sa_awaken(struct lwp *l)
286{ 341{
287 /* LOCK_ASSERT(lwp_locked(l, NULL)); */ 342 /* LOCK_ASSERT(lwp_locked(l, NULL)); */
288 343
289 if (l == l->l_savp->savp_lwp && l->l_flag & LW_SA_YIELD) 344 if (l == l->l_savp->savp_lwp && l->l_flag & LW_SA_YIELD)
290 l->l_flag &= ~LW_SA_IDLE; 345 l->l_flag &= ~LW_SA_IDLE;
291} 346}
292#endif /* KERN_SA */ 347#endif /* KERN_SA */
293 348
294/* 349/*
295 * OBSOLETE INTERFACE 350 * OBSOLETE INTERFACE
296 * 351 *
297 * Make all LWPs sleeping on the specified identifier runnable. 352 * Make all LWPs sleeping on the specified identifier runnable.
298 */ 353 */
299void 354void
300wakeup(wchan_t ident) 355wakeup(wchan_t ident)
301{ 356{
302 sleepq_t *sq; 357 sleepq_t *sq;
303 kmutex_t *mp; 358 kmutex_t *mp;
304 359
305 if (__predict_false(cold)) 360 if (__predict_false(cold))
306 return; 361 return;
307 362
308 sq = sleeptab_lookup(&sleeptab, ident, &mp); 363 sq = sleeptab_lookup(&sleeptab, ident, &mp);
309 sleepq_wake(sq, ident, (u_int)-1, mp); 364 sleepq_wake(sq, ident, (u_int)-1, mp);
310} 365}
311 366
312/* 367/*
313 * OBSOLETE INTERFACE 368 * OBSOLETE INTERFACE
314 * 369 *
315 * Make the highest priority LWP first in line on the specified 370 * Make the highest priority LWP first in line on the specified
316 * identifier runnable. 371 * identifier runnable.
317 */ 372 */
318void  373void
319wakeup_one(wchan_t ident) 374wakeup_one(wchan_t ident)
320{ 375{
321 sleepq_t *sq; 376 sleepq_t *sq;
322 kmutex_t *mp; 377 kmutex_t *mp;
323 378
324 if (__predict_false(cold)) 379 if (__predict_false(cold))
325 return; 380 return;
326 381
327 sq = sleeptab_lookup(&sleeptab, ident, &mp); 382 sq = sleeptab_lookup(&sleeptab, ident, &mp);
328 sleepq_wake(sq, ident, 1, mp); 383 sleepq_wake(sq, ident, 1, mp);
329} 384}
330 385
331 386
332/* 387/*
333 * General yield call. Puts the current LWP back on its run queue and 388 * General yield call. Puts the current LWP back on its run queue and
334 * performs a voluntary context switch. Should only be called when the 389 * performs a voluntary context switch. Should only be called when the
335 * current LWP explicitly requests it (eg sched_yield(2)). 390 * current LWP explicitly requests it (eg sched_yield(2)).
336 */ 391 */
337void 392void
338yield(void) 393yield(void)
339{ 394{
340 struct lwp *l = curlwp; 395 struct lwp *l = curlwp;
341 396
342 KERNEL_UNLOCK_ALL(l, &l->l_biglocks); 397 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
343 lwp_lock(l); 398 lwp_lock(l);
344 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 399 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
345 KASSERT(l->l_stat == LSONPROC); 400 KASSERT(l->l_stat == LSONPROC);
346 l->l_kpriority = false; 401 l->l_kpriority = false;
347 (void)mi_switch(l); 402 (void)mi_switch(l);
348 KERNEL_LOCK(l->l_biglocks, l); 403 KERNEL_LOCK(l->l_biglocks, l);
349} 404}
350 405
351/* 406/*
352 * General preemption call. Puts the current LWP back on its run queue 407 * General preemption call. Puts the current LWP back on its run queue
353 * and performs an involuntary context switch. 408 * and performs an involuntary context switch.
354 */ 409 */
355void 410void
356preempt(void) 411preempt(void)
357{ 412{
358 struct lwp *l = curlwp; 413 struct lwp *l = curlwp;
359 414
360 KERNEL_UNLOCK_ALL(l, &l->l_biglocks); 415 KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
361 lwp_lock(l); 416 lwp_lock(l);
362 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 417 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
363 KASSERT(l->l_stat == LSONPROC); 418 KASSERT(l->l_stat == LSONPROC);
364 l->l_kpriority = false; 419 l->l_kpriority = false;
365 l->l_nivcsw++; 420 l->l_nivcsw++;
366 (void)mi_switch(l); 421 (void)mi_switch(l);
367 KERNEL_LOCK(l->l_biglocks, l); 422 KERNEL_LOCK(l->l_biglocks, l);
368} 423}
369 424
370/* 425/*
371 * Handle a request made by another agent to preempt the current LWP 426 * Handle a request made by another agent to preempt the current LWP
372 * in-kernel. Usually called when l_dopreempt may be non-zero. 427 * in-kernel. Usually called when l_dopreempt may be non-zero.
373 * 428 *
374 * Character addresses for lockstat only. 429 * Character addresses for lockstat only.
375 */ 430 */
376static char in_critical_section; 431static char in_critical_section;
377static char kernel_lock_held; 432static char kernel_lock_held;
378static char is_softint; 433static char is_softint;
379static char cpu_kpreempt_enter_fail; 434static char cpu_kpreempt_enter_fail;
380 435
381bool 436bool
382kpreempt(uintptr_t where) 437kpreempt(uintptr_t where)
383{ 438{
384 uintptr_t failed; 439 uintptr_t failed;
385 lwp_t *l; 440 lwp_t *l;
386 int s, dop, lsflag; 441 int s, dop, lsflag;
387 442
388 l = curlwp; 443 l = curlwp;
389 failed = 0; 444 failed = 0;
390 while ((dop = l->l_dopreempt) != 0) { 445 while ((dop = l->l_dopreempt) != 0) {
391 if (l->l_stat != LSONPROC) { 446 if (l->l_stat != LSONPROC) {
392 /* 447 /*
393 * About to block (or die), let it happen. 448 * About to block (or die), let it happen.
394 * Doesn't really count as "preemption has 449 * Doesn't really count as "preemption has
395 * been blocked", since we're going to 450 * been blocked", since we're going to
396 * context switch. 451 * context switch.
397 */ 452 */
398 l->l_dopreempt = 0; 453 l->l_dopreempt = 0;
399 return true; 454 return true;
400 } 455 }
401 if (__predict_false((l->l_flag & LW_IDLE) != 0)) { 456 if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
402 /* Can't preempt idle loop, don't count as failure. */ 457 /* Can't preempt idle loop, don't count as failure. */
403 l->l_dopreempt = 0; 458 l->l_dopreempt = 0;
404 return true; 459 return true;
405 } 460 }
406 if (__predict_false(l->l_nopreempt != 0)) { 461 if (__predict_false(l->l_nopreempt != 0)) {
407 /* LWP holds preemption disabled, explicitly. */ 462 /* LWP holds preemption disabled, explicitly. */
408 if ((dop & DOPREEMPT_COUNTED) == 0) { 463 if ((dop & DOPREEMPT_COUNTED) == 0) {
409 kpreempt_ev_crit.ev_count++; 464 kpreempt_ev_crit.ev_count++;
410 } 465 }
411 failed = (uintptr_t)&in_critical_section; 466 failed = (uintptr_t)&in_critical_section;
412 break; 467 break;
413 } 468 }
414 if (__predict_false((l->l_pflag & LP_INTR) != 0)) { 469 if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
415 /* Can't preempt soft interrupts yet. */ 470 /* Can't preempt soft interrupts yet. */
416 l->l_dopreempt = 0; 471 l->l_dopreempt = 0;
417 failed = (uintptr_t)&is_softint; 472 failed = (uintptr_t)&is_softint;
418 break; 473 break;
419 } 474 }
420 s = splsched(); 475 s = splsched();
421 if (__predict_false(l->l_blcnt != 0 || 476 if (__predict_false(l->l_blcnt != 0 ||
422 curcpu()->ci_biglock_wanted != NULL)) { 477 curcpu()->ci_biglock_wanted != NULL)) {
423 /* Hold or want kernel_lock, code is not MT safe. */ 478 /* Hold or want kernel_lock, code is not MT safe. */
424 splx(s); 479 splx(s);
425 if ((dop & DOPREEMPT_COUNTED) == 0) { 480 if ((dop & DOPREEMPT_COUNTED) == 0) {
426 kpreempt_ev_klock.ev_count++; 481 kpreempt_ev_klock.ev_count++;
427 } 482 }
428 failed = (uintptr_t)&kernel_lock_held; 483 failed = (uintptr_t)&kernel_lock_held;
429 break; 484 break;
430 } 485 }
431 if (__predict_false(!cpu_kpreempt_enter(where, s))) { 486 if (__predict_false(!cpu_kpreempt_enter(where, s))) {
432 /* 487 /*
433 * It may be that the IPL is too high. 488 * It may be that the IPL is too high.
434 * kpreempt_enter() can schedule an 489 * kpreempt_enter() can schedule an
435 * interrupt to retry later. 490 * interrupt to retry later.
436 */ 491 */
437 splx(s); 492 splx(s);
438 failed = (uintptr_t)&cpu_kpreempt_enter_fail; 493 failed = (uintptr_t)&cpu_kpreempt_enter_fail;
439 break; 494 break;
440 } 495 }
441 /* Do it! */ 496 /* Do it! */
442 if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) { 497 if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
443 kpreempt_ev_immed.ev_count++; 498 kpreempt_ev_immed.ev_count++;
444 } 499 }
445 lwp_lock(l); 500 lwp_lock(l);
446 mi_switch(l); 501 mi_switch(l);
447 l->l_nopreempt++; 502 l->l_nopreempt++;
448 splx(s); 503 splx(s);
449 504
450 /* Take care of any MD cleanup. */ 505 /* Take care of any MD cleanup. */
451 cpu_kpreempt_exit(where); 506 cpu_kpreempt_exit(where);
452 l->l_nopreempt--; 507 l->l_nopreempt--;
453 } 508 }
454 509
455 if (__predict_true(!failed)) { 510 if (__predict_true(!failed)) {
456 return false; 511 return false;
457 } 512 }
458 513
459 /* Record preemption failure for reporting via lockstat. */ 514 /* Record preemption failure for reporting via lockstat. */
460 atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED); 515 atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
461 lsflag = 0; 516 lsflag = 0;
462 LOCKSTAT_ENTER(lsflag); 517 LOCKSTAT_ENTER(lsflag);
463 if (__predict_false(lsflag)) { 518 if (__predict_false(lsflag)) {
464 if (where == 0) { 519 if (where == 0) {
465 where = (uintptr_t)__builtin_return_address(0); 520 where = (uintptr_t)__builtin_return_address(0);
466 } 521 }
467 /* Preemption is on, might recurse, so make it atomic. */ 522 /* Preemption is on, might recurse, so make it atomic. */
468 if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL, 523 if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
469 (void *)where) == NULL) { 524 (void *)where) == NULL) {
470 LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime); 525 LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
471 l->l_pfaillock = failed; 526 l->l_pfaillock = failed;
472 } 527 }
473 } 528 }
474 LOCKSTAT_EXIT(lsflag); 529 LOCKSTAT_EXIT(lsflag);
475 return true; 530 return true;
476} 531}
477 532
478/* 533/*
479 * Return true if preemption is explicitly disabled. 534 * Return true if preemption is explicitly disabled.
480 */ 535 */
481bool 536bool
482kpreempt_disabled(void) 537kpreempt_disabled(void)
483{ 538{
484 const lwp_t *l = curlwp; 539 const lwp_t *l = curlwp;
485 540
486 return l->l_nopreempt != 0 || l->l_stat == LSZOMB || 541 return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
487 (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled(); 542 (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
488} 543}
489 544
490/* 545/*
491 * Disable kernel preemption. 546 * Disable kernel preemption.
492 */ 547 */
493void 548void
494kpreempt_disable(void) 549kpreempt_disable(void)
495{ 550{
496 551
497 KPREEMPT_DISABLE(curlwp); 552 KPREEMPT_DISABLE(curlwp);
498} 553}
499 554
500/* 555/*
501 * Reenable kernel preemption. 556 * Reenable kernel preemption.
502 */ 557 */
503void 558void
504kpreempt_enable(void) 559kpreempt_enable(void)
505{ 560{
506 561
507 KPREEMPT_ENABLE(curlwp); 562 KPREEMPT_ENABLE(curlwp);
508} 563}
509 564
510/* 565/*
511 * Compute the amount of time during which the current lwp was running. 566 * Compute the amount of time during which the current lwp was running.
512 * 567 *
513 * - update l_rtime unless it's an idle lwp. 568 * - update l_rtime unless it's an idle lwp.
514 */ 569 */
515 570
516void 571void
517updatertime(lwp_t *l, const struct bintime *now) 572updatertime(lwp_t *l, const struct bintime *now)
518{ 573{
519 574
520 if (__predict_false(l->l_flag & LW_IDLE)) 575 if (__predict_false(l->l_flag & LW_IDLE))
521 return; 576 return;
522 577
523 /* rtime += now - stime */ 578 /* rtime += now - stime */
524 bintime_add(&l->l_rtime, now); 579 bintime_add(&l->l_rtime, now);
525 bintime_sub(&l->l_rtime, &l->l_stime); 580 bintime_sub(&l->l_rtime, &l->l_stime);
526} 581}
527 582
528/* 583/*
529 * Select next LWP from the current CPU to run.. 584 * Select next LWP from the current CPU to run..
530 */ 585 */
531static inline lwp_t * 586static inline lwp_t *
532nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc) 587nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
533{ 588{
534 lwp_t *newl; 589 lwp_t *newl;
535 590
536 /* 591 /*
537 * Let sched_nextlwp() select the LWP to run the CPU next. 592 * Let sched_nextlwp() select the LWP to run the CPU next.
538 * If no LWP is runnable, select the idle LWP. 593 * If no LWP is runnable, select the idle LWP.
539 *  594 *
540 * Note that spc_lwplock might not necessary be held, and 595 * Note that spc_lwplock might not necessary be held, and
541 * new thread would be unlocked after setting the LWP-lock. 596 * new thread would be unlocked after setting the LWP-lock.
542 */ 597 */
543 newl = sched_nextlwp(); 598 newl = sched_nextlwp();
544 if (newl != NULL) { 599 if (newl != NULL) {
545 sched_dequeue(newl); 600 sched_dequeue(newl);
546 KASSERT(lwp_locked(newl, spc->spc_mutex)); 601 KASSERT(lwp_locked(newl, spc->spc_mutex));
547 newl->l_stat = LSONPROC; 602 newl->l_stat = LSONPROC;
548 newl->l_cpu = ci; 603 newl->l_cpu = ci;
549 newl->l_pflag |= LP_RUNNING; 604 newl->l_pflag |= LP_RUNNING;
550 lwp_setlock(newl, spc->spc_lwplock); 605 lwp_setlock(newl, spc->spc_lwplock);
551 } else { 606 } else {
552 newl = ci->ci_data.cpu_idlelwp; 607 newl = ci->ci_data.cpu_idlelwp;
553 newl->l_stat = LSONPROC; 608 newl->l_stat = LSONPROC;
554 newl->l_pflag |= LP_RUNNING; 609 newl->l_pflag |= LP_RUNNING;
555 } 610 }
556 611
557 /* 612 /*
558 * Only clear want_resched if there are no pending (slow) 613 * Only clear want_resched if there are no pending (slow)
559 * software interrupts. 614 * software interrupts.
560 */ 615 */
561 ci->ci_want_resched = ci->ci_data.cpu_softints; 616 ci->ci_want_resched = ci->ci_data.cpu_softints;
562 spc->spc_flags &= ~SPCF_SWITCHCLEAR; 617 spc->spc_flags &= ~SPCF_SWITCHCLEAR;
563 spc->spc_curpriority = lwp_eprio(newl); 618 spc->spc_curpriority = lwp_eprio(newl);
564 619
565 return newl; 620 return newl;
566} 621}
567 622
568/* 623/*
569 * The machine independent parts of context switch. 624 * The machine independent parts of context switch.
570 * 625 *
571 * Returns 1 if another LWP was actually run. 626 * Returns 1 if another LWP was actually run.
572 */ 627 */
573int 628int
574mi_switch(lwp_t *l) 629mi_switch(lwp_t *l)
575{ 630{
576 struct cpu_info *ci; 631 struct cpu_info *ci;
577 struct schedstate_percpu *spc; 632 struct schedstate_percpu *spc;
578 struct lwp *newl; 633 struct lwp *newl;
579 int retval, oldspl; 634 int retval, oldspl;
580 struct bintime bt; 635 struct bintime bt;
581 bool returning; 636 bool returning;
582 637
583 KASSERT(lwp_locked(l, NULL)); 638 KASSERT(lwp_locked(l, NULL));
584 KASSERT(kpreempt_disabled()); 639 KASSERT(kpreempt_disabled());
585 LOCKDEBUG_BARRIER(l->l_mutex, 1); 640 LOCKDEBUG_BARRIER(l->l_mutex, 1);
586 641
587 kstack_check_magic(l); 642 kstack_check_magic(l);
588 643
589 binuptime(&bt); 644 binuptime(&bt);
590 645
591 KASSERT((l->l_pflag & LP_RUNNING) != 0); 646 KASSERT((l->l_pflag & LP_RUNNING) != 0);
592 KASSERT(l->l_cpu == curcpu()); 647 KASSERT(l->l_cpu == curcpu());
593 ci = l->l_cpu; 648 ci = l->l_cpu;
594 spc = &ci->ci_schedstate; 649 spc = &ci->ci_schedstate;
595 returning = false; 650 returning = false;
596 newl = NULL; 651 newl = NULL;
597 652
598 /* 653 /*
599 * If we have been asked to switch to a specific LWP, then there 654 * If we have been asked to switch to a specific LWP, then there
600 * is no need to inspect the run queues. If a soft interrupt is 655 * is no need to inspect the run queues. If a soft interrupt is
601 * blocking, then return to the interrupted thread without adjusting 656 * blocking, then return to the interrupted thread without adjusting
602 * VM context or its start time: neither have been changed in order 657 * VM context or its start time: neither have been changed in order
603 * to take the interrupt. 658 * to take the interrupt.
604 */ 659 */
605 if (l->l_switchto != NULL) { 660 if (l->l_switchto != NULL) {
606 if ((l->l_pflag & LP_INTR) != 0) { 661 if ((l->l_pflag & LP_INTR) != 0) {
607 returning = true; 662 returning = true;
608 softint_block(l); 663 softint_block(l);
609 if ((l->l_pflag & LP_TIMEINTR) != 0) 664 if ((l->l_pflag & LP_TIMEINTR) != 0)
610 updatertime(l, &bt); 665 updatertime(l, &bt);
611 } 666 }
612 newl = l->l_switchto; 667 newl = l->l_switchto;
613 l->l_switchto = NULL; 668 l->l_switchto = NULL;
614 } 669 }
615#ifndef __HAVE_FAST_SOFTINTS 670#ifndef __HAVE_FAST_SOFTINTS
616 else if (ci->ci_data.cpu_softints != 0) { 671 else if (ci->ci_data.cpu_softints != 0) {
617 /* There are pending soft interrupts, so pick one. */ 672 /* There are pending soft interrupts, so pick one. */
618 newl = softint_picklwp(); 673 newl = softint_picklwp();
619 newl->l_stat = LSONPROC; 674 newl->l_stat = LSONPROC;
620 newl->l_pflag |= LP_RUNNING; 675 newl->l_pflag |= LP_RUNNING;
621 } 676 }
622#endif /* !__HAVE_FAST_SOFTINTS */ 677#endif /* !__HAVE_FAST_SOFTINTS */
623 678
624 /* Count time spent in current system call */ 679 /* Count time spent in current system call */
625 if (!returning) { 680 if (!returning) {
626 SYSCALL_TIME_SLEEP(l); 681 SYSCALL_TIME_SLEEP(l);
627 682
628 /* 683 /*
629 * XXXSMP If we are using h/w performance counters, 684 * XXXSMP If we are using h/w performance counters,
630 * save context. 685 * save context.
631 */ 686 */
632#if PERFCTRS 687#if PERFCTRS
633 if (PMC_ENABLED(l->l_proc)) { 688 if (PMC_ENABLED(l->l_proc)) {
634 pmc_save_context(l->l_proc); 689 pmc_save_context(l->l_proc);
635 } 690 }
636#endif 691#endif
637 updatertime(l, &bt); 692 updatertime(l, &bt);
638 } 693 }
639 694
640 /* Lock the runqueue */ 695 /* Lock the runqueue */
641 KASSERT(l->l_stat != LSRUN); 696 KASSERT(l->l_stat != LSRUN);
642 mutex_spin_enter(spc->spc_mutex); 697 mutex_spin_enter(spc->spc_mutex);
643 698
644 /* 699 /*
645 * If on the CPU and we have gotten this far, then we must yield. 700 * If on the CPU and we have gotten this far, then we must yield.
646 */ 701 */
647 if (l->l_stat == LSONPROC && l != newl) { 702 if (l->l_stat == LSONPROC && l != newl) {
648 KASSERT(lwp_locked(l, spc->spc_lwplock)); 703 KASSERT(lwp_locked(l, spc->spc_lwplock));
649 if ((l->l_flag & LW_IDLE) == 0) { 704 if ((l->l_flag & LW_IDLE) == 0) {
650 l->l_stat = LSRUN; 705 l->l_stat = LSRUN;
651 lwp_setlock(l, spc->spc_mutex); 706 lwp_setlock(l, spc->spc_mutex);
652 sched_enqueue(l, true); 707 sched_enqueue(l, true);
653 /* Handle migration case */ 708 /* Handle migration case */
654 KASSERT(spc->spc_migrating == NULL); 709 KASSERT(spc->spc_migrating == NULL);
655 if (l->l_target_cpu != NULL) { 710 if (l->l_target_cpu != NULL) {
656 spc->spc_migrating = l; 711 spc->spc_migrating = l;
657 } 712 }
658 } else 713 } else
659 l->l_stat = LSIDL; 714 l->l_stat = LSIDL;
660 } 715 }
661 716
662 /* Pick new LWP to run. */ 717 /* Pick new LWP to run. */
663 if (newl == NULL) { 718 if (newl == NULL) {
664 newl = nextlwp(ci, spc); 719 newl = nextlwp(ci, spc);
665 } 720 }
666 721
667 /* Items that must be updated with the CPU locked. */ 722 /* Items that must be updated with the CPU locked. */
668 if (!returning) { 723 if (!returning) {
669 /* Update the new LWP's start time. */ 724 /* Update the new LWP's start time. */
670 newl->l_stime = bt; 725 newl->l_stime = bt;
671 726
672 /* 727 /*
673 * ci_curlwp changes when a fast soft interrupt occurs. 728 * ci_curlwp changes when a fast soft interrupt occurs.
674 * We use cpu_onproc to keep track of which kernel or 729 * We use cpu_onproc to keep track of which kernel or
675 * user thread is running 'underneath' the software 730 * user thread is running 'underneath' the software
676 * interrupt. This is important for time accounting, 731 * interrupt. This is important for time accounting,
677 * itimers and forcing user threads to preempt (aston). 732 * itimers and forcing user threads to preempt (aston).
678 */ 733 */
679 ci->ci_data.cpu_onproc = newl; 734 ci->ci_data.cpu_onproc = newl;
680 } 735 }
681 736
682 /* 737 /*
683 * Preemption related tasks. Must be done with the current 738 * Preemption related tasks. Must be done with the current
684 * CPU locked. 739 * CPU locked.
685 */ 740 */
686 cpu_did_resched(l); 741 cpu_did_resched(l);
687 l->l_dopreempt = 0; 742 l->l_dopreempt = 0;
688 if (__predict_false(l->l_pfailaddr != 0)) { 743 if (__predict_false(l->l_pfailaddr != 0)) {
689 LOCKSTAT_FLAG(lsflag); 744 LOCKSTAT_FLAG(lsflag);
690 LOCKSTAT_ENTER(lsflag); 745 LOCKSTAT_ENTER(lsflag);
691 LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime); 746 LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
692 LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN, 747 LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
693 1, l->l_pfailtime, l->l_pfailaddr); 748 1, l->l_pfailtime, l->l_pfailaddr);
694 LOCKSTAT_EXIT(lsflag); 749 LOCKSTAT_EXIT(lsflag);
695 l->l_pfailtime = 0; 750 l->l_pfailtime = 0;
696 l->l_pfaillock = 0; 751 l->l_pfaillock = 0;
697 l->l_pfailaddr = 0; 752 l->l_pfailaddr = 0;
698 } 753 }
699 754
700 if (l != newl) { 755 if (l != newl) {
701 struct lwp *prevlwp; 756 struct lwp *prevlwp;
702 757
703 /* Release all locks, but leave the current LWP locked */ 758 /* Release all locks, but leave the current LWP locked */
704 if (l->l_mutex == spc->spc_mutex) { 759 if (l->l_mutex == spc->spc_mutex) {
705 /* 760 /*
706 * Drop spc_lwplock, if the current LWP has been moved 761 * Drop spc_lwplock, if the current LWP has been moved
707 * to the run queue (it is now locked by spc_mutex). 762 * to the run queue (it is now locked by spc_mutex).
708 */ 763 */
709 mutex_spin_exit(spc->spc_lwplock); 764 mutex_spin_exit(spc->spc_lwplock);
710 } else { 765 } else {
711 /* 766 /*
712 * Otherwise, drop the spc_mutex, we are done with the 767 * Otherwise, drop the spc_mutex, we are done with the
713 * run queues. 768 * run queues.
714 */ 769 */
715 mutex_spin_exit(spc->spc_mutex); 770 mutex_spin_exit(spc->spc_mutex);
716 } 771 }
717 772
718 /* 773 /*
719 * Mark that context switch is going to be performed 774 * Mark that context switch is going to be performed
720 * for this LWP, to protect it from being switched 775 * for this LWP, to protect it from being switched
721 * to on another CPU. 776 * to on another CPU.
722 */ 777 */
723 KASSERT(l->l_ctxswtch == 0); 778 KASSERT(l->l_ctxswtch == 0);
724 l->l_ctxswtch = 1; 779 l->l_ctxswtch = 1;
725 l->l_ncsw++; 780 l->l_ncsw++;
726 KASSERT((l->l_pflag & LP_RUNNING) != 0); 781 KASSERT((l->l_pflag & LP_RUNNING) != 0);
727 l->l_pflag &= ~LP_RUNNING; 782 l->l_pflag &= ~LP_RUNNING;
728 783
729 /* 784 /*
730 * Increase the count of spin-mutexes before the release 785 * Increase the count of spin-mutexes before the release
731 * of the last lock - we must remain at IPL_SCHED during 786 * of the last lock - we must remain at IPL_SCHED during
732 * the context switch. 787 * the context switch.
733 */ 788 */
734 oldspl = MUTEX_SPIN_OLDSPL(ci); 789 oldspl = MUTEX_SPIN_OLDSPL(ci);
735 ci->ci_mtx_count--; 790 ci->ci_mtx_count--;
736 lwp_unlock(l); 791 lwp_unlock(l);
737 792
738 /* Count the context switch on this CPU. */ 793 /* Count the context switch on this CPU. */
739 ci->ci_data.cpu_nswtch++; 794 ci->ci_data.cpu_nswtch++;
740 795
741 /* Update status for lwpctl, if present. */ 796 /* Update status for lwpctl, if present. */
742 if (l->l_lwpctl != NULL) 797 if (l->l_lwpctl != NULL)
743 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE; 798 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
744 799
745 /* 800 /*
746 * Save old VM context, unless a soft interrupt 801 * Save old VM context, unless a soft interrupt
747 * handler is blocking. 802 * handler is blocking.
748 */ 803 */
749 if (!returning) 804 if (!returning)
750 pmap_deactivate(l); 805 pmap_deactivate(l);
751 806
752 /* 807 /*
753 * We may need to spin-wait for if 'newl' is still 808 * We may need to spin-wait for if 'newl' is still
754 * context switching on another CPU. 809 * context switching on another CPU.
755 */ 810 */
756 if (__predict_false(newl->l_ctxswtch != 0)) { 811 if (__predict_false(newl->l_ctxswtch != 0)) {
757 u_int count; 812 u_int count;
758 count = SPINLOCK_BACKOFF_MIN; 813 count = SPINLOCK_BACKOFF_MIN;
759 while (newl->l_ctxswtch) 814 while (newl->l_ctxswtch)
760 SPINLOCK_BACKOFF(count); 815 SPINLOCK_BACKOFF(count);
761 } 816 }
762 817
763 /* Switch to the new LWP.. */ 818 /* Switch to the new LWP.. */
764 prevlwp = cpu_switchto(l, newl, returning); 819 prevlwp = cpu_switchto(l, newl, returning);
765 ci = curcpu(); 820 ci = curcpu();
766 821
767 /* 822 /*
768 * Switched away - we have new curlwp. 823 * Switched away - we have new curlwp.
769 * Restore VM context and IPL. 824 * Restore VM context and IPL.
770 */ 825 */
771 pmap_activate(l); 826 pmap_activate(l);
772 uvm_emap_switch(l); 827 uvm_emap_switch(l);
773 828
774 if (prevlwp != NULL) { 829 if (prevlwp != NULL) {
775 /* Normalize the count of the spin-mutexes */ 830 /* Normalize the count of the spin-mutexes */
776 ci->ci_mtx_count++; 831 ci->ci_mtx_count++;
777 /* Unmark the state of context switch */ 832 /* Unmark the state of context switch */
778 membar_exit(); 833 membar_exit();
779 prevlwp->l_ctxswtch = 0; 834 prevlwp->l_ctxswtch = 0;
780 } 835 }
781 836
782 /* Update status for lwpctl, if present. */ 837 /* Update status for lwpctl, if present. */
783 if (l->l_lwpctl != NULL) { 838 if (l->l_lwpctl != NULL) {
784 l->l_lwpctl->lc_curcpu = (int)cpu_index(ci); 839 l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
785 l->l_lwpctl->lc_pctr++; 840 l->l_lwpctl->lc_pctr++;
786 } 841 }
787 842
788 KASSERT(l->l_cpu == ci); 843 KASSERT(l->l_cpu == ci);
789 splx(oldspl); 844 splx(oldspl);
790 retval = 1; 845 retval = 1;
791 } else { 846 } else {
792 /* Nothing to do - just unlock and return. */ 847 /* Nothing to do - just unlock and return. */
793 mutex_spin_exit(spc->spc_mutex); 848 mutex_spin_exit(spc->spc_mutex);
794 lwp_unlock(l); 849 lwp_unlock(l);
795 retval = 0; 850 retval = 0;
796 } 851 }
797 852
798 KASSERT(l == curlwp); 853 KASSERT(l == curlwp);
799 KASSERT(l->l_stat == LSONPROC); 854 KASSERT(l->l_stat == LSONPROC);
800 855
801 /* 856 /*
802 * XXXSMP If we are using h/w performance counters, restore context. 857 * XXXSMP If we are using h/w performance counters, restore context.
803 * XXXSMP preemption problem. 858 * XXXSMP preemption problem.
804 */ 859 */
805#if PERFCTRS 860#if PERFCTRS
806 if (PMC_ENABLED(l->l_proc)) { 861 if (PMC_ENABLED(l->l_proc)) {
807 pmc_restore_context(l->l_proc); 862 pmc_restore_context(l->l_proc);
808 } 863 }
809#endif 864#endif
810 SYSCALL_TIME_WAKEUP(l); 865 SYSCALL_TIME_WAKEUP(l);
811 LOCKDEBUG_BARRIER(NULL, 1); 866 LOCKDEBUG_BARRIER(NULL, 1);
812 867
813 return retval; 868 return retval;
814} 869}
815 870
816/* 871/*
817 * The machine independent parts of context switch to oblivion. 872 * The machine independent parts of context switch to oblivion.
818 * Does not return. Call with the LWP unlocked. 873 * Does not return. Call with the LWP unlocked.
819 */ 874 */
820void 875void
821lwp_exit_switchaway(lwp_t *l) 876lwp_exit_switchaway(lwp_t *l)
822{ 877{
823 struct cpu_info *ci; 878 struct cpu_info *ci;
824 struct lwp *newl; 879 struct lwp *newl;
825 struct bintime bt; 880 struct bintime bt;
826 881
827 ci = l->l_cpu; 882 ci = l->l_cpu;
828 883
829 KASSERT(kpreempt_disabled()); 884 KASSERT(kpreempt_disabled());
830 KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL); 885 KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
831 KASSERT(ci == curcpu()); 886 KASSERT(ci == curcpu());
832 LOCKDEBUG_BARRIER(NULL, 0); 887 LOCKDEBUG_BARRIER(NULL, 0);
833 888
834 kstack_check_magic(l); 889 kstack_check_magic(l);
835 890
836 /* Count time spent in current system call */ 891 /* Count time spent in current system call */
837 SYSCALL_TIME_SLEEP(l); 892 SYSCALL_TIME_SLEEP(l);
838 binuptime(&bt); 893 binuptime(&bt);
839 updatertime(l, &bt); 894 updatertime(l, &bt);
840 895
841 /* Must stay at IPL_SCHED even after releasing run queue lock. */ 896 /* Must stay at IPL_SCHED even after releasing run queue lock. */
842 (void)splsched(); 897 (void)splsched();
843 898
844 /* 899 /*
845 * Let sched_nextlwp() select the LWP to run the CPU next. 900 * Let sched_nextlwp() select the LWP to run the CPU next.
846 * If no LWP is runnable, select the idle LWP. 901 * If no LWP is runnable, select the idle LWP.
847 *  902 *
848 * Note that spc_lwplock might not necessary be held, and 903 * Note that spc_lwplock might not necessary be held, and
849 * new thread would be unlocked after setting the LWP-lock. 904 * new thread would be unlocked after setting the LWP-lock.
850 */ 905 */
851 spc_lock(ci); 906 spc_lock(ci);
852#ifndef __HAVE_FAST_SOFTINTS 907#ifndef __HAVE_FAST_SOFTINTS
853 if (ci->ci_data.cpu_softints != 0) { 908 if (ci->ci_data.cpu_softints != 0) {
854 /* There are pending soft interrupts, so pick one. */ 909 /* There are pending soft interrupts, so pick one. */
855 newl = softint_picklwp(); 910 newl = softint_picklwp();
856 newl->l_stat = LSONPROC; 911 newl->l_stat = LSONPROC;
857 newl->l_pflag |= LP_RUNNING; 912 newl->l_pflag |= LP_RUNNING;
858 } else  913 } else
859#endif /* !__HAVE_FAST_SOFTINTS */ 914#endif /* !__HAVE_FAST_SOFTINTS */
860 { 915 {
861 newl = nextlwp(ci, &ci->ci_schedstate); 916 newl = nextlwp(ci, &ci->ci_schedstate);
862 } 917 }
863 918
864 /* Update the new LWP's start time. */ 919 /* Update the new LWP's start time. */
865 newl->l_stime = bt; 920 newl->l_stime = bt;
866 l->l_pflag &= ~LP_RUNNING; 921 l->l_pflag &= ~LP_RUNNING;
867 922
868 /* 923 /*
869 * ci_curlwp changes when a fast soft interrupt occurs. 924 * ci_curlwp changes when a fast soft interrupt occurs.
870 * We use cpu_onproc to keep track of which kernel or 925 * We use cpu_onproc to keep track of which kernel or
871 * user thread is running 'underneath' the software 926 * user thread is running 'underneath' the software
872 * interrupt. This is important for time accounting, 927 * interrupt. This is important for time accounting,
873 * itimers and forcing user threads to preempt (aston). 928 * itimers and forcing user threads to preempt (aston).
874 */ 929 */
875 ci->ci_data.cpu_onproc = newl; 930 ci->ci_data.cpu_onproc = newl;
876 931
877 /* 932 /*
878 * Preemption related tasks. Must be done with the current 933 * Preemption related tasks. Must be done with the current
879 * CPU locked. 934 * CPU locked.
880 */ 935 */
881 cpu_did_resched(l); 936 cpu_did_resched(l);
882 937
883 /* Unlock the run queue. */ 938 /* Unlock the run queue. */
884 spc_unlock(ci); 939 spc_unlock(ci);
885 940
886 /* Count the context switch on this CPU. */ 941 /* Count the context switch on this CPU. */
887 ci->ci_data.cpu_nswtch++; 942 ci->ci_data.cpu_nswtch++;
888 943
889 /* Update status for lwpctl, if present. */ 944 /* Update status for lwpctl, if present. */
890 if (l->l_lwpctl != NULL) 945 if (l->l_lwpctl != NULL)
891 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED; 946 l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
892 947
893 /* 948 /*
894 * We may need to spin-wait for if 'newl' is still 949 * We may need to spin-wait for if 'newl' is still
895 * context switching on another CPU. 950 * context switching on another CPU.
896 */ 951 */
897 if (__predict_false(newl->l_ctxswtch != 0)) { 952 if (__predict_false(newl->l_ctxswtch != 0)) {
898 u_int count; 953 u_int count;
899 count = SPINLOCK_BACKOFF_MIN; 954 count = SPINLOCK_BACKOFF_MIN;
900 while (newl->l_ctxswtch) 955 while (newl->l_ctxswtch)
901 SPINLOCK_BACKOFF(count); 956 SPINLOCK_BACKOFF(count);
902 } 957 }
903 958
904 /* Switch to the new LWP.. */ 959 /* Switch to the new LWP.. */
905 (void)cpu_switchto(NULL, newl, false); 960 (void)cpu_switchto(NULL, newl, false);
906 961
907 for (;;) continue; /* XXX: convince gcc about "noreturn" */ 962 for (;;) continue; /* XXX: convince gcc about "noreturn" */
908 /* NOTREACHED */ 963 /* NOTREACHED */
909} 964}
910 965
911/* 966/*
912 * Change LWP state to be runnable, placing it on the run queue if it is 967 * Change LWP state to be runnable, placing it on the run queue if it is
913 * in memory, and awakening the swapper if it isn't in memory. 968 * in memory, and awakening the swapper if it isn't in memory.
914 * 969 *
915 * Call with the process and LWP locked. Will return with the LWP unlocked. 970 * Call with the process and LWP locked. Will return with the LWP unlocked.
916 */ 971 */
917void 972void
918setrunnable(struct lwp *l) 973setrunnable(struct lwp *l)
919{ 974{
920 struct proc *p = l->l_proc; 975 struct proc *p = l->l_proc;
921 struct cpu_info *ci; 976 struct cpu_info *ci;
922 977
923 KASSERT((l->l_flag & LW_IDLE) == 0); 978 KASSERT((l->l_flag & LW_IDLE) == 0);
924 KASSERT(mutex_owned(p->p_lock)); 979 KASSERT(mutex_owned(p->p_lock));
925 KASSERT(lwp_locked(l, NULL)); 980 KASSERT(lwp_locked(l, NULL));
926 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex); 981 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
927 982
928 switch (l->l_stat) { 983 switch (l->l_stat) {
929 case LSSTOP: 984 case LSSTOP:
930 /* 985 /*
931 * If we're being traced (possibly because someone attached us 986 * If we're being traced (possibly because someone attached us
932 * while we were stopped), check for a signal from the debugger. 987 * while we were stopped), check for a signal from the debugger.
933 */ 988 */
934 if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) 989 if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0)
935 signotify(l); 990 signotify(l);
936 p->p_nrlwps++; 991 p->p_nrlwps++;
937 break; 992 break;
938 case LSSUSPENDED: 993 case LSSUSPENDED:
939 l->l_flag &= ~LW_WSUSPEND; 994 l->l_flag &= ~LW_WSUSPEND;
940 p->p_nrlwps++; 995 p->p_nrlwps++;
941 cv_broadcast(&p->p_lwpcv); 996 cv_broadcast(&p->p_lwpcv);
942 break; 997 break;
943 case LSSLEEP: 998 case LSSLEEP:
944 KASSERT(l->l_wchan != NULL); 999 KASSERT(l->l_wchan != NULL);
945 break; 1000 break;
946 default: 1001 default:
947 panic("setrunnable: lwp %p state was %d", l, l->l_stat); 1002 panic("setrunnable: lwp %p state was %d", l, l->l_stat);
948 } 1003 }
949 1004
950#ifdef KERN_SA 1005#ifdef KERN_SA
951 if (l->l_proc->p_sa) 1006 if (l->l_proc->p_sa)
952 sa_awaken(l); 1007 sa_awaken(l);
953#endif /* KERN_SA */ 1008#endif /* KERN_SA */
954 1009
955 /* 1010 /*
956 * If the LWP was sleeping interruptably, then it's OK to start it 1011 * If the LWP was sleeping interruptably, then it's OK to start it
957 * again. If not, mark it as still sleeping. 1012 * again. If not, mark it as still sleeping.
958 */ 1013 */
959 if (l->l_wchan != NULL) { 1014 if (l->l_wchan != NULL) {
960 l->l_stat = LSSLEEP; 1015 l->l_stat = LSSLEEP;
961 /* lwp_unsleep() will release the lock. */ 1016 /* lwp_unsleep() will release the lock. */
962 lwp_unsleep(l, true); 1017 lwp_unsleep(l, true);
963 return; 1018 return;
964 } 1019 }
965 1020
966 /* 1021 /*
967 * If the LWP is still on the CPU, mark it as LSONPROC. It may be 1022 * If the LWP is still on the CPU, mark it as LSONPROC. It may be
968 * about to call mi_switch(), in which case it will yield. 1023 * about to call mi_switch(), in which case it will yield.
969 */ 1024 */
970 if ((l->l_pflag & LP_RUNNING) != 0) { 1025 if ((l->l_pflag & LP_RUNNING) != 0) {
971 l->l_stat = LSONPROC; 1026 l->l_stat = LSONPROC;
972 l->l_slptime = 0; 1027 l->l_slptime = 0;
973 lwp_unlock(l); 1028 lwp_unlock(l);
974 return; 1029 return;
975 } 1030 }
976 1031
977 /* 1032 /*
978 * Look for a CPU to run. 1033 * Look for a CPU to run.
979 * Set the LWP runnable. 1034 * Set the LWP runnable.
980 */ 1035 */
981 ci = sched_takecpu(l); 1036 ci = sched_takecpu(l);
982 l->l_cpu = ci; 1037 l->l_cpu = ci;
983 spc_lock(ci); 1038 spc_lock(ci);
984 lwp_unlock_to(l, ci->ci_schedstate.spc_mutex); 1039 lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
985 sched_setrunnable(l); 1040 sched_setrunnable(l);
986 l->l_stat = LSRUN; 1041 l->l_stat = LSRUN;
987 l->l_slptime = 0; 1042 l->l_slptime = 0;
988 1043
989 /* 1044 /*
990 * If thread is swapped out - wake the swapper to bring it back in. 1045 * If thread is swapped out - wake the swapper to bring it back in.
991 * Otherwise, enter it into a run queue. 1046 * Otherwise, enter it into a run queue.
992 */ 1047 */
993 if (l->l_flag & LW_INMEM) { 1048 if (l->l_flag & LW_INMEM) {
994 sched_enqueue(l, false); 1049 sched_enqueue(l, false);
995 resched_cpu(l); 1050 resched_cpu(l);
996 lwp_unlock(l); 1051 lwp_unlock(l);
997 } else { 1052 } else {
998 lwp_unlock(l); 1053 lwp_unlock(l);
999 uvm_kick_scheduler(); 1054 uvm_kick_scheduler();
1000 } 1055 }
1001} 1056}
1002 1057
1003/* 1058/*
1004 * suspendsched: 1059 * suspendsched:
1005 * 1060 *
1006 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.  1061 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
1007 */ 1062 */
1008void 1063void
1009suspendsched(void) 1064suspendsched(void)
1010{ 1065{
1011 CPU_INFO_ITERATOR cii; 1066 CPU_INFO_ITERATOR cii;
1012 struct cpu_info *ci; 1067 struct cpu_info *ci;
1013 struct lwp *l; 1068 struct lwp *l;
1014 struct proc *p; 1069 struct proc *p;
1015 1070
1016 /* 1071 /*
1017 * We do this by process in order not to violate the locking rules. 1072 * We do this by process in order not to violate the locking rules.
1018 */ 1073 */
1019 mutex_enter(proc_lock); 1074 mutex_enter(proc_lock);
1020 PROCLIST_FOREACH(p, &allproc) { 1075 PROCLIST_FOREACH(p, &allproc) {
1021 if ((p->p_flag & PK_MARKER) != 0) 1076 if ((p->p_flag & PK_MARKER) != 0)
1022 continue; 1077 continue;
1023 1078
1024 mutex_enter(p->p_lock); 1079 mutex_enter(p->p_lock);
1025 if ((p->p_flag & PK_SYSTEM) != 0) { 1080 if ((p->p_flag & PK_SYSTEM) != 0) {
1026 mutex_exit(p->p_lock); 1081 mutex_exit(p->p_lock);
1027 continue; 1082 continue;
1028 } 1083 }
1029 1084
1030 p->p_stat = SSTOP; 1085 p->p_stat = SSTOP;
1031 1086
1032 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 1087 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1033 if (l == curlwp) 1088 if (l == curlwp)
1034 continue; 1089 continue;
1035 1090
1036 lwp_lock(l); 1091 lwp_lock(l);
1037 1092
1038 /* 1093 /*
1039 * Set L_WREBOOT so that the LWP will suspend itself 1094 * Set L_WREBOOT so that the LWP will suspend itself
1040 * when it tries to return to user mode. We want to 1095 * when it tries to return to user mode. We want to
 1041 * try to get as many LWPs as possible to 1096 * try to get as many LWPs as possible to
1042 * the user / kernel boundary, so that they will 1097 * the user / kernel boundary, so that they will
1043 * release any locks that they hold. 1098 * release any locks that they hold.
1044 */ 1099 */
1045 l->l_flag |= (LW_WREBOOT | LW_WSUSPEND); 1100 l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
1046 1101
1047 if (l->l_stat == LSSLEEP && 1102 if (l->l_stat == LSSLEEP &&
1048 (l->l_flag & LW_SINTR) != 0) { 1103 (l->l_flag & LW_SINTR) != 0) {
1049 /* setrunnable() will release the lock. */ 1104 /* setrunnable() will release the lock. */
1050 setrunnable(l); 1105 setrunnable(l);
1051 continue; 1106 continue;
1052 } 1107 }
1053 1108
1054 lwp_unlock(l); 1109 lwp_unlock(l);
1055 } 1110 }
1056 1111
1057 mutex_exit(p->p_lock); 1112 mutex_exit(p->p_lock);
1058 } 1113 }
1059 mutex_exit(proc_lock); 1114 mutex_exit(proc_lock);
1060 1115
1061 /* 1116 /*
1062 * Kick all CPUs to make them preempt any LWPs running in user mode.  1117 * Kick all CPUs to make them preempt any LWPs running in user mode.
1063 * They'll trap into the kernel and suspend themselves in userret(). 1118 * They'll trap into the kernel and suspend themselves in userret().
1064 */ 1119 */
1065 for (CPU_INFO_FOREACH(cii, ci)) { 1120 for (CPU_INFO_FOREACH(cii, ci)) {
1066 spc_lock(ci); 1121 spc_lock(ci);
1067 cpu_need_resched(ci, RESCHED_IMMED); 1122 cpu_need_resched(ci, RESCHED_IMMED);
1068 spc_unlock(ci); 1123 spc_unlock(ci);
1069 } 1124 }
1070} 1125}
1071 1126
1072/* 1127/*
1073 * sched_unsleep: 1128 * sched_unsleep:
1074 * 1129 *
 1075 * This is called when the LWP has not been awoken normally but instead 1130 * This is called when the LWP has not been awoken normally but instead
1076 * interrupted: for example, if the sleep timed out. Because of this, 1131 * interrupted: for example, if the sleep timed out. Because of this,
1077 * it's not a valid action for running or idle LWPs. 1132 * it's not a valid action for running or idle LWPs.
1078 */ 1133 */
1079static u_int 1134static u_int
1080sched_unsleep(struct lwp *l, bool cleanup) 1135sched_unsleep(struct lwp *l, bool cleanup)
1081{ 1136{
1082 1137
1083 lwp_unlock(l); 1138 lwp_unlock(l);
1084 panic("sched_unsleep"); 1139 panic("sched_unsleep");
1085} 1140}
1086 1141
1087static void 1142static void
1088resched_cpu(struct lwp *l) 1143resched_cpu(struct lwp *l)
1089{ 1144{
 1090 struct cpu_info *ci = l->l_cpu; 1145 struct cpu_info *ci = l->l_cpu;
1091 1146
1092 KASSERT(lwp_locked(l, NULL)); 1147 KASSERT(lwp_locked(l, NULL));
1093 if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority) 1148 if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
1094 cpu_need_resched(ci, 0); 1149 cpu_need_resched(ci, 0);
1095} 1150}
1096 1151
1097static void 1152static void
1098sched_changepri(struct lwp *l, pri_t pri) 1153sched_changepri(struct lwp *l, pri_t pri)
1099{ 1154{
1100 1155
1101 KASSERT(lwp_locked(l, NULL)); 1156 KASSERT(lwp_locked(l, NULL));
1102 1157
1103 if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) { 1158 if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
1104 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex)); 1159 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
1105 sched_dequeue(l); 1160 sched_dequeue(l);
1106 l->l_priority = pri; 1161 l->l_priority = pri;
1107 sched_enqueue(l, false); 1162 sched_enqueue(l, false);
1108 } else { 1163 } else {
1109 l->l_priority = pri; 1164 l->l_priority = pri;
1110 } 1165 }
1111 resched_cpu(l); 1166 resched_cpu(l);
1112} 1167}
1113 1168
1114static void 1169static void
1115sched_lendpri(struct lwp *l, pri_t pri) 1170sched_lendpri(struct lwp *l, pri_t pri)
1116{ 1171{
1117 1172
1118 KASSERT(lwp_locked(l, NULL)); 1173 KASSERT(lwp_locked(l, NULL));
1119 1174
1120 if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) { 1175 if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
1121 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex)); 1176 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
1122 sched_dequeue(l); 1177 sched_dequeue(l);
1123 l->l_inheritedprio = pri; 1178 l->l_inheritedprio = pri;
1124 sched_enqueue(l, false); 1179 sched_enqueue(l, false);
1125 } else { 1180 } else {
1126 l->l_inheritedprio = pri; 1181 l->l_inheritedprio = pri;
1127 } 1182 }
1128 resched_cpu(l); 1183 resched_cpu(l);
1129} 1184}
1130 1185
1131struct lwp * 1186struct lwp *
1132syncobj_noowner(wchan_t wchan) 1187syncobj_noowner(wchan_t wchan)
1133{ 1188{
1134 1189
1135 return NULL; 1190 return NULL;
1136} 1191}
1137 1192
1138/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */ 1193/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
1139const fixpt_t ccpu = 0.95122942450071400909 * FSCALE; 1194const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
1140 1195
1141/* 1196/*
1142 * sched_pstats: 1197 * sched_pstats:
1143 * 1198 *
1144 * Update process statistics and check CPU resource allocation. 1199 * Update process statistics and check CPU resource allocation.
1145 * Call scheduler-specific hook to eventually adjust process/LWP 1200 * Call scheduler-specific hook to eventually adjust process/LWP
1146 * priorities. 1201 * priorities.
1147 */ 1202 */
1148/* ARGSUSED */ 1203/* ARGSUSED */
1149void 1204void
1150sched_pstats(void *arg) 1205sched_pstats(void *arg)
1151{ 1206{
1152 const int clkhz = (stathz != 0 ? stathz : hz); 1207 const int clkhz = (stathz != 0 ? stathz : hz);
1153 static bool backwards; 1208 static bool backwards;
1154 struct rlimit *rlim; 1209 struct rlimit *rlim;
1155 struct lwp *l; 1210 struct lwp *l;
1156 struct proc *p; 1211 struct proc *p;
1157 long runtm; 1212 long runtm;
1158 fixpt_t lpctcpu; 1213 fixpt_t lpctcpu;
1159 u_int lcpticks; 1214 u_int lcpticks;
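
The ccpu constant in the excerpt above is the per-second decay factor for proc::p_pctcpu: exp(-1/20) ~= 0.95123, so after sixty of the roughly once-a-second sched_pstats() passes the surviving fraction is exp(-3) ~= 5%, which is where the "decay 95% in 60 seconds" figure in the comment comes from. A minimal userland sketch of that arithmetic (assuming only libm and plain floating point; the kernel keeps the value in fixed point, scaled by FSCALE):

#include <math.h>
#include <stdio.h>

int
main(void)
{
	double ccpu = exp(-1.0 / 20.0);	/* 0.95122942450071400909... */
	double pct = 1.0;		/* start from 100% CPU usage */
	int sec;

	/* One decay step per second, as sched_pstats() would apply it. */
	for (sec = 0; sec < 60; sec++)
		pct *= ccpu;

	printf("ccpu = %.20f, left after 60s = %.4f\n", ccpu, pct);
	return 0;
}
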

cvs diff -r1.17 -r1.18 src/sys/secmodel/suser/secmodel_suser.c

--- src/sys/secmodel/suser/secmodel_suser.c 2009/10/03 00:37:01 1.17
+++ src/sys/secmodel/suser/secmodel_suser.c 2009/10/03 01:30:25 1.18
@@ -1,1032 +1,1003 @@ @@ -1,1032 +1,1003 @@
1/* $NetBSD: secmodel_suser.c,v 1.17 2009/10/03 00:37:01 elad Exp $ */ 1/* $NetBSD: secmodel_suser.c,v 1.18 2009/10/03 01:30:25 elad Exp $ */
2/*- 2/*-
3 * Copyright (c) 2006 Elad Efrat <elad@NetBSD.org> 3 * Copyright (c) 2006 Elad Efrat <elad@NetBSD.org>
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products 14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission. 15 * derived from this software without specific prior written permission.
16 * 16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29/* 29/*
30 * This file contains kauth(9) listeners needed to implement the traditional 30 * This file contains kauth(9) listeners needed to implement the traditional
31 * NetBSD superuser access restrictions. 31 * NetBSD superuser access restrictions.
32 * 32 *
33 * There are two main resources a request can be issued to: user-owned and 33 * There are two main resources a request can be issued to: user-owned and
 34 * system-owned. For the first, traditional Unix access checks are done, as 34 * system-owned. For the first, traditional Unix access checks are done, as
35 * well as superuser checks. If needed, the request context is examined before 35 * well as superuser checks. If needed, the request context is examined before
36 * a decision is made. For the latter, usually only superuser checks are done 36 * a decision is made. For the latter, usually only superuser checks are done
37 * as normal users are not allowed to access system resources. 37 * as normal users are not allowed to access system resources.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: secmodel_suser.c,v 1.17 2009/10/03 00:37:01 elad Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: secmodel_suser.c,v 1.18 2009/10/03 01:30:25 elad Exp $");
42 42
43#include <sys/types.h> 43#include <sys/types.h>
44#include <sys/param.h> 44#include <sys/param.h>
45#include <sys/kauth.h> 45#include <sys/kauth.h>
46 46
47#include <sys/mutex.h> 47#include <sys/mutex.h>
48#include <sys/mount.h> 48#include <sys/mount.h>
49#include <sys/socketvar.h> 49#include <sys/socketvar.h>
50#include <sys/sysctl.h> 50#include <sys/sysctl.h>
51#include <sys/vnode.h> 51#include <sys/vnode.h>
52#include <sys/proc.h> 52#include <sys/proc.h>
53#include <sys/uidinfo.h> 53#include <sys/uidinfo.h>
54#include <sys/module.h> 54#include <sys/module.h>
55 55
56#include <miscfs/procfs/procfs.h> 56#include <miscfs/procfs/procfs.h>
57 57
58#include <secmodel/suser/suser.h> 58#include <secmodel/suser/suser.h>
59 59
60MODULE(MODULE_CLASS_SECMODEL, suser, NULL); 60MODULE(MODULE_CLASS_SECMODEL, suser, NULL);
61 61
62static int secmodel_bsd44_curtain; 62static int secmodel_bsd44_curtain;
63/* static */ int dovfsusermount; 63/* static */ int dovfsusermount;
64 64
65static kauth_listener_t l_generic, l_system, l_process, l_network, l_machdep, 65static kauth_listener_t l_generic, l_system, l_process, l_network, l_machdep,
66 l_device, l_vnode; 66 l_device, l_vnode;
67 67
68static struct sysctllog *suser_sysctl_log; 68static struct sysctllog *suser_sysctl_log;
69 69
70void 70void
71sysctl_security_suser_setup(struct sysctllog **clog) 71sysctl_security_suser_setup(struct sysctllog **clog)
72{ 72{
73 const struct sysctlnode *rnode; 73 const struct sysctlnode *rnode;
74 74
75 sysctl_createv(clog, 0, NULL, &rnode, 75 sysctl_createv(clog, 0, NULL, &rnode,
76 CTLFLAG_PERMANENT, 76 CTLFLAG_PERMANENT,
77 CTLTYPE_NODE, "security", NULL, 77 CTLTYPE_NODE, "security", NULL,
78 NULL, 0, NULL, 0, 78 NULL, 0, NULL, 0,
79 CTL_SECURITY, CTL_EOL); 79 CTL_SECURITY, CTL_EOL);
80 80
81 sysctl_createv(clog, 0, &rnode, &rnode, 81 sysctl_createv(clog, 0, &rnode, &rnode,
82 CTLFLAG_PERMANENT, 82 CTLFLAG_PERMANENT,
83 CTLTYPE_NODE, "models", NULL, 83 CTLTYPE_NODE, "models", NULL,
84 NULL, 0, NULL, 0, 84 NULL, 0, NULL, 0,
85 CTL_CREATE, CTL_EOL); 85 CTL_CREATE, CTL_EOL);
86 86
87 sysctl_createv(clog, 0, &rnode, &rnode, 87 sysctl_createv(clog, 0, &rnode, &rnode,
88 CTLFLAG_PERMANENT, 88 CTLFLAG_PERMANENT,
89 CTLTYPE_NODE, "suser", NULL, 89 CTLTYPE_NODE, "suser", NULL,
90 NULL, 0, NULL, 0, 90 NULL, 0, NULL, 0,
91 CTL_CREATE, CTL_EOL); 91 CTL_CREATE, CTL_EOL);
92 92
93 sysctl_createv(clog, 0, &rnode, NULL, 93 sysctl_createv(clog, 0, &rnode, NULL,
94 CTLFLAG_PERMANENT, 94 CTLFLAG_PERMANENT,
95 CTLTYPE_STRING, "name", NULL, 95 CTLTYPE_STRING, "name", NULL,
96 NULL, 0, __UNCONST("Traditional NetBSD: Superuser"), 0, 96 NULL, 0, __UNCONST("Traditional NetBSD: Superuser"), 0,
97 CTL_CREATE, CTL_EOL); 97 CTL_CREATE, CTL_EOL);
98 98
99 sysctl_createv(clog, 0, &rnode, NULL, 99 sysctl_createv(clog, 0, &rnode, NULL,
100 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 100 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
101 CTLTYPE_INT, "curtain", 101 CTLTYPE_INT, "curtain",
102 SYSCTL_DESCR("Curtain information about objects to "\ 102 SYSCTL_DESCR("Curtain information about objects to "\
103 "users not owning them."), 103 "users not owning them."),
104 NULL, 0, &secmodel_bsd44_curtain, 0, 104 NULL, 0, &secmodel_bsd44_curtain, 0,
105 CTL_CREATE, CTL_EOL); 105 CTL_CREATE, CTL_EOL);
106 106
107 sysctl_createv(clog, 0, &rnode, NULL, 107 sysctl_createv(clog, 0, &rnode, NULL,
108 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 108 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
109 CTLTYPE_INT, "usermount", 109 CTLTYPE_INT, "usermount",
110 SYSCTL_DESCR("Whether unprivileged users may mount " 110 SYSCTL_DESCR("Whether unprivileged users may mount "
111 "filesystems"), 111 "filesystems"),
112 NULL, 0, &dovfsusermount, 0, 112 NULL, 0, &dovfsusermount, 0,
113 CTL_CREATE, CTL_EOL); 113 CTL_CREATE, CTL_EOL);
114 114
115 /* Compatibility: security.curtain */ 115 /* Compatibility: security.curtain */
116 sysctl_createv(clog, 0, NULL, &rnode, 116 sysctl_createv(clog, 0, NULL, &rnode,
117 CTLFLAG_PERMANENT, 117 CTLFLAG_PERMANENT,
118 CTLTYPE_NODE, "security", NULL, 118 CTLTYPE_NODE, "security", NULL,
119 NULL, 0, NULL, 0, 119 NULL, 0, NULL, 0,
120 CTL_SECURITY, CTL_EOL); 120 CTL_SECURITY, CTL_EOL);
121 121
122 sysctl_createv(clog, 0, &rnode, NULL, 122 sysctl_createv(clog, 0, &rnode, NULL,
123 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 123 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
124 CTLTYPE_INT, "curtain", 124 CTLTYPE_INT, "curtain",
125 SYSCTL_DESCR("Curtain information about objects to "\ 125 SYSCTL_DESCR("Curtain information about objects to "\
126 "users not owning them."), 126 "users not owning them."),
127 NULL, 0, &secmodel_bsd44_curtain, 0, 127 NULL, 0, &secmodel_bsd44_curtain, 0,
128 CTL_CREATE, CTL_EOL); 128 CTL_CREATE, CTL_EOL);
129 129
130 /* Compatibility: vfs.generic.usermount */ 130 /* Compatibility: vfs.generic.usermount */
131 sysctl_createv(clog, 0, NULL, NULL, 131 sysctl_createv(clog, 0, NULL, NULL,
132 CTLFLAG_PERMANENT, 132 CTLFLAG_PERMANENT,
133 CTLTYPE_NODE, "vfs", NULL, 133 CTLTYPE_NODE, "vfs", NULL,
134 NULL, 0, NULL, 0, 134 NULL, 0, NULL, 0,
135 CTL_VFS, CTL_EOL); 135 CTL_VFS, CTL_EOL);
136 136
137 sysctl_createv(clog, 0, NULL, NULL, 137 sysctl_createv(clog, 0, NULL, NULL,
138 CTLFLAG_PERMANENT, 138 CTLFLAG_PERMANENT,
139 CTLTYPE_NODE, "generic", 139 CTLTYPE_NODE, "generic",
140 SYSCTL_DESCR("Non-specific vfs related information"), 140 SYSCTL_DESCR("Non-specific vfs related information"),
141 NULL, 0, NULL, 0, 141 NULL, 0, NULL, 0,
142 CTL_VFS, VFS_GENERIC, CTL_EOL); 142 CTL_VFS, VFS_GENERIC, CTL_EOL);
143 143
144 sysctl_createv(clog, 0, NULL, NULL, 144 sysctl_createv(clog, 0, NULL, NULL,
145 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 145 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
146 CTLTYPE_INT, "usermount", 146 CTLTYPE_INT, "usermount",
147 SYSCTL_DESCR("Whether unprivileged users may mount " 147 SYSCTL_DESCR("Whether unprivileged users may mount "
148 "filesystems"), 148 "filesystems"),
149 NULL, 0, &dovfsusermount, 0, 149 NULL, 0, &dovfsusermount, 0,
150 CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL); 150 CTL_VFS, VFS_GENERIC, VFS_USERMOUNT, CTL_EOL);
151} 151}
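
For reference, the knobs created above are plain integer sysctl nodes and can be toggled from userland. A small sketch using sysctlbyname(3), assuming the node names exactly as created here (security.models.suser.curtain, plus the vfs.generic.usermount compatibility alias):

#include <sys/param.h>
#include <sys/sysctl.h>
#include <err.h>

int
main(void)
{
	int on = 1;

	/* Hide other users' objects (processes, sockets) from non-owners. */
	if (sysctlbyname("security.models.suser.curtain", NULL, NULL,
	    &on, sizeof(on)) == -1)
		err(1, "curtain");

	/*
	 * Permit unprivileged mounts; the KAUTH_SYSTEM_MOUNT listener
	 * further down still insists on MNT_NODEV and MNT_NOSUID for
	 * such mounts, and on ownership for update/unmount.
	 */
	if (sysctlbyname("vfs.generic.usermount", NULL, NULL,
	    &on, sizeof(on)) == -1)
		err(1, "usermount");

	return 0;
}

The same settings are reachable with sysctl(8), e.g. sysctl -w security.models.suser.curtain=1.
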
152 152
153void 153void
154secmodel_suser_init(void) 154secmodel_suser_init(void)
155{ 155{
156 secmodel_bsd44_curtain = 0; 156 secmodel_bsd44_curtain = 0;
157 dovfsusermount = 0; 157 dovfsusermount = 0;
158} 158}
159 159
160void 160void
161secmodel_suser_start(void) 161secmodel_suser_start(void)
162{ 162{
163 l_generic = kauth_listen_scope(KAUTH_SCOPE_GENERIC, 163 l_generic = kauth_listen_scope(KAUTH_SCOPE_GENERIC,
164 secmodel_suser_generic_cb, NULL); 164 secmodel_suser_generic_cb, NULL);
165 l_system = kauth_listen_scope(KAUTH_SCOPE_SYSTEM, 165 l_system = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
166 secmodel_suser_system_cb, NULL); 166 secmodel_suser_system_cb, NULL);
167 l_process = kauth_listen_scope(KAUTH_SCOPE_PROCESS, 167 l_process = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
168 secmodel_suser_process_cb, NULL); 168 secmodel_suser_process_cb, NULL);
169 l_network = kauth_listen_scope(KAUTH_SCOPE_NETWORK, 169 l_network = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
170 secmodel_suser_network_cb, NULL); 170 secmodel_suser_network_cb, NULL);
171 l_machdep = kauth_listen_scope(KAUTH_SCOPE_MACHDEP, 171 l_machdep = kauth_listen_scope(KAUTH_SCOPE_MACHDEP,
172 secmodel_suser_machdep_cb, NULL); 172 secmodel_suser_machdep_cb, NULL);
173 l_device = kauth_listen_scope(KAUTH_SCOPE_DEVICE, 173 l_device = kauth_listen_scope(KAUTH_SCOPE_DEVICE,
174 secmodel_suser_device_cb, NULL); 174 secmodel_suser_device_cb, NULL);
175 l_vnode = kauth_listen_scope(KAUTH_SCOPE_VNODE, 175 l_vnode = kauth_listen_scope(KAUTH_SCOPE_VNODE,
176 secmodel_suser_vnode_cb, NULL); 176 secmodel_suser_vnode_cb, NULL);
177} 177}
178 178
179void 179void
180secmodel_suser_stop(void) 180secmodel_suser_stop(void)
181{ 181{
182 kauth_unlisten_scope(l_generic); 182 kauth_unlisten_scope(l_generic);
183 kauth_unlisten_scope(l_system); 183 kauth_unlisten_scope(l_system);
184 kauth_unlisten_scope(l_process); 184 kauth_unlisten_scope(l_process);
185 kauth_unlisten_scope(l_network); 185 kauth_unlisten_scope(l_network);
186 kauth_unlisten_scope(l_machdep); 186 kauth_unlisten_scope(l_machdep);
187 kauth_unlisten_scope(l_device); 187 kauth_unlisten_scope(l_device);
188 kauth_unlisten_scope(l_vnode); 188 kauth_unlisten_scope(l_vnode);
189} 189}
190 190
191static int 191static int
192suser_modcmd(modcmd_t cmd, void *arg) 192suser_modcmd(modcmd_t cmd, void *arg)
193{ 193{
194 int error = 0; 194 int error = 0;
195 195
196 switch (cmd) { 196 switch (cmd) {
197 case MODULE_CMD_INIT: 197 case MODULE_CMD_INIT:
198 secmodel_suser_init(); 198 secmodel_suser_init();
199 secmodel_suser_start(); 199 secmodel_suser_start();
200 sysctl_security_suser_setup(&suser_sysctl_log); 200 sysctl_security_suser_setup(&suser_sysctl_log);
201 break; 201 break;
202 202
203 case MODULE_CMD_FINI: 203 case MODULE_CMD_FINI:
204 sysctl_teardown(&suser_sysctl_log); 204 sysctl_teardown(&suser_sysctl_log);
205 secmodel_suser_stop(); 205 secmodel_suser_stop();
206 break; 206 break;
207 207
208 case MODULE_CMD_AUTOUNLOAD: 208 case MODULE_CMD_AUTOUNLOAD:
209 error = EPERM; 209 error = EPERM;
210 break; 210 break;
211 211
212 default: 212 default:
213 error = ENOTTY; 213 error = ENOTTY;
214 break; 214 break;
215 } 215 }
216 216
217 return (error); 217 return (error);
218} 218}
219 219
220/* 220/*
221 * kauth(9) listener 221 * kauth(9) listener
222 * 222 *
223 * Security model: Traditional NetBSD 223 * Security model: Traditional NetBSD
224 * Scope: Generic 224 * Scope: Generic
225 * Responsibility: Superuser access 225 * Responsibility: Superuser access
226 */ 226 */
227int 227int
228secmodel_suser_generic_cb(kauth_cred_t cred, kauth_action_t action, 228secmodel_suser_generic_cb(kauth_cred_t cred, kauth_action_t action,
229 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 229 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
230{ 230{
231 bool isroot; 231 bool isroot;
232 int result; 232 int result;
233 233
234 isroot = (kauth_cred_geteuid(cred) == 0); 234 isroot = (kauth_cred_geteuid(cred) == 0);
235 result = KAUTH_RESULT_DEFER; 235 result = KAUTH_RESULT_DEFER;
236 236
237 switch (action) { 237 switch (action) {
238 case KAUTH_GENERIC_ISSUSER: 238 case KAUTH_GENERIC_ISSUSER:
239 if (isroot) 239 if (isroot)
240 result = KAUTH_RESULT_ALLOW; 240 result = KAUTH_RESULT_ALLOW;
241 break; 241 break;
242 242
243 case KAUTH_GENERIC_CANSEE:  243 case KAUTH_GENERIC_CANSEE:
244 if (!secmodel_bsd44_curtain) 244 if (!secmodel_bsd44_curtain)
245 result = KAUTH_RESULT_ALLOW; 245 result = KAUTH_RESULT_ALLOW;
246 else if (isroot || kauth_cred_uidmatch(cred, arg0)) 246 else if (isroot || kauth_cred_uidmatch(cred, arg0))
247 result = KAUTH_RESULT_ALLOW; 247 result = KAUTH_RESULT_ALLOW;
248 248
249 break; 249 break;
250 250
251 default: 251 default:
252 break; 252 break;
253 } 253 }
254 254
255 return (result); 255 return (result);
256} 256}
257 257
258/* 258/*
259 * kauth(9) listener 259 * kauth(9) listener
260 * 260 *
261 * Security model: Traditional NetBSD 261 * Security model: Traditional NetBSD
262 * Scope: System 262 * Scope: System
263 * Responsibility: Superuser access 263 * Responsibility: Superuser access
264 */ 264 */
265int 265int
266secmodel_suser_system_cb(kauth_cred_t cred, kauth_action_t action, 266secmodel_suser_system_cb(kauth_cred_t cred, kauth_action_t action,
267 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 267 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
268{ 268{
269 bool isroot; 269 bool isroot;
270 int result; 270 int result;
271 enum kauth_system_req req; 271 enum kauth_system_req req;
272 272
273 isroot = (kauth_cred_geteuid(cred) == 0); 273 isroot = (kauth_cred_geteuid(cred) == 0);
274 result = KAUTH_RESULT_DEFER; 274 result = KAUTH_RESULT_DEFER;
275 req = (enum kauth_system_req)arg0; 275 req = (enum kauth_system_req)arg0;
276 276
277 switch (action) { 277 switch (action) {
278 case KAUTH_SYSTEM_CPU: 278 case KAUTH_SYSTEM_CPU:
279 switch (req) { 279 switch (req) {
280 case KAUTH_REQ_SYSTEM_CPU_SETSTATE: 280 case KAUTH_REQ_SYSTEM_CPU_SETSTATE:
281 if (isroot) 281 if (isroot)
282 result = KAUTH_RESULT_ALLOW; 282 result = KAUTH_RESULT_ALLOW;
283 283
284 break; 284 break;
285 285
286 default: 286 default:
287 break; 287 break;
288 } 288 }
289 289
290 break; 290 break;
291 291
292 case KAUTH_SYSTEM_FS_QUOTA: 292 case KAUTH_SYSTEM_FS_QUOTA:
293 switch (req) { 293 switch (req) {
294 case KAUTH_REQ_SYSTEM_FS_QUOTA_GET: 294 case KAUTH_REQ_SYSTEM_FS_QUOTA_GET:
295 case KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF: 295 case KAUTH_REQ_SYSTEM_FS_QUOTA_ONOFF:
296 case KAUTH_REQ_SYSTEM_FS_QUOTA_MANAGE: 296 case KAUTH_REQ_SYSTEM_FS_QUOTA_MANAGE:
297 case KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT: 297 case KAUTH_REQ_SYSTEM_FS_QUOTA_NOLIMIT:
298 if (isroot) 298 if (isroot)
299 result = KAUTH_RESULT_ALLOW; 299 result = KAUTH_RESULT_ALLOW;
300 break; 300 break;
301 301
302 default: 302 default:
303 break; 303 break;
304 } 304 }
305 305
306 break; 306 break;
307 307
308 case KAUTH_SYSTEM_FS_RESERVEDSPACE: 308 case KAUTH_SYSTEM_FS_RESERVEDSPACE:
309 if (isroot) 309 if (isroot)
310 result = KAUTH_RESULT_ALLOW; 310 result = KAUTH_RESULT_ALLOW;
311 break; 311 break;
312 312
313 case KAUTH_SYSTEM_MOUNT: 313 case KAUTH_SYSTEM_MOUNT:
314 switch (req) { 314 switch (req) {
315 case KAUTH_REQ_SYSTEM_MOUNT_GET: 315 case KAUTH_REQ_SYSTEM_MOUNT_GET:
316 result = KAUTH_RESULT_ALLOW; 316 result = KAUTH_RESULT_ALLOW;
317 break; 317 break;
318 318
319 case KAUTH_REQ_SYSTEM_MOUNT_NEW: 319 case KAUTH_REQ_SYSTEM_MOUNT_NEW:
320 if (isroot) 320 if (isroot)
321 result = KAUTH_RESULT_ALLOW; 321 result = KAUTH_RESULT_ALLOW;
322 else if (dovfsusermount) { 322 else if (dovfsusermount) {
323 struct vnode *vp = arg1; 323 struct vnode *vp = arg1;
324 u_long flags = (u_long)arg2; 324 u_long flags = (u_long)arg2;
325 325
326 if (!(flags & MNT_NODEV) || 326 if (!(flags & MNT_NODEV) ||
327 !(flags & MNT_NOSUID)) 327 !(flags & MNT_NOSUID))
328 break; 328 break;
329 329
330 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) && 330 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) &&
331 !(flags & MNT_NOEXEC)) 331 !(flags & MNT_NOEXEC))
332 break; 332 break;
333 333
334 result = KAUTH_RESULT_ALLOW; 334 result = KAUTH_RESULT_ALLOW;
335 } 335 }
336 336
337 break; 337 break;
338 338
339 case KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT: 339 case KAUTH_REQ_SYSTEM_MOUNT_UNMOUNT:
340 if (isroot) 340 if (isroot)
341 result = KAUTH_RESULT_ALLOW; 341 result = KAUTH_RESULT_ALLOW;
342 else { 342 else {
343 struct mount *mp = arg1; 343 struct mount *mp = arg1;
344 344
345 if (mp->mnt_stat.f_owner == 345 if (mp->mnt_stat.f_owner ==
346 kauth_cred_geteuid(cred)) 346 kauth_cred_geteuid(cred))
347 result = KAUTH_RESULT_ALLOW; 347 result = KAUTH_RESULT_ALLOW;
348 } 348 }
349 349
350 break; 350 break;
351 351
352 case KAUTH_REQ_SYSTEM_MOUNT_UPDATE: 352 case KAUTH_REQ_SYSTEM_MOUNT_UPDATE:
353 if (isroot) 353 if (isroot)
354 result = KAUTH_RESULT_ALLOW; 354 result = KAUTH_RESULT_ALLOW;
355 else if (dovfsusermount) { 355 else if (dovfsusermount) {
356 struct mount *mp = arg1; 356 struct mount *mp = arg1;
357 u_long flags = (u_long)arg2; 357 u_long flags = (u_long)arg2;
358 358
359 /* No exporting for non-root. */ 359 /* No exporting for non-root. */
360 if (flags & MNT_EXPORTED) 360 if (flags & MNT_EXPORTED)
361 break; 361 break;
362 362
363 if (!(flags & MNT_NODEV) || 363 if (!(flags & MNT_NODEV) ||
364 !(flags & MNT_NOSUID)) 364 !(flags & MNT_NOSUID))
365 break; 365 break;
366 366
367 /* 367 /*
 368 * Only the superuser, or the user that did the mount, 368 * Only the superuser, or the user that did the mount,
369 * can update. 369 * can update.
370 */ 370 */
371 if (mp->mnt_stat.f_owner != 371 if (mp->mnt_stat.f_owner !=
372 kauth_cred_geteuid(cred)) 372 kauth_cred_geteuid(cred))
373 break; 373 break;
374 374
375 /* Retain 'noexec'. */ 375 /* Retain 'noexec'. */
376 if ((mp->mnt_flag & MNT_NOEXEC) && 376 if ((mp->mnt_flag & MNT_NOEXEC) &&
377 !(flags & MNT_NOEXEC)) 377 !(flags & MNT_NOEXEC))
378 break; 378 break;
379 379
380 result = KAUTH_RESULT_ALLOW; 380 result = KAUTH_RESULT_ALLOW;
381 } 381 }
382 382
383 break; 383 break;
384 384
385 default: 385 default:
386 break; 386 break;
387 } 387 }
388 388
389 break; 389 break;
390 390
391 case KAUTH_SYSTEM_PSET: 391 case KAUTH_SYSTEM_PSET:
392 switch (req) { 392 switch (req) {
393 case KAUTH_REQ_SYSTEM_PSET_ASSIGN: 393 case KAUTH_REQ_SYSTEM_PSET_ASSIGN:
394 case KAUTH_REQ_SYSTEM_PSET_BIND: 394 case KAUTH_REQ_SYSTEM_PSET_BIND:
395 case KAUTH_REQ_SYSTEM_PSET_CREATE: 395 case KAUTH_REQ_SYSTEM_PSET_CREATE:
396 case KAUTH_REQ_SYSTEM_PSET_DESTROY: 396 case KAUTH_REQ_SYSTEM_PSET_DESTROY:
397 if (isroot) 397 if (isroot)
398 result = KAUTH_RESULT_ALLOW; 398 result = KAUTH_RESULT_ALLOW;
399 399
400 break; 400 break;
401 401
402 default: 402 default:
403 break; 403 break;
404 } 404 }
405 405
406 break; 406 break;
407 407
408 case KAUTH_SYSTEM_TIME: 408 case KAUTH_SYSTEM_TIME:
409 switch (req) { 409 switch (req) {
410 case KAUTH_REQ_SYSTEM_TIME_ADJTIME: 410 case KAUTH_REQ_SYSTEM_TIME_ADJTIME:
411 case KAUTH_REQ_SYSTEM_TIME_NTPADJTIME: 411 case KAUTH_REQ_SYSTEM_TIME_NTPADJTIME:
412 case KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS: 412 case KAUTH_REQ_SYSTEM_TIME_TIMECOUNTERS:
413 if (isroot) 413 if (isroot)
414 result = KAUTH_RESULT_ALLOW; 414 result = KAUTH_RESULT_ALLOW;
415 break; 415 break;
416 416
417 case KAUTH_REQ_SYSTEM_TIME_SYSTEM: { 417 case KAUTH_REQ_SYSTEM_TIME_SYSTEM: {
418 bool device_context = (bool)arg3; 418 bool device_context = (bool)arg3;
419 419
420 if (device_context || isroot) 420 if (device_context || isroot)
421 result = KAUTH_RESULT_ALLOW; 421 result = KAUTH_RESULT_ALLOW;
422 422
423 break; 423 break;
424 } 424 }
425 425
426 case KAUTH_REQ_SYSTEM_TIME_RTCOFFSET: 426 case KAUTH_REQ_SYSTEM_TIME_RTCOFFSET:
427 if (isroot) 427 if (isroot)
428 result = KAUTH_RESULT_ALLOW; 428 result = KAUTH_RESULT_ALLOW;
429 break; 429 break;
430 430
431 default: 431 default:
432 break; 432 break;
433 } 433 }
434 break; 434 break;
435 435
436 case KAUTH_SYSTEM_SYSCTL: 436 case KAUTH_SYSTEM_SYSCTL:
437 switch (req) { 437 switch (req) {
438 case KAUTH_REQ_SYSTEM_SYSCTL_ADD: 438 case KAUTH_REQ_SYSTEM_SYSCTL_ADD:
439 case KAUTH_REQ_SYSTEM_SYSCTL_DELETE: 439 case KAUTH_REQ_SYSTEM_SYSCTL_DELETE:
440 case KAUTH_REQ_SYSTEM_SYSCTL_DESC: 440 case KAUTH_REQ_SYSTEM_SYSCTL_DESC:
441 case KAUTH_REQ_SYSTEM_SYSCTL_MODIFY: 441 case KAUTH_REQ_SYSTEM_SYSCTL_MODIFY:
442 case KAUTH_REQ_SYSTEM_SYSCTL_PRVT: 442 case KAUTH_REQ_SYSTEM_SYSCTL_PRVT:
443 if (isroot) 443 if (isroot)
444 result = KAUTH_RESULT_ALLOW; 444 result = KAUTH_RESULT_ALLOW;
445 break; 445 break;
446 446
447 default: 447 default:
448 break; 448 break;
449 } 449 }
450 450
451 break; 451 break;
452 452
453 case KAUTH_SYSTEM_SWAPCTL: 453 case KAUTH_SYSTEM_SWAPCTL:
454 case KAUTH_SYSTEM_ACCOUNTING: 454 case KAUTH_SYSTEM_ACCOUNTING:
455 case KAUTH_SYSTEM_REBOOT: 455 case KAUTH_SYSTEM_REBOOT:
456 case KAUTH_SYSTEM_CHROOT: 456 case KAUTH_SYSTEM_CHROOT:
457 case KAUTH_SYSTEM_FILEHANDLE: 457 case KAUTH_SYSTEM_FILEHANDLE:
458 case KAUTH_SYSTEM_MKNOD: 458 case KAUTH_SYSTEM_MKNOD:
459 if (isroot) 459 if (isroot)
460 result = KAUTH_RESULT_ALLOW; 460 result = KAUTH_RESULT_ALLOW;
461 break; 461 break;
462 462
463 case KAUTH_SYSTEM_CHSYSFLAGS: 463 case KAUTH_SYSTEM_CHSYSFLAGS:
464 /* 464 /*
465 * Needs to be checked in conjunction with the immutable and 465 * Needs to be checked in conjunction with the immutable and
466 * append-only flags (usually). Should be handled differently. 466 * append-only flags (usually). Should be handled differently.
467 * Infects ufs, ext2fs, tmpfs, and rump. 467 * Infects ufs, ext2fs, tmpfs, and rump.
468 */ 468 */
469 if (isroot) 469 if (isroot)
470 result = KAUTH_RESULT_ALLOW; 470 result = KAUTH_RESULT_ALLOW;
471 471
472 break; 472 break;
473 473
474 case KAUTH_SYSTEM_SETIDCORE: 474 case KAUTH_SYSTEM_SETIDCORE:
475 if (isroot) 475 if (isroot)
476 result = KAUTH_RESULT_ALLOW; 476 result = KAUTH_RESULT_ALLOW;
477 477
478 break; 478 break;
479 479
480 case KAUTH_SYSTEM_MODULE: 480 case KAUTH_SYSTEM_MODULE:
481 if (isroot) 481 if (isroot)
482 result = KAUTH_RESULT_ALLOW; 482 result = KAUTH_RESULT_ALLOW;
483 483
484 break; 484 break;
485 485
486 default: 486 default:
487 break; 487 break;
488 } 488 }
489 489
490 return (result); 490 return (result);
491} 491}
492 492
493/* 493/*
494 * kauth(9) listener 494 * kauth(9) listener
495 * 495 *
496 * Security model: Traditional NetBSD 496 * Security model: Traditional NetBSD
497 * Scope: Process 497 * Scope: Process
498 * Responsibility: Superuser access 498 * Responsibility: Superuser access
499 */ 499 */
500int 500int
501secmodel_suser_process_cb(kauth_cred_t cred, kauth_action_t action, 501secmodel_suser_process_cb(kauth_cred_t cred, kauth_action_t action,
502 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 502 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
503{ 503{
504 struct proc *p; 504 struct proc *p;
505 bool isroot; 505 bool isroot;
506 int result; 506 int result;
507 507
508 isroot = (kauth_cred_geteuid(cred) == 0); 508 isroot = (kauth_cred_geteuid(cred) == 0);
509 result = KAUTH_RESULT_DEFER; 509 result = KAUTH_RESULT_DEFER;
510 p = arg0; 510 p = arg0;
511 511
512 switch (action) { 512 switch (action) {
513 case KAUTH_PROCESS_SIGNAL: 513 case KAUTH_PROCESS_SIGNAL:
514 if (isroot) 514 if (isroot)
515 result = KAUTH_RESULT_ALLOW; 515 result = KAUTH_RESULT_ALLOW;
516 516
517 break; 517 break;
518 518
519 case KAUTH_PROCESS_CANSEE: { 519 case KAUTH_PROCESS_CANSEE: {
520 unsigned long req; 520 unsigned long req;
521 521
522 req = (unsigned long)arg1; 522 req = (unsigned long)arg1;
523 523
524 switch (req) { 524 switch (req) {
525 case KAUTH_REQ_PROCESS_CANSEE_ARGS: 525 case KAUTH_REQ_PROCESS_CANSEE_ARGS:
526 case KAUTH_REQ_PROCESS_CANSEE_ENTRY: 526 case KAUTH_REQ_PROCESS_CANSEE_ENTRY:
527 case KAUTH_REQ_PROCESS_CANSEE_OPENFILES: 527 case KAUTH_REQ_PROCESS_CANSEE_OPENFILES:
528 if (!secmodel_bsd44_curtain) 528 if (!secmodel_bsd44_curtain)
529 result = KAUTH_RESULT_ALLOW; 529 result = KAUTH_RESULT_ALLOW;
530 else if (isroot || kauth_cred_uidmatch(cred, p->p_cred)) 530 else if (isroot || kauth_cred_uidmatch(cred, p->p_cred))
531 result = KAUTH_RESULT_ALLOW; 531 result = KAUTH_RESULT_ALLOW;
532 break; 532 break;
533 533
534 case KAUTH_REQ_PROCESS_CANSEE_ENV: 534 case KAUTH_REQ_PROCESS_CANSEE_ENV:
535 if (!isroot && 535 if (!isroot &&
536 (kauth_cred_getuid(cred) != 536 (kauth_cred_getuid(cred) !=
537 kauth_cred_getuid(p->p_cred) || 537 kauth_cred_getuid(p->p_cred) ||
538 kauth_cred_getuid(cred) != 538 kauth_cred_getuid(cred) !=
539 kauth_cred_getsvuid(p->p_cred))) 539 kauth_cred_getsvuid(p->p_cred)))
540 break; 540 break;
541 else 541 else
542 result = KAUTH_RESULT_ALLOW; 542 result = KAUTH_RESULT_ALLOW;
543 543
544 break; 544 break;
545 545
546 default: 546 default:
547 break; 547 break;
548 } 548 }
549 549
550 break; 550 break;
551 } 551 }
552 552
553 case KAUTH_PROCESS_KTRACE: 553 case KAUTH_PROCESS_KTRACE:
554 if (isroot) 554 if (isroot)
555 result = KAUTH_RESULT_ALLOW; 555 result = KAUTH_RESULT_ALLOW;
556 556
557 break; 557 break;
558 558
559 case KAUTH_PROCESS_PROCFS: 559 case KAUTH_PROCESS_PROCFS:
560 if (isroot) 560 if (isroot)
561 result = KAUTH_RESULT_ALLOW; 561 result = KAUTH_RESULT_ALLOW;
562 562
563 break; 563 break;
564 564
565 case KAUTH_PROCESS_PTRACE: 565 case KAUTH_PROCESS_PTRACE:
566 if (isroot) 566 if (isroot)
567 result = KAUTH_RESULT_ALLOW; 567 result = KAUTH_RESULT_ALLOW;
568 568
569 break; 569 break;
570 570
571 case KAUTH_PROCESS_CORENAME: 571 case KAUTH_PROCESS_CORENAME:
572 if (isroot || proc_uidmatch(cred, p->p_cred) == 0) 572 if (isroot || proc_uidmatch(cred, p->p_cred) == 0)
573 result = KAUTH_RESULT_ALLOW; 573 result = KAUTH_RESULT_ALLOW;
574 574
575 break; 575 break;
576 576
577 case KAUTH_PROCESS_FORK: { 577 case KAUTH_PROCESS_FORK: {
578 int lnprocs = (int)(unsigned long)arg2; 578 int lnprocs = (int)(unsigned long)arg2;
579 579
580 /* 580 /*
581 * Don't allow a nonprivileged user to use the last few 581 * Don't allow a nonprivileged user to use the last few
582 * processes. The variable lnprocs is the current number of 582 * processes. The variable lnprocs is the current number of
583 * processes, maxproc is the limit. 583 * processes, maxproc is the limit.
584 */ 584 */
585 if (__predict_false((lnprocs >= maxproc - 5) && !isroot)) 585 if (__predict_false((lnprocs >= maxproc - 5) && !isroot))
586 break; 586 break;
587 else 587 else
588 result = KAUTH_RESULT_ALLOW; 588 result = KAUTH_RESULT_ALLOW;
589 589
590 break; 590 break;
591 } 591 }
592 592
593 case KAUTH_PROCESS_KEVENT_FILTER: 593 case KAUTH_PROCESS_KEVENT_FILTER:
594 if (isroot) 594 if (isroot)
595 result = KAUTH_RESULT_ALLOW; 595 result = KAUTH_RESULT_ALLOW;
596 596
597 break; 597 break;
598 598
599 case KAUTH_PROCESS_NICE: 599 case KAUTH_PROCESS_NICE:
600 if (isroot) 600 if (isroot)
601 result = KAUTH_RESULT_ALLOW; 601 result = KAUTH_RESULT_ALLOW;
602 602
603 break; 603 break;
604 604
605 case KAUTH_PROCESS_RLIMIT: { 605 case KAUTH_PROCESS_RLIMIT: {
606 enum kauth_process_req req; 606 enum kauth_process_req req;
607 607
608 req = (enum kauth_process_req)(unsigned long)arg1; 608 req = (enum kauth_process_req)(unsigned long)arg1;
609 609
610 switch (req) { 610 switch (req) {
611 case KAUTH_REQ_PROCESS_RLIMIT_SET: 611 case KAUTH_REQ_PROCESS_RLIMIT_SET:
612 case KAUTH_REQ_PROCESS_RLIMIT_GET: 612 case KAUTH_REQ_PROCESS_RLIMIT_GET:
613 if (isroot) 613 if (isroot)
614 result = KAUTH_RESULT_ALLOW; 614 result = KAUTH_RESULT_ALLOW;
615 615
616 break; 616 break;
617 617
618 default: 618 default:
619 break; 619 break;
620 } 620 }
621 621
622 break; 622 break;
623 } 623 }
624 624
625 case KAUTH_PROCESS_SCHEDULER_GETPARAM: 625 case KAUTH_PROCESS_SCHEDULER_GETPARAM:
626 if (isroot || kauth_cred_uidmatch(cred, p->p_cred)) 
627 result = KAUTH_RESULT_ALLOW; 
628 
629 break; 
630 
631 case KAUTH_PROCESS_SCHEDULER_SETPARAM: 626 case KAUTH_PROCESS_SCHEDULER_SETPARAM:
632 if (isroot) 
633 result = KAUTH_RESULT_ALLOW; 
634 else if (kauth_cred_uidmatch(cred, p->p_cred)) { 
635 struct lwp *l; 
636 int policy; 
637 pri_t priority; 
638 
639 l = arg1; 
640 policy = (int)(unsigned long)arg2; 
641 priority = (pri_t)(unsigned long)arg3; 
642 
643 if ((policy == l->l_class || 
644 (policy != SCHED_FIFO && policy != SCHED_RR)) && 
645 priority <= l->l_priority) 
646 result = KAUTH_RESULT_ALLOW; 
647 } 
648 
649 break; 
650 
651 case KAUTH_PROCESS_SCHEDULER_GETAFFINITY: 
652 result = KAUTH_RESULT_ALLOW; 
653 
654 break; 
655 
656 case KAUTH_PROCESS_SCHEDULER_SETAFFINITY: 627 case KAUTH_PROCESS_SCHEDULER_SETAFFINITY:
657 if (isroot) 628 if (isroot)
658 result = KAUTH_RESULT_ALLOW; 629 result = KAUTH_RESULT_ALLOW;
659 630
660 break; 631 break;
661 632
662 case KAUTH_PROCESS_SETID: 633 case KAUTH_PROCESS_SETID:
663 if (isroot) 634 if (isroot)
664 result = KAUTH_RESULT_ALLOW; 635 result = KAUTH_RESULT_ALLOW;
665 break; 636 break;
666 637
667 case KAUTH_PROCESS_STOPFLAG: 638 case KAUTH_PROCESS_STOPFLAG:
668 if (isroot || proc_uidmatch(cred, p->p_cred) == 0) { 639 if (isroot || proc_uidmatch(cred, p->p_cred) == 0) {
669 result = KAUTH_RESULT_ALLOW; 640 result = KAUTH_RESULT_ALLOW;
670 break; 641 break;
671 } 642 }
672 break; 643 break;
673 644
674 default: 645 default:
675 break; 646 break;
676 } 647 }
677 648
678 return (result); 649 return (result);
679} 650}
680 651
681/* 652/*
682 * kauth(9) listener 653 * kauth(9) listener
683 * 654 *
684 * Security model: Traditional NetBSD 655 * Security model: Traditional NetBSD
685 * Scope: Network 656 * Scope: Network
686 * Responsibility: Superuser access 657 * Responsibility: Superuser access
687 */ 658 */
688int 659int
689secmodel_suser_network_cb(kauth_cred_t cred, kauth_action_t action, 660secmodel_suser_network_cb(kauth_cred_t cred, kauth_action_t action,
690 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 661 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
691{ 662{
692 bool isroot; 663 bool isroot;
693 int result; 664 int result;
694 enum kauth_network_req req; 665 enum kauth_network_req req;
695 666
696 isroot = (kauth_cred_geteuid(cred) == 0); 667 isroot = (kauth_cred_geteuid(cred) == 0);
697 result = KAUTH_RESULT_DEFER; 668 result = KAUTH_RESULT_DEFER;
698 req = (enum kauth_network_req)arg0; 669 req = (enum kauth_network_req)arg0;
699 670
700 switch (action) { 671 switch (action) {
701 case KAUTH_NETWORK_ALTQ: 672 case KAUTH_NETWORK_ALTQ:
702 switch (req) { 673 switch (req) {
703 case KAUTH_REQ_NETWORK_ALTQ_AFMAP: 674 case KAUTH_REQ_NETWORK_ALTQ_AFMAP:
704 case KAUTH_REQ_NETWORK_ALTQ_BLUE: 675 case KAUTH_REQ_NETWORK_ALTQ_BLUE:
705 case KAUTH_REQ_NETWORK_ALTQ_CBQ: 676 case KAUTH_REQ_NETWORK_ALTQ_CBQ:
706 case KAUTH_REQ_NETWORK_ALTQ_CDNR: 677 case KAUTH_REQ_NETWORK_ALTQ_CDNR:
707 case KAUTH_REQ_NETWORK_ALTQ_CONF: 678 case KAUTH_REQ_NETWORK_ALTQ_CONF:
708 case KAUTH_REQ_NETWORK_ALTQ_FIFOQ: 679 case KAUTH_REQ_NETWORK_ALTQ_FIFOQ:
709 case KAUTH_REQ_NETWORK_ALTQ_HFSC: 680 case KAUTH_REQ_NETWORK_ALTQ_HFSC:
710 case KAUTH_REQ_NETWORK_ALTQ_JOBS: 681 case KAUTH_REQ_NETWORK_ALTQ_JOBS:
711 case KAUTH_REQ_NETWORK_ALTQ_PRIQ: 682 case KAUTH_REQ_NETWORK_ALTQ_PRIQ:
712 case KAUTH_REQ_NETWORK_ALTQ_RED: 683 case KAUTH_REQ_NETWORK_ALTQ_RED:
713 case KAUTH_REQ_NETWORK_ALTQ_RIO: 684 case KAUTH_REQ_NETWORK_ALTQ_RIO:
714 case KAUTH_REQ_NETWORK_ALTQ_WFQ: 685 case KAUTH_REQ_NETWORK_ALTQ_WFQ:
715 if (isroot) 686 if (isroot)
716 result = KAUTH_RESULT_ALLOW; 687 result = KAUTH_RESULT_ALLOW;
717 break; 688 break;
718 689
719 default: 690 default:
720 break; 691 break;
721 } 692 }
722 693
723 break; 694 break;
724 695
725 case KAUTH_NETWORK_BIND: 696 case KAUTH_NETWORK_BIND:
726 switch (req) { 697 switch (req) {
727 case KAUTH_REQ_NETWORK_BIND_PORT: 698 case KAUTH_REQ_NETWORK_BIND_PORT:
728 result = KAUTH_RESULT_ALLOW; 699 result = KAUTH_RESULT_ALLOW;
729 break; 700 break;
730 701
731 case KAUTH_REQ_NETWORK_BIND_PRIVPORT: 702 case KAUTH_REQ_NETWORK_BIND_PRIVPORT:
732 if (isroot) 703 if (isroot)
733 result = KAUTH_RESULT_ALLOW; 704 result = KAUTH_RESULT_ALLOW;
734 break; 705 break;
735 706
736 default: 707 default:
737 break; 708 break;
738 } 709 }
739 break; 710 break;
740 711
741 case KAUTH_NETWORK_FORWSRCRT: 712 case KAUTH_NETWORK_FORWSRCRT:
742 if (isroot) 713 if (isroot)
743 result = KAUTH_RESULT_ALLOW; 714 result = KAUTH_RESULT_ALLOW;
744 715
745 break; 716 break;
746 717
747 case KAUTH_NETWORK_INTERFACE: 718 case KAUTH_NETWORK_INTERFACE:
748 switch (req) { 719 switch (req) {
749 case KAUTH_REQ_NETWORK_INTERFACE_GET: 720 case KAUTH_REQ_NETWORK_INTERFACE_GET:
750 case KAUTH_REQ_NETWORK_INTERFACE_SET: 721 case KAUTH_REQ_NETWORK_INTERFACE_SET:
751 result = KAUTH_RESULT_ALLOW; 722 result = KAUTH_RESULT_ALLOW;
752 break; 723 break;
753 724
754 case KAUTH_REQ_NETWORK_INTERFACE_GETPRIV: 725 case KAUTH_REQ_NETWORK_INTERFACE_GETPRIV:
755 case KAUTH_REQ_NETWORK_INTERFACE_SETPRIV: 726 case KAUTH_REQ_NETWORK_INTERFACE_SETPRIV:
756 if (isroot) 727 if (isroot)
757 result = KAUTH_RESULT_ALLOW; 728 result = KAUTH_RESULT_ALLOW;
758 break; 729 break;
759 730
760 default: 731 default:
761 break; 732 break;
762 } 733 }
763 break; 734 break;
764 735
765 case KAUTH_NETWORK_INTERFACE_PPP: 736 case KAUTH_NETWORK_INTERFACE_PPP:
766 switch (req) { 737 switch (req) {
767 case KAUTH_REQ_NETWORK_INTERFACE_PPP_ADD: 738 case KAUTH_REQ_NETWORK_INTERFACE_PPP_ADD:
768 if (isroot) 739 if (isroot)
769 result = KAUTH_RESULT_ALLOW; 740 result = KAUTH_RESULT_ALLOW;
770 break; 741 break;
771 742
772 default: 743 default:
773 break; 744 break;
774 } 745 }
775 746
776 break; 747 break;
777 748
778 case KAUTH_NETWORK_INTERFACE_SLIP: 749 case KAUTH_NETWORK_INTERFACE_SLIP:
779 switch (req) { 750 switch (req) {
780 case KAUTH_REQ_NETWORK_INTERFACE_SLIP_ADD: 751 case KAUTH_REQ_NETWORK_INTERFACE_SLIP_ADD:
781 if (isroot) 752 if (isroot)
782 result = KAUTH_RESULT_ALLOW; 753 result = KAUTH_RESULT_ALLOW;
783 break; 754 break;
784 755
785 default: 756 default:
786 break; 757 break;
787 } 758 }
788 759
789 break; 760 break;
790 761
791 case KAUTH_NETWORK_INTERFACE_STRIP: 762 case KAUTH_NETWORK_INTERFACE_STRIP:
792 switch (req) { 763 switch (req) {
793 case KAUTH_REQ_NETWORK_INTERFACE_STRIP_ADD: 764 case KAUTH_REQ_NETWORK_INTERFACE_STRIP_ADD:
794 if (isroot) 765 if (isroot)
795 result = KAUTH_RESULT_ALLOW; 766 result = KAUTH_RESULT_ALLOW;
796 break; 767 break;
797 768
798 default: 769 default:
799 break; 770 break;
800 } 771 }
801 772
802 break; 773 break;
803 774
804 case KAUTH_NETWORK_INTERFACE_TUN: 775 case KAUTH_NETWORK_INTERFACE_TUN:
805 switch (req) { 776 switch (req) {
806 case KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD: 777 case KAUTH_REQ_NETWORK_INTERFACE_TUN_ADD:
807 if (isroot) 778 if (isroot)
808 result = KAUTH_RESULT_ALLOW; 779 result = KAUTH_RESULT_ALLOW;
809 break; 780 break;
810 781
811 default: 782 default:
812 break; 783 break;
813 } 784 }
814 785
815 break; 786 break;
816 787
817 case KAUTH_NETWORK_NFS: 788 case KAUTH_NETWORK_NFS:
818 switch (req) { 789 switch (req) {
819 case KAUTH_REQ_NETWORK_NFS_EXPORT: 790 case KAUTH_REQ_NETWORK_NFS_EXPORT:
820 case KAUTH_REQ_NETWORK_NFS_SVC: 791 case KAUTH_REQ_NETWORK_NFS_SVC:
821 if (isroot) 792 if (isroot)
822 result = KAUTH_RESULT_ALLOW; 793 result = KAUTH_RESULT_ALLOW;
823 794
824 break; 795 break;
825 796
826 default: 797 default:
827 break; 798 break;
828 } 799 }
829 break; 800 break;
830 801
831 case KAUTH_NETWORK_ROUTE: 802 case KAUTH_NETWORK_ROUTE:
832 if (isroot) 803 if (isroot)
833 result = KAUTH_RESULT_ALLOW; 804 result = KAUTH_RESULT_ALLOW;
834 805
835 break; 806 break;
836 807
837 case KAUTH_NETWORK_SOCKET: 808 case KAUTH_NETWORK_SOCKET:
838 switch (req) { 809 switch (req) {
839 case KAUTH_REQ_NETWORK_SOCKET_DROP: 810 case KAUTH_REQ_NETWORK_SOCKET_DROP:
840 case KAUTH_REQ_NETWORK_SOCKET_OPEN: 811 case KAUTH_REQ_NETWORK_SOCKET_OPEN:
841 case KAUTH_REQ_NETWORK_SOCKET_RAWSOCK: 812 case KAUTH_REQ_NETWORK_SOCKET_RAWSOCK:
842 case KAUTH_REQ_NETWORK_SOCKET_SETPRIV: 813 case KAUTH_REQ_NETWORK_SOCKET_SETPRIV:
843 if (isroot) 814 if (isroot)
844 result = KAUTH_RESULT_ALLOW; 815 result = KAUTH_RESULT_ALLOW;
845 break; 816 break;
846 817
847 case KAUTH_REQ_NETWORK_SOCKET_CANSEE: 818 case KAUTH_REQ_NETWORK_SOCKET_CANSEE:
848 if (isroot) { 819 if (isroot) {
849 result = KAUTH_RESULT_ALLOW; 820 result = KAUTH_RESULT_ALLOW;
850 break; 821 break;
851 } 822 }
852 823
853 if (secmodel_bsd44_curtain) { 824 if (secmodel_bsd44_curtain) {
854 uid_t so_uid; 825 uid_t so_uid;
855 826
856 so_uid = 827 so_uid =
857 ((struct socket *)arg1)->so_uidinfo->ui_uid; 828 ((struct socket *)arg1)->so_uidinfo->ui_uid;
858 if (kauth_cred_geteuid(cred) == so_uid) 829 if (kauth_cred_geteuid(cred) == so_uid)
859 result = KAUTH_RESULT_ALLOW; 830 result = KAUTH_RESULT_ALLOW;
860 } else 831 } else
861 result = KAUTH_RESULT_ALLOW; 832 result = KAUTH_RESULT_ALLOW;
862 833
863 break; 834 break;
864 835
865 default: 836 default:
866 break; 837 break;
867 } 838 }
868 839
869 break; 840 break;
870 841
871 842
872 default: 843 default:
873 break; 844 break;
874 } 845 }
875 846
876 return (result); 847 return (result);
877} 848}
878 849
879/* 850/*
880 * kauth(9) listener 851 * kauth(9) listener
881 * 852 *
882 * Security model: Traditional NetBSD 853 * Security model: Traditional NetBSD
883 * Scope: Machdep 854 * Scope: Machdep
884 * Responsibility: Superuser access 855 * Responsibility: Superuser access
885 */ 856 */
886int 857int
887secmodel_suser_machdep_cb(kauth_cred_t cred, kauth_action_t action, 858secmodel_suser_machdep_cb(kauth_cred_t cred, kauth_action_t action,
888 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 859 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
889{ 860{
890 bool isroot; 861 bool isroot;
891 int result; 862 int result;
892 863
893 isroot = (kauth_cred_geteuid(cred) == 0); 864 isroot = (kauth_cred_geteuid(cred) == 0);
894 result = KAUTH_RESULT_DEFER; 865 result = KAUTH_RESULT_DEFER;
895 866
896 switch (action) { 867 switch (action) {
897 case KAUTH_MACHDEP_IOPERM_GET: 868 case KAUTH_MACHDEP_IOPERM_GET:
898 case KAUTH_MACHDEP_LDT_GET: 869 case KAUTH_MACHDEP_LDT_GET:
899 case KAUTH_MACHDEP_LDT_SET: 870 case KAUTH_MACHDEP_LDT_SET:
900 case KAUTH_MACHDEP_MTRR_GET: 871 case KAUTH_MACHDEP_MTRR_GET:
901 result = KAUTH_RESULT_ALLOW; 872 result = KAUTH_RESULT_ALLOW;
902 break; 873 break;
903 874
904 case KAUTH_MACHDEP_CACHEFLUSH: 875 case KAUTH_MACHDEP_CACHEFLUSH:
905 case KAUTH_MACHDEP_IOPERM_SET: 876 case KAUTH_MACHDEP_IOPERM_SET:
906 case KAUTH_MACHDEP_IOPL: 877 case KAUTH_MACHDEP_IOPL:
907 case KAUTH_MACHDEP_MTRR_SET: 878 case KAUTH_MACHDEP_MTRR_SET:
908 case KAUTH_MACHDEP_NVRAM: 879 case KAUTH_MACHDEP_NVRAM:
909 case KAUTH_MACHDEP_UNMANAGEDMEM: 880 case KAUTH_MACHDEP_UNMANAGEDMEM:
910 if (isroot) 881 if (isroot)
911 result = KAUTH_RESULT_ALLOW; 882 result = KAUTH_RESULT_ALLOW;
912 break; 883 break;
913 884
914 default: 885 default:
915 break; 886 break;
916 } 887 }
917 888
918 return (result); 889 return (result);
919} 890}
920 891
921/* 892/*
922 * kauth(9) listener 893 * kauth(9) listener
923 * 894 *
924 * Security model: Traditional NetBSD 895 * Security model: Traditional NetBSD
925 * Scope: Device 896 * Scope: Device
926 * Responsibility: Superuser access 897 * Responsibility: Superuser access
927 */ 898 */
928int 899int
929secmodel_suser_device_cb(kauth_cred_t cred, kauth_action_t action, 900secmodel_suser_device_cb(kauth_cred_t cred, kauth_action_t action,
930 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 901 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
931{ 902{
932 bool isroot; 903 bool isroot;
933 int result; 904 int result;
934 905
935 isroot = (kauth_cred_geteuid(cred) == 0); 906 isroot = (kauth_cred_geteuid(cred) == 0);
936 result = KAUTH_RESULT_DEFER; 907 result = KAUTH_RESULT_DEFER;
937 908
938 switch (action) { 909 switch (action) {
939 case KAUTH_DEVICE_BLUETOOTH_SETPRIV: 910 case KAUTH_DEVICE_BLUETOOTH_SETPRIV:
940 case KAUTH_DEVICE_BLUETOOTH_SEND: 911 case KAUTH_DEVICE_BLUETOOTH_SEND:
941 case KAUTH_DEVICE_BLUETOOTH_RECV: 912 case KAUTH_DEVICE_BLUETOOTH_RECV:
942 if (isroot) 913 if (isroot)
943 result = KAUTH_RESULT_ALLOW; 914 result = KAUTH_RESULT_ALLOW;
944 break; 915 break;
945 916
946 case KAUTH_DEVICE_BLUETOOTH_BCSP: 917 case KAUTH_DEVICE_BLUETOOTH_BCSP:
947 case KAUTH_DEVICE_BLUETOOTH_BTUART: { 918 case KAUTH_DEVICE_BLUETOOTH_BTUART: {
948 enum kauth_device_req req; 919 enum kauth_device_req req;
949 920
950 req = (enum kauth_device_req)arg0; 921 req = (enum kauth_device_req)arg0;
951 switch (req) { 922 switch (req) {
952 case KAUTH_REQ_DEVICE_BLUETOOTH_BCSP_ADD: 923 case KAUTH_REQ_DEVICE_BLUETOOTH_BCSP_ADD:
953 case KAUTH_REQ_DEVICE_BLUETOOTH_BTUART_ADD: 924 case KAUTH_REQ_DEVICE_BLUETOOTH_BTUART_ADD:
954 if (isroot) 925 if (isroot)
955 result = KAUTH_RESULT_ALLOW; 926 result = KAUTH_RESULT_ALLOW;
956 break; 927 break;
957 928
958 default: 929 default:
959 break; 930 break;
960 } 931 }
961 932
962 break; 933 break;
963 } 934 }
964 935
965 case KAUTH_DEVICE_RAWIO_SPEC: 936 case KAUTH_DEVICE_RAWIO_SPEC:
966 case KAUTH_DEVICE_RAWIO_PASSTHRU: 937 case KAUTH_DEVICE_RAWIO_PASSTHRU:
967 /* 938 /*
968 * Decision is root-agnostic. 939 * Decision is root-agnostic.
969 * 940 *
970 * Both requests can be issued on devices subject to their 941 * Both requests can be issued on devices subject to their
971 * permission bits. 942 * permission bits.
972 */ 943 */
973 result = KAUTH_RESULT_ALLOW; 944 result = KAUTH_RESULT_ALLOW;
974 break; 945 break;
975 946
976 case KAUTH_DEVICE_TTY_OPEN: 947 case KAUTH_DEVICE_TTY_OPEN:
977 if (isroot) 948 if (isroot)
978 result = KAUTH_RESULT_ALLOW; 949 result = KAUTH_RESULT_ALLOW;
979 950
980 break; 951 break;
981 952
982 case KAUTH_DEVICE_TTY_PRIVSET: 953 case KAUTH_DEVICE_TTY_PRIVSET:
983 if (isroot) 954 if (isroot)
984 result = KAUTH_RESULT_ALLOW; 955 result = KAUTH_RESULT_ALLOW;
985 956
986 break; 957 break;
987 958
988 case KAUTH_DEVICE_TTY_STI: 959 case KAUTH_DEVICE_TTY_STI:
989 if (isroot) 960 if (isroot)
990 result = KAUTH_RESULT_ALLOW; 961 result = KAUTH_RESULT_ALLOW;
991 962
992 break; 963 break;
993 964
994 case KAUTH_DEVICE_RND_ADDDATA: 965 case KAUTH_DEVICE_RND_ADDDATA:
995 case KAUTH_DEVICE_RND_GETPRIV: 966 case KAUTH_DEVICE_RND_GETPRIV:
996 case KAUTH_DEVICE_RND_SETPRIV: 967 case KAUTH_DEVICE_RND_SETPRIV:
997 if (isroot) 968 if (isroot)
998 result = KAUTH_RESULT_ALLOW; 969 result = KAUTH_RESULT_ALLOW;
999 break; 970 break;
1000 971
1001 case KAUTH_DEVICE_GPIO_PINSET: 972 case KAUTH_DEVICE_GPIO_PINSET:
1002 /* 973 /*
 1003 * root can access gpio pins; secmodel_securelevel can veto 974 * root can access gpio pins; secmodel_securelevel can veto
1004 * this decision. 975 * this decision.
1005 */ 976 */
1006 if (isroot) 977 if (isroot)
1007 result = KAUTH_RESULT_ALLOW; 978 result = KAUTH_RESULT_ALLOW;
1008 break; 979 break;
1009 980
1010 default: 981 default:
1011 break; 982 break;
1012 } 983 }
1013 984
1014 return (result); 985 return (result);
1015} 986}
1016 987
1017int 988int
1018secmodel_suser_vnode_cb(kauth_cred_t cred, kauth_action_t action, 989secmodel_suser_vnode_cb(kauth_cred_t cred, kauth_action_t action,
1019 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3) 990 void *cookie, void *arg0, void *arg1, void *arg2, void *arg3)
1020{ 991{
1021 bool isroot; 992 bool isroot;
1022 int result; 993 int result;
1023 994
1024 isroot = (kauth_cred_geteuid(cred) == 0); 995 isroot = (kauth_cred_geteuid(cred) == 0);
1025 result = KAUTH_RESULT_DEFER; 996 result = KAUTH_RESULT_DEFER;
1026 997
1027 if (isroot) 998 if (isroot)
1028 result = KAUTH_RESULT_ALLOW; 999 result = KAUTH_RESULT_ALLOW;
1029 1000
1030 return (result); 1001 return (result);
1031} 1002}
1032 1003
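
The process-scope hunk above is where the commit's subject line lands: the removed lines dropped the non-root policy for the scheduler requests. Previously the owner could read scheduling parameters (GETPARAM), could set them (SETPARAM) only while keeping the same class or moving to a non-real-time class and without raising the priority, and anyone could read affinity (GETAFFINITY). After this change, GETPARAM, SETPARAM and SETAFFINITY fall through to the bare superuser check, and GETAFFINITY is no longer handled by this secmodel at all. Per the commit message the policy moves back into the scheduler subsystem; the lines added to kern_synch.c are not part of this excerpt, so the following is only an illustrative sketch of a subsystem-side kauth(9) listener carrying the same rules (the function name and its registration are assumptions, not the actual code):

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/lwp.h>
#include <sys/sched.h>

static int
sched_policy_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p = arg0;
	int result = KAUTH_RESULT_DEFER;

	switch (action) {
	case KAUTH_PROCESS_SCHEDULER_GETPARAM:
		/* The owner may read scheduling parameters. */
		if (kauth_cred_uidmatch(cred, p->p_cred))
			result = KAUTH_RESULT_ALLOW;
		break;

	case KAUTH_PROCESS_SCHEDULER_SETPARAM:
		if (kauth_cred_uidmatch(cred, p->p_cred)) {
			struct lwp *l = arg1;
			int policy = (int)(unsigned long)arg2;
			pri_t priority = (pri_t)(unsigned long)arg3;

			/* Same class (or a non-RT class), and no raise. */
			if ((policy == l->l_class ||
			    (policy != SCHED_FIFO && policy != SCHED_RR)) &&
			    priority <= l->l_priority)
				result = KAUTH_RESULT_ALLOW;
		}
		break;

	case KAUTH_PROCESS_SCHEDULER_GETAFFINITY:
		/* Anyone may read affinity. */
		result = KAUTH_RESULT_ALLOW;
		break;

	default:
		break;
	}

	return result;
}

Such a listener would be registered once at scheduler initialization with kauth_listen_scope(KAUTH_SCOPE_PROCESS, sched_policy_cb, NULL), keeping the policy next to the code it governs rather than duplicated in every security model.
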