Sun Apr 9 09:18:10 2023 UTC ()
kern: KASSERT(A && B) -> KASSERT(A); KASSERT(B)


(riastradh)
diff -r1.84 -r1.85 src/sys/kern/exec_subr.c
diff -r1.146 -r1.147 src/sys/kern/kern_event.c
diff -r1.251 -r1.252 src/sys/kern/kern_lwp.c
diff -r1.269 -r1.270 src/sys/kern/kern_proc.c
diff -r1.404 -r1.405 src/sys/kern/kern_sig.c
diff -r1.73 -r1.74 src/sys/kern/kern_sleepq.c
diff -r1.72 -r1.73 src/sys/kern/kern_softint.c
diff -r1.268 -r1.269 src/sys/kern/kern_sysctl.c
diff -r1.45 -r1.46 src/sys/kern/kern_turnstile.c
diff -r1.45 -r1.46 src/sys/kern/tty_ptm.c
diff -r1.26 -r1.27 src/sys/kern/kern_veriexec.c
diff -r1.27 -r1.28 src/sys/kern/subr_asan.c
diff -r1.27 -r1.28 src/sys/kern/subr_pcu.c
diff -r1.9 -r1.10 src/sys/kern/subr_cpufreq.c
diff -r1.14 -r1.15 src/sys/kern/subr_kcpuset.c
diff -r1.287 -r1.288 src/sys/kern/subr_pool.c
diff -r1.198 -r1.199 src/sys/kern/subr_prf.c
diff -r1.35 -r1.36 src/sys/kern/subr_time.c
diff -r1.108 -r1.109 src/sys/kern/subr_vmem.c
diff -r1.34 -r1.35 src/sys/kern/subr_xcall.c
diff -r1.49 -r1.50 src/sys/kern/sys_sched.c
diff -r1.152 -r1.153 src/sys/kern/vfs_cache.c
diff -r1.232 -r1.233 src/sys/kern/vfs_lookup.c
diff -r1.557 -r1.558 src/sys/kern/vfs_syscalls.c

cvs diff -r1.84 -r1.85 src/sys/kern/exec_subr.c (expand / switch to unified diff)

--- src/sys/kern/exec_subr.c 2020/04/13 19:23:18 1.84
+++ src/sys/kern/exec_subr.c 2023/04/09 09:18:09 1.85
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: exec_subr.c,v 1.84 2020/04/13 19:23:18 ad Exp $ */ 1/* $NetBSD: exec_subr.c,v 1.85 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou 4 * Copyright (c) 1993, 1994, 1996 Christopher G. Demetriou
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -21,27 +21,27 @@ @@ -21,27 +21,27 @@
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 22 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 23 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 24 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 26 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32 32
33#include <sys/cdefs.h> 33#include <sys/cdefs.h>
34__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.84 2020/04/13 19:23:18 ad Exp $"); 34__KERNEL_RCSID(0, "$NetBSD: exec_subr.c,v 1.85 2023/04/09 09:18:09 riastradh Exp $");
35 35
36#include "opt_pax.h" 36#include "opt_pax.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/proc.h> 40#include <sys/proc.h>
41#include <sys/kmem.h> 41#include <sys/kmem.h>
42#include <sys/vnode.h> 42#include <sys/vnode.h>
43#include <sys/filedesc.h> 43#include <sys/filedesc.h>
44#include <sys/exec.h> 44#include <sys/exec.h>
45#include <sys/mman.h> 45#include <sys/mman.h>
46#include <sys/resourcevar.h> 46#include <sys/resourcevar.h>
47#include <sys/device.h> 47#include <sys/device.h>
@@ -450,20 +450,21 @@ exec_setup_stack(struct lwp *l, struct e @@ -450,20 +450,21 @@ exec_setup_stack(struct lwp *l, struct e
450 vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr, user_stack_guard_size); 450 vsize_t guard_size = MIN(VM_MAXUSER_ADDRESS - epp->ep_maxsaddr, user_stack_guard_size);
451 if (guard_size > 0) 451 if (guard_size > 0)
452 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size, 452 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, guard_size,
453 epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE); 453 epp->ep_maxsaddr, NULL, 0, VM_PROT_NONE);
454#else 454#else
455 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, user_stack_guard_size, 455 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, user_stack_guard_size,
456 epp->ep_maxsaddr - user_stack_guard_size, NULL, 0, VM_PROT_NONE); 456 epp->ep_maxsaddr - user_stack_guard_size, NULL, 0, VM_PROT_NONE);
457#endif 457#endif
458 } 458 }
459 if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) { 459 if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) {
460 NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size, 460 NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size,
461 noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK); 461 noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK);
462 } 462 }
463 KASSERT(access_size > 0 && access_size <= MAXSSIZ); 463 KASSERT(access_size > 0);
 464 KASSERT(access_size <= MAXSSIZ);
464 NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size, 465 NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size,
465 access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE, 466 access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE,
466 VMCMD_STACK); 467 VMCMD_STACK);
467 468
468 return 0; 469 return 0;
469} 470}

cvs diff -r1.146 -r1.147 src/sys/kern/kern_event.c (expand / switch to unified diff)

--- src/sys/kern/kern_event.c 2022/07/24 19:23:44 1.146
+++ src/sys/kern/kern_event.c 2023/04/09 09:18:09 1.147
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_event.c,v 1.146 2022/07/24 19:23:44 riastradh Exp $ */ 1/* $NetBSD: kern_event.c,v 1.147 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008, 2009, 2021 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2009, 2021 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -53,27 +53,27 @@ @@ -53,27 +53,27 @@
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 54 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 55 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 * SUCH DAMAGE. 56 * SUCH DAMAGE.
57 * 57 *
58 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp 58 * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
59 */ 59 */
60 60
61#ifdef _KERNEL_OPT 61#ifdef _KERNEL_OPT
62#include "opt_ddb.h" 62#include "opt_ddb.h"
63#endif /* _KERNEL_OPT */ 63#endif /* _KERNEL_OPT */
64 64
65#include <sys/cdefs.h> 65#include <sys/cdefs.h>
66__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.146 2022/07/24 19:23:44 riastradh Exp $"); 66__KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.147 2023/04/09 09:18:09 riastradh Exp $");
67 67
68#include <sys/param.h> 68#include <sys/param.h>
69#include <sys/systm.h> 69#include <sys/systm.h>
70#include <sys/kernel.h> 70#include <sys/kernel.h>
71#include <sys/wait.h> 71#include <sys/wait.h>
72#include <sys/proc.h> 72#include <sys/proc.h>
73#include <sys/file.h> 73#include <sys/file.h>
74#include <sys/select.h> 74#include <sys/select.h>
75#include <sys/queue.h> 75#include <sys/queue.h>
76#include <sys/event.h> 76#include <sys/event.h>
77#include <sys/eventvar.h> 77#include <sys/eventvar.h>
78#include <sys/poll.h> 78#include <sys/poll.h>
79#include <sys/kmem.h> 79#include <sys/kmem.h>
@@ -1390,27 +1390,28 @@ filt_timercompute(struct kevent *kev, ui @@ -1390,27 +1390,28 @@ filt_timercompute(struct kevent *kev, ui
1390 return 0; 1390 return 0;
1391} 1391}
1392 1392
1393static void 1393static void
1394filt_timerexpire(void *knx) 1394filt_timerexpire(void *knx)
1395{ 1395{
1396 struct knote *kn = knx; 1396 struct knote *kn = knx;
1397 struct kqueue *kq = kn->kn_kq; 1397 struct kqueue *kq = kn->kn_kq;
1398 1398
1399 mutex_spin_enter(&kq->kq_lock); 1399 mutex_spin_enter(&kq->kq_lock);
1400 kn->kn_data++; 1400 kn->kn_data++;
1401 knote_activate_locked(kn); 1401 knote_activate_locked(kn);
1402 if (kn->kn_sdata != FILT_TIMER_NOSCHED) { 1402 if (kn->kn_sdata != FILT_TIMER_NOSCHED) {
1403 KASSERT(kn->kn_sdata > 0 && kn->kn_sdata <= INT_MAX); 1403 KASSERT(kn->kn_sdata > 0);
 1404 KASSERT(kn->kn_sdata <= INT_MAX);
1404 callout_schedule((callout_t *)kn->kn_hook, 1405 callout_schedule((callout_t *)kn->kn_hook,
1405 (int)kn->kn_sdata); 1406 (int)kn->kn_sdata);
1406 } 1407 }
1407 mutex_spin_exit(&kq->kq_lock); 1408 mutex_spin_exit(&kq->kq_lock);
1408} 1409}
1409 1410
1410static inline void 1411static inline void
1411filt_timerstart(struct knote *kn, uintptr_t tticks) 1412filt_timerstart(struct knote *kn, uintptr_t tticks)
1412{ 1413{
1413 callout_t *calloutp = kn->kn_hook; 1414 callout_t *calloutp = kn->kn_hook;
1414 1415
1415 KASSERT(mutex_owned(&kn->kn_kq->kq_lock)); 1416 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
1416 KASSERT(!callout_pending(calloutp)); 1417 KASSERT(!callout_pending(calloutp));
@@ -2438,28 +2439,28 @@ relock: @@ -2438,28 +2439,28 @@ relock:
2438 (void)filter_touch(kn, kevp, EVENT_PROCESS); 2439 (void)filter_touch(kn, kevp, EVENT_PROCESS);
2439 } else { 2440 } else {
2440 *kevp = kn->kn_kevent; 2441 *kevp = kn->kn_kevent;
2441 } 2442 }
2442 kevp++; 2443 kevp++;
2443 nkev++; 2444 nkev++;
2444 influx = 1; 2445 influx = 1;
2445 if (kn->kn_flags & EV_ONESHOT) { 2446 if (kn->kn_flags & EV_ONESHOT) {
2446 /* delete ONESHOT events after retrieval */ 2447 /* delete ONESHOT events after retrieval */
2447 KNOTE_WILLDETACH(kn); 2448 KNOTE_WILLDETACH(kn);
2448 kn->kn_status &= ~KN_BUSY; 2449 kn->kn_status &= ~KN_BUSY;
2449 kq->kq_count--; 2450 kq->kq_count--;
2450 KASSERT(kn_in_flux(kn) == false); 2451 KASSERT(kn_in_flux(kn) == false);
2451 KASSERT((kn->kn_status & KN_WILLDETACH) != 0 && 2452 KASSERT((kn->kn_status & KN_WILLDETACH) != 0);
2452 kn->kn_kevent.udata == curlwp); 2453 KASSERT(kn->kn_kevent.udata == curlwp);
2453 mutex_spin_exit(&kq->kq_lock); 2454 mutex_spin_exit(&kq->kq_lock);
2454 knote_detach(kn, fdp, true); 2455 knote_detach(kn, fdp, true);
2455 mutex_enter(&fdp->fd_lock); 2456 mutex_enter(&fdp->fd_lock);
2456 mutex_spin_enter(&kq->kq_lock); 2457 mutex_spin_enter(&kq->kq_lock);
2457 } else if (kn->kn_flags & EV_CLEAR) { 2458 } else if (kn->kn_flags & EV_CLEAR) {
2458 /* clear state after retrieval */ 2459 /* clear state after retrieval */
2459 kn->kn_data = 0; 2460 kn->kn_data = 0;
2460 kn->kn_fflags = 0; 2461 kn->kn_fflags = 0;
2461 /* 2462 /*
2462 * Manually clear knotes who weren't 2463 * Manually clear knotes who weren't
2463 * 'touch'ed. 2464 * 'touch'ed.
2464 */ 2465 */
2465 if (touch == 0) { 2466 if (touch == 0) {

cvs diff -r1.251 -r1.252 src/sys/kern/kern_lwp.c (expand / switch to unified diff)

--- src/sys/kern/kern_lwp.c 2022/07/01 01:06:04 1.251
+++ src/sys/kern/kern_lwp.c 2023/04/09 09:18:09 1.252
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_lwp.c,v 1.251 2022/07/01 01:06:04 riastradh Exp $ */ 1/* $NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020 4 * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2019, 2020
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Nathan J. Williams, and Andrew Doran. 9 * by Nathan J. Williams, and Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -207,27 +207,27 @@ @@ -207,27 +207,27 @@
207 * (But not always for kernel threads. There are some special cases 207 * (But not always for kernel threads. There are some special cases
208 * as mentioned above: soft interrupts, and the idle loops.) 208 * as mentioned above: soft interrupts, and the idle loops.)
209 * 209 *
210 * Note that an LWP is considered running or likely to run soon if in 210 * Note that an LWP is considered running or likely to run soon if in
211 * one of the following states. This affects the value of p_nrlwps: 211 * one of the following states. This affects the value of p_nrlwps:
212 * 212 *
213 * LSRUN, LSONPROC, LSSLEEP 213 * LSRUN, LSONPROC, LSSLEEP
214 * 214 *
215 * p_lock does not need to be held when transitioning among these 215 * p_lock does not need to be held when transitioning among these
216 * three states, hence p_lock is rarely taken for state transitions. 216 * three states, hence p_lock is rarely taken for state transitions.
217 */ 217 */
218 218
219#include <sys/cdefs.h> 219#include <sys/cdefs.h>
220__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.251 2022/07/01 01:06:04 riastradh Exp $"); 220__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.252 2023/04/09 09:18:09 riastradh Exp $");
221 221
222#include "opt_ddb.h" 222#include "opt_ddb.h"
223#include "opt_lockdebug.h" 223#include "opt_lockdebug.h"
224#include "opt_dtrace.h" 224#include "opt_dtrace.h"
225 225
226#define _LWP_API_PRIVATE 226#define _LWP_API_PRIVATE
227 227
228#include <sys/param.h> 228#include <sys/param.h>
229#include <sys/systm.h> 229#include <sys/systm.h>
230#include <sys/cpu.h> 230#include <sys/cpu.h>
231#include <sys/pool.h> 231#include <sys/pool.h>
232#include <sys/proc.h> 232#include <sys/proc.h>
233#include <sys/syscallargs.h> 233#include <sys/syscallargs.h>
@@ -1065,27 +1065,28 @@ lwp_startup(struct lwp *prev, struct lwp @@ -1065,27 +1065,28 @@ lwp_startup(struct lwp *prev, struct lwp
1065 * Exit an LWP. 1065 * Exit an LWP.
1066 * 1066 *
1067 * *** WARNING *** This can be called with (l != curlwp) in error paths. 1067 * *** WARNING *** This can be called with (l != curlwp) in error paths.
1068 */ 1068 */
1069void 1069void
1070lwp_exit(struct lwp *l) 1070lwp_exit(struct lwp *l)
1071{ 1071{
1072 struct proc *p = l->l_proc; 1072 struct proc *p = l->l_proc;
1073 struct lwp *l2; 1073 struct lwp *l2;
1074 bool current; 1074 bool current;
1075 1075
1076 current = (l == curlwp); 1076 current = (l == curlwp);
1077 1077
1078 KASSERT(current || (l->l_stat == LSIDL && l->l_target_cpu == NULL)); 1078 KASSERT(current || l->l_stat == LSIDL);
 1079 KASSERT(current || l->l_target_cpu == NULL);
1079 KASSERT(p == curproc); 1080 KASSERT(p == curproc);
1080 1081
1081 SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0); 1082 SDT_PROBE(proc, kernel, , lwp__exit, l, 0, 0, 0, 0);
1082 1083
1083 /* Verify that we hold no locks; for DIAGNOSTIC check kernel_lock. */ 1084 /* Verify that we hold no locks; for DIAGNOSTIC check kernel_lock. */
1084 LOCKDEBUG_BARRIER(NULL, 0); 1085 LOCKDEBUG_BARRIER(NULL, 0);
1085 KASSERTMSG(curcpu()->ci_biglock_count == 0, "kernel_lock leaked"); 1086 KASSERTMSG(curcpu()->ci_biglock_count == 0, "kernel_lock leaked");
1086 1087
1087 /* 1088 /*
1088 * If we are the last live LWP in a process, we need to exit the 1089 * If we are the last live LWP in a process, we need to exit the
1089 * entire process. We do so with an exit status of zero, because 1090 * entire process. We do so with an exit status of zero, because
1090 * it's a "controlled" exit, and because that's what Solaris does. 1091 * it's a "controlled" exit, and because that's what Solaris does.
1091 * 1092 *

cvs diff -r1.269 -r1.270 src/sys/kern/kern_proc.c (expand / switch to unified diff)

--- src/sys/kern/kern_proc.c 2022/10/26 23:20:36 1.269
+++ src/sys/kern/kern_proc.c 2023/04/09 09:18:09 1.270
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_proc.c,v 1.269 2022/10/26 23:20:36 riastradh Exp $ */ 1/* $NetBSD: kern_proc.c,v 1.270 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, and by Andrew Doran. 9 * NASA Ames Research Center, and by Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -52,27 +52,27 @@ @@ -52,27 +52,27 @@
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE. 59 * SUCH DAMAGE.
60 * 60 *
61 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95 61 * @(#)kern_proc.c 8.7 (Berkeley) 2/14/95
62 */ 62 */
63 63
64#include <sys/cdefs.h> 64#include <sys/cdefs.h>
65__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.269 2022/10/26 23:20:36 riastradh Exp $"); 65__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.270 2023/04/09 09:18:09 riastradh Exp $");
66 66
67#ifdef _KERNEL_OPT 67#ifdef _KERNEL_OPT
68#include "opt_kstack.h" 68#include "opt_kstack.h"
69#include "opt_maxuprc.h" 69#include "opt_maxuprc.h"
70#include "opt_dtrace.h" 70#include "opt_dtrace.h"
71#include "opt_compat_netbsd32.h" 71#include "opt_compat_netbsd32.h"
72#include "opt_kaslr.h" 72#include "opt_kaslr.h"
73#endif 73#endif
74 74
75#if defined(__HAVE_COMPAT_NETBSD32) && !defined(COMPAT_NETBSD32) \ 75#if defined(__HAVE_COMPAT_NETBSD32) && !defined(COMPAT_NETBSD32) \
76 && !defined(_RUMPKERNEL) 76 && !defined(_RUMPKERNEL)
77#define COMPAT_NETBSD32 77#define COMPAT_NETBSD32
78#endif 78#endif
@@ -2034,27 +2034,28 @@ proc_active_lwp(struct proc *p) @@ -2034,27 +2034,28 @@ proc_active_lwp(struct proc *p)
2034 0, 2034 0,
2035 2, /* LSIDL */ 2035 2, /* LSIDL */
2036 6, /* LSRUN */ 2036 6, /* LSRUN */
2037 5, /* LSSLEEP */ 2037 5, /* LSSLEEP */
2038 4, /* LSSTOP */ 2038 4, /* LSSTOP */
2039 0, /* LSZOMB */ 2039 0, /* LSZOMB */
2040 1, /* LSDEAD */ 2040 1, /* LSDEAD */
2041 7, /* LSONPROC */ 2041 7, /* LSONPROC */
2042 3 /* LSSUSPENDED */ 2042 3 /* LSSUSPENDED */
2043 }; 2043 };
2044 2044
2045 struct lwp *l, *lp = NULL; 2045 struct lwp *l, *lp = NULL;
2046 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 2046 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2047 KASSERT(l->l_stat >= 0 && l->l_stat < __arraycount(ostat)); 2047 KASSERT(l->l_stat >= 0);
 2048 KASSERT(l->l_stat < __arraycount(ostat));
2048 if (lp == NULL || 2049 if (lp == NULL ||
2049 ostat[l->l_stat] > ostat[lp->l_stat] || 2050 ostat[l->l_stat] > ostat[lp->l_stat] ||
2050 (ostat[l->l_stat] == ostat[lp->l_stat] && 2051 (ostat[l->l_stat] == ostat[lp->l_stat] &&
2051 l->l_cpticks > lp->l_cpticks)) { 2052 l->l_cpticks > lp->l_cpticks)) {
2052 lp = l; 2053 lp = l;
2053 continue; 2054 continue;
2054 } 2055 }
2055 } 2056 }
2056 return lp; 2057 return lp;
2057} 2058}
2058 2059
2059static int 2060static int
2060sysctl_doeproc(SYSCTLFN_ARGS) 2061sysctl_doeproc(SYSCTLFN_ARGS)

cvs diff -r1.404 -r1.405 src/sys/kern/kern_sig.c (expand / switch to unified diff)

--- src/sys/kern/kern_sig.c 2022/04/09 23:38:33 1.404
+++ src/sys/kern/kern_sig.c 2023/04/09 09:18:09 1.405
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_sig.c,v 1.404 2022/04/09 23:38:33 riastradh Exp $ */ 1/* $NetBSD: kern_sig.c,v 1.405 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -60,27 +60,27 @@ @@ -60,27 +60,27 @@
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95 65 * @(#)kern_sig.c 8.14 (Berkeley) 5/14/95
66 */ 66 */
67 67
68/* 68/*
69 * Signal subsystem. 69 * Signal subsystem.
70 */ 70 */
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.404 2022/04/09 23:38:33 riastradh Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: kern_sig.c,v 1.405 2023/04/09 09:18:09 riastradh Exp $");
74 74
75#include "opt_execfmt.h" 75#include "opt_execfmt.h"
76#include "opt_ptrace.h" 76#include "opt_ptrace.h"
77#include "opt_dtrace.h" 77#include "opt_dtrace.h"
78#include "opt_compat_sunos.h" 78#include "opt_compat_sunos.h"
79#include "opt_compat_netbsd.h" 79#include "opt_compat_netbsd.h"
80#include "opt_compat_netbsd32.h" 80#include "opt_compat_netbsd32.h"
81#include "opt_pax.h" 81#include "opt_pax.h"
82 82
83#define SIGPROP /* include signal properties table */ 83#define SIGPROP /* include signal properties table */
84#include <sys/param.h> 84#include <sys/param.h>
85#include <sys/signalvar.h> 85#include <sys/signalvar.h>
86#include <sys/proc.h> 86#include <sys/proc.h>
@@ -1306,27 +1306,28 @@ kpsignal2(struct proc *p, ksiginfo_t *ks @@ -1306,27 +1306,28 @@ kpsignal2(struct proc *p, ksiginfo_t *ks
1306 int prop, signo = ksi->ksi_signo; 1306 int prop, signo = ksi->ksi_signo;
1307 struct lwp *l = NULL; 1307 struct lwp *l = NULL;
1308 ksiginfo_t *kp; 1308 ksiginfo_t *kp;
1309 lwpid_t lid; 1309 lwpid_t lid;
1310 sig_t action; 1310 sig_t action;
1311 bool toall; 1311 bool toall;
1312 bool traced; 1312 bool traced;
1313 int error = 0; 1313 int error = 0;
1314 1314
1315 KASSERT(!cpu_intr_p()); 1315 KASSERT(!cpu_intr_p());
1316 KASSERT(mutex_owned(&proc_lock)); 1316 KASSERT(mutex_owned(&proc_lock));
1317 KASSERT(mutex_owned(p->p_lock)); 1317 KASSERT(mutex_owned(p->p_lock));
1318 KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0); 1318 KASSERT((ksi->ksi_flags & KSI_QUEUED) == 0);
1319 KASSERT(signo > 0 && signo < NSIG); 1319 KASSERT(signo > 0);
 1320 KASSERT(signo < NSIG);
1320 1321
1321 /* 1322 /*
1322 * If the process is being created by fork, is a zombie or is 1323 * If the process is being created by fork, is a zombie or is
1323 * exiting, then just drop the signal here and bail out. 1324 * exiting, then just drop the signal here and bail out.
1324 */ 1325 */
1325 if (p->p_stat != SACTIVE && p->p_stat != SSTOP) 1326 if (p->p_stat != SACTIVE && p->p_stat != SSTOP)
1326 return 0; 1327 return 0;
1327 1328
1328 /* 1329 /*
1329 * Notify any interested parties of the signal. 1330 * Notify any interested parties of the signal.
1330 */ 1331 */
1331 KNOTE(&p->p_klist, NOTE_SIGNAL | signo); 1332 KNOTE(&p->p_klist, NOTE_SIGNAL | signo);
1332 1333
@@ -1561,27 +1562,28 @@ proc_stop_lwps(struct proc *p) @@ -1561,27 +1562,28 @@ proc_stop_lwps(struct proc *p)
1561 1562
1562/* 1563/*
1563 * Finish stopping of a process. Mark it stopped and notify the parent. 1564 * Finish stopping of a process. Mark it stopped and notify the parent.
1564 * 1565 *
1565 * Drop p_lock briefly if ppsig is true. 1566 * Drop p_lock briefly if ppsig is true.
1566 */ 1567 */
1567static void 1568static void
1568proc_stop_done(struct proc *p, int ppmask) 1569proc_stop_done(struct proc *p, int ppmask)
1569{ 1570{
1570 1571
1571 KASSERT(mutex_owned(&proc_lock)); 1572 KASSERT(mutex_owned(&proc_lock));
1572 KASSERT(mutex_owned(p->p_lock)); 1573 KASSERT(mutex_owned(p->p_lock));
1573 KASSERT((p->p_sflag & PS_STOPPING) != 0); 1574 KASSERT((p->p_sflag & PS_STOPPING) != 0);
1574 KASSERT(p->p_nrlwps == 0 || (p->p_nrlwps == 1 && p == curproc)); 1575 KASSERT(p->p_nrlwps == 0 || p->p_nrlwps == 1);
 1576 KASSERT(p->p_nrlwps == 0 || p == curproc);
1575 1577
1576 p->p_sflag &= ~PS_STOPPING; 1578 p->p_sflag &= ~PS_STOPPING;
1577 p->p_stat = SSTOP; 1579 p->p_stat = SSTOP;
1578 p->p_waited = 0; 1580 p->p_waited = 0;
1579 p->p_pptr->p_nstopchild++; 1581 p->p_pptr->p_nstopchild++;
1580 1582
1581 /* child_psignal drops p_lock briefly. */ 1583 /* child_psignal drops p_lock briefly. */
1582 child_psignal(p, ppmask); 1584 child_psignal(p, ppmask);
1583 cv_broadcast(&p->p_pptr->p_waitcv); 1585 cv_broadcast(&p->p_pptr->p_waitcv);
1584} 1586}
1585 1587
1586/* 1588/*
1587 * Stop the current process and switch away to the debugger notifying 1589 * Stop the current process and switch away to the debugger notifying

cvs diff -r1.73 -r1.74 src/sys/kern/kern_sleepq.c (expand / switch to unified diff)

--- src/sys/kern/kern_sleepq.c 2022/06/29 22:27:01 1.73
+++ src/sys/kern/kern_sleepq.c 2023/04/09 09:18:09 1.74
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_sleepq.c,v 1.73 2022/06/29 22:27:01 riastradh Exp $ */ 1/* $NetBSD: kern_sleepq.c,v 1.74 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2006, 2007, 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -25,27 +25,27 @@ @@ -25,27 +25,27 @@
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Sleep queue implementation, used by turnstiles and general sleep/wakeup 33 * Sleep queue implementation, used by turnstiles and general sleep/wakeup
34 * interfaces. 34 * interfaces.
35 */ 35 */
36 36
37#include <sys/cdefs.h> 37#include <sys/cdefs.h>
38__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.73 2022/06/29 22:27:01 riastradh Exp $"); 38__KERNEL_RCSID(0, "$NetBSD: kern_sleepq.c,v 1.74 2023/04/09 09:18:09 riastradh Exp $");
39 39
40#include <sys/param.h> 40#include <sys/param.h>
41#include <sys/kernel.h> 41#include <sys/kernel.h>
42#include <sys/cpu.h> 42#include <sys/cpu.h>
43#include <sys/intr.h> 43#include <sys/intr.h>
44#include <sys/pool.h> 44#include <sys/pool.h>
45#include <sys/proc.h>  45#include <sys/proc.h>
46#include <sys/resourcevar.h> 46#include <sys/resourcevar.h>
47#include <sys/sched.h> 47#include <sys/sched.h>
48#include <sys/systm.h> 48#include <sys/systm.h>
49#include <sys/sleepq.h> 49#include <sys/sleepq.h>
50#include <sys/ktrace.h> 50#include <sys/ktrace.h>
51 51
@@ -215,27 +215,28 @@ sleepq_insert(sleepq_t *sq, lwp_t *l, sy @@ -215,27 +215,28 @@ sleepq_insert(sleepq_t *sq, lwp_t *l, sy
215 * 215 *
216 * Enter an LWP into the sleep queue and prepare for sleep. The sleep 216 * Enter an LWP into the sleep queue and prepare for sleep. The sleep
217 * queue must already be locked, and any interlock (such as the kernel 217 * queue must already be locked, and any interlock (such as the kernel
218 * lock) must have be released (see sleeptab_lookup(), sleepq_enter()). 218 * lock) must have be released (see sleeptab_lookup(), sleepq_enter()).
219 */ 219 */
220void 220void
221sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj, 221sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj,
222 bool catch_p) 222 bool catch_p)
223{ 223{
224 lwp_t *l = curlwp; 224 lwp_t *l = curlwp;
225 225
226 KASSERT(lwp_locked(l, NULL)); 226 KASSERT(lwp_locked(l, NULL));
227 KASSERT(l->l_stat == LSONPROC); 227 KASSERT(l->l_stat == LSONPROC);
228 KASSERT(l->l_wchan == NULL && l->l_sleepq == NULL); 228 KASSERT(l->l_wchan == NULL);
 229 KASSERT(l->l_sleepq == NULL);
229 KASSERT((l->l_flag & LW_SINTR) == 0); 230 KASSERT((l->l_flag & LW_SINTR) == 0);
230 231
231 l->l_syncobj = sobj; 232 l->l_syncobj = sobj;
232 l->l_wchan = wchan; 233 l->l_wchan = wchan;
233 l->l_sleepq = sq; 234 l->l_sleepq = sq;
234 l->l_wmesg = wmesg; 235 l->l_wmesg = wmesg;
235 l->l_slptime = 0; 236 l->l_slptime = 0;
236 l->l_stat = LSSLEEP; 237 l->l_stat = LSSLEEP;
237 if (catch_p) 238 if (catch_p)
238 l->l_flag |= LW_SINTR; 239 l->l_flag |= LW_SINTR;
239 240
240 sleepq_insert(sq, l, sobj); 241 sleepq_insert(sq, l, sobj);
241 242

cvs diff -r1.72 -r1.73 src/sys/kern/kern_softint.c (expand / switch to unified diff)

--- src/sys/kern/kern_softint.c 2022/10/28 21:52:02 1.72
+++ src/sys/kern/kern_softint.c 2023/04/09 09:18:09 1.73
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_softint.c,v 1.72 2022/10/28 21:52:02 riastradh Exp $ */ 1/* $NetBSD: kern_softint.c,v 1.73 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -160,27 +160,27 @@ @@ -160,27 +160,27 @@
160 * interrupt; 160 * interrupt;
161 * } 161 * }
162 * 162 *
163 * Once the soft interrupt has fired (and even if it has blocked), 163 * Once the soft interrupt has fired (and even if it has blocked),
164 * no further soft interrupts at that level will be triggered by 164 * no further soft interrupts at that level will be triggered by
165 * MI code until the soft interrupt handler has ceased execution. 165 * MI code until the soft interrupt handler has ceased execution.
166 * If a soft interrupt handler blocks and is resumed, it resumes 166 * If a soft interrupt handler blocks and is resumed, it resumes
167 * execution as a normal LWP (kthread) and gains VM context. Only 167 * execution as a normal LWP (kthread) and gains VM context. Only
168 * when it has completed and is ready to fire again will it 168 * when it has completed and is ready to fire again will it
169 * interrupt other threads. 169 * interrupt other threads.
170 */ 170 */
171 171
172#include <sys/cdefs.h> 172#include <sys/cdefs.h>
173__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.72 2022/10/28 21:52:02 riastradh Exp $"); 173__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.73 2023/04/09 09:18:09 riastradh Exp $");
174 174
175#include <sys/param.h> 175#include <sys/param.h>
176#include <sys/proc.h> 176#include <sys/proc.h>
177#include <sys/intr.h> 177#include <sys/intr.h>
178#include <sys/ipi.h> 178#include <sys/ipi.h>
179#include <sys/lock.h> 179#include <sys/lock.h>
180#include <sys/mutex.h> 180#include <sys/mutex.h>
181#include <sys/kernel.h> 181#include <sys/kernel.h>
182#include <sys/kthread.h> 182#include <sys/kthread.h>
183#include <sys/evcnt.h> 183#include <sys/evcnt.h>
184#include <sys/cpu.h> 184#include <sys/cpu.h>
185#include <sys/xcall.h> 185#include <sys/xcall.h>
186#include <sys/psref.h> 186#include <sys/psref.h>
@@ -423,27 +423,28 @@ softint_establish(u_int flags, void (*fu @@ -423,27 +423,28 @@ softint_establish(u_int flags, void (*fu
423 * because we will wait for the softint to complete if it's still 423 * because we will wait for the softint to complete if it's still
424 * running. 424 * running.
425 */ 425 */
426void 426void
427softint_disestablish(void *arg) 427softint_disestablish(void *arg)
428{ 428{
429 CPU_INFO_ITERATOR cii; 429 CPU_INFO_ITERATOR cii;
430 struct cpu_info *ci; 430 struct cpu_info *ci;
431 softcpu_t *sc; 431 softcpu_t *sc;
432 softhand_t *sh; 432 softhand_t *sh;
433 uintptr_t offset; 433 uintptr_t offset;
434 434
435 offset = (uintptr_t)arg; 435 offset = (uintptr_t)arg;
436 KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u", 436 KASSERT(offset != 0);
 437 KASSERTMSG(offset < softint_bytes, "%"PRIuPTR" %u",
437 offset, softint_bytes); 438 offset, softint_bytes);
438 439
439 /* 440 /*
440 * Unregister IPI handler if there is any. Note: there is no need 441 * Unregister IPI handler if there is any. Note: there is no need
441 * to disable preemption here - ID is stable. 442 * to disable preemption here - ID is stable.
442 */ 443 */
443 sc = curcpu()->ci_data.cpu_softcpu; 444 sc = curcpu()->ci_data.cpu_softcpu;
444 sh = (softhand_t *)((uint8_t *)sc + offset); 445 sh = (softhand_t *)((uint8_t *)sc + offset);
445 if (sh->sh_ipi_id) { 446 if (sh->sh_ipi_id) {
446 ipi_unregister(sh->sh_ipi_id); 447 ipi_unregister(sh->sh_ipi_id);
447 } 448 }
448 449
449 /* 450 /*
@@ -486,27 +487,28 @@ softint_schedule(void *arg) @@ -486,27 +487,28 @@ softint_schedule(void *arg)
486 int s; 487 int s;
487 488
488 SDT_PROBE2(sdt, kernel, softint, schedule, arg, /*ci*/NULL); 489 SDT_PROBE2(sdt, kernel, softint, schedule, arg, /*ci*/NULL);
489 490
490 /* 491 /*
491 * If this assert fires, rather than disabling preemption explicitly 492 * If this assert fires, rather than disabling preemption explicitly
492 * to make it stop, consider that you are probably using a softint 493 * to make it stop, consider that you are probably using a softint
493 * when you don't need to. 494 * when you don't need to.
494 */ 495 */
495 KASSERT(kpreempt_disabled()); 496 KASSERT(kpreempt_disabled());
496 497
497 /* Find the handler record for this CPU. */ 498 /* Find the handler record for this CPU. */
498 offset = (uintptr_t)arg; 499 offset = (uintptr_t)arg;
499 KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u", 500 KASSERT(offset != 0);
 501 KASSERTMSG(offset < softint_bytes, "%"PRIuPTR" %u",
500 offset, softint_bytes); 502 offset, softint_bytes);
501 sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset); 503 sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);
502 504
503 /* If it's already pending there's nothing to do. */ 505 /* If it's already pending there's nothing to do. */
504 if ((sh->sh_flags & SOFTINT_PENDING) != 0) { 506 if ((sh->sh_flags & SOFTINT_PENDING) != 0) {
505 return; 507 return;
506 } 508 }
507 509
508 /* 510 /*
509 * Enqueue the handler into the LWP's pending list. 511 * Enqueue the handler into the LWP's pending list.
510 * If the LWP is completely idle, then make it run. 512 * If the LWP is completely idle, then make it run.
511 */ 513 */
512 s = splhigh(); 514 s = splhigh();

cvs diff -r1.268 -r1.269 src/sys/kern/kern_sysctl.c (expand / switch to unified diff)

--- src/sys/kern/kern_sysctl.c 2023/02/17 06:20:31 1.268
+++ src/sys/kern/kern_sysctl.c 2023/04/09 09:18:09 1.269
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_sysctl.c,v 1.268 2023/02/17 06:20:31 skrll Exp $ */ 1/* $NetBSD: kern_sysctl.c,v 1.269 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2003, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Brown. 8 * by Andrew Brown.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -60,27 +60,27 @@ @@ -60,27 +60,27 @@
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
61 * SUCH DAMAGE. 61 * SUCH DAMAGE.
62 * 62 *
63 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95 63 * @(#)kern_sysctl.c 8.9 (Berkeley) 5/20/95
64 */ 64 */
65 65
66/* 66/*
67 * sysctl system call. 67 * sysctl system call.
68 */ 68 */
69 69
70#define __COMPAT_SYSCTL 70#define __COMPAT_SYSCTL
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: kern_sysctl.c,v 1.268 2023/02/17 06:20:31 skrll Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: kern_sysctl.c,v 1.269 2023/04/09 09:18:09 riastradh Exp $");
74 74
75#ifdef _KERNEL_OPT 75#ifdef _KERNEL_OPT
76#include "opt_defcorename.h" 76#include "opt_defcorename.h"
77#endif 77#endif
78 78
79#include "ksyms.h" 79#include "ksyms.h"
80 80
81#include <sys/param.h> 81#include <sys/param.h>
82#include <sys/types.h> 82#include <sys/types.h>
83 83
84#include <sys/buf.h> 84#include <sys/buf.h>
85#include <sys/cprng.h> 85#include <sys/cprng.h>
86#include <sys/kauth.h> 86#include <sys/kauth.h>
@@ -2507,29 +2507,29 @@ sysctl_teardown(struct sysctllog **logp) @@ -2507,29 +2507,29 @@ sysctl_teardown(struct sysctllog **logp)
2507 struct sysctllog *log; 2507 struct sysctllog *log;
2508 uint namelen; 2508 uint namelen;
2509 int *name, t, v, error, ni; 2509 int *name, t, v, error, ni;
2510 size_t sz; 2510 size_t sz;
2511 2511
2512 if (logp == NULL || *logp == NULL) 2512 if (logp == NULL || *logp == NULL)
2513 return; 2513 return;
2514 log = *logp; 2514 log = *logp;
2515 2515
2516 rw_enter(&sysctl_treelock, RW_WRITER); 2516 rw_enter(&sysctl_treelock, RW_WRITER);
2517 memset(&node, 0, sizeof(node)); 2517 memset(&node, 0, sizeof(node));
2518 2518
2519 while (log->log_left < log->log_size) { 2519 while (log->log_left < log->log_size) {
2520 KASSERT((log->log_left + 3 < log->log_size) && 2520 KASSERT(log->log_left + 3 < log->log_size);
2521 (log->log_left + log->log_num[log->log_left + 2] <= 2521 KASSERT(log->log_left + log->log_num[log->log_left + 2] <=
2522 log->log_size)); 2522 log->log_size);
2523 v = log->log_num[log->log_left++]; 2523 v = log->log_num[log->log_left++];
2524 t = log->log_num[log->log_left++]; 2524 t = log->log_num[log->log_left++];
2525 namelen = log->log_num[log->log_left++]; 2525 namelen = log->log_num[log->log_left++];
2526 name = &log->log_num[log->log_left]; 2526 name = &log->log_num[log->log_left];
2527 2527
2528 node.sysctl_num = name[namelen - 1]; 2528 node.sysctl_num = name[namelen - 1];
2529 node.sysctl_flags = SYSCTL_VERSION|t; 2529 node.sysctl_flags = SYSCTL_VERSION|t;
2530 node.sysctl_ver = v; 2530 node.sysctl_ver = v;
2531 2531
2532 rnode = log->log_root; 2532 rnode = log->log_root;
2533 error = sysctl_locate(NULL, &name[0], namelen, &rnode, &ni); 2533 error = sysctl_locate(NULL, &name[0], namelen, &rnode, &ni);
2534 if (error == 0) { 2534 if (error == 0) {
2535 name[namelen - 1] = CTL_DESTROY; 2535 name[namelen - 1] = CTL_DESTROY;

cvs diff -r1.45 -r1.46 src/sys/kern/kern_turnstile.c (expand / switch to unified diff)

--- src/sys/kern/kern_turnstile.c 2022/10/26 23:27:16 1.45
+++ src/sys/kern/kern_turnstile.c 2023/04/09 09:18:09 1.46
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_turnstile.c,v 1.45 2022/10/26 23:27:16 riastradh Exp $ */ 1/* $NetBSD: kern_turnstile.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020 4 * Copyright (c) 2002, 2006, 2007, 2009, 2019, 2020
5 * The NetBSD Foundation, Inc. 5 * The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Jason R. Thorpe and Andrew Doran. 9 * by Jason R. Thorpe and Andrew Doran.
10 * 10 *
11 * Redistribution and use in source and binary forms, with or without 11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions 12 * modification, are permitted provided that the following conditions
13 * are met: 13 * are met:
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -51,27 +51,27 @@ @@ -51,27 +51,27 @@
51 * queue. If a thread decides it doesn't need to block after all, then this 51 * queue. If a thread decides it doesn't need to block after all, then this
52 * interlock must be released by explicitly aborting the turnstile 52 * interlock must be released by explicitly aborting the turnstile
53 * operation. 53 * operation.
54 * 54 *
55 * When a thread is awakened, it needs to get its turnstile back. If there 55 * When a thread is awakened, it needs to get its turnstile back. If there
56 * are still other threads waiting in the active turnstile, the thread 56 * are still other threads waiting in the active turnstile, the thread
57 * grabs a free turnstile off the free list. Otherwise, it can take back 57 * grabs a free turnstile off the free list. Otherwise, it can take back
58 * the active turnstile from the lock (thus deactivating the turnstile). 58 * the active turnstile from the lock (thus deactivating the turnstile).
59 * 59 *
60 * Turnstiles are where we do priority inheritence. 60 * Turnstiles are where we do priority inheritence.
61 */ 61 */
62 62
63#include <sys/cdefs.h> 63#include <sys/cdefs.h>
64__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.45 2022/10/26 23:27:16 riastradh Exp $"); 64__KERNEL_RCSID(0, "$NetBSD: kern_turnstile.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $");
65 65
66#include <sys/param.h> 66#include <sys/param.h>
67#include <sys/lockdebug.h> 67#include <sys/lockdebug.h>
68#include <sys/pool.h> 68#include <sys/pool.h>
69#include <sys/proc.h> 69#include <sys/proc.h>
70#include <sys/sleepq.h> 70#include <sys/sleepq.h>
71#include <sys/sleeptab.h> 71#include <sys/sleeptab.h>
72#include <sys/systm.h> 72#include <sys/systm.h>
73 73
74/* 74/*
75 * Shift of 6 aligns to typical cache line size of 64 bytes; there's no 75 * Shift of 6 aligns to typical cache line size of 64 bytes; there's no
76 * point having two turnstile locks to back two lock objects that share one 76 * point having two turnstile locks to back two lock objects that share one
77 * cache line. 77 * cache line.
@@ -375,37 +375,38 @@ turnstile_block(turnstile_t *ts, int q,  @@ -375,37 +375,38 @@ turnstile_block(turnstile_t *ts, int q,
375 turnstile_t *ots; 375 turnstile_t *ots;
376 tschain_t *tc; 376 tschain_t *tc;
377 kmutex_t *lock; 377 kmutex_t *lock;
378 sleepq_t *sq; 378 sleepq_t *sq;
379 pri_t obase; 379 pri_t obase;
380 u_int hash; 380 u_int hash;
381 381
382 hash = TS_HASH(obj); 382 hash = TS_HASH(obj);
383 tc = &turnstile_chains[hash]; 383 tc = &turnstile_chains[hash];
384 lock = &turnstile_locks[hash].lock; 384 lock = &turnstile_locks[hash].lock;
385 385
386 KASSERT(q == TS_READER_Q || q == TS_WRITER_Q); 386 KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
387 KASSERT(mutex_owned(lock)); 387 KASSERT(mutex_owned(lock));
388 KASSERT(l != NULL && l->l_ts != NULL); 388 KASSERT(l != NULL);
 389 KASSERT(l->l_ts != NULL);
389 390
390 if (ts == NULL) { 391 if (ts == NULL) {
391 /* 392 /*
392 * We are the first thread to wait for this object; 393 * We are the first thread to wait for this object;
393 * lend our turnstile to it. 394 * lend our turnstile to it.
394 */ 395 */
395 ts = l->l_ts; 396 ts = l->l_ts;
396 KASSERT(TS_ALL_WAITERS(ts) == 0); 397 KASSERT(TS_ALL_WAITERS(ts) == 0);
397 KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]) && 398 KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_READER_Q]));
398 LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q])); 399 KASSERT(LIST_EMPTY(&ts->ts_sleepq[TS_WRITER_Q]));
399 ts->ts_obj = obj; 400 ts->ts_obj = obj;
400 ts->ts_inheritor = NULL; 401 ts->ts_inheritor = NULL;
401 LIST_INSERT_HEAD(tc, ts, ts_chain); 402 LIST_INSERT_HEAD(tc, ts, ts_chain);
402 } else { 403 } else {
403 /* 404 /*
404 * Object already has a turnstile. Put our turnstile 405 * Object already has a turnstile. Put our turnstile
405 * onto the free list, and reference the existing 406 * onto the free list, and reference the existing
406 * turnstile instead. 407 * turnstile instead.
407 */ 408 */
408 ots = l->l_ts; 409 ots = l->l_ts;
409 KASSERT(ots->ts_free == NULL); 410 KASSERT(ots->ts_free == NULL);
410 ots->ts_free = ts->ts_free; 411 ots->ts_free = ts->ts_free;
411 ts->ts_free = ots; 412 ts->ts_free = ots;
@@ -449,27 +450,28 @@ turnstile_block(turnstile_t *ts, int q,  @@ -449,27 +450,28 @@ turnstile_block(turnstile_t *ts, int q,
449void 450void
450turnstile_wakeup(turnstile_t *ts, int q, int count, lwp_t *nl) 451turnstile_wakeup(turnstile_t *ts, int q, int count, lwp_t *nl)
451{ 452{
452 sleepq_t *sq; 453 sleepq_t *sq;
453 kmutex_t *lock; 454 kmutex_t *lock;
454 u_int hash; 455 u_int hash;
455 lwp_t *l; 456 lwp_t *l;
456 457
457 hash = TS_HASH(ts->ts_obj); 458 hash = TS_HASH(ts->ts_obj);
458 lock = &turnstile_locks[hash].lock; 459 lock = &turnstile_locks[hash].lock;
459 sq = &ts->ts_sleepq[q]; 460 sq = &ts->ts_sleepq[q];
460 461
461 KASSERT(q == TS_READER_Q || q == TS_WRITER_Q); 462 KASSERT(q == TS_READER_Q || q == TS_WRITER_Q);
462 KASSERT(count > 0 && count <= TS_WAITERS(ts, q)); 463 KASSERT(count > 0);
 464 KASSERT(count <= TS_WAITERS(ts, q));
463 KASSERT(mutex_owned(lock)); 465 KASSERT(mutex_owned(lock));
464 KASSERT(ts->ts_inheritor == curlwp || ts->ts_inheritor == NULL); 466 KASSERT(ts->ts_inheritor == curlwp || ts->ts_inheritor == NULL);
465 467
466 /* 468 /*
467 * restore inherited priority if necessary. 469 * restore inherited priority if necessary.
468 */ 470 */
469 471
470 if (ts->ts_inheritor != NULL) { 472 if (ts->ts_inheritor != NULL) {
471 turnstile_unlendpri(ts); 473 turnstile_unlendpri(ts);
472 } 474 }
473 475
474 if (nl != NULL) { 476 if (nl != NULL) {
475#if defined(DEBUG) || defined(LOCKDEBUG) 477#if defined(DEBUG) || defined(LOCKDEBUG)

cvs diff -r1.45 -r1.46 src/sys/kern/tty_ptm.c (expand / switch to unified diff)

--- src/sys/kern/tty_ptm.c 2022/09/29 12:18:27 1.45
+++ src/sys/kern/tty_ptm.c 2023/04/09 09:18:09 1.46
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: tty_ptm.c,v 1.45 2022/09/29 12:18:27 christos Exp $ */ 1/* $NetBSD: tty_ptm.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2004, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2004, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: tty_ptm.c,v 1.45 2022/09/29 12:18:27 christos Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: tty_ptm.c,v 1.46 2023/04/09 09:18:09 riastradh Exp $");
31 31
32#ifdef _KERNEL_OPT 32#ifdef _KERNEL_OPT
33#include "opt_compat_netbsd.h" 33#include "opt_compat_netbsd.h"
34#include "opt_ptm.h" 34#include "opt_ptm.h"
35#endif 35#endif
36 36
37/* pty multiplexor driver /dev/ptm{,x} */ 37/* pty multiplexor driver /dev/ptm{,x} */
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40#include <sys/systm.h> 40#include <sys/systm.h>
41#include <sys/ioctl.h> 41#include <sys/ioctl.h>
42#include <sys/proc.h> 42#include <sys/proc.h>
43#include <sys/tty.h> 43#include <sys/tty.h>
@@ -132,27 +132,28 @@ int @@ -132,27 +132,28 @@ int
132pty_vn_open(struct vnode *vp, struct lwp *l) 132pty_vn_open(struct vnode *vp, struct lwp *l)
133{ 133{
134 int error; 134 int error;
135 135
136 if (vp->v_type != VCHR) { 136 if (vp->v_type != VCHR) {
137 vput(vp); 137 vput(vp);
138 return EINVAL; 138 return EINVAL;
139 } 139 }
140 140
141 error = VOP_OPEN(vp, FREAD|FWRITE, lwp0.l_cred); 141 error = VOP_OPEN(vp, FREAD|FWRITE, lwp0.l_cred);
142 142
143 if (error) { 143 if (error) {
144 /* only ptys mean we can't get these */ 144 /* only ptys mean we can't get these */
145 KASSERT(error != EDUPFD && error != EMOVEFD); 145 KASSERT(error != EDUPFD);
 146 KASSERT(error != EMOVEFD);
146 vput(vp); 147 vput(vp);
147 return error; 148 return error;
148 } 149 }
149 150
150 mutex_enter(vp->v_interlock); 151 mutex_enter(vp->v_interlock);
151 vp->v_writecount++; 152 vp->v_writecount++;
152 mutex_exit(vp->v_interlock); 153 mutex_exit(vp->v_interlock);
153 154
154 return 0; 155 return 0;
155} 156}
156 157
157static int 158static int
158pty_alloc_master(struct lwp *l, int *fd, dev_t *dev, struct mount *mp, 159pty_alloc_master(struct lwp *l, int *fd, dev_t *dev, struct mount *mp,

cvs diff -r1.26 -r1.27 src/sys/kern/kern_veriexec.c (expand / switch to unified diff)

--- src/sys/kern/kern_veriexec.c 2020/06/11 02:30:21 1.26
+++ src/sys/kern/kern_veriexec.c 2023/04/09 09:18:09 1.27
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_veriexec.c,v 1.26 2020/06/11 02:30:21 thorpej Exp $ */ 1/* $NetBSD: kern_veriexec.c,v 1.27 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2005, 2006 Elad Efrat <elad@NetBSD.org> 4 * Copyright (c) 2005, 2006 Elad Efrat <elad@NetBSD.org>
5 * Copyright (c) 2005, 2006 Brett Lymn <blymn@NetBSD.org> 5 * Copyright (c) 2005, 2006 Brett Lymn <blymn@NetBSD.org>
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -19,27 +19,27 @@ @@ -19,27 +19,27 @@
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31#include <sys/cdefs.h> 31#include <sys/cdefs.h>
32__KERNEL_RCSID(0, "$NetBSD: kern_veriexec.c,v 1.26 2020/06/11 02:30:21 thorpej Exp $"); 32__KERNEL_RCSID(0, "$NetBSD: kern_veriexec.c,v 1.27 2023/04/09 09:18:09 riastradh Exp $");
33 33
34#include "opt_veriexec.h" 34#include "opt_veriexec.h"
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/mount.h> 37#include <sys/mount.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/vnode.h> 39#include <sys/vnode.h>
40#include <sys/namei.h> 40#include <sys/namei.h>
41#include <sys/once.h> 41#include <sys/once.h>
42#include <sys/proc.h> 42#include <sys/proc.h>
43#include <sys/rwlock.h> 43#include <sys/rwlock.h>
44#include <sys/syslog.h> 44#include <sys/syslog.h>
45#include <sys/sysctl.h> 45#include <sys/sysctl.h>
@@ -220,28 +220,31 @@ SYSCTL_SETUP(sysctl_kern_veriexec_setup, @@ -220,28 +220,31 @@ SYSCTL_SETUP(sysctl_kern_veriexec_setup,
220 CTL_CREATE, CTL_EOL); 220 CTL_CREATE, CTL_EOL);
221} 221}
222 222
223/* 223/*
224 * Add ops to the fingerprint ops vector list. 224 * Add ops to the fingerprint ops vector list.
225 */ 225 */
226int 226int
227veriexec_fpops_add(const char *fp_type, size_t hash_len, size_t ctx_size, 227veriexec_fpops_add(const char *fp_type, size_t hash_len, size_t ctx_size,
228 veriexec_fpop_init_t init, veriexec_fpop_update_t update, 228 veriexec_fpop_init_t init, veriexec_fpop_update_t update,
229 veriexec_fpop_final_t final) 229 veriexec_fpop_final_t final)
230{ 230{
231 struct veriexec_fpops *ops; 231 struct veriexec_fpops *ops;
232 232
233 KASSERT((init != NULL) && (update != NULL) && (final != NULL)); 233 KASSERT(init != NULL);
234 KASSERT((hash_len != 0) && (ctx_size != 0)); 234 KASSERT(update != NULL);
 235 KASSERT(final != NULL);
 236 KASSERT(hash_len != 0);
 237 KASSERT(ctx_size != 0);
235 KASSERT(fp_type != NULL); 238 KASSERT(fp_type != NULL);
236 239
237 if (veriexec_fpops_lookup(fp_type) != NULL) 240 if (veriexec_fpops_lookup(fp_type) != NULL)
238 return (EEXIST); 241 return (EEXIST);
239 242
240 ops = kmem_alloc(sizeof(*ops), KM_SLEEP); 243 ops = kmem_alloc(sizeof(*ops), KM_SLEEP);
241 ops->type = fp_type; 244 ops->type = fp_type;
242 ops->hash_len = hash_len; 245 ops->hash_len = hash_len;
243 ops->context_size = ctx_size; 246 ops->context_size = ctx_size;
244 ops->init = init; 247 ops->init = init;
245 ops->update = update; 248 ops->update = update;
246 ops->final = final; 249 ops->final = final;
247 250

cvs diff -r1.27 -r1.28 src/sys/kern/subr_asan.c (expand / switch to unified diff)

--- src/sys/kern/subr_asan.c 2020/12/18 15:33:34 1.27
+++ src/sys/kern/subr_asan.c 2023/04/09 09:18:09 1.28
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_asan.c,v 1.27 2020/12/18 15:33:34 martin Exp $ */ 1/* $NetBSD: subr_asan.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net 4 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is part of the KASAN subsystem of the NetBSD kernel. 7 * This code is part of the KASAN subsystem of the NetBSD kernel.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -19,27 +19,27 @@ @@ -19,27 +19,27 @@
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE. 28 * SUCH DAMAGE.
29 */ 29 */
30 30
31#include <sys/cdefs.h> 31#include <sys/cdefs.h>
32__KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.27 2020/12/18 15:33:34 martin Exp $"); 32__KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $");
33 33
34#include <sys/param.h> 34#include <sys/param.h>
35#include <sys/device.h> 35#include <sys/device.h>
36#include <sys/kernel.h> 36#include <sys/kernel.h>
37#include <sys/param.h> 37#include <sys/param.h>
38#include <sys/conf.h> 38#include <sys/conf.h>
39#include <sys/systm.h> 39#include <sys/systm.h>
40#include <sys/types.h> 40#include <sys/types.h>
41#include <sys/asan.h> 41#include <sys/asan.h>
42 42
43#include <uvm/uvm_extern.h> 43#include <uvm/uvm_extern.h>
44 44
45#ifdef DDB 45#ifdef DDB
@@ -105,27 +105,28 @@ kasan_shadow_map(void *addr, size_t size @@ -105,27 +105,28 @@ kasan_shadow_map(void *addr, size_t size
105 105
106 KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0); 106 KASSERT((vaddr_t)addr % KASAN_SHADOW_SCALE_SIZE == 0);
107 107
108 sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE; 108 sz = roundup(size, KASAN_SHADOW_SCALE_SIZE) / KASAN_SHADOW_SCALE_SIZE;
109 109
110 sva = (vaddr_t)kasan_md_addr_to_shad(addr); 110 sva = (vaddr_t)kasan_md_addr_to_shad(addr);
111 eva = (vaddr_t)kasan_md_addr_to_shad(addr) + sz; 111 eva = (vaddr_t)kasan_md_addr_to_shad(addr) + sz;
112 112
113 sva = rounddown(sva, PAGE_SIZE); 113 sva = rounddown(sva, PAGE_SIZE);
114 eva = roundup(eva, PAGE_SIZE); 114 eva = roundup(eva, PAGE_SIZE);
115 115
116 npages = (eva - sva) / PAGE_SIZE; 116 npages = (eva - sva) / PAGE_SIZE;
117 117
118 KASSERT(sva >= KASAN_MD_SHADOW_START && eva < KASAN_MD_SHADOW_END); 118 KASSERT(sva >= KASAN_MD_SHADOW_START);
 119 KASSERT(eva < KASAN_MD_SHADOW_END);
119 120
120 for (i = 0; i < npages; i++) { 121 for (i = 0; i < npages; i++) {
121 kasan_md_shadow_map_page(sva + i * PAGE_SIZE); 122 kasan_md_shadow_map_page(sva + i * PAGE_SIZE);
122 } 123 }
123} 124}
124 125
125static void 126static void
126kasan_ctors(void) 127kasan_ctors(void)
127{ 128{
128 extern Elf_Addr __CTOR_LIST__, __CTOR_END__; 129 extern Elf_Addr __CTOR_LIST__, __CTOR_END__;
129 size_t nentries, i; 130 size_t nentries, i;
130 Elf_Addr *ptr; 131 Elf_Addr *ptr;
131 132

cvs diff -r1.27 -r1.28 src/sys/kern/subr_pcu.c (expand / switch to unified diff)

--- src/sys/kern/subr_pcu.c 2022/10/26 23:38:57 1.27
+++ src/sys/kern/subr_pcu.c 2023/04/09 09:18:09 1.28
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_pcu.c,v 1.27 2022/10/26 23:38:57 riastradh Exp $ */ 1/* $NetBSD: subr_pcu.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011, 2014 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011, 2014 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Mindaugas Rasiukevicius. 8 * by Mindaugas Rasiukevicius.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -42,27 +42,27 @@ @@ -42,27 +42,27 @@
42 * for a PCU release can be from a) the owner LWP (regardless whether 42 * for a PCU release can be from a) the owner LWP (regardless whether
43 * the PCU state is on the current CPU or remote CPU) b) any other LWP 43 * the PCU state is on the current CPU or remote CPU) b) any other LWP
44 * running on that CPU (in such case, the owner LWP is on a remote CPU 44 * running on that CPU (in such case, the owner LWP is on a remote CPU
45 * or sleeping). 45 * or sleeping).
46 * 46 *
47 * In any case, the PCU state can *only* be changed from the current 47 * In any case, the PCU state can *only* be changed from the current
48 * CPU. If said PCU state is on the remote CPU, a cross-call will be 48 * CPU. If said PCU state is on the remote CPU, a cross-call will be
49 * sent by the owner LWP. Therefore struct cpu_info::ci_pcu_curlwp[id] 49 * sent by the owner LWP. Therefore struct cpu_info::ci_pcu_curlwp[id]
50 * may only be changed by the current CPU and lwp_t::l_pcu_cpu[id] may 50 * may only be changed by the current CPU and lwp_t::l_pcu_cpu[id] may
51 * only be cleared by the CPU which has the PCU state loaded. 51 * only be cleared by the CPU which has the PCU state loaded.
52 */ 52 */
53 53
54#include <sys/cdefs.h> 54#include <sys/cdefs.h>
55__KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.27 2022/10/26 23:38:57 riastradh Exp $"); 55__KERNEL_RCSID(0, "$NetBSD: subr_pcu.c,v 1.28 2023/04/09 09:18:09 riastradh Exp $");
56 56
57#include <sys/param.h> 57#include <sys/param.h>
58#include <sys/cpu.h> 58#include <sys/cpu.h>
59#include <sys/lwp.h> 59#include <sys/lwp.h>
60#include <sys/pcu.h> 60#include <sys/pcu.h>
61#include <sys/ipi.h> 61#include <sys/ipi.h>
62 62
63#if PCU_UNIT_COUNT > 0 63#if PCU_UNIT_COUNT > 0
64 64
65static inline void pcu_do_op(const pcu_ops_t *, lwp_t * const, const int); 65static inline void pcu_do_op(const pcu_ops_t *, lwp_t * const, const int);
66static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, const int); 66static void pcu_lwp_op(const pcu_ops_t *, lwp_t *, const int);
67 67
68/* 68/*
@@ -298,27 +298,28 @@ pcu_lwp_op(const pcu_ops_t *pcu, lwp_t * @@ -298,27 +298,28 @@ pcu_lwp_op(const pcu_ops_t *pcu, lwp_t *
298} 298}
299 299
300/* 300/*
301 * pcu_load: load/initialize the PCU state of current LWP on current CPU. 301 * pcu_load: load/initialize the PCU state of current LWP on current CPU.
302 */ 302 */
303void 303void
304pcu_load(const pcu_ops_t *pcu) 304pcu_load(const pcu_ops_t *pcu)
305{ 305{
306 lwp_t *oncpu_lwp, * const l = curlwp; 306 lwp_t *oncpu_lwp, * const l = curlwp;
307 const u_int id = pcu->pcu_id; 307 const u_int id = pcu->pcu_id;
308 struct cpu_info *ci, *curci; 308 struct cpu_info *ci, *curci;
309 int s; 309 int s;
310 310
311 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 311 KASSERT(!cpu_intr_p());
 312 KASSERT(!cpu_softintr_p());
312 313
313 s = splpcu(); 314 s = splpcu();
314 curci = curcpu(); 315 curci = curcpu();
315 ci = l->l_pcu_cpu[id]; 316 ci = l->l_pcu_cpu[id];
316 317
317 /* Does this CPU already have our PCU state loaded? */ 318 /* Does this CPU already have our PCU state loaded? */
318 if (ci == curci) { 319 if (ci == curci) {
319 /* 320 /*
320 * Fault reoccurred while the PCU state is loaded and 321 * Fault reoccurred while the PCU state is loaded and
 321 * therefore PCU should be re-enabled. This happens 322 * therefore PCU should be re-enabled. This happens
322 * if LWP is context switched to another CPU and then 323 * if LWP is context switched to another CPU and then
323 * switched back to the original CPU while the state 324 * switched back to the original CPU while the state
324 * on that CPU has not been changed by other LWPs. 325 * on that CPU has not been changed by other LWPs.
@@ -368,48 +369,50 @@ pcu_load(const pcu_ops_t *pcu) @@ -368,48 +369,50 @@ pcu_load(const pcu_ops_t *pcu)
368 l->l_pcu_valid |= (1U << id); 369 l->l_pcu_valid |= (1U << id);
369 splx(s); 370 splx(s);
370} 371}
371 372
372/* 373/*
373 * pcu_discard: discard the PCU state of the given LWP. If "valid" 374 * pcu_discard: discard the PCU state of the given LWP. If "valid"
374 * parameter is true, then keep considering the PCU state as valid. 375 * parameter is true, then keep considering the PCU state as valid.
375 */ 376 */
376void 377void
377pcu_discard(const pcu_ops_t *pcu, lwp_t *l, bool valid) 378pcu_discard(const pcu_ops_t *pcu, lwp_t *l, bool valid)
378{ 379{
379 const u_int id = pcu->pcu_id; 380 const u_int id = pcu->pcu_id;
380 381
381 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 382 KASSERT(!cpu_intr_p());
 383 KASSERT(!cpu_softintr_p());
382 384
383 if (__predict_false(valid)) { 385 if (__predict_false(valid)) {
384 l->l_pcu_valid |= (1U << id); 386 l->l_pcu_valid |= (1U << id);
385 } else { 387 } else {
386 l->l_pcu_valid &= ~(1U << id); 388 l->l_pcu_valid &= ~(1U << id);
387 } 389 }
388 if (__predict_true(l->l_pcu_cpu[id] == NULL)) { 390 if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
389 return; 391 return;
390 } 392 }
391 pcu_lwp_op(pcu, l, PCU_CMD_RELEASE); 393 pcu_lwp_op(pcu, l, PCU_CMD_RELEASE);
392} 394}
393 395
394/* 396/*
395 * pcu_save_lwp: save PCU state to the given LWP. 397 * pcu_save_lwp: save PCU state to the given LWP.
396 */ 398 */
397void 399void
398pcu_save(const pcu_ops_t *pcu, lwp_t *l) 400pcu_save(const pcu_ops_t *pcu, lwp_t *l)
399{ 401{
400 const u_int id = pcu->pcu_id; 402 const u_int id = pcu->pcu_id;
401 403
402 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 404 KASSERT(!cpu_intr_p());
 405 KASSERT(!cpu_softintr_p());
403 406
404 if (__predict_true(l->l_pcu_cpu[id] == NULL)) { 407 if (__predict_true(l->l_pcu_cpu[id] == NULL)) {
405 return; 408 return;
406 } 409 }
407 pcu_lwp_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE); 410 pcu_lwp_op(pcu, l, PCU_CMD_SAVE | PCU_CMD_RELEASE);
408} 411}
409 412
410/* 413/*
411 * pcu_save_all_on_cpu: save all PCU states on the current CPU. 414 * pcu_save_all_on_cpu: save all PCU states on the current CPU.
412 */ 415 */
413void 416void
414pcu_save_all_on_cpu(void) 417pcu_save_all_on_cpu(void)
415{ 418{

cvs diff -r1.9 -r1.10 src/sys/kern/subr_cpufreq.c (expand / switch to unified diff)

--- src/sys/kern/subr_cpufreq.c 2014/02/12 20:20:15 1.9
+++ src/sys/kern/subr_cpufreq.c 2023/04/09 09:18:09 1.10
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_cpufreq.c,v 1.9 2014/02/12 20:20:15 martin Exp $ */ 1/* $NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jukka Ruohonen. 8 * by Jukka Ruohonen.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 13 *
14 * 1. Redistributions of source code must retain the above copyright 14 * 1. Redistributions of source code must retain the above copyright
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE. 30 * POSSIBILITY OF SUCH DAMAGE.
31 */ 31 */
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.9 2014/02/12 20:20:15 martin Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: subr_cpufreq.c,v 1.10 2023/04/09 09:18:09 riastradh Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/cpu.h> 36#include <sys/cpu.h>
37#include <sys/cpufreq.h> 37#include <sys/cpufreq.h>
38#include <sys/kernel.h> 38#include <sys/kernel.h>
39#include <sys/kmem.h> 39#include <sys/kmem.h>
40#include <sys/mutex.h> 40#include <sys/mutex.h>
41#include <sys/time.h> 41#include <sys/time.h>
42#include <sys/xcall.h> 42#include <sys/xcall.h>
43 43
44static int cpufreq_latency(void); 44static int cpufreq_latency(void);
45static uint32_t cpufreq_get_max(void); 45static uint32_t cpufreq_get_max(void);
46static uint32_t cpufreq_get_min(void); 46static uint32_t cpufreq_get_min(void);
@@ -368,27 +368,28 @@ cpufreq_get_state_index(uint32_t index,  @@ -368,27 +368,28 @@ cpufreq_get_state_index(uint32_t index,
368 memcpy(cfs, &cf->cf_state[index], sizeof(*cfs)); 368 memcpy(cfs, &cf->cf_state[index], sizeof(*cfs));
369 mutex_exit(&cpufreq_lock); 369 mutex_exit(&cpufreq_lock);
370 370
371 return 0; 371 return 0;
372} 372}
373 373
374static void 374static void
375cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs) 375cpufreq_get_state_raw(uint32_t freq, struct cpufreq_state *cfs)
376{ 376{
377 struct cpufreq *cf = cf_backend; 377 struct cpufreq *cf = cf_backend;
378 uint32_t f, hi, i = 0, lo = 0; 378 uint32_t f, hi, i = 0, lo = 0;
379 379
380 KASSERT(mutex_owned(&cpufreq_lock) != 0); 380 KASSERT(mutex_owned(&cpufreq_lock) != 0);
381 KASSERT(cf->cf_init != false && cfs != NULL); 381 KASSERT(cf->cf_init != false);
 382 KASSERT(cfs != NULL);
382 383
383 hi = cf->cf_state_count; 384 hi = cf->cf_state_count;
384 385
385 while (lo < hi) { 386 while (lo < hi) {
386 387
387 i = (lo + hi) >> 1; 388 i = (lo + hi) >> 1;
388 f = cf->cf_state[i].cfs_freq; 389 f = cf->cf_state[i].cfs_freq;
389 390
390 if (freq == f) 391 if (freq == f)
391 break; 392 break;
392 else if (freq > f) 393 else if (freq > f)
393 hi = i; 394 hi = i;
394 else { 395 else {

cvs diff -r1.14 -r1.15 src/sys/kern/subr_kcpuset.c (expand / switch to unified diff)

--- src/sys/kern/subr_kcpuset.c 2022/04/09 23:38:33 1.14
+++ src/sys/kern/subr_kcpuset.c 2023/04/09 09:18:09 1.15
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_kcpuset.c,v 1.14 2022/04/09 23:38:33 riastradh Exp $ */ 1/* $NetBSD: subr_kcpuset.c,v 1.15 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Mindaugas Rasiukevicius. 8 * by Mindaugas Rasiukevicius.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -31,27 +31,27 @@ @@ -31,27 +31,27 @@
31 31
32/* 32/*
33 * Kernel CPU set implementation. 33 * Kernel CPU set implementation.
34 * 34 *
35 * Interface can be used by kernel subsystems as a unified dynamic CPU 35 * Interface can be used by kernel subsystems as a unified dynamic CPU
36 * bitset implementation handling many CPUs. Facility also supports early 36 * bitset implementation handling many CPUs. Facility also supports early
37 * use by MD code on boot, as it fixups bitsets on further boot. 37 * use by MD code on boot, as it fixups bitsets on further boot.
38 * 38 *
39 * TODO: 39 * TODO:
40 * - Handle "reverse" bitset on fixup/grow. 40 * - Handle "reverse" bitset on fixup/grow.
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.14 2022/04/09 23:38:33 riastradh Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.15 2023/04/09 09:18:09 riastradh Exp $");
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47#include <sys/types.h> 47#include <sys/types.h>
48 48
49#include <sys/atomic.h> 49#include <sys/atomic.h>
50#include <sys/sched.h> 50#include <sys/sched.h>
51#include <sys/kcpuset.h> 51#include <sys/kcpuset.h>
52#include <sys/pool.h> 52#include <sys/pool.h>
53 53
54/* Number of CPUs to support. */ 54/* Number of CPUs to support. */
55#define KC_MAXCPUS roundup2(MAXCPUS, 32) 55#define KC_MAXCPUS roundup2(MAXCPUS, 32)
56 56
57/* 57/*
@@ -104,27 +104,28 @@ static kcpuset_t * kcpuset_create_raw(b @@ -104,27 +104,28 @@ static kcpuset_t * kcpuset_create_raw(b
104/* 104/*
105 * kcpuset_sysinit: initialize the subsystem, transfer early boot cases 105 * kcpuset_sysinit: initialize the subsystem, transfer early boot cases
106 * to dynamically allocated sets. 106 * to dynamically allocated sets.
107 */ 107 */
108void 108void
109kcpuset_sysinit(void) 109kcpuset_sysinit(void)
110{ 110{
111 kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp; 111 kcpuset_t *kc_dynamic[KC_SAVE_NITEMS], *kcp;
112 int i, s; 112 int i, s;
113 113
114 /* Set a kcpuset_t sizes. */ 114 /* Set a kcpuset_t sizes. */
115 kc_nfields = (KC_MAXCPUS >> KC_SHIFT); 115 kc_nfields = (KC_MAXCPUS >> KC_SHIFT);
116 kc_bitsize = sizeof(uint32_t) * kc_nfields; 116 kc_bitsize = sizeof(uint32_t) * kc_nfields;
117 KASSERT(kc_nfields != 0 && kc_bitsize != 0); 117 KASSERT(kc_nfields != 0);
 118 KASSERT(kc_bitsize != 0);
118 119
119 kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize, 120 kc_cache = pool_cache_init(sizeof(kcpuset_impl_t) + kc_bitsize,
120 coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL); 121 coherency_unit, 0, 0, "kcpuset", NULL, IPL_NONE, NULL, NULL, NULL);
121 122
122 /* First, pre-allocate kcpuset entries. */ 123 /* First, pre-allocate kcpuset entries. */
123 for (i = 0; i < kc_last_idx; i++) { 124 for (i = 0; i < kc_last_idx; i++) {
124 kcp = kcpuset_create_raw(true); 125 kcp = kcpuset_create_raw(true);
125 kc_dynamic[i] = kcp; 126 kc_dynamic[i] = kcp;
126 } 127 }
127 128
128 /* 129 /*
129 * Prepare to convert all early noted kcpuset uses to dynamic sets. 130 * Prepare to convert all early noted kcpuset uses to dynamic sets.
130 * All processors, except the one we are currently running (primary), 131 * All processors, except the one we are currently running (primary),

cvs diff -r1.287 -r1.288 src/sys/kern/subr_pool.c (expand / switch to unified diff)

--- src/sys/kern/subr_pool.c 2023/02/24 11:02:27 1.287
+++ src/sys/kern/subr_pool.c 2023/04/09 09:18:09 1.288
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_pool.c,v 1.287 2023/02/24 11:02:27 riastradh Exp $ */ 1/* $NetBSD: subr_pool.c,v 1.288 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018, 4 * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010, 2014, 2015, 2018,
5 * 2020, 2021 The NetBSD Foundation, Inc. 5 * 2020, 2021 The NetBSD Foundation, Inc.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software contributed to The NetBSD Foundation 8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace 9 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
10 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by 10 * Simulation Facility, NASA Ames Research Center; by Andrew Doran, and by
11 * Maxime Villard. 11 * Maxime Villard.
12 * 12 *
13 * Redistribution and use in source and binary forms, with or without 13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions 14 * modification, are permitted provided that the following conditions
@@ -23,27 +23,27 @@ @@ -23,27 +23,27 @@
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE. 32 * POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.287 2023/02/24 11:02:27 riastradh Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.288 2023/04/09 09:18:09 riastradh Exp $");
37 37
38#ifdef _KERNEL_OPT 38#ifdef _KERNEL_OPT
39#include "opt_ddb.h" 39#include "opt_ddb.h"
40#include "opt_lockdebug.h" 40#include "opt_lockdebug.h"
41#include "opt_pool.h" 41#include "opt_pool.h"
42#endif 42#endif
43 43
44#include <sys/param.h> 44#include <sys/param.h>
45#include <sys/systm.h> 45#include <sys/systm.h>
46#include <sys/sysctl.h> 46#include <sys/sysctl.h>
47#include <sys/bitops.h> 47#include <sys/bitops.h>
48#include <sys/proc.h> 48#include <sys/proc.h>
49#include <sys/errno.h> 49#include <sys/errno.h>
@@ -1643,27 +1643,28 @@ pool_nput(struct pool *pp) @@ -1643,27 +1643,28 @@ pool_nput(struct pool *pp)
1643 * 1643 *
1644 * Must not be called from interrupt context. 1644 * Must not be called from interrupt context.
1645 */ 1645 */
1646int 1646int
1647pool_reclaim(struct pool *pp) 1647pool_reclaim(struct pool *pp)
1648{ 1648{
1649 struct pool_item_header *ph, *phnext; 1649 struct pool_item_header *ph, *phnext;
1650 struct pool_pagelist pq; 1650 struct pool_pagelist pq;
1651 struct pool_cache *pc; 1651 struct pool_cache *pc;
1652 uint32_t curtime; 1652 uint32_t curtime;
1653 bool klock; 1653 bool klock;
1654 int rv; 1654 int rv;
1655 1655
1656 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 1656 KASSERT(!cpu_intr_p());
 1657 KASSERT(!cpu_softintr_p());
1657 1658
1658 if (pp->pr_drain_hook != NULL) { 1659 if (pp->pr_drain_hook != NULL) {
1659 /* 1660 /*
1660 * The drain hook must be called with the pool unlocked. 1661 * The drain hook must be called with the pool unlocked.
1661 */ 1662 */
1662 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); 1663 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1663 } 1664 }
1664 1665
1665 /* 1666 /*
1666 * XXXSMP Because we do not want to cause non-MPSAFE code 1667 * XXXSMP Because we do not want to cause non-MPSAFE code
1667 * to block. 1668 * to block.
1668 */ 1669 */
1669 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || 1670 if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
@@ -2397,27 +2398,28 @@ pool_cache_invalidate_groups(pool_cache_ @@ -2397,27 +2398,28 @@ pool_cache_invalidate_groups(pool_cache_
2397 * is an assumption that another level of synchronization is occurring 2398 * is an assumption that another level of synchronization is occurring
2398 * between the input to the constructor and the cache invalidation. 2399 * between the input to the constructor and the cache invalidation.
2399 * 2400 *
2400 * Invalidation is a costly process and should not be called from 2401 * Invalidation is a costly process and should not be called from
2401 * interrupt context. 2402 * interrupt context.
2402 */ 2403 */
2403void 2404void
2404pool_cache_invalidate(pool_cache_t pc) 2405pool_cache_invalidate(pool_cache_t pc)
2405{ 2406{
2406 uint64_t where; 2407 uint64_t where;
2407 pcg_t *pcg; 2408 pcg_t *pcg;
2408 int n, s; 2409 int n, s;
2409 2410
2410 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 2411 KASSERT(!cpu_intr_p());
 2412 KASSERT(!cpu_softintr_p());
2411 2413
2412 if (ncpu < 2 || !mp_online) { 2414 if (ncpu < 2 || !mp_online) {
2413 /* 2415 /*
2414 * We might be called early enough in the boot process 2416 * We might be called early enough in the boot process
2415 * for the CPU data structures to not be fully initialized. 2417 * for the CPU data structures to not be fully initialized.
2416 * In this case, transfer the content of the local CPU's 2418 * In this case, transfer the content of the local CPU's
2417 * cache back into global cache as only this CPU is currently 2419 * cache back into global cache as only this CPU is currently
2418 * running. 2420 * running.
2419 */ 2421 */
2420 pool_cache_transfer(pc); 2422 pool_cache_transfer(pc);
2421 } else { 2423 } else {
2422 /* 2424 /*
2423 * Signal all CPUs that they must transfer their local 2425 * Signal all CPUs that they must transfer their local

cvs diff -r1.198 -r1.199 src/sys/kern/subr_prf.c (expand / switch to unified diff)

--- src/sys/kern/subr_prf.c 2023/02/07 09:25:51 1.198
+++ src/sys/kern/subr_prf.c 2023/04/09 09:18:09 1.199
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_prf.c,v 1.198 2023/02/07 09:25:51 macallan Exp $ */ 1/* $NetBSD: subr_prf.c,v 1.199 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1986, 1988, 1991, 1993 4 * Copyright (c) 1986, 1988, 1991, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc. 6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed 7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph 8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc. 10 * the permission of UNIX System Laboratories, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -27,27 +27,27 @@ @@ -27,27 +27,27 @@
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * @(#)subr_prf.c 8.4 (Berkeley) 5/4/95 36 * @(#)subr_prf.c 8.4 (Berkeley) 5/4/95
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: subr_prf.c,v 1.198 2023/02/07 09:25:51 macallan Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: subr_prf.c,v 1.199 2023/04/09 09:18:09 riastradh Exp $");
41 41
42#ifdef _KERNEL_OPT 42#ifdef _KERNEL_OPT
43#include "opt_ddb.h" 43#include "opt_ddb.h"
44#include "opt_kgdb.h" 44#include "opt_kgdb.h"
45#include "opt_dump.h" 45#include "opt_dump.h"
46#include "opt_rnd_printf.h" 46#include "opt_rnd_printf.h"
47#endif 47#endif
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/stdint.h> 50#include <sys/stdint.h>
51#include <sys/systm.h> 51#include <sys/systm.h>
52#include <sys/buf.h> 52#include <sys/buf.h>
53#include <sys/device.h> 53#include <sys/device.h>
@@ -136,27 +136,28 @@ const char HEXDIGITS[] = "0123456789ABCD @@ -136,27 +136,28 @@ const char HEXDIGITS[] = "0123456789ABCD
136/* 136/*
137 * functions 137 * functions
138 */ 138 */
139 139
140/* 140/*
141 * Locking is inited fairly early in MI bootstrap. Before that 141 * Locking is inited fairly early in MI bootstrap. Before that
142 * prints are done unlocked. But that doesn't really matter, 142 * prints are done unlocked. But that doesn't really matter,
143 * since nothing can preempt us before interrupts are enabled. 143 * since nothing can preempt us before interrupts are enabled.
144 */ 144 */
145void 145void
146kprintf_init(void) 146kprintf_init(void)
147{ 147{
148 148
149 KASSERT(!kprintf_inited && cold); /* not foolproof, but ... */ 149 KASSERT(!kprintf_inited); /* not foolproof, but ... */
 150 KASSERT(cold);
150 mutex_init(&kprintf_mtx, MUTEX_DEFAULT, IPL_HIGH); 151 mutex_init(&kprintf_mtx, MUTEX_DEFAULT, IPL_HIGH);
151#ifdef RND_PRINTF 152#ifdef RND_PRINTF
152 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN, 153 rnd_attach_source(&rnd_printf_source, "printf", RND_TYPE_UNKNOWN,
153 RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE); 154 RND_FLAG_COLLECT_TIME|RND_FLAG_COLLECT_VALUE);
154#endif 155#endif
155 kprintf_inited = true; 156 kprintf_inited = true;
156} 157}
157 158
158void 159void
159kprintf_lock(void) 160kprintf_lock(void)
160{ 161{
161 162
162 if (__predict_true(kprintf_inited)) 163 if (__predict_true(kprintf_inited))

cvs diff -r1.35 -r1.36 src/sys/kern/subr_time.c (expand / switch to unified diff)

--- src/sys/kern/subr_time.c 2022/06/28 02:04:51 1.35
+++ src/sys/kern/subr_time.c 2023/04/09 09:18:09 1.36
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_time.c,v 1.35 2022/06/28 02:04:51 riastradh Exp $ */ 1/* $NetBSD: subr_time.c,v 1.36 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1982, 1986, 1989, 1993 4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -23,27 +23,27 @@ @@ -23,27 +23,27 @@
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE. 29 * SUCH DAMAGE.
30 * 30 *
31 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 31 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
32 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95 32 * @(#)kern_time.c 8.4 (Berkeley) 5/26/95
33 */ 33 */
34 34
35#include <sys/cdefs.h> 35#include <sys/cdefs.h>
36__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.35 2022/06/28 02:04:51 riastradh Exp $"); 36__KERNEL_RCSID(0, "$NetBSD: subr_time.c,v 1.36 2023/04/09 09:18:09 riastradh Exp $");
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/kernel.h> 39#include <sys/kernel.h>
40#include <sys/proc.h> 40#include <sys/proc.h>
41#include <sys/kauth.h> 41#include <sys/kauth.h>
42#include <sys/lwp.h> 42#include <sys/lwp.h>
43#include <sys/timex.h> 43#include <sys/timex.h>
44#include <sys/time.h> 44#include <sys/time.h>
45#include <sys/timetc.h> 45#include <sys/timetc.h>
46#include <sys/intr.h> 46#include <sys/intr.h>
47 47
48#ifdef DEBUG_STICKS 48#ifdef DEBUG_STICKS
49#define DPRINTF(a) uprintf a 49#define DPRINTF(a) uprintf a
@@ -88,27 +88,28 @@ tvtohz(const struct timeval *tv) @@ -88,27 +88,28 @@ tvtohz(const struct timeval *tv)
88 * ticks separately and add, using similar rounding methods and 88 * ticks separately and add, using similar rounding methods and
89 * overflow avoidance. This method would work in the previous 89 * overflow avoidance. This method would work in the previous
90 * case, but it is slightly slower and assumes that hz is integral. 90 * case, but it is slightly slower and assumes that hz is integral.
91 * 91 *
92 * Otherwise, round the time difference down to the maximum 92 * Otherwise, round the time difference down to the maximum
93 * representable value. 93 * representable value.
94 * 94 *
95 * If ints are 32-bit, then the maximum value for any timeout in 95 * If ints are 32-bit, then the maximum value for any timeout in
96 * 10ms ticks is 248 days. 96 * 10ms ticks is 248 days.
97 */ 97 */
98 sec = tv->tv_sec; 98 sec = tv->tv_sec;
99 usec = tv->tv_usec; 99 usec = tv->tv_usec;
100 100
101 KASSERT(usec >= 0 && usec < 1000000); 101 KASSERT(usec >= 0);
 102 KASSERT(usec < 1000000);
102 103
103 /* catch overflows in conversion time_t->int */ 104 /* catch overflows in conversion time_t->int */
104 if (tv->tv_sec > INT_MAX) 105 if (tv->tv_sec > INT_MAX)
105 return INT_MAX; 106 return INT_MAX;
106 if (tv->tv_sec < 0) 107 if (tv->tv_sec < 0)
107 return 0; 108 return 0;
108 109
109 if (sec < 0 || (sec == 0 && usec == 0)) { 110 if (sec < 0 || (sec == 0 && usec == 0)) {
110 /* 111 /*
111 * Would expire now or in the past. Return 0 ticks. 112 * Would expire now or in the past. Return 0 ticks.
112 * This is different from the legacy tvhzto() interface, 113 * This is different from the legacy tvhzto() interface,
113 * and callers need to check for it. 114 * and callers need to check for it.
114 */ 115 */

cvs diff -r1.108 -r1.109 src/sys/kern/subr_vmem.c (expand / switch to unified diff)

--- src/sys/kern/subr_vmem.c 2022/05/31 08:43:16 1.108
+++ src/sys/kern/subr_vmem.c 2023/04/09 09:18:09 1.109
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_vmem.c,v 1.108 2022/05/31 08:43:16 andvar Exp $ */ 1/* $NetBSD: subr_vmem.c,v 1.109 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi, 4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -36,27 +36,27 @@ @@ -36,27 +36,27 @@
36 * - A pool(9) is used for vmem boundary tags 36 * - A pool(9) is used for vmem boundary tags
37 * - During a pool get call the global vmem_btag_refill_lock is taken, 37 * - During a pool get call the global vmem_btag_refill_lock is taken,
38 * to serialize access to the allocation reserve, but no other 38 * to serialize access to the allocation reserve, but no other
39 * vmem arena locks. 39 * vmem arena locks.
40 * - During pool_put calls no vmem mutexes are locked. 40 * - During pool_put calls no vmem mutexes are locked.
41 * - pool_drain doesn't hold the pool's mutex while releasing memory to 41 * - pool_drain doesn't hold the pool's mutex while releasing memory to
42 * its backing therefore no interference with any vmem mutexes. 42 * its backing therefore no interference with any vmem mutexes.
43 * - The boundary tag pool is forced to put page headers into pool pages 43 * - The boundary tag pool is forced to put page headers into pool pages
44 * (PR_PHINPAGE) and not off page to avoid pool recursion. 44 * (PR_PHINPAGE) and not off page to avoid pool recursion.
45 * (due to sizeof(bt_t) it should be the case anyway) 45 * (due to sizeof(bt_t) it should be the case anyway)
46 */ 46 */
47 47
48#include <sys/cdefs.h> 48#include <sys/cdefs.h>
49__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.108 2022/05/31 08:43:16 andvar Exp $"); 49__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.109 2023/04/09 09:18:09 riastradh Exp $");
50 50
51#if defined(_KERNEL) && defined(_KERNEL_OPT) 51#if defined(_KERNEL) && defined(_KERNEL_OPT)
52#include "opt_ddb.h" 52#include "opt_ddb.h"
53#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */ 53#endif /* defined(_KERNEL) && defined(_KERNEL_OPT) */
54 54
55#include <sys/param.h> 55#include <sys/param.h>
56#include <sys/hash.h> 56#include <sys/hash.h>
57#include <sys/queue.h> 57#include <sys/queue.h>
58#include <sys/bitops.h> 58#include <sys/bitops.h>
59 59
60#if defined(_KERNEL) 60#if defined(_KERNEL)
61#include <sys/systm.h> 61#include <sys/systm.h>
62#include <sys/kernel.h> /* hz */ 62#include <sys/kernel.h> /* hz */
@@ -381,50 +381,52 @@ bt_freetrim(vmem_t *vm, int freelimit) @@ -381,50 +381,52 @@ bt_freetrim(vmem_t *vm, int freelimit)
381 * freelist[2] ... [4, 7] 381 * freelist[2] ... [4, 7]
382 * freelist[3] ... [8, 15] 382 * freelist[3] ... [8, 15]
383 * : 383 * :
384 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1] 384 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
385 * : 385 * :
386 */ 386 */
387 387
388static struct vmem_freelist * 388static struct vmem_freelist *
389bt_freehead_tofree(vmem_t *vm, vmem_size_t size) 389bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
390{ 390{
391 const vmem_size_t qsize = size >> vm->vm_quantum_shift; 391 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
392 const int idx = SIZE2ORDER(qsize); 392 const int idx = SIZE2ORDER(qsize);
393 393
394 KASSERT(size != 0 && qsize != 0); 394 KASSERT(size != 0);
 395 KASSERT(qsize != 0);
395 KASSERT((size & vm->vm_quantum_mask) == 0); 396 KASSERT((size & vm->vm_quantum_mask) == 0);
396 KASSERT(idx >= 0); 397 KASSERT(idx >= 0);
397 KASSERT(idx < VMEM_MAXORDER); 398 KASSERT(idx < VMEM_MAXORDER);
398 399
399 return &vm->vm_freelist[idx]; 400 return &vm->vm_freelist[idx];
400} 401}
401 402
402/* 403/*
403 * bt_freehead_toalloc: return the freelist for the given size and allocation 404 * bt_freehead_toalloc: return the freelist for the given size and allocation
404 * strategy. 405 * strategy.
405 * 406 *
406 * for VM_INSTANTFIT, return the list in which any blocks are large enough 407 * for VM_INSTANTFIT, return the list in which any blocks are large enough
407 * for the requested size. otherwise, return the list which can have blocks 408 * for the requested size. otherwise, return the list which can have blocks
408 * large enough for the requested size. 409 * large enough for the requested size.
409 */ 410 */
410 411
411static struct vmem_freelist * 412static struct vmem_freelist *
412bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat) 413bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
413{ 414{
414 const vmem_size_t qsize = size >> vm->vm_quantum_shift; 415 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
415 int idx = SIZE2ORDER(qsize); 416 int idx = SIZE2ORDER(qsize);
416 417
417 KASSERT(size != 0 && qsize != 0); 418 KASSERT(size != 0);
 419 KASSERT(qsize != 0);
418 KASSERT((size & vm->vm_quantum_mask) == 0); 420 KASSERT((size & vm->vm_quantum_mask) == 0);
419 421
420 if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) { 422 if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
421 idx++; 423 idx++;
422 /* check too large request? */ 424 /* check too large request? */
423 } 425 }
424 KASSERT(idx >= 0); 426 KASSERT(idx >= 0);
425 KASSERT(idx < VMEM_MAXORDER); 427 KASSERT(idx < VMEM_MAXORDER);
426 428
427 return &vm->vm_freelist[idx]; 429 return &vm->vm_freelist[idx];
428} 430}
429 431
430/* ---- boundary tag hash */ 432/* ---- boundary tag hash */
@@ -1121,27 +1123,28 @@ vmem_xalloc(vmem_t *vm, const vmem_size_ @@ -1121,27 +1123,28 @@ vmem_xalloc(vmem_t *vm, const vmem_size_
1121 int rc; 1123 int rc;
1122 1124
1123 KASSERT(size0 > 0); 1125 KASSERT(size0 > 0);
1124 KASSERT(size > 0); 1126 KASSERT(size > 0);
1125 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT); 1127 KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
1126 if ((flags & VM_SLEEP) != 0) { 1128 if ((flags & VM_SLEEP) != 0) {
1127 ASSERT_SLEEPABLE(); 1129 ASSERT_SLEEPABLE();
1128 } 1130 }
1129 KASSERT((align & vm->vm_quantum_mask) == 0); 1131 KASSERT((align & vm->vm_quantum_mask) == 0);
1130 KASSERT((align & (align - 1)) == 0); 1132 KASSERT((align & (align - 1)) == 0);
1131 KASSERT((phase & vm->vm_quantum_mask) == 0); 1133 KASSERT((phase & vm->vm_quantum_mask) == 0);
1132 KASSERT((nocross & vm->vm_quantum_mask) == 0); 1134 KASSERT((nocross & vm->vm_quantum_mask) == 0);
1133 KASSERT((nocross & (nocross - 1)) == 0); 1135 KASSERT((nocross & (nocross - 1)) == 0);
1134 KASSERT((align == 0 && phase == 0) || phase < align); 1136 KASSERT(align == 0 || phase < align);
 1137 KASSERT(phase == 0 || phase < align);
1135 KASSERT(nocross == 0 || nocross >= size); 1138 KASSERT(nocross == 0 || nocross >= size);
1136 KASSERT(minaddr <= maxaddr); 1139 KASSERT(minaddr <= maxaddr);
1137 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross)); 1140 KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1138 1141
1139 if (align == 0) { 1142 if (align == 0) {
1140 align = vm->vm_quantum_mask + 1; 1143 align = vm->vm_quantum_mask + 1;
1141 } 1144 }
1142 1145
1143 /* 1146 /*
1144 * allocate boundary tags before acquiring the vmem lock. 1147 * allocate boundary tags before acquiring the vmem lock.
1145 */ 1148 */
1146 VMEM_LOCK(vm); 1149 VMEM_LOCK(vm);
1147 btnew = bt_alloc(vm, flags); 1150 btnew = bt_alloc(vm, flags);

cvs diff -r1.34 -r1.35 src/sys/kern/subr_xcall.c (expand / switch to unified diff)

--- src/sys/kern/subr_xcall.c 2020/12/22 01:57:29 1.34
+++ src/sys/kern/subr_xcall.c 2023/04/09 09:18:09 1.35
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_xcall.c,v 1.34 2020/12/22 01:57:29 ad Exp $ */ 1/* $NetBSD: subr_xcall.c,v 1.35 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007-2010, 2019 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran and Mindaugas Rasiukevicius. 8 * by Andrew Doran and Mindaugas Rasiukevicius.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -64,27 +64,27 @@ @@ -64,27 +64,27 @@
64 * CPU, and so has exclusive access to the CPU. Since this facility 64 * CPU, and so has exclusive access to the CPU. Since this facility
65 * is heavyweight, it's expected that it will not be used often. 65 * is heavyweight, it's expected that it will not be used often.
66 * 66 *
67 * Cross calls must not allocate memory, as the pagedaemon uses cross 67 * Cross calls must not allocate memory, as the pagedaemon uses cross
68 * calls (and memory allocation may need to wait on the pagedaemon). 68 * calls (and memory allocation may need to wait on the pagedaemon).
69 * 69 *
70 * A low-overhead mechanism for high priority calls (XC_HIGHPRI) is 70 * A low-overhead mechanism for high priority calls (XC_HIGHPRI) is
71 * also provided. The function to be executed runs in software 71 * also provided. The function to be executed runs in software
72 * interrupt context at IPL_SOFTSERIAL level, and is expected to 72 * interrupt context at IPL_SOFTSERIAL level, and is expected to
73 * be very lightweight, e.g. avoid blocking. 73 * be very lightweight, e.g. avoid blocking.
74 */ 74 */
75 75
76#include <sys/cdefs.h> 76#include <sys/cdefs.h>
77__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.34 2020/12/22 01:57:29 ad Exp $"); 77__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.35 2023/04/09 09:18:09 riastradh Exp $");
78 78
79#include <sys/types.h> 79#include <sys/types.h>
80#include <sys/param.h> 80#include <sys/param.h>
81#include <sys/xcall.h> 81#include <sys/xcall.h>
82#include <sys/mutex.h> 82#include <sys/mutex.h>
83#include <sys/condvar.h> 83#include <sys/condvar.h>
84#include <sys/evcnt.h> 84#include <sys/evcnt.h>
85#include <sys/kthread.h> 85#include <sys/kthread.h>
86#include <sys/cpu.h> 86#include <sys/cpu.h>
87#include <sys/atomic.h> 87#include <sys/atomic.h>
88 88
89#ifdef _RUMPKERNEL 89#ifdef _RUMPKERNEL
90#include "rump_private.h" 90#include "rump_private.h"
@@ -247,27 +247,28 @@ xc_init_cpu(struct cpu_info *ci) @@ -247,27 +247,28 @@ xc_init_cpu(struct cpu_info *ci)
247 NULL, NULL, "xcall/%u", ci->ci_index); 247 NULL, NULL, "xcall/%u", ci->ci_index);
248 KASSERT(error == 0); 248 KASSERT(error == 0);
249} 249}
250 250
251/* 251/*
252 * xc_broadcast: 252 * xc_broadcast:
253 * 253 *
254 * Trigger a call on all CPUs in the system. 254 * Trigger a call on all CPUs in the system.
255 */ 255 */
256uint64_t 256uint64_t
257xc_broadcast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2) 257xc_broadcast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2)
258{ 258{
259 259
260 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 260 KASSERT(!cpu_intr_p());
 261 KASSERT(!cpu_softintr_p());
261 ASSERT_SLEEPABLE(); 262 ASSERT_SLEEPABLE();
262 263
263 if (__predict_false(!mp_online)) { 264 if (__predict_false(!mp_online)) {
264 (*func)(arg1, arg2); 265 (*func)(arg1, arg2);
265 return 0; 266 return 0;
266 } 267 }
267 268
268 if ((flags & XC_HIGHPRI) != 0) { 269 if ((flags & XC_HIGHPRI) != 0) {
269 int ipl = xc_extract_ipl(flags); 270 int ipl = xc_extract_ipl(flags);
270 return xc_highpri(func, arg1, arg2, NULL, ipl); 271 return xc_highpri(func, arg1, arg2, NULL, ipl);
271 } else { 272 } else {
272 return xc_lowpri(func, arg1, arg2, NULL); 273 return xc_lowpri(func, arg1, arg2, NULL);
273 } 274 }
@@ -296,27 +297,28 @@ xc_barrier(unsigned int flags) @@ -296,27 +297,28 @@ xc_barrier(unsigned int flags)
296 297
297/* 298/*
298 * xc_unicast: 299 * xc_unicast:
299 * 300 *
300 * Trigger a call on one CPU. 301 * Trigger a call on one CPU.
301 */ 302 */
302uint64_t 303uint64_t
303xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2, 304xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2,
304 struct cpu_info *ci) 305 struct cpu_info *ci)
305{ 306{
306 int s; 307 int s;
307 308
308 KASSERT(ci != NULL); 309 KASSERT(ci != NULL);
309 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 310 KASSERT(!cpu_intr_p());
 311 KASSERT(!cpu_softintr_p());
310 ASSERT_SLEEPABLE(); 312 ASSERT_SLEEPABLE();
311 313
312 if (__predict_false(!mp_online)) { 314 if (__predict_false(!mp_online)) {
313 KASSERT(ci == curcpu()); 315 KASSERT(ci == curcpu());
314 s = splsoftserial(); 316 s = splsoftserial();
315 (*func)(arg1, arg2); 317 (*func)(arg1, arg2);
316 splx(s); 318 splx(s);
317 return 0; 319 return 0;
318 } 320 }
319 321
320 if ((flags & XC_HIGHPRI) != 0) { 322 if ((flags & XC_HIGHPRI) != 0) {
321 int ipl = xc_extract_ipl(flags); 323 int ipl = xc_extract_ipl(flags);
322 return xc_highpri(func, arg1, arg2, ci, ipl); 324 return xc_highpri(func, arg1, arg2, ci, ipl);
@@ -325,27 +327,28 @@ xc_unicast(unsigned int flags, xcfunc_t  @@ -325,27 +327,28 @@ xc_unicast(unsigned int flags, xcfunc_t
325 } 327 }
326} 328}
327 329
328/* 330/*
329 * xc_wait: 331 * xc_wait:
330 * 332 *
331 * Wait for a cross call to complete. 333 * Wait for a cross call to complete.
332 */ 334 */
333void 335void
334xc_wait(uint64_t where) 336xc_wait(uint64_t where)
335{ 337{
336 xc_state_t *xc; 338 xc_state_t *xc;
337 339
338 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 340 KASSERT(!cpu_intr_p());
 341 KASSERT(!cpu_softintr_p());
339 ASSERT_SLEEPABLE(); 342 ASSERT_SLEEPABLE();
340 343
341 if (__predict_false(!mp_online)) { 344 if (__predict_false(!mp_online)) {
342 return; 345 return;
343 } 346 }
344 347
345 /* Determine whether it is high or low priority cross-call. */ 348 /* Determine whether it is high or low priority cross-call. */
346 if ((where & XC_PRI_BIT) != 0) { 349 if ((where & XC_PRI_BIT) != 0) {
347 xc = &xc_high_pri; 350 xc = &xc_high_pri;
348 where &= ~XC_PRI_BIT; 351 where &= ~XC_PRI_BIT;
349 } else { 352 } else {
350 xc = &xc_low_pri; 353 xc = &xc_low_pri;
351 } 354 }

cvs diff -r1.49 -r1.50 src/sys/kern/sys_sched.c (expand / switch to unified diff)

--- src/sys/kern/sys_sched.c 2020/05/23 23:42:43 1.49
+++ src/sys/kern/sys_sched.c 2023/04/09 09:18:09 1.50
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: sys_sched.c,v 1.49 2020/05/23 23:42:43 ad Exp $ */ 1/* $NetBSD: sys_sched.c,v 1.50 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2008, 2011 Mindaugas Rasiukevicius <rmind at NetBSD org> 4 * Copyright (c) 2008, 2011 Mindaugas Rasiukevicius <rmind at NetBSD org>
5 * All rights reserved. 5 * All rights reserved.
6 *  6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -31,27 +31,27 @@ @@ -31,27 +31,27 @@
31 * 31 *
32 * Lock order: 32 * Lock order:
33 * 33 *
34 * cpu_lock -> 34 * cpu_lock ->
35 * proc_lock -> 35 * proc_lock ->
36 * proc_t::p_lock -> 36 * proc_t::p_lock ->
37 * lwp_t::lwp_lock 37 * lwp_t::lwp_lock
38 * 38 *
39 * TODO: 39 * TODO:
40 * - Handle pthread_setschedprio() as defined by POSIX; 40 * - Handle pthread_setschedprio() as defined by POSIX;
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.49 2020/05/23 23:42:43 ad Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.50 2023/04/09 09:18:09 riastradh Exp $");
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47 47
48#include <sys/cpu.h> 48#include <sys/cpu.h>
49#include <sys/kauth.h> 49#include <sys/kauth.h>
50#include <sys/kmem.h> 50#include <sys/kmem.h>
51#include <sys/lwp.h> 51#include <sys/lwp.h>
52#include <sys/mutex.h> 52#include <sys/mutex.h>
53#include <sys/proc.h> 53#include <sys/proc.h>
54#include <sys/pset.h> 54#include <sys/pset.h>
55#include <sys/sched.h> 55#include <sys/sched.h>
56#include <sys/syscallargs.h> 56#include <sys/syscallargs.h>
57#include <sys/sysctl.h> 57#include <sys/sysctl.h>
@@ -63,27 +63,28 @@ static struct sysctllog *sched_sysctl_lo @@ -63,27 +63,28 @@ static struct sysctllog *sched_sysctl_lo
63static kauth_listener_t sched_listener; 63static kauth_listener_t sched_listener;
64 64
65/* 65/*
66 * Convert user priority or the in-kernel priority or convert the current 66 * Convert user priority or the in-kernel priority or convert the current
67 * priority to the appropriate range according to the policy change. 67 * priority to the appropriate range according to the policy change.
68 */ 68 */
69static pri_t 69static pri_t
70convert_pri(lwp_t *l, int policy, pri_t pri) 70convert_pri(lwp_t *l, int policy, pri_t pri)
71{ 71{
72 72
73 /* Convert user priority to the in-kernel */ 73 /* Convert user priority to the in-kernel */
74 if (pri != PRI_NONE) { 74 if (pri != PRI_NONE) {
75 /* Only for real-time threads */ 75 /* Only for real-time threads */
76 KASSERT(pri >= SCHED_PRI_MIN && pri <= SCHED_PRI_MAX); 76 KASSERT(pri >= SCHED_PRI_MIN);
 77 KASSERT(pri <= SCHED_PRI_MAX);
77 KASSERT(policy != SCHED_OTHER); 78 KASSERT(policy != SCHED_OTHER);
78 return PRI_USER_RT + pri; 79 return PRI_USER_RT + pri;
79 } 80 }
80 81
81 /* Neither policy, nor priority change */ 82 /* Neither policy, nor priority change */
82 if (l->l_class == policy) 83 if (l->l_class == policy)
83 return l->l_priority; 84 return l->l_priority;
84 85
85 /* Time-sharing -> real-time */ 86 /* Time-sharing -> real-time */
86 if (l->l_class == SCHED_OTHER) { 87 if (l->l_class == SCHED_OTHER) {
87 KASSERT(policy == SCHED_FIFO || policy == SCHED_RR); 88 KASSERT(policy == SCHED_FIFO || policy == SCHED_RR);
88 return PRI_USER_RT; 89 return PRI_USER_RT;
89 } 90 }

cvs diff -r1.152 -r1.153 src/sys/kern/vfs_cache.c (expand / switch to unified diff)

--- src/sys/kern/vfs_cache.c 2021/11/01 21:28:03 1.152
+++ src/sys/kern/vfs_cache.c 2023/04/09 09:18:09 1.153
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vfs_cache.c,v 1.152 2021/11/01 21:28:03 andvar Exp $ */ 1/* $NetBSD: vfs_cache.c,v 1.153 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -162,27 +162,27 @@ @@ -162,27 +162,27 @@
162 * 162 *
163 * 1) vi->vi_nc_lock (tree or parent -> child direction, 163 * 1) vi->vi_nc_lock (tree or parent -> child direction,
164 * used during forward lookup) 164 * used during forward lookup)
165 * 165 *
166 * 2) vi->vi_nc_listlock (list or child -> parent direction, 166 * 2) vi->vi_nc_listlock (list or child -> parent direction,
167 * used during reverse lookup) 167 * used during reverse lookup)
168 * 168 *
169 * 3) cache_lru_lock (LRU list direction, used during reclaim) 169 * 3) cache_lru_lock (LRU list direction, used during reclaim)
170 * 170 *
171 * 4) vp->v_interlock (what the cache entry points to) 171 * 4) vp->v_interlock (what the cache entry points to)
172 */ 172 */
173 173
174#include <sys/cdefs.h> 174#include <sys/cdefs.h>
175__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.152 2021/11/01 21:28:03 andvar Exp $"); 175__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.153 2023/04/09 09:18:09 riastradh Exp $");
176 176
177#define __NAMECACHE_PRIVATE 177#define __NAMECACHE_PRIVATE
178#ifdef _KERNEL_OPT 178#ifdef _KERNEL_OPT
179#include "opt_ddb.h" 179#include "opt_ddb.h"
180#include "opt_dtrace.h" 180#include "opt_dtrace.h"
181#endif 181#endif
182 182
183#include <sys/param.h> 183#include <sys/param.h>
184#include <sys/types.h> 184#include <sys/types.h>
185#include <sys/atomic.h> 185#include <sys/atomic.h>
186#include <sys/callout.h> 186#include <sys/callout.h>
187#include <sys/cpu.h> 187#include <sys/cpu.h>
188#include <sys/errno.h> 188#include <sys/errno.h>
@@ -690,27 +690,28 @@ cache_lookup_linked(struct vnode *dvp, c @@ -690,27 +690,28 @@ cache_lookup_linked(struct vnode *dvp, c
690 } 690 }
691 691
692 /* 692 /*
693 * First up check if the user is allowed to look up files in this 693 * First up check if the user is allowed to look up files in this
694 * directory. 694 * directory.
695 */ 695 */
696 if (cred != FSCRED) { 696 if (cred != FSCRED) {
697 if (dvi->vi_nc_mode == VNOVAL) { 697 if (dvi->vi_nc_mode == VNOVAL) {
698 if (newlock != NULL) { 698 if (newlock != NULL) {
699 rw_exit(newlock); 699 rw_exit(newlock);
700 } 700 }
701 return false; 701 return false;
702 } 702 }
703 KASSERT(dvi->vi_nc_uid != VNOVAL && dvi->vi_nc_gid != VNOVAL); 703 KASSERT(dvi->vi_nc_uid != VNOVAL);
 704 KASSERT(dvi->vi_nc_gid != VNOVAL);
704 error = kauth_authorize_vnode(cred, 705 error = kauth_authorize_vnode(cred,
705 KAUTH_ACCESS_ACTION(VEXEC, 706 KAUTH_ACCESS_ACTION(VEXEC,
706 dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL, 707 dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
707 genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid, 708 genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid,
708 dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC)); 709 dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
709 if (error != 0) { 710 if (error != 0) {
710 if (newlock != NULL) { 711 if (newlock != NULL) {
711 rw_exit(newlock); 712 rw_exit(newlock);
712 } 713 }
713 COUNT(ncs_denied); 714 COUNT(ncs_denied);
714 return false; 715 return false;
715 } 716 }
716 } 717 }
@@ -720,27 +721,28 @@ cache_lookup_linked(struct vnode *dvp, c @@ -720,27 +721,28 @@ cache_lookup_linked(struct vnode *dvp, c
720 */ 721 */
721 ncp = cache_lookup_entry(dvp, name, namelen, key); 722 ncp = cache_lookup_entry(dvp, name, namelen, key);
722 if (__predict_false(ncp == NULL)) { 723 if (__predict_false(ncp == NULL)) {
723 if (newlock != NULL) { 724 if (newlock != NULL) {
724 rw_exit(newlock); 725 rw_exit(newlock);
725 } 726 }
726 COUNT(ncs_miss); 727 COUNT(ncs_miss);
727 SDT_PROBE(vfs, namecache, lookup, miss, dvp, 728 SDT_PROBE(vfs, namecache, lookup, miss, dvp,
728 name, namelen, 0, 0); 729 name, namelen, 0, 0);
729 return false; 730 return false;
730 } 731 }
731 if (ncp->nc_vp == NULL) { 732 if (ncp->nc_vp == NULL) {
732 /* found negative entry; vn is already null from above */ 733 /* found negative entry; vn is already null from above */
733 KASSERT(namelen != cache_mp_nlen && name != cache_mp_name); 734 KASSERT(namelen != cache_mp_nlen);
 735 KASSERT(name != cache_mp_name);
734 COUNT(ncs_neghits); 736 COUNT(ncs_neghits);
735 } else { 737 } else {
736 COUNT(ncs_goodhits); /* XXX can be "badhits" */ 738 COUNT(ncs_goodhits); /* XXX can be "badhits" */
737 } 739 }
738 SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0); 740 SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
739 741
740 /* 742 /*
741 * Return with the directory lock still held. It will either be 743 * Return with the directory lock still held. It will either be
742 * returned to us with another call to cache_lookup_linked() when 744 * returned to us with another call to cache_lookup_linked() when
743 * looking up the next component, or the caller will release it 745 * looking up the next component, or the caller will release it
744 * manually when finished. 746 * manually when finished.
745 */ 747 */
746 if (oldlock) { 748 if (oldlock) {
@@ -786,27 +788,28 @@ cache_revlookup(struct vnode *vp, struct @@ -786,27 +788,28 @@ cache_revlookup(struct vnode *vp, struct
786 /* 788 /*
787 * Check if the user is allowed to see. NOTE: this is 789 * Check if the user is allowed to see. NOTE: this is
788 * checking for access on the "wrong" directory. getcwd() 790 * checking for access on the "wrong" directory. getcwd()
789 * wants to see that there is access on every component 791 * wants to see that there is access on every component
790 * along the way, not that there is access to any individual 792 * along the way, not that there is access to any individual
791 * component. Don't use this to check you can look in vp. 793 * component. Don't use this to check you can look in vp.
792 * 794 *
793 * I don't like it, I didn't come up with it, don't blame me! 795 * I don't like it, I didn't come up with it, don't blame me!
794 */ 796 */
795 if (vi->vi_nc_mode == VNOVAL) { 797 if (vi->vi_nc_mode == VNOVAL) {
796 rw_exit(&vi->vi_nc_listlock); 798 rw_exit(&vi->vi_nc_listlock);
797 return -1; 799 return -1;
798 } 800 }
799 KASSERT(vi->vi_nc_uid != VNOVAL && vi->vi_nc_gid != VNOVAL); 801 KASSERT(vi->vi_nc_uid != VNOVAL);
 802 KASSERT(vi->vi_nc_gid != VNOVAL);
800 error = kauth_authorize_vnode(kauth_cred_get(), 803 error = kauth_authorize_vnode(kauth_cred_get(),
801 KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode & 804 KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
802 ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred, 805 ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred,
803 vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS, 806 vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS,
804 NULL, accmode)); 807 NULL, accmode));
805 if (error != 0) { 808 if (error != 0) {
806 rw_exit(&vi->vi_nc_listlock); 809 rw_exit(&vi->vi_nc_listlock);
807 COUNT(ncs_denied); 810 COUNT(ncs_denied);
808 return EACCES; 811 return EACCES;
809 } 812 }
810 } 813 }
811 TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) { 814 TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
812 KASSERT(ncp->nc_vp == vp); 815 KASSERT(ncp->nc_vp == vp);

cvs diff -r1.232 -r1.233 src/sys/kern/vfs_lookup.c (expand / switch to unified diff)

--- src/sys/kern/vfs_lookup.c 2022/08/22 09:14:59 1.232
+++ src/sys/kern/vfs_lookup.c 2023/04/09 09:18:09 1.233
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vfs_lookup.c,v 1.232 2022/08/22 09:14:59 hannken Exp $ */ 1/* $NetBSD: vfs_lookup.c,v 1.233 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1982, 1986, 1989, 1993 4 * Copyright (c) 1982, 1986, 1989, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc. 6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed 7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph 8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc. 10 * the permission of UNIX System Laboratories, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -27,27 +27,27 @@ @@ -27,27 +27,27 @@
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE. 34 * SUCH DAMAGE.
35 * 35 *
36 * @(#)vfs_lookup.c 8.10 (Berkeley) 5/27/95 36 * @(#)vfs_lookup.c 8.10 (Berkeley) 5/27/95
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: vfs_lookup.c,v 1.232 2022/08/22 09:14:59 hannken Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: vfs_lookup.c,v 1.233 2023/04/09 09:18:09 riastradh Exp $");
41 41
42#ifdef _KERNEL_OPT 42#ifdef _KERNEL_OPT
43#include "opt_magiclinks.h" 43#include "opt_magiclinks.h"
44#endif 44#endif
45 45
46#include <sys/param.h> 46#include <sys/param.h>
47#include <sys/systm.h> 47#include <sys/systm.h>
48#include <sys/kernel.h> 48#include <sys/kernel.h>
49#include <sys/syslimits.h> 49#include <sys/syslimits.h>
50#include <sys/time.h> 50#include <sys/time.h>
51#include <sys/namei.h> 51#include <sys/namei.h>
52#include <sys/vnode.h> 52#include <sys/vnode.h>
53#include <sys/vnode_impl.h> 53#include <sys/vnode_impl.h>
@@ -1318,27 +1318,28 @@ lookup_fastforward(struct namei_state *s @@ -1318,27 +1318,28 @@ lookup_fastforward(struct namei_state *s
1318 } 1318 }
1319 1319
1320 /* 1320 /*
1321 * Good, now look for it in cache. cache_lookup_linked() 1321 * Good, now look for it in cache. cache_lookup_linked()
1322 * will fail if there's nothing there, or if there's no 1322 * will fail if there's nothing there, or if there's no
1323 * ownership info for the directory, or if the user doesn't 1323 * ownership info for the directory, or if the user doesn't
1324 * have permission to look up files in this directory. 1324 * have permission to look up files in this directory.
1325 */ 1325 */
1326 if (!cache_lookup_linked(searchdir, cnp->cn_nameptr, 1326 if (!cache_lookup_linked(searchdir, cnp->cn_nameptr,
1327 cnp->cn_namelen, &foundobj, &plock, cnp->cn_cred)) { 1327 cnp->cn_namelen, &foundobj, &plock, cnp->cn_cred)) {
1328 error = EOPNOTSUPP; 1328 error = EOPNOTSUPP;
1329 break; 1329 break;
1330 } 1330 }
1331 KASSERT(plock != NULL && rw_lock_held(plock)); 1331 KASSERT(plock != NULL);
 1332 KASSERT(rw_lock_held(plock));
1332 1333
1333 /* 1334 /*
1334 * Scored a hit. Negative is good too (ENOENT). If there's 1335 * Scored a hit. Negative is good too (ENOENT). If there's
1335 * a '-o union' mount here, punt and let lookup_once() deal 1336 * a '-o union' mount here, punt and let lookup_once() deal
1336 * with it. 1337 * with it.
1337 */ 1338 */
1338 if (foundobj == NULL) { 1339 if (foundobj == NULL) {
1339 if ((searchdir->v_vflag & VV_ROOT) != 0 && 1340 if ((searchdir->v_vflag & VV_ROOT) != 0 &&
1340 (searchdir->v_mount->mnt_flag & MNT_UNION) != 0) { 1341 (searchdir->v_mount->mnt_flag & MNT_UNION) != 0) {
1341 error = EOPNOTSUPP; 1342 error = EOPNOTSUPP;
1342 } else { 1343 } else {
1343 error = ENOENT; 1344 error = ENOENT;
1344 terminal = ((cnp->cn_flags & ISLASTCN) != 0); 1345 terminal = ((cnp->cn_flags & ISLASTCN) != 0);

cvs diff -r1.557 -r1.558 src/sys/kern/vfs_syscalls.c (expand / switch to unified diff)

--- src/sys/kern/vfs_syscalls.c 2023/03/05 14:40:32 1.557
+++ src/sys/kern/vfs_syscalls.c 2023/04/09 09:18:09 1.558
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vfs_syscalls.c,v 1.557 2023/03/05 14:40:32 riastradh Exp $ */ 1/* $NetBSD: vfs_syscalls.c,v 1.558 2023/04/09 09:18:09 riastradh Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2009, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -60,27 +60,27 @@ @@ -60,27 +60,27 @@
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 * 64 *
65 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95 65 * @(#)vfs_syscalls.c 8.42 (Berkeley) 7/31/95
66 */ 66 */
67 67
68/* 68/*
69 * Virtual File System System Calls 69 * Virtual File System System Calls
70 */ 70 */
71 71
72#include <sys/cdefs.h> 72#include <sys/cdefs.h>
73__KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.557 2023/03/05 14:40:32 riastradh Exp $"); 73__KERNEL_RCSID(0, "$NetBSD: vfs_syscalls.c,v 1.558 2023/04/09 09:18:09 riastradh Exp $");
74 74
75#ifdef _KERNEL_OPT 75#ifdef _KERNEL_OPT
76#include "opt_fileassoc.h" 76#include "opt_fileassoc.h"
77#include "veriexec.h" 77#include "veriexec.h"
78#endif 78#endif
79 79
80#include <sys/param.h> 80#include <sys/param.h>
81#include <sys/systm.h> 81#include <sys/systm.h>
82#include <sys/namei.h> 82#include <sys/namei.h>
83#include <sys/filedesc.h> 83#include <sys/filedesc.h>
84#include <sys/kernel.h> 84#include <sys/kernel.h>
85#include <sys/file.h> 85#include <sys/file.h>
86#include <sys/fcntl.h> 86#include <sys/fcntl.h>
@@ -1937,28 +1937,28 @@ vfs_composefh_alloc(struct vnode *vp, fh @@ -1937,28 +1937,28 @@ vfs_composefh_alloc(struct vnode *vp, fh
1937 1937
1938 mp = vp->v_mount; 1938 mp = vp->v_mount;
1939 fidsize = 0; 1939 fidsize = 0;
1940 error = VFS_VPTOFH(vp, NULL, &fidsize); 1940 error = VFS_VPTOFH(vp, NULL, &fidsize);
1941 KASSERT(error != 0); 1941 KASSERT(error != 0);
1942 if (error != E2BIG) { 1942 if (error != E2BIG) {
1943 goto out; 1943 goto out;
1944 } 1944 }
1945 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize); 1945 fhsize = FHANDLE_SIZE_FROM_FILEID_SIZE(fidsize);
1946 fhp = kmem_zalloc(fhsize, KM_SLEEP); 1946 fhp = kmem_zalloc(fhsize, KM_SLEEP);
1947 fhp->fh_fsid = mp->mnt_stat.f_fsidx; 1947 fhp->fh_fsid = mp->mnt_stat.f_fsidx;
1948 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize); 1948 error = VFS_VPTOFH(vp, &fhp->fh_fid, &fidsize);
1949 if (error == 0) { 1949 if (error == 0) {
1950 KASSERT((FHANDLE_SIZE(fhp) == fhsize && 1950 KASSERT(FHANDLE_SIZE(fhp) == fhsize);
1951 FHANDLE_FILEID(fhp)->fid_len == fidsize)); 1951 KASSERT(FHANDLE_FILEID(fhp)->fid_len == fidsize);
1952 *fhpp = fhp; 1952 *fhpp = fhp;
1953 } else { 1953 } else {
1954 kmem_free(fhp, fhsize); 1954 kmem_free(fhp, fhsize);
1955 } 1955 }
1956out: 1956out:
1957 return error; 1957 return error;
1958} 1958}
1959 1959
1960void 1960void
1961vfs_composefh_free(fhandle_t *fhp) 1961vfs_composefh_free(fhandle_t *fhp)
1962{ 1962{
1963 1963
1964 vfs__fhfree(fhp); 1964 vfs__fhfree(fhp);
@@ -4364,27 +4364,28 @@ do_sys_rename(const char *from, const ch @@ -4364,27 +4364,28 @@ do_sys_rename(const char *from, const ch
4364} 4364}
4365 4365
4366static int 4366static int
4367do_sys_renameat(struct lwp *l, int fromfd, const char *from, int tofd, 4367do_sys_renameat(struct lwp *l, int fromfd, const char *from, int tofd,
4368 const char *to, enum uio_seg seg, int retain) 4368 const char *to, enum uio_seg seg, int retain)
4369{ 4369{
4370 struct pathbuf *fpb, *tpb; 4370 struct pathbuf *fpb, *tpb;
4371 struct nameidata fnd, tnd; 4371 struct nameidata fnd, tnd;
4372 struct vnode *fdvp, *fvp; 4372 struct vnode *fdvp, *fvp;
4373 struct vnode *tdvp, *tvp; 4373 struct vnode *tdvp, *tvp;
4374 struct mount *mp, *tmp; 4374 struct mount *mp, *tmp;
4375 int error; 4375 int error;
4376 4376
4377 KASSERT(l != NULL || (fromfd == AT_FDCWD && tofd == AT_FDCWD)); 4377 KASSERT(l != NULL || fromfd == AT_FDCWD);
 4378 KASSERT(l != NULL || tofd == AT_FDCWD);
4378 4379
4379 error = pathbuf_maybe_copyin(from, seg, &fpb); 4380 error = pathbuf_maybe_copyin(from, seg, &fpb);
4380 if (error) 4381 if (error)
4381 goto out0; 4382 goto out0;
4382 KASSERT(fpb != NULL); 4383 KASSERT(fpb != NULL);
4383 4384
4384 error = pathbuf_maybe_copyin(to, seg, &tpb); 4385 error = pathbuf_maybe_copyin(to, seg, &tpb);
4385 if (error) 4386 if (error)
4386 goto out1; 4387 goto out1;
4387 KASSERT(tpb != NULL); 4388 KASSERT(tpb != NULL);
4388 4389
4389 /* 4390 /*
4390 * Lookup from. 4391 * Lookup from.