Sun Oct 6 15:11:17 2019 UTC ()
xc_barrier - convenience function to xc_broadcast() a nop.

Make the intent clearer and also avoid a bunch of (xcfunc_t)nullop
casts that gcc 8's -Wcast-function-type is not happy about.


(uwe)
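
For illustration, the call-site change repeated throughout the diffs below
replaces the hand-rolled broadcast-and-wait pair with a single xc_barrier()
call (a minimal sketch of the pattern, not a verbatim excerpt from any one
file):

	/* Before: broadcast a no-op cross-call and wait for it by hand. */
	uint64_t where;

	where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL);
	xc_wait(where);

	/* After: the same barrier in one call, with no function-pointer cast. */
	xc_barrier(0);

xc_barrier(flags) itself (see the subr_xcall.c diff) broadcasts an internal
no-op function with the given flags and then waits for it to complete, so
callers that need the high-priority variant simply pass XC_HIGHPRI, as
subr_pserialize.c now does.
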
diff -r1.79 -r1.80 src/sys/arch/x86/acpi/acpi_cpu_md.c
diff -r1.204 -r1.205 src/sys/kern/kern_lwp.c
diff -r1.38 -r1.39 src/sys/kern/kern_ras.c
diff -r1.47 -r1.48 src/sys/kern/kern_softint.c
diff -r1.18 -r1.19 src/sys/kern/kern_syscall.c
diff -r1.51 -r1.52 src/sys/kern/kern_tc.c
diff -r1.12 -r1.13 src/sys/kern/subr_pserialize.c
diff -r1.26 -r1.27 src/sys/kern/subr_xcall.c
diff -r1.462 -r1.463 src/sys/net/if.c
diff -r1.49 -r1.50 src/sys/net/agr/if_agr.c
diff -r1.109 -r1.110 src/sys/opencrypto/crypto.c
diff -r1.7 -r1.8 src/sys/rump/kern/lib/libsysproxy/sysproxy.c
diff -r1.7 -r1.8 src/sys/sys/xcall.h

cvs diff -r1.79 -r1.80 src/sys/arch/x86/acpi/acpi_cpu_md.c

--- src/sys/arch/x86/acpi/acpi_cpu_md.c 2018/11/10 09:42:42 1.79
+++ src/sys/arch/x86/acpi/acpi_cpu_md.c 2019/10/06 15:11:17 1.80
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: acpi_cpu_md.c,v 1.79 2018/11/10 09:42:42 maxv Exp $ */ 1/* $NetBSD: acpi_cpu_md.c,v 1.80 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi> 4 * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 10 *
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE. 27 * SUCH DAMAGE.
28 */ 28 */
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.79 2018/11/10 09:42:42 maxv Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.80 2019/10/06 15:11:17 uwe Exp $");
31 31
32#include <sys/param.h> 32#include <sys/param.h>
33#include <sys/bus.h> 33#include <sys/bus.h>
34#include <sys/cpufreq.h> 34#include <sys/cpufreq.h>
35#include <sys/device.h> 35#include <sys/device.h>
36#include <sys/kcore.h> 36#include <sys/kcore.h>
37#include <sys/sysctl.h> 37#include <sys/sysctl.h>
38#include <sys/xcall.h> 38#include <sys/xcall.h>
39 39
40#include <x86/cpu.h> 40#include <x86/cpu.h>
41#include <x86/cpufunc.h> 41#include <x86/cpufunc.h>
42#include <x86/cputypes.h> 42#include <x86/cputypes.h>
43#include <x86/cpuvar.h> 43#include <x86/cpuvar.h>
@@ -368,43 +368,41 @@ acpicpu_md_cstate_start(struct acpicpu_s @@ -368,43 +368,41 @@ acpicpu_md_cstate_start(struct acpicpu_s
368 } 368 }
369 } 369 }
370 370
371 x86_cpu_idle_set(acpicpu_cstate_idle, "acpi", ipi); 371 x86_cpu_idle_set(acpicpu_cstate_idle, "acpi", ipi);
372 372
373 return 0; 373 return 0;
374} 374}
375 375
376int 376int
377acpicpu_md_cstate_stop(void) 377acpicpu_md_cstate_stop(void)
378{ 378{
379 static char text[16]; 379 static char text[16];
380 void (*func)(void); 380 void (*func)(void);
381 uint64_t xc; 
382 bool ipi; 381 bool ipi;
383 382
384 x86_cpu_idle_get(&func, text, sizeof(text)); 383 x86_cpu_idle_get(&func, text, sizeof(text));
385 384
386 if (func == native_idle) 385 if (func == native_idle)
387 return EALREADY; 386 return EALREADY;
388 387
389 ipi = (native_idle != x86_cpu_idle_halt) ? false : true; 388 ipi = (native_idle != x86_cpu_idle_halt) ? false : true;
390 x86_cpu_idle_set(native_idle, native_idle_text, ipi); 389 x86_cpu_idle_set(native_idle, native_idle_text, ipi);
391 390
392 /* 391 /*
393 * Run a cross-call to ensure that all CPUs are 392 * Run a cross-call to ensure that all CPUs are
394 * out from the ACPI idle-loop before detachment. 393 * out from the ACPI idle-loop before detachment.
395 */ 394 */
396 xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 395 xc_barrier(0);
397 xc_wait(xc); 
398 396
399 return 0; 397 return 0;
400} 398}
401 399
402/* 400/*
403 * Called with interrupts enabled. 401 * Called with interrupts enabled.
404 */ 402 */
405void 403void
406acpicpu_md_cstate_enter(int method, int state) 404acpicpu_md_cstate_enter(int method, int state)
407{ 405{
408 struct cpu_info *ci = curcpu(); 406 struct cpu_info *ci = curcpu();
409 407
410 KASSERT(ci->ci_ilevel == IPL_NONE); 408 KASSERT(ci->ci_ilevel == IPL_NONE);

cvs diff -r1.204 -r1.205 src/sys/kern/kern_lwp.c

--- src/sys/kern/kern_lwp.c 2019/10/03 22:48:44 1.204
+++ src/sys/kern/kern_lwp.c 2019/10/06 15:11:17 1.205
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_lwp.c,v 1.204 2019/10/03 22:48:44 kamil Exp $ */ 1/* $NetBSD: kern_lwp.c,v 1.205 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams, and Andrew Doran. 8 * by Nathan J. Williams, and Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -201,27 +201,27 @@ @@ -201,27 +201,27 @@
201 * (But not always for kernel threads. There are some special cases 201 * (But not always for kernel threads. There are some special cases
202 * as mentioned above. See kern_softint.c.) 202 * as mentioned above. See kern_softint.c.)
203 * 203 *
204 * Note that an LWP is considered running or likely to run soon if in 204 * Note that an LWP is considered running or likely to run soon if in
205 * one of the following states. This affects the value of p_nrlwps: 205 * one of the following states. This affects the value of p_nrlwps:
206 * 206 *
207 * LSRUN, LSONPROC, LSSLEEP 207 * LSRUN, LSONPROC, LSSLEEP
208 * 208 *
209 * p_lock does not need to be held when transitioning among these 209 * p_lock does not need to be held when transitioning among these
210 * three states, hence p_lock is rarely taken for state transitions. 210 * three states, hence p_lock is rarely taken for state transitions.
211 */ 211 */
212 212
213#include <sys/cdefs.h> 213#include <sys/cdefs.h>
214__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.204 2019/10/03 22:48:44 kamil Exp $"); 214__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.205 2019/10/06 15:11:17 uwe Exp $");
215 215
216#include "opt_ddb.h" 216#include "opt_ddb.h"
217#include "opt_lockdebug.h" 217#include "opt_lockdebug.h"
218#include "opt_dtrace.h" 218#include "opt_dtrace.h"
219 219
220#define _LWP_API_PRIVATE 220#define _LWP_API_PRIVATE
221 221
222#include <sys/param.h> 222#include <sys/param.h>
223#include <sys/systm.h> 223#include <sys/systm.h>
224#include <sys/cpu.h> 224#include <sys/cpu.h>
225#include <sys/pool.h> 225#include <sys/pool.h>
226#include <sys/proc.h> 226#include <sys/proc.h>
227#include <sys/syscallargs.h> 227#include <sys/syscallargs.h>
@@ -357,40 +357,38 @@ lwp0_init(void) @@ -357,40 +357,38 @@ lwp0_init(void)
357 kauth_cred_hold(proc0.p_cred); 357 kauth_cred_hold(proc0.p_cred);
358 l->l_cred = proc0.p_cred; 358 l->l_cred = proc0.p_cred;
359 359
360 kdtrace_thread_ctor(NULL, l); 360 kdtrace_thread_ctor(NULL, l);
361 lwp_initspecific(l); 361 lwp_initspecific(l);
362 362
363 SYSCALL_TIME_LWP_INIT(l); 363 SYSCALL_TIME_LWP_INIT(l);
364} 364}
365 365
366static void 366static void
367lwp_dtor(void *arg, void *obj) 367lwp_dtor(void *arg, void *obj)
368{ 368{
369 lwp_t *l = obj; 369 lwp_t *l = obj;
370 uint64_t where; 
371 (void)l; 370 (void)l;
372 371
373 /* 372 /*
374 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu() 373 * Provide a barrier to ensure that all mutex_oncpu() and rw_oncpu()
375 * calls will exit before memory of LWP is returned to the pool, where 374 * calls will exit before memory of LWP is returned to the pool, where
376 * KVA of LWP structure might be freed and re-used for other purposes. 375 * KVA of LWP structure might be freed and re-used for other purposes.
377 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu() 376 * Kernel preemption is disabled around mutex_oncpu() and rw_oncpu()
378 * callers, therefore cross-call to all CPUs will do the job. Also, 377 * callers, therefore cross-call to all CPUs will do the job. Also,
379 * the value of l->l_cpu must be still valid at this point. 378 * the value of l->l_cpu must be still valid at this point.
380 */ 379 */
381 KASSERT(l->l_cpu != NULL); 380 KASSERT(l->l_cpu != NULL);
382 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 381 xc_barrier(0);
383 xc_wait(where); 
384} 382}
385 383
386/* 384/*
387 * Set an LWP suspended. 385 * Set an LWP suspended.
388 * 386 *
389 * Must be called with p_lock held, and the LWP locked. Will unlock the 387 * Must be called with p_lock held, and the LWP locked. Will unlock the
390 * LWP before return. 388 * LWP before return.
391 */ 389 */
392int 390int
393lwp_suspend(struct lwp *curl, struct lwp *t) 391lwp_suspend(struct lwp *curl, struct lwp *t)
394{ 392{
395 int error; 393 int error;
396 394

cvs diff -r1.38 -r1.39 src/sys/kern/kern_ras.c

--- src/sys/kern/kern_ras.c 2016/07/04 07:56:07 1.38
+++ src/sys/kern/kern_ras.c 2019/10/06 15:11:17 1.39
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_ras.c,v 1.38 2016/07/04 07:56:07 maxv Exp $ */ 1/* $NetBSD: kern_ras.c,v 1.39 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Gregory McGarry, and by Andrew Doran. 8 * by Gregory McGarry, and by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: kern_ras.c,v 1.38 2016/07/04 07:56:07 maxv Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: kern_ras.c,v 1.39 2019/10/06 15:11:17 uwe Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/kernel.h> 37#include <sys/kernel.h>
38#include <sys/kmem.h> 38#include <sys/kmem.h>
39#include <sys/proc.h> 39#include <sys/proc.h>
40#include <sys/ras.h> 40#include <sys/ras.h>
41#include <sys/xcall.h> 41#include <sys/xcall.h>
42#include <sys/syscallargs.h> 42#include <sys/syscallargs.h>
43 43
44#include <uvm/uvm_extern.h> 44#include <uvm/uvm_extern.h>
45 45
46#define MAX_RAS_PER_PROC 16 46#define MAX_RAS_PER_PROC 16
@@ -56,29 +56,27 @@ int ras_debug = 0; @@ -56,29 +56,27 @@ int ras_debug = 0;
56 56
57/* 57/*
58 * Force all CPUs through cpu_switchto(), waiting until complete. 58 * Force all CPUs through cpu_switchto(), waiting until complete.
59 * Context switching will drain the write buffer on the calling 59 * Context switching will drain the write buffer on the calling
60 * CPU. 60 * CPU.
61 */ 61 */
62static void 62static void
63ras_sync(void) 63ras_sync(void)
64{ 64{
65 65
66 /* No need to sync if exiting or single threaded. */ 66 /* No need to sync if exiting or single threaded. */
67 if (curproc->p_nlwps > 1 && ncpu > 1) { 67 if (curproc->p_nlwps > 1 && ncpu > 1) {
68#ifdef NO_SOFTWARE_PATENTS 68#ifdef NO_SOFTWARE_PATENTS
69 uint64_t where; 69 xc_barrier(0);
70 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 
71 xc_wait(where); 
72#else 70#else
73 /* 71 /*
74 * Assumptions: 72 * Assumptions:
75 * 73 *
76 * o preemption is disabled by the thread in 74 * o preemption is disabled by the thread in
77 * ras_lookup(). 75 * ras_lookup().
78 * o proc::p_raslist is only inspected with 76 * o proc::p_raslist is only inspected with
79 * preemption disabled. 77 * preemption disabled.
80 * o ras_lookup() plus loads reordered in advance 78 * o ras_lookup() plus loads reordered in advance
81 * will take no longer than 1/8s to complete. 79 * will take no longer than 1/8s to complete.
82 */ 80 */
83 const int delta = hz >> 3; 81 const int delta = hz >> 3;
84 int target = hardclock_ticks + delta; 82 int target = hardclock_ticks + delta;

cvs diff -r1.47 -r1.48 src/sys/kern/kern_softint.c

--- src/sys/kern/kern_softint.c 2019/05/17 03:34:26 1.47
+++ src/sys/kern/kern_softint.c 2019/10/06 15:11:17 1.48
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_softint.c,v 1.47 2019/05/17 03:34:26 ozaki-r Exp $ */ 1/* $NetBSD: kern_softint.c,v 1.48 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -160,27 +160,27 @@ @@ -160,27 +160,27 @@
160 * interrupt; 160 * interrupt;
161 * } 161 * }
162 * 162 *
163 * Once the soft interrupt has fired (and even if it has blocked), 163 * Once the soft interrupt has fired (and even if it has blocked),
164 * no further soft interrupts at that level will be triggered by 164 * no further soft interrupts at that level will be triggered by
165 * MI code until the soft interrupt handler has ceased execution.  165 * MI code until the soft interrupt handler has ceased execution.
166 * If a soft interrupt handler blocks and is resumed, it resumes 166 * If a soft interrupt handler blocks and is resumed, it resumes
167 * execution as a normal LWP (kthread) and gains VM context. Only 167 * execution as a normal LWP (kthread) and gains VM context. Only
168 * when it has completed and is ready to fire again will it 168 * when it has completed and is ready to fire again will it
169 * interrupt other threads. 169 * interrupt other threads.
170 */ 170 */
171 171
172#include <sys/cdefs.h> 172#include <sys/cdefs.h>
173__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.47 2019/05/17 03:34:26 ozaki-r Exp $"); 173__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.48 2019/10/06 15:11:17 uwe Exp $");
174 174
175#include <sys/param.h> 175#include <sys/param.h>
176#include <sys/proc.h> 176#include <sys/proc.h>
177#include <sys/intr.h> 177#include <sys/intr.h>
178#include <sys/ipi.h> 178#include <sys/ipi.h>
179#include <sys/mutex.h> 179#include <sys/mutex.h>
180#include <sys/kernel.h> 180#include <sys/kernel.h>
181#include <sys/kthread.h> 181#include <sys/kthread.h>
182#include <sys/evcnt.h> 182#include <sys/evcnt.h>
183#include <sys/cpu.h> 183#include <sys/cpu.h>
184#include <sys/xcall.h> 184#include <sys/xcall.h>
185#include <sys/pserialize.h> 185#include <sys/pserialize.h>
186 186
@@ -397,53 +397,51 @@ softint_establish(u_int flags, void (*fu @@ -397,53 +397,51 @@ softint_establish(u_int flags, void (*fu
397 * and trigger it again once this call is made. The caller must not 397 * and trigger it again once this call is made. The caller must not
398 * hold any locks that could be taken from soft interrupt context, 398 * hold any locks that could be taken from soft interrupt context,
399 * because we will wait for the softint to complete if it's still 399 * because we will wait for the softint to complete if it's still
400 * running. 400 * running.
401 */ 401 */
402void 402void
403softint_disestablish(void *arg) 403softint_disestablish(void *arg)
404{ 404{
405 CPU_INFO_ITERATOR cii; 405 CPU_INFO_ITERATOR cii;
406 struct cpu_info *ci; 406 struct cpu_info *ci;
407 softcpu_t *sc; 407 softcpu_t *sc;
408 softhand_t *sh; 408 softhand_t *sh;
409 uintptr_t offset; 409 uintptr_t offset;
410 uint64_t where; 
411 u_int flags; 410 u_int flags;
412 411
413 offset = (uintptr_t)arg; 412 offset = (uintptr_t)arg;
414 KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u", 413 KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u",
415 offset, softint_bytes); 414 offset, softint_bytes);
416 415
417 /* 416 /*
418 * Unregister an IPI handler if there is any. Note: there is 417 * Unregister an IPI handler if there is any. Note: there is
419 * no need to disable preemption here - ID is stable. 418 * no need to disable preemption here - ID is stable.
420 */ 419 */
421 sc = curcpu()->ci_data.cpu_softcpu; 420 sc = curcpu()->ci_data.cpu_softcpu;
422 sh = (softhand_t *)((uint8_t *)sc + offset); 421 sh = (softhand_t *)((uint8_t *)sc + offset);
423 if (sh->sh_ipi_id) { 422 if (sh->sh_ipi_id) {
424 ipi_unregister(sh->sh_ipi_id); 423 ipi_unregister(sh->sh_ipi_id);
425 } 424 }
426 425
427 /* 426 /*
428 * Run a cross call so we see up to date values of sh_flags from 427 * Run a cross call so we see up to date values of sh_flags from
429 * all CPUs. Once softint_disestablish() is called, the caller 428 * all CPUs. Once softint_disestablish() is called, the caller
430 * commits to not trigger the interrupt and set SOFTINT_ACTIVE on 429 * commits to not trigger the interrupt and set SOFTINT_ACTIVE on
431 * it again. So, we are only looking for handler records with 430 * it again. So, we are only looking for handler records with
432 * SOFTINT_ACTIVE already set. 431 * SOFTINT_ACTIVE already set.
433 */ 432 */
434 if (__predict_true(mp_online)) { 433 if (__predict_true(mp_online)) {
435 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 434 xc_barrier(0);
436 xc_wait(where); 
437 } 435 }
438 436
439 for (;;) { 437 for (;;) {
440 /* Collect flag values from each CPU. */ 438 /* Collect flag values from each CPU. */
441 flags = 0; 439 flags = 0;
442 for (CPU_INFO_FOREACH(cii, ci)) { 440 for (CPU_INFO_FOREACH(cii, ci)) {
443 sc = ci->ci_data.cpu_softcpu; 441 sc = ci->ci_data.cpu_softcpu;
444 sh = (softhand_t *)((uint8_t *)sc + offset); 442 sh = (softhand_t *)((uint8_t *)sc + offset);
445 KASSERT(sh->sh_func != NULL); 443 KASSERT(sh->sh_func != NULL);
446 flags |= sh->sh_flags; 444 flags |= sh->sh_flags;
447 } 445 }
448 /* Inactive on all CPUs? */ 446 /* Inactive on all CPUs? */
449 if ((flags & SOFTINT_ACTIVE) == 0) { 447 if ((flags & SOFTINT_ACTIVE) == 0) {

cvs diff -r1.18 -r1.19 src/sys/kern/kern_syscall.c

--- src/sys/kern/kern_syscall.c 2019/05/06 08:05:03 1.18
+++ src/sys/kern/kern_syscall.c 2019/10/06 15:11:17 1.19
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_syscall.c,v 1.18 2019/05/06 08:05:03 kamil Exp $ */ 1/* $NetBSD: kern_syscall.c,v 1.19 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software developed for The NetBSD Foundation 7 * This code is derived from software developed for The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@ @@ -20,27 +20,27 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: kern_syscall.c,v 1.18 2019/05/06 08:05:03 kamil Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: kern_syscall.c,v 1.19 2019/10/06 15:11:17 uwe Exp $");
34 34
35#ifdef _KERNEL_OPT 35#ifdef _KERNEL_OPT
36#include "opt_modular.h" 36#include "opt_modular.h"
37#include "opt_syscall_debug.h" 37#include "opt_syscall_debug.h"
38#include "opt_ktrace.h" 38#include "opt_ktrace.h"
39#include "opt_ptrace.h" 39#include "opt_ptrace.h"
40#include "opt_dtrace.h" 40#include "opt_dtrace.h"
41#endif 41#endif
42 42
43/* XXX To get syscall prototypes. */ 43/* XXX To get syscall prototypes. */
44#define SYSVSHM 44#define SYSVSHM
45#define SYSVSEM 45#define SYSVSEM
46#define SYSVMSG 46#define SYSVMSG
@@ -136,27 +136,26 @@ syscall_establish(const struct emul *em, @@ -136,27 +136,26 @@ syscall_establish(const struct emul *em,
136 /* Everything looks good, patch them in. */ 136 /* Everything looks good, patch them in. */
137 for (i = 0; sp[i].sp_call != NULL; i++) { 137 for (i = 0; sp[i].sp_call != NULL; i++) {
138 sy[sp[i].sp_code].sy_call = sp[i].sp_call; 138 sy[sp[i].sp_code].sy_call = sp[i].sp_call;
139 } 139 }
140 140
141 return 0; 141 return 0;
142} 142}
143 143
144int 144int
145syscall_disestablish(const struct emul *em, const struct syscall_package *sp) 145syscall_disestablish(const struct emul *em, const struct syscall_package *sp)
146{ 146{
147 struct sysent *sy; 147 struct sysent *sy;
148 const uint32_t *sb; 148 const uint32_t *sb;
149 uint64_t where; 
150 lwp_t *l; 149 lwp_t *l;
151 int i; 150 int i;
152 151
153 KASSERT(kernconfig_is_held()); 152 KASSERT(kernconfig_is_held());
154 153
155 if (em == NULL) { 154 if (em == NULL) {
156 em = &emul_netbsd; 155 em = &emul_netbsd;
157 } 156 }
158 sy = em->e_sysent; 157 sy = em->e_sysent;
159 sb = em->e_nomodbits; 158 sb = em->e_nomodbits;
160 159
161 /* 160 /*
162 * First, patch the system calls to sys_nomodule or sys_nosys 161 * First, patch the system calls to sys_nomodule or sys_nosys
@@ -165,28 +164,27 @@ syscall_disestablish(const struct emul * @@ -165,28 +164,27 @@ syscall_disestablish(const struct emul *
165 for (i = 0; sp[i].sp_call != NULL; i++) { 164 for (i = 0; sp[i].sp_call != NULL; i++) {
166 KASSERT(sy[sp[i].sp_code].sy_call == sp[i].sp_call); 165 KASSERT(sy[sp[i].sp_code].sy_call == sp[i].sp_call);
167 sy[sp[i].sp_code].sy_call = 166 sy[sp[i].sp_code].sy_call =
168 sb[sp[i].sp_code / 32] & (1 << (sp[i].sp_code % 32)) ? 167 sb[sp[i].sp_code / 32] & (1 << (sp[i].sp_code % 32)) ?
169 sys_nomodule : sys_nosys; 168 sys_nomodule : sys_nosys;
170 } 169 }
171 170
172 /* 171 /*
173 * Run a cross call to cycle through all CPUs. This does two 172 * Run a cross call to cycle through all CPUs. This does two
174 * things: lock activity provides a barrier and makes our update 173 * things: lock activity provides a barrier and makes our update
175 * of sy_call visible to all CPUs, and upon return we can be sure 174 * of sy_call visible to all CPUs, and upon return we can be sure
176 * that we see pertinent values of l_sysent posted by remote CPUs. 175 * that we see pertinent values of l_sysent posted by remote CPUs.
177 */ 176 */
178 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 177 xc_barrier(0);
179 xc_wait(where); 
180 178
181 /* 179 /*
182 * Now it's safe to check l_sysent. Run through all LWPs and see 180 * Now it's safe to check l_sysent. Run through all LWPs and see
183 * if anyone is still using the system call. 181 * if anyone is still using the system call.
184 */ 182 */
185 for (i = 0; sp[i].sp_call != NULL; i++) { 183 for (i = 0; sp[i].sp_call != NULL; i++) {
186 mutex_enter(proc_lock); 184 mutex_enter(proc_lock);
187 LIST_FOREACH(l, &alllwp, l_list) { 185 LIST_FOREACH(l, &alllwp, l_list) {
188 if (l->l_sysent == &sy[sp[i].sp_code]) { 186 if (l->l_sysent == &sy[sp[i].sp_code]) {
189 break; 187 break;
190 } 188 }
191 } 189 }
192 mutex_exit(proc_lock); 190 mutex_exit(proc_lock);

cvs diff -r1.51 -r1.52 src/sys/kern/kern_tc.c

--- src/sys/kern/kern_tc.c 2018/07/01 15:12:06 1.51
+++ src/sys/kern/kern_tc.c 2019/10/06 15:11:17 1.52
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: kern_tc.c,v 1.51 2018/07/01 15:12:06 riastradh Exp $ */ 1/* $NetBSD: kern_tc.c,v 1.52 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -30,27 +30,27 @@ @@ -30,27 +30,27 @@
30 */ 30 */
31 31
32/*- 32/*-
33 * ---------------------------------------------------------------------------- 33 * ----------------------------------------------------------------------------
34 * "THE BEER-WARE LICENSE" (Revision 42): 34 * "THE BEER-WARE LICENSE" (Revision 42):
35 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you 35 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
36 * can do whatever you want with this stuff. If we meet some day, and you think 36 * can do whatever you want with this stuff. If we meet some day, and you think
37 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp 37 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
38 * --------------------------------------------------------------------------- 38 * ---------------------------------------------------------------------------
39 */ 39 */
40 40
41#include <sys/cdefs.h> 41#include <sys/cdefs.h>
42/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */ 42/* __FBSDID("$FreeBSD: src/sys/kern/kern_tc.c,v 1.166 2005/09/19 22:16:31 andre Exp $"); */
43__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.51 2018/07/01 15:12:06 riastradh Exp $"); 43__KERNEL_RCSID(0, "$NetBSD: kern_tc.c,v 1.52 2019/10/06 15:11:17 uwe Exp $");
44 44
45#ifdef _KERNEL_OPT 45#ifdef _KERNEL_OPT
46#include "opt_ntp.h" 46#include "opt_ntp.h"
47#endif 47#endif
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/kernel.h> 50#include <sys/kernel.h>
51#include <sys/reboot.h> /* XXX just to get AB_VERBOSE */ 51#include <sys/reboot.h> /* XXX just to get AB_VERBOSE */
52#include <sys/sysctl.h> 52#include <sys/sysctl.h>
53#include <sys/syslog.h> 53#include <sys/syslog.h>
54#include <sys/systm.h> 54#include <sys/systm.h>
55#include <sys/timepps.h> 55#include <sys/timepps.h>
56#include <sys/timetc.h> 56#include <sys/timetc.h>
@@ -599,27 +599,26 @@ tc_gonebad(struct timecounter *tc) @@ -599,27 +599,26 @@ tc_gonebad(struct timecounter *tc)
599 membar_producer(); 599 membar_producer();
600 atomic_inc_uint(&timecounter_bad); 600 atomic_inc_uint(&timecounter_bad);
601} 601}
602 602
603/* 603/*
604 * Stop using a timecounter and remove it from the timecounters list. 604 * Stop using a timecounter and remove it from the timecounters list.
605 */ 605 */
606int 606int
607tc_detach(struct timecounter *target) 607tc_detach(struct timecounter *target)
608{ 608{
609 struct timecounter *tc; 609 struct timecounter *tc;
610 struct timecounter **tcp = NULL; 610 struct timecounter **tcp = NULL;
611 int removals; 611 int removals;
612 uint64_t where; 
613 lwp_t *l; 612 lwp_t *l;
614 613
615 /* First, find the timecounter. */ 614 /* First, find the timecounter. */
616 mutex_spin_enter(&timecounter_lock); 615 mutex_spin_enter(&timecounter_lock);
617 for (tcp = &timecounters, tc = timecounters; 616 for (tcp = &timecounters, tc = timecounters;
618 tc != NULL; 617 tc != NULL;
619 tcp = &tc->tc_next, tc = tc->tc_next) { 618 tcp = &tc->tc_next, tc = tc->tc_next) {
620 if (tc == target) 619 if (tc == target)
621 break; 620 break;
622 } 621 }
623 if (tc == NULL) { 622 if (tc == NULL) {
624 mutex_spin_exit(&timecounter_lock); 623 mutex_spin_exit(&timecounter_lock);
625 return ESRCH; 624 return ESRCH;
@@ -642,28 +641,27 @@ tc_detach(struct timecounter *target) @@ -642,28 +641,27 @@ tc_detach(struct timecounter *target)
642 * We issue a broadcast cross call to elide memory ordering issues, 641 * We issue a broadcast cross call to elide memory ordering issues,
643 * then scan all LWPs in the system looking at each's timecounter 642 * then scan all LWPs in the system looking at each's timecounter
644 * generation number. We need to see a value of zero (not actively 643 * generation number. We need to see a value of zero (not actively
645 * using a timecounter) or a value greater than our removal value. 644 * using a timecounter) or a value greater than our removal value.
646 * 645 *
647 * We may race with threads that read `timecounter_removals' and 646 * We may race with threads that read `timecounter_removals' and
648 * and then get preempted before updating `l_tcgen'. This is not 647 * and then get preempted before updating `l_tcgen'. This is not
649 * a problem, since it means that these threads have not yet started 648 * a problem, since it means that these threads have not yet started
650 * accessing timecounter state. All we do need is one clean 649 * accessing timecounter state. All we do need is one clean
651 * snapshot of the system where every thread appears not to be using 650 * snapshot of the system where every thread appears not to be using
652 * old timecounter state. 651 * old timecounter state.
653 */ 652 */
654 for (;;) { 653 for (;;) {
655 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 654 xc_barrier(0);
656 xc_wait(where); 
657 655
658 mutex_enter(proc_lock); 656 mutex_enter(proc_lock);
659 LIST_FOREACH(l, &alllwp, l_list) { 657 LIST_FOREACH(l, &alllwp, l_list) {
660 if (l->l_tcgen == 0 || l->l_tcgen > removals) { 658 if (l->l_tcgen == 0 || l->l_tcgen > removals) {
661 /* 659 /*
662 * Not using timecounter or old timecounter 660 * Not using timecounter or old timecounter
663 * state at time of our xcall or later. 661 * state at time of our xcall or later.
664 */ 662 */
665 continue; 663 continue;
666 } 664 }
667 break; 665 break;
668 } 666 }
669 mutex_exit(proc_lock); 667 mutex_exit(proc_lock);

cvs diff -r1.12 -r1.13 src/sys/kern/subr_pserialize.c

--- src/sys/kern/subr_pserialize.c 2018/08/14 01:06:01 1.12
+++ src/sys/kern/subr_pserialize.c 2019/10/06 15:11:17 1.13
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_pserialize.c,v 1.12 2018/08/14 01:06:01 ozaki-r Exp $ */ 1/* $NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -28,27 +28,27 @@ @@ -28,27 +28,27 @@
28 28
29/* 29/*
30 * Passive serialization. 30 * Passive serialization.
31 * 31 *
32 * Implementation accurately matches the lapsed US patent 4809168, therefore 32 * Implementation accurately matches the lapsed US patent 4809168, therefore
33 * code is patent-free in the United States. Your use of this code is at 33 * code is patent-free in the United States. Your use of this code is at
34 * your own risk. 34 * your own risk.
35 *  35 *
36 * Note for NetBSD developers: all changes to this source file must be 36 * Note for NetBSD developers: all changes to this source file must be
37 * approved by the <core>. 37 * approved by the <core>.
38 */ 38 */
39 39
40#include <sys/cdefs.h> 40#include <sys/cdefs.h>
41__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.12 2018/08/14 01:06:01 ozaki-r Exp $"); 41__KERNEL_RCSID(0, "$NetBSD: subr_pserialize.c,v 1.13 2019/10/06 15:11:17 uwe Exp $");
42 42
43#include <sys/param.h> 43#include <sys/param.h>
44 44
45#include <sys/condvar.h> 45#include <sys/condvar.h>
46#include <sys/cpu.h> 46#include <sys/cpu.h>
47#include <sys/evcnt.h> 47#include <sys/evcnt.h>
48#include <sys/kmem.h> 48#include <sys/kmem.h>
49#include <sys/mutex.h> 49#include <sys/mutex.h>
50#include <sys/pserialize.h> 50#include <sys/pserialize.h>
51#include <sys/proc.h> 51#include <sys/proc.h>
52#include <sys/queue.h> 52#include <sys/queue.h>
53#include <sys/xcall.h> 53#include <sys/xcall.h>
54 54
@@ -137,27 +137,26 @@ pserialize_destroy(pserialize_t psz) @@ -137,27 +137,26 @@ pserialize_destroy(pserialize_t psz)
137/* 137/*
138 * pserialize_perform: 138 * pserialize_perform:
139 * 139 *
140 * Perform the write side of passive serialization. The calling 140 * Perform the write side of passive serialization. The calling
141 * thread holds an exclusive lock on the data object(s) being updated. 141 * thread holds an exclusive lock on the data object(s) being updated.
142 * We wait until every processor in the system has made at least two 142 * We wait until every processor in the system has made at least two
143 * passes through cpu_switchto(). The wait is made with the caller's 143 * passes through cpu_switchto(). The wait is made with the caller's
144 * update lock held, but is short term. 144 * update lock held, but is short term.
145 */ 145 */
146void 146void
147pserialize_perform(pserialize_t psz) 147pserialize_perform(pserialize_t psz)
148{ 148{
149 int n; 149 int n;
150 uint64_t xc; 
151 150
152 KASSERT(!cpu_intr_p()); 151 KASSERT(!cpu_intr_p());
153 KASSERT(!cpu_softintr_p()); 152 KASSERT(!cpu_softintr_p());
154 153
155 if (__predict_false(panicstr != NULL)) { 154 if (__predict_false(panicstr != NULL)) {
156 return; 155 return;
157 } 156 }
158 KASSERT(psz->psz_owner == NULL); 157 KASSERT(psz->psz_owner == NULL);
159 KASSERT(ncpu > 0); 158 KASSERT(ncpu > 0);
160 159
161 if (__predict_false(mp_online == false)) { 160 if (__predict_false(mp_online == false)) {
162 psz_ev_excl.ev_count++; 161 psz_ev_excl.ev_count++;
163 return; 162 return;
@@ -177,28 +176,27 @@ pserialize_perform(pserialize_t psz) @@ -177,28 +176,27 @@ pserialize_perform(pserialize_t psz)
177 TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain); 176 TAILQ_INSERT_TAIL(&psz_queue0, psz, psz_chain);
178 psz_work_todo++; 177 psz_work_todo++;
179 178
180 n = 0; 179 n = 0;
181 do { 180 do {
182 mutex_spin_exit(&psz_lock); 181 mutex_spin_exit(&psz_lock);
183 182
184 /* 183 /*
185 * Force some context switch activity on every CPU, as 184 * Force some context switch activity on every CPU, as
186 * the system may not be busy. Pause to not flood. 185 * the system may not be busy. Pause to not flood.
187 */ 186 */
188 if (n++ > 1) 187 if (n++ > 1)
189 kpause("psrlz", false, 1, NULL); 188 kpause("psrlz", false, 1, NULL);
190 xc = xc_broadcast(XC_HIGHPRI, (xcfunc_t)nullop, NULL, NULL); 189 xc_barrier(XC_HIGHPRI);
191 xc_wait(xc); 
192 190
193 mutex_spin_enter(&psz_lock); 191 mutex_spin_enter(&psz_lock);
194 } while (!kcpuset_iszero(psz->psz_target)); 192 } while (!kcpuset_iszero(psz->psz_target));
195 193
196 psz_ev_excl.ev_count++; 194 psz_ev_excl.ev_count++;
197 mutex_spin_exit(&psz_lock); 195 mutex_spin_exit(&psz_lock);
198 196
199 psz->psz_owner = NULL; 197 psz->psz_owner = NULL;
200} 198}
201 199
202int 200int
203pserialize_read_enter(void) 201pserialize_read_enter(void)
204{ 202{

cvs diff -r1.26 -r1.27 src/sys/kern/subr_xcall.c

--- src/sys/kern/subr_xcall.c 2018/02/07 04:25:09 1.26
+++ src/sys/kern/subr_xcall.c 2019/10/06 15:11:17 1.27
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: subr_xcall.c,v 1.26 2018/02/07 04:25:09 ozaki-r Exp $ */ 1/* $NetBSD: subr_xcall.c,v 1.27 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007-2010 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007-2010 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran and Mindaugas Rasiukevicius. 8 * by Andrew Doran and Mindaugas Rasiukevicius.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -64,27 +64,27 @@ @@ -64,27 +64,27 @@
64 * CPU, and so has exclusive access to the CPU. Since this facility 64 * CPU, and so has exclusive access to the CPU. Since this facility
65 * is heavyweight, it's expected that it will not be used often. 65 * is heavyweight, it's expected that it will not be used often.
66 * 66 *
67 * Cross calls must not allocate memory, as the pagedaemon uses 67 * Cross calls must not allocate memory, as the pagedaemon uses
68 * them (and memory allocation may need to wait on the pagedaemon). 68 * them (and memory allocation may need to wait on the pagedaemon).
69 * 69 *
70 * A low-overhead mechanism for high priority calls (XC_HIGHPRI) is 70 * A low-overhead mechanism for high priority calls (XC_HIGHPRI) is
71 * also provided. The function to be executed runs on a software 71 * also provided. The function to be executed runs on a software
72 * interrupt context, at IPL_SOFTSERIAL level, and is expected to 72 * interrupt context, at IPL_SOFTSERIAL level, and is expected to
73 * be very lightweight, e.g. avoid blocking. 73 * be very lightweight, e.g. avoid blocking.
74 */ 74 */
75 75
76#include <sys/cdefs.h> 76#include <sys/cdefs.h>
77__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.26 2018/02/07 04:25:09 ozaki-r Exp $"); 77__KERNEL_RCSID(0, "$NetBSD: subr_xcall.c,v 1.27 2019/10/06 15:11:17 uwe Exp $");
78 78
79#include <sys/types.h> 79#include <sys/types.h>
80#include <sys/param.h> 80#include <sys/param.h>
81#include <sys/xcall.h> 81#include <sys/xcall.h>
82#include <sys/mutex.h> 82#include <sys/mutex.h>
83#include <sys/condvar.h> 83#include <sys/condvar.h>
84#include <sys/evcnt.h> 84#include <sys/evcnt.h>
85#include <sys/kthread.h> 85#include <sys/kthread.h>
86#include <sys/cpu.h> 86#include <sys/cpu.h>
87 87
88#ifdef _RUMPKERNEL 88#ifdef _RUMPKERNEL
89#include "rump_private.h" 89#include "rump_private.h"
90#endif 90#endif
@@ -257,26 +257,50 @@ xc_broadcast(unsigned int flags, xcfunc_ @@ -257,26 +257,50 @@ xc_broadcast(unsigned int flags, xcfunc_
257{ 257{
258 258
259 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 259 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
260 ASSERT_SLEEPABLE(); 260 ASSERT_SLEEPABLE();
261 261
262 if ((flags & XC_HIGHPRI) != 0) { 262 if ((flags & XC_HIGHPRI) != 0) {
263 int ipl = xc_extract_ipl(flags); 263 int ipl = xc_extract_ipl(flags);
264 return xc_highpri(func, arg1, arg2, NULL, ipl); 264 return xc_highpri(func, arg1, arg2, NULL, ipl);
265 } else { 265 } else {
266 return xc_lowpri(func, arg1, arg2, NULL); 266 return xc_lowpri(func, arg1, arg2, NULL);
267 } 267 }
268} 268}
269 269
 270
 271static void
 272xc_nop(void *arg1, void *arg2)
 273{
 274
 275 return;
 276}
 277
 278
 279/*
 280 * xc_barrier:
 281 *
 282 * Broadcast a nop to all CPUs in the system.
 283 */
 284void
 285xc_barrier(unsigned int flags)
 286{
 287 uint64_t where;
 288
 289 where = xc_broadcast(flags, xc_nop, NULL, NULL);
 290 xc_wait(where);
 291}
 292
 293
270/* 294/*
271 * xc_unicast: 295 * xc_unicast:
272 * 296 *
273 * Trigger a call on one CPU. 297 * Trigger a call on one CPU.
274 */ 298 */
275uint64_t 299uint64_t
276xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2, 300xc_unicast(unsigned int flags, xcfunc_t func, void *arg1, void *arg2,
277 struct cpu_info *ci) 301 struct cpu_info *ci)
278{ 302{
279 303
280 KASSERT(ci != NULL); 304 KASSERT(ci != NULL);
281 KASSERT(!cpu_intr_p() && !cpu_softintr_p()); 305 KASSERT(!cpu_intr_p() && !cpu_softintr_p());
282 ASSERT_SLEEPABLE(); 306 ASSERT_SLEEPABLE();

cvs diff -r1.462 -r1.463 src/sys/net/if.c

--- src/sys/net/if.c 2019/09/25 09:53:37 1.462
+++ src/sys/net/if.c 2019/10/06 15:11:17 1.463
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if.c,v 1.462 2019/09/25 09:53:37 ozaki-r Exp $ */ 1/* $NetBSD: if.c,v 1.463 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc. 4 * Copyright (c) 1999, 2000, 2001, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by William Studenmund and Jason R. Thorpe. 8 * by William Studenmund and Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -80,27 +80,27 @@ @@ -80,27 +80,27 @@
80 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 80 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
81 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 81 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
82 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 82 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
83 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 83 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
84 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 84 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
85 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 85 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
86 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 86 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
87 * SUCH DAMAGE. 87 * SUCH DAMAGE.
88 * 88 *
89 * @(#)if.c 8.5 (Berkeley) 1/9/95 89 * @(#)if.c 8.5 (Berkeley) 1/9/95
90 */ 90 */
91 91
92#include <sys/cdefs.h> 92#include <sys/cdefs.h>
93__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.462 2019/09/25 09:53:37 ozaki-r Exp $"); 93__KERNEL_RCSID(0, "$NetBSD: if.c,v 1.463 2019/10/06 15:11:17 uwe Exp $");
94 94
95#if defined(_KERNEL_OPT) 95#if defined(_KERNEL_OPT)
96#include "opt_inet.h" 96#include "opt_inet.h"
97#include "opt_ipsec.h" 97#include "opt_ipsec.h"
98#include "opt_atalk.h" 98#include "opt_atalk.h"
99#include "opt_wlan.h" 99#include "opt_wlan.h"
100#include "opt_net_mpsafe.h" 100#include "opt_net_mpsafe.h"
101#include "opt_mrouting.h" 101#include "opt_mrouting.h"
102#endif 102#endif
103 103
104#include <sys/param.h> 104#include <sys/param.h>
105#include <sys/mbuf.h> 105#include <sys/mbuf.h>
106#include <sys/systm.h> 106#include <sys/systm.h>
@@ -1297,27 +1297,26 @@ if_check_and_free_ifa_list(struct ifnet  @@ -1297,27 +1297,26 @@ if_check_and_free_ifa_list(struct ifnet
1297 * as it may block. 1297 * as it may block.
1298 */ 1298 */
1299void 1299void
1300if_detach(struct ifnet *ifp) 1300if_detach(struct ifnet *ifp)
1301{ 1301{
1302 struct socket so; 1302 struct socket so;
1303 struct ifaddr *ifa; 1303 struct ifaddr *ifa;
1304#ifdef IFAREF_DEBUG 1304#ifdef IFAREF_DEBUG
1305 struct ifaddr *last_ifa = NULL; 1305 struct ifaddr *last_ifa = NULL;
1306#endif 1306#endif
1307 struct domain *dp; 1307 struct domain *dp;
1308 const struct protosw *pr; 1308 const struct protosw *pr;
1309 int s, i, family, purged; 1309 int s, i, family, purged;
1310 uint64_t xc; 
1311 1310
1312#ifdef IFAREF_DEBUG 1311#ifdef IFAREF_DEBUG
1313 if_build_ifa_list(ifp); 1312 if_build_ifa_list(ifp);
1314#endif 1313#endif
1315 /* 1314 /*
1316 * XXX It's kind of lame that we have to have the 1315 * XXX It's kind of lame that we have to have the
1317 * XXX socket structure... 1316 * XXX socket structure...
1318 */ 1317 */
1319 memset(&so, 0, sizeof(so)); 1318 memset(&so, 0, sizeof(so));
1320 1319
1321 s = splnet(); 1320 s = splnet();
1322 1321
1323 sysctl_teardown(&ifp->if_sysctl_log); 1322 sysctl_teardown(&ifp->if_sysctl_log);
@@ -1503,28 +1502,27 @@ restart: @@ -1503,28 +1502,27 @@ restart:
1503 1502
1504 /* 1503 /*
1505 * IP queues have to be processed separately: net-queue barrier 1504 * IP queues have to be processed separately: net-queue barrier
1506 * ensures that the packets are dequeued while a cross-call will 1505 * ensures that the packets are dequeued while a cross-call will
1507 * ensure that the interrupts have completed. FIXME: not quite.. 1506 * ensure that the interrupts have completed. FIXME: not quite..
1508 */ 1507 */
1509#ifdef INET 1508#ifdef INET
1510 pktq_barrier(ip_pktq); 1509 pktq_barrier(ip_pktq);
1511#endif 1510#endif
1512#ifdef INET6 1511#ifdef INET6
1513 if (in6_present) 1512 if (in6_present)
1514 pktq_barrier(ip6_pktq); 1513 pktq_barrier(ip6_pktq);
1515#endif 1514#endif
1516 xc = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 1515 xc_barrier(0);
1517 xc_wait(xc); 
1518 1516
1519 if (ifp->if_percpuq != NULL) { 1517 if (ifp->if_percpuq != NULL) {
1520 if_percpuq_destroy(ifp->if_percpuq); 1518 if_percpuq_destroy(ifp->if_percpuq);
1521 ifp->if_percpuq = NULL; 1519 ifp->if_percpuq = NULL;
1522 } 1520 }
1523 1521
1524 mutex_obj_free(ifp->if_ioctl_lock); 1522 mutex_obj_free(ifp->if_ioctl_lock);
1525 ifp->if_ioctl_lock = NULL; 1523 ifp->if_ioctl_lock = NULL;
1526 mutex_obj_free(ifp->if_snd.ifq_lock); 1524 mutex_obj_free(ifp->if_snd.ifq_lock);
1527 1525
1528 splx(s); 1526 splx(s);
1529 1527
1530#ifdef IFAREF_DEBUG 1528#ifdef IFAREF_DEBUG

cvs diff -r1.49 -r1.50 src/sys/net/agr/if_agr.c

--- src/sys/net/agr/if_agr.c 2019/04/26 11:51:56 1.49
+++ src/sys/net/agr/if_agr.c 2019/10/06 15:11:17 1.50
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: if_agr.c,v 1.49 2019/04/26 11:51:56 pgoyette Exp $ */ 1/* $NetBSD: if_agr.c,v 1.50 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c)2005 YAMAMOTO Takashi, 4 * Copyright (c)2005 YAMAMOTO Takashi,
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE. 26 * SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: if_agr.c,v 1.49 2019/04/26 11:51:56 pgoyette Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: if_agr.c,v 1.50 2019/10/06 15:11:17 uwe Exp $");
31 31
32#ifdef _KERNEL_OPT 32#ifdef _KERNEL_OPT
33#include "opt_inet.h" 33#include "opt_inet.h"
34#endif 34#endif
35 35
36#include <sys/param.h> 36#include <sys/param.h>
37#include <sys/callout.h> 37#include <sys/callout.h>
38#include <sys/malloc.h> 38#include <sys/malloc.h>
39#include <sys/mbuf.h> 39#include <sys/mbuf.h>
40#include <sys/systm.h> 40#include <sys/systm.h>
41#include <sys/types.h> 41#include <sys/types.h>
42#include <sys/queue.h> 42#include <sys/queue.h>
43#include <sys/sockio.h> 43#include <sys/sockio.h>
@@ -884,33 +884,31 @@ agrreq_copyout(void *ubuf, struct agrreq @@ -884,33 +884,31 @@ agrreq_copyout(void *ubuf, struct agrreq
884 884
885 error = copyout(ar, ubuf, sizeof(*ar)); 885 error = copyout(ar, ubuf, sizeof(*ar));
886 if (error) { 886 if (error) {
887 return error; 887 return error;
888 } 888 }
889 889
890 return 0; 890 return 0;
891} 891}
892 892
893/* Make sure that if any interrupt handlers are out of the softc. */ 893/* Make sure that if any interrupt handlers are out of the softc. */
894static void 894static void
895agr_sync(void) 895agr_sync(void)
896{ 896{
897 uint64_t h; 
898 897
899 if (!mp_online) 898 if (!mp_online)
900 return; 899 return;
901 900
902 h = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 901 xc_barrier(0);
903 xc_wait(h); 
904} 902}
905 903
906static int 904static int
907agr_pause(struct agr_softc *sc) 905agr_pause(struct agr_softc *sc)
908{ 906{
909 int error; 907 int error;
910 908
911 mutex_enter(&sc->sc_entry_mtx); 909 mutex_enter(&sc->sc_entry_mtx);
912 if ((error = sc->sc_noentry) != 0) 910 if ((error = sc->sc_noentry) != 0)
913 goto out; 911 goto out;
914 912
915 sc->sc_noentry = EBUSY; 913 sc->sc_noentry = EBUSY;
916 914
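
agr_sync() keeps its mp_online check: cross-calls are only meaningful
once the secondary CPUs are running, and xc_barrier() does not change
that requirement.  A minimal sketch of the same guard pattern, with
example_sync() as a hypothetical name rather than code from the diff:

#include <sys/cpu.h>		/* mp_online */
#include <sys/xcall.h>

/*
 * Hypothetical helper with the same shape as agr_sync() above: skip the
 * barrier while only the boot CPU is running, otherwise wait for every
 * CPU to complete a nop cross-call.
 */
static void
example_sync(void)
{

	if (!mp_online)
		return;
	xc_barrier(0);
}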

cvs diff -r1.109 -r1.110 src/sys/opencrypto/crypto.c

--- src/sys/opencrypto/crypto.c 2019/10/01 18:00:09 1.109
+++ src/sys/opencrypto/crypto.c 2019/10/06 15:11:17 1.110
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: crypto.c,v 1.109 2019/10/01 18:00:09 chs Exp $ */ 1/* $NetBSD: crypto.c,v 1.110 2019/10/06 15:11:17 uwe Exp $ */
2/* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */ 2/* $FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $ */
3/* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */ 3/* $OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $ */
4 4
5/*- 5/*-
6 * Copyright (c) 2008 The NetBSD Foundation, Inc. 6 * Copyright (c) 2008 The NetBSD Foundation, Inc.
7 * All rights reserved. 7 * All rights reserved.
8 * 8 *
9 * This code is derived from software contributed to The NetBSD Foundation 9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Coyote Point Systems, Inc. 10 * by Coyote Point Systems, Inc.
11 * 11 *
12 * Redistribution and use in source and binary forms, with or without 12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions 13 * modification, are permitted provided that the following conditions
14 * are met: 14 * are met:
@@ -43,27 +43,27 @@ @@ -43,27 +43,27 @@
43 * Permission to use, copy, and modify this software with or without fee 43 * Permission to use, copy, and modify this software with or without fee
44 * is hereby granted, provided that this entire notice is included in 44 * is hereby granted, provided that this entire notice is included in
45 * all source code copies of any software which is or includes a copy or 45 * all source code copies of any software which is or includes a copy or
46 * modification of this software. 46 * modification of this software.
47 * 47 *
48 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 48 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
49 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 49 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
50 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 50 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
51 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 51 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
52 * PURPOSE. 52 * PURPOSE.
53 */ 53 */
54 54
55#include <sys/cdefs.h> 55#include <sys/cdefs.h>
56__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.109 2019/10/01 18:00:09 chs Exp $"); 56__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.110 2019/10/06 15:11:17 uwe Exp $");
57 57
58#include <sys/param.h> 58#include <sys/param.h>
59#include <sys/reboot.h> 59#include <sys/reboot.h>
60#include <sys/systm.h> 60#include <sys/systm.h>
61#include <sys/proc.h> 61#include <sys/proc.h>
62#include <sys/pool.h> 62#include <sys/pool.h>
63#include <sys/kthread.h> 63#include <sys/kthread.h>
64#include <sys/once.h> 64#include <sys/once.h>
65#include <sys/sysctl.h> 65#include <sys/sysctl.h>
66#include <sys/intr.h> 66#include <sys/intr.h>
67#include <sys/errno.h> 67#include <sys/errno.h>
68#include <sys/module.h> 68#include <sys/module.h>
69#include <sys/xcall.h> 69#include <sys/xcall.h>
@@ -608,27 +608,26 @@ crypto_init(void) @@ -608,27 +608,26 @@ crypto_init(void)
608{ 608{
609 static ONCE_DECL(crypto_init_once); 609 static ONCE_DECL(crypto_init_once);
610 610
611 return RUN_ONCE(&crypto_init_once, crypto_init0); 611 return RUN_ONCE(&crypto_init_once, crypto_init0);
612} 612}
613 613
614static int 614static int
615crypto_destroy(bool exit_kthread) 615crypto_destroy(bool exit_kthread)
616{ 616{
617 int i; 617 int i;
618 618
619 if (exit_kthread) { 619 if (exit_kthread) {
620 struct cryptocap *cap = NULL; 620 struct cryptocap *cap = NULL;
621 uint64_t where; 
622 bool is_busy = false; 621 bool is_busy = false;
623 622
624 /* if we have any in-progress requests, don't unload */ 623 /* if we have any in-progress requests, don't unload */
625 percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc, 624 percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
626 &is_busy); 625 &is_busy);
627 if (is_busy) 626 if (is_busy)
628 return EBUSY; 627 return EBUSY;
629 /* FIXME: 628 /* FIXME:
630 * prohibit enqueue to crp_q and crp_kq after here. 629 * prohibit enqueue to crp_q and crp_kq after here.
631 */ 630 */
632 631
633 mutex_enter(&crypto_drv_mtx); 632 mutex_enter(&crypto_drv_mtx);
634 for (i = 0; i < crypto_drivers_num; i++) { 633 for (i = 0; i < crypto_drivers_num; i++) {
@@ -647,28 +646,27 @@ crypto_destroy(bool exit_kthread) @@ -647,28 +646,27 @@ crypto_destroy(bool exit_kthread)
647 646
648 /* 647 /*
649 * Ensure cryptoret_softint() is never scheduled and then wait 648 * Ensure cryptoret_softint() is never scheduled and then wait
650 * for last softint_execute(). 649 * for last softint_execute().
651 */ 650 */
652 for (i = 0; i < ncpu; i++) { 651 for (i = 0; i < ncpu; i++) {
653 struct crypto_crp_ret_qs *qs; 652 struct crypto_crp_ret_qs *qs;
654 struct cpu_info *ci = cpu_lookup(i); 653 struct cpu_info *ci = cpu_lookup(i);
655 654
656 qs = crypto_get_crp_ret_qs(ci); 655 qs = crypto_get_crp_ret_qs(ci);
657 qs->crp_ret_q_exit_flag = true; 656 qs->crp_ret_q_exit_flag = true;
658 crypto_put_crp_ret_qs(ci); 657 crypto_put_crp_ret_qs(ci);
659 } 658 }
660 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 659 xc_barrier(0);
661 xc_wait(where); 
662 } 660 }
663 661
664 if (sysctl_opencrypto_clog != NULL) 662 if (sysctl_opencrypto_clog != NULL)
665 sysctl_teardown(&sysctl_opencrypto_clog); 663 sysctl_teardown(&sysctl_opencrypto_clog);
666 664
667 if (crypto_ret_si != NULL) 665 if (crypto_ret_si != NULL)
668 softint_disestablish(crypto_ret_si); 666 softint_disestablish(crypto_ret_si);
669 667
670 if (crypto_q_si != NULL) 668 if (crypto_q_si != NULL)
671 softint_disestablish(crypto_q_si); 669 softint_disestablish(crypto_q_si);
672 670
673 mutex_enter(&crypto_drv_mtx); 671 mutex_enter(&crypto_drv_mtx);
674 if (crypto_drivers != NULL) 672 if (crypto_drivers != NULL)
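
The crypto_destroy() hunk shows the other recurring use of the barrier in
this commit: publish a per-CPU exit flag, then call xc_barrier(0) so that
any handler already running on a remote CPU has finished before the
softints are disestablished.  The generic shape of that idiom, sketched
with hypothetical names (example_stop, example_teardown):

#include <sys/types.h>
#include <sys/xcall.h>

/* Hypothetical flag that per-CPU handlers check before starting work. */
static volatile bool example_stop;

/*
 * Hypothetical teardown path: after the flag is set no new work starts,
 * and the barrier waits until every CPU has passed through a cross-call,
 * i.e. out of any handler that began before the flag became visible, so
 * the resources those handlers touch can then be freed safely.
 */
static void
example_teardown(void)
{

	example_stop = true;
	xc_barrier(0);
	/* ... free the data structures the handlers used ... */
}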

cvs diff -r1.7 -r1.8 src/sys/rump/kern/lib/libsysproxy/sysproxy.c

--- src/sys/rump/kern/lib/libsysproxy/sysproxy.c 2019/05/17 03:34:26 1.7
+++ src/sys/rump/kern/lib/libsysproxy/sysproxy.c 2019/10/06 15:11:17 1.8
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: sysproxy.c,v 1.7 2019/05/17 03:34:26 ozaki-r Exp $ */ 1/* $NetBSD: sysproxy.c,v 1.8 2019/10/06 15:11:17 uwe Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved. 4 * Copyright (c) 2010, 2011 Antti Kantee. All Rights Reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions 7 * modification, are permitted provided that the following conditions
8 * are met: 8 * are met:
9 * 1. Redistributions of source code must retain the above copyright 9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer. 10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright 11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the 12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution. 13 * documentation and/or other materials provided with the distribution.
14 * 14 *
@@ -16,27 +16,27 @@ @@ -16,27 +16,27 @@
16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE. 25 * SUCH DAMAGE.
26 */ 26 */
27 27
28#include <sys/cdefs.h> 28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: sysproxy.c,v 1.7 2019/05/17 03:34:26 ozaki-r Exp $"); 29__KERNEL_RCSID(0, "$NetBSD: sysproxy.c,v 1.8 2019/10/06 15:11:17 uwe Exp $");
30 30
31#include <sys/param.h> 31#include <sys/param.h>
32#include <sys/filedesc.h> 32#include <sys/filedesc.h>
33#include <sys/kmem.h> 33#include <sys/kmem.h>
34#include <sys/syscall.h> 34#include <sys/syscall.h>
35#include <sys/syscallvar.h> 35#include <sys/syscallvar.h>
36#include <sys/systm.h> 36#include <sys/systm.h>
37#include <sys/xcall.h> 37#include <sys/xcall.h>
38#include <sys/lockdebug.h> 38#include <sys/lockdebug.h>
39#include <sys/psref.h> 39#include <sys/psref.h>
40 40
41#define _RUMP_SYSPROXY 41#define _RUMP_SYSPROXY
42#include <rump/rumpuser.h> 42#include <rump/rumpuser.h>
@@ -131,50 +131,48 @@ hyp_rfork(void *priv, int flags, const c @@ -131,50 +131,48 @@ hyp_rfork(void *priv, int flags, const c
131 if (initfds) 131 if (initfds)
132 rump_consdev_init(); 132 rump_consdev_init();
133 133
134 return 0; 134 return 0;
135} 135}
136 136
137/* 137/*
138 * Order all lwps in a process to exit. does *not* wait for them to drain. 138 * Order all lwps in a process to exit. does *not* wait for them to drain.
139 */ 139 */
140static void 140static void
141hyp_lwpexit(void) 141hyp_lwpexit(void)
142{ 142{
143 struct proc *p = curproc; 143 struct proc *p = curproc;
144 uint64_t where; 
145 struct lwp *l; 144 struct lwp *l;
146 145
147 mutex_enter(p->p_lock); 146 mutex_enter(p->p_lock);
148 /* 147 /*
149 * First pass: mark all lwps in the process with LW_RUMP_QEXIT 148 * First pass: mark all lwps in the process with LW_RUMP_QEXIT
150 * so that they know they should exit. 149 * so that they know they should exit.
151 */ 150 */
152 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 151 LIST_FOREACH(l, &p->p_lwps, l_sibling) {
153 if (l == curlwp) 152 if (l == curlwp)
154 continue; 153 continue;
155 l->l_flag |= LW_RUMP_QEXIT; 154 l->l_flag |= LW_RUMP_QEXIT;
156 } 155 }
157 mutex_exit(p->p_lock); 156 mutex_exit(p->p_lock);
158 157
159 /* 158 /*
160 * Next, make sure everyone on all CPUs sees our status 159 * Next, make sure everyone on all CPUs sees our status
161 * update. This keeps threads inside cv_wait() and makes 160 * update. This keeps threads inside cv_wait() and makes
162 * sure we don't access a stale cv pointer later when 161 * sure we don't access a stale cv pointer later when
163 * we wake up the threads. 162 * we wake up the threads.
164 */ 163 */
165 164
166 where = xc_broadcast(0, (xcfunc_t)nullop, NULL, NULL); 165 xc_barrier(0);
167 xc_wait(where); 
168 166
169 /* 167 /*
170 * Ok, all lwps are either: 168 * Ok, all lwps are either:
171 * 1) not in the cv code 169 * 1) not in the cv code
172 * 2) sleeping on l->l_private 170 * 2) sleeping on l->l_private
173 * 3) sleeping on p->p_waitcv 171 * 3) sleeping on p->p_waitcv
174 * 172 *
175 * Either way, l_private is stable until we set PS_RUMP_LWPEXIT 173 * Either way, l_private is stable until we set PS_RUMP_LWPEXIT
176 * in p->p_sflag. 174 * in p->p_sflag.
177 */ 175 */
178 176
179 mutex_enter(p->p_lock); 177 mutex_enter(p->p_lock);
180 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 178 LIST_FOREACH(l, &p->p_lwps, l_sibling) {

cvs diff -r1.7 -r1.8 src/sys/sys/xcall.h

--- src/sys/sys/xcall.h 2018/08/27 07:10:15 1.7
+++ src/sys/sys/xcall.h 2019/10/06 15:11:16 1.8
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: xcall.h,v 1.7 2018/08/27 07:10:15 riastradh Exp $ */ 1/* $NetBSD: xcall.h,v 1.8 2019/10/06 15:11:16 uwe Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2007 The NetBSD Foundation, Inc. 4 * Copyright (c) 2007 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran. 8 * by Andrew Doran.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -43,18 +43,20 @@ typedef void (*xcfunc_t)(void *, void *) @@ -43,18 +43,20 @@ typedef void (*xcfunc_t)(void *, void *)
43 43
44struct cpu_info; 44struct cpu_info;
45 45
46void xc_init_cpu(struct cpu_info *); 46void xc_init_cpu(struct cpu_info *);
47void xc_send_ipi(struct cpu_info *); 47void xc_send_ipi(struct cpu_info *);
48void xc_ipi_handler(void); 48void xc_ipi_handler(void);
49 49
50void xc__highpri_intr(void *); 50void xc__highpri_intr(void *);
51 51
52uint64_t xc_broadcast(u_int, xcfunc_t, void *, void *); 52uint64_t xc_broadcast(u_int, xcfunc_t, void *, void *);
53uint64_t xc_unicast(u_int, xcfunc_t, void *, void *, struct cpu_info *); 53uint64_t xc_unicast(u_int, xcfunc_t, void *, void *, struct cpu_info *);
54void xc_wait(uint64_t); 54void xc_wait(uint64_t);
55 55
 56void xc_barrier(u_int);
 57
56unsigned int xc_encode_ipl(int); 58unsigned int xc_encode_ipl(int);
57 59
58#endif /* _KERNEL */ 60#endif /* _KERNEL */
59 61
60#endif /* _SYS_XCALL_H_ */ 62#endif /* _SYS_XCALL_H_ */
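
The header change only adds the declaration; the implementation lives in
subr_xcall.c (rev 1.26 -> 1.27 in this commit, not shown in this excerpt).
Consistent with the commit message, a minimal sketch of what it amounts
to, with xc_nop as a hypothetical, properly typed no-op standing in for
the (xcfunc_t)nullop cast that gcc 8 -Wcast-function-type warns about:

#include <sys/types.h>
#include <sys/xcall.h>

/* Hypothetical no-op with the exact xcfunc_t signature, so no cast is needed. */
static void
xc_nop(void *arg1, void *arg2)
{

	/* nothing */
}

/* Sketch only: broadcast the nop to all CPUs and wait for it to complete. */
void
xc_barrier(u_int flags)
{
	uint64_t where;

	where = xc_broadcast(flags, xc_nop, NULL, NULL);
	xc_wait(where);
}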