Fri Aug 4 12:24:36 2023 UTC ()
Revert "softint(9): Sprinkle KASSERT(!cold)."

Temporary workaround for PR kern/57563 -- to be fixed properly after
analysis.


(riastradh)
diff -r1.74 -r1.75 src/sys/kern/kern_softint.c

cvs diff -r1.74 -r1.75 src/sys/kern/kern_softint.c (expand / switch to unified diff)

--- src/sys/kern/kern_softint.c 2023/08/04 07:40:30 1.74
+++ src/sys/kern/kern_softint.c 2023/08/04 12:24:36 1.75
@@ -1,14 +1,14 @@
-/*	$NetBSD: kern_softint.c,v 1.74 2023/08/04 07:40:30 riastradh Exp $	*/
+/*	$NetBSD: kern_softint.c,v 1.75 2023/08/04 12:24:36 riastradh Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -160,27 +160,27 @@
  *	interrupt;
  * }
  *
  * Once the soft interrupt has fired (and even if it has blocked),
  * no further soft interrupts at that level will be triggered by
  * MI code until the soft interrupt handler has ceased execution.
  * If a soft interrupt handler blocks and is resumed, it resumes
  * execution as a normal LWP (kthread) and gains VM context.  Only
  * when it has completed and is ready to fire again will it
  * interrupt other threads.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.74 2023/08/04 07:40:30 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.75 2023/08/04 12:24:36 riastradh Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/intr.h>
 #include <sys/ipi.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/kernel.h>
 #include <sys/kthread.h>
 #include <sys/evcnt.h>
 #include <sys/cpu.h>
 #include <sys/xcall.h>
 #include <sys/psref.h>
@@ -478,28 +478,26 @@ softint_disestablish(void *arg)
  * interrupt handler, or with preemption disabled (since we are
  * using the value of curcpu()).
  */
 void
 softint_schedule(void *arg)
 {
 	softhand_t *sh;
 	softint_t *si;
 	uintptr_t offset;
 	int s;
 
 	SDT_PROBE2(sdt, kernel, softint, schedule, arg, /*ci*/NULL);
 
-	KASSERT(!cold);
-
 	/*
 	 * If this assert fires, rather than disabling preemption explicitly
 	 * to make it stop, consider that you are probably using a softint
 	 * when you don't need to.
 	 */
 	KASSERT(kpreempt_disabled());
 
 	/* Find the handler record for this CPU. */
 	offset = (uintptr_t)arg;
 	KASSERT(offset != 0);
 	KASSERTMSG(offset < softint_bytes, "%"PRIuPTR" %u",
 	    offset, softint_bytes);
 	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);
@@ -561,27 +559,26 @@ softint_schedule_cpu(void *arg, struct c
  * Must be entered at splhigh.  Will drop the priority
  * to the level specified, but returns back at splhigh.
  */
 static inline void
 softint_execute(lwp_t *l, int s)
 {
 	softint_t *si = l->l_private;
 	softhand_t *sh;
 
 	KASSERT(si->si_lwp == curlwp);
 	KASSERT(si->si_cpu == curcpu());
 	KASSERT(si->si_lwp->l_wchan == NULL);
 	KASSERT(si->si_active);
-	KASSERT(!cold);
 
 	/*
 	 * Note: due to priority inheritance we may have interrupted a
 	 * higher priority LWP.  Since the soft interrupt must be quick
 	 * and is non-preemptable, we don't bother yielding.
 	 */
 
 	while (!SIMPLEQ_EMPTY(&si->si_q)) {
 		/*
 		 * Pick the longest waiting handler to run.  We block
 		 * interrupts but do not lock in order to do this, as
 		 * we are protecting against the local CPU only.
 		 */
@@ -714,28 +711,26 @@ softint_trigger(uintptr_t machdep)
 
 /*
  * softint_thread:
  *
  *	Slow path: MI software interrupt dispatch.
  */
 void
 softint_thread(void *cookie)
 {
 	softint_t *si;
 	lwp_t *l;
 	int s;
 
-	KASSERT(!cold);
-
 	l = curlwp;
 	si = l->l_private;
 
 	for (;;) {
 		/* Clear pending status and run it. */
 		s = splhigh();
 		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
 		softint_execute(l, s);
 		splx(s);
 
 		/* Interrupts allowed to run again before switching. */
 		lwp_lock(l);
 		l->l_stat = LSIDL;
@@ -795,28 +790,26 @@ softint_thread(void *cookie)
 
 /*
  * softint_dispatch:
  *
  *	Fast path: entry point from machine-dependent code.
  */
 void
 softint_dispatch(lwp_t *pinned, int s)
 {
 	struct bintime now;
 	u_int timing;
 	lwp_t *l;
 
-	KASSERT(!cold);
-
 #ifdef DIAGNOSTIC
 	if ((pinned->l_pflag & LP_RUNNING) == 0 || curlwp->l_stat != LSIDL) {
 		struct lwp *onproc = curcpu()->ci_onproc;
 		int s2 = splhigh();
 		printf("curcpu=%d, spl=%d curspl=%d\n"
 		    "onproc=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
 		    "curlwp=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
 		    "pinned=%p => l_stat=%d l_flag=%08x l_cpu=%d\n",
 		    cpu_index(curcpu()), s, s2, onproc, onproc->l_stat,
 		    onproc->l_flag, cpu_index(onproc->l_cpu), curlwp,
 		    curlwp->l_stat, curlwp->l_flag,
 		    cpu_index(curlwp->l_cpu), pinned, pinned->l_stat,
 		    pinned->l_flag, cpu_index(pinned->l_cpu));