Fri Mar 4 08:11:48 2022 UTC
Remove an incorrect KASSERT.


(skrll)
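For context, the assertion in question lives in pmap_tlb_shootdown_process(). Pieced together from the two hunks below (with unrelated lines elided as "..."), the removed code looked roughly like this:

	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
#ifdef DIAGNOSTIC
	/* pm is the pmap of whatever LWP this shootdown IPI interrupted. */
	struct pmap * const pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
#endif
	...
		} else if (pai->pai_asid) {
			/*
			 * Note: pai describes ti->ti_victim, while pm is the
			 * interrupted LWP's pmap -- in general a different
			 * pmap from the one whose ASID is being reset.
			 */
			KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
			pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
		}

The commit deletes both the DIAGNOSTIC-only local and the KASSERT that consumed it.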
cvs diff -r1.51 -r1.52 src/sys/uvm/pmap/pmap_tlb.c

--- src/sys/uvm/pmap/pmap_tlb.c 2022/01/02 16:03:30 1.51
+++ src/sys/uvm/pmap/pmap_tlb.c 2022/03/04 08:11:48 1.52
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap_tlb.c,v 1.51 2022/01/02 16:03:30 christos Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.52 2022/03/04 08:11:48 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas at 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -21,27 +21,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.51 2022/01/02 16:03:30 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.52 2022/03/04 08:11:48 skrll Exp $");
 
 /*
  * Manages address spaces in a TLB.
  *
  * Normally there is a 1:1 mapping between a TLB and a CPU. However, some
  * implementations may share a TLB between multiple CPUs (really CPU thread
  * contexts). This requires the TLB abstraction to be separated from the
  * CPU abstraction. It also requires that the TLB be locked while doing
  * TLB activities.
  *
  * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
  * that have a valid ASID.
  *
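(An aside on the "bitmap plus list" bookkeeping that the comment above describes: the idea can be sketched in a few lines of C. The names below -- asid_space, asid_alloc, NUM_ASIDS -- are illustrative only and do not match the real struct pmap_tlb_info fields.)

	#include <stdint.h>

	#define NUM_ASIDS	256	/* e.g. an 8-bit hardware ASID field */

	struct asid_space {
		uint32_t in_use[NUM_ASIDS / 32];	/* bitmap of live ASIDs */
		unsigned next;				/* next candidate to try */
		/*
		 * ... plus a list of pmaps holding a valid ASID, walked when
		 * the whole ASID space has to be reinitialized ...
		 */
	};

	/* Allocate a free ASID; 0 stays reserved (KERNEL_PID's role here). */
	static unsigned
	asid_alloc(struct asid_space *as)
	{
		for (unsigned n = 0; n < NUM_ASIDS; n++) {
			unsigned a = (as->next + n) % NUM_ASIDS;
			if (a == 0)
				continue;	/* never hand out the reserved ASID */
			if ((as->in_use[a / 32] & (1u << (a % 32))) == 0) {
				as->in_use[a / 32] |= 1u << (a % 32);
				as->next = a + 1;
				return a;
			}
		}
		return 0;	/* exhausted: caller must flush and reinitialize */
	}

The per-TLB list of pmaps is what keeps the exhaustion path cheap: only the pmaps that actually hold a valid ASID need to be visited and reset, which is exactly what pmap_tlb_pai_reset() in the hunks below does for a single victim.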
@@ -537,29 +537,26 @@ pmap_tlb_asid_reinitialize(struct pmap_t
 #endif
 	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
 }
 
 #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
 #if PMAP_TLB_MAX == 1
 #error shootdown not required for single TLB systems
 #endif
 void
 pmap_tlb_shootdown_process(void)
 {
 	struct cpu_info * const ci = curcpu();
 	struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
-#ifdef DIAGNOSTIC
-	struct pmap * const pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
-#endif
 
 	KASSERT(cpu_intr_p());
 	KASSERTMSG(ci->ci_cpl >= IPL_SCHED, "%s: cpl (%d) < IPL_SCHED (%d)",
 	    __func__, ci->ci_cpl, IPL_SCHED);
 
 	TLBINFO_LOCK(ti);
 
 	switch (ti->ti_tlbinvop) {
 	case TLBINV_ONE: {
 		/*
 		 * We only need to invalidate one user ASID.
 		 */
 		struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
@@ -570,27 +567,26 @@ pmap_tlb_shootdown_process(void)
 			 * invalidate its TLB entries.
 			 */
 			KASSERT(pai->pai_asid > KERNEL_PID);
 			pmap_tlb_asid_check();
 			tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
 			pmap_tlb_asid_check();
 		} else if (pai->pai_asid) {
 			/*
 			 * The victim is no longer an active pmap for this TLB.
 			 * So simply clear its ASID and when pmap_activate is
 			 * next called for this pmap, it will allocate a new
 			 * ASID.
 			 */
-			KASSERT(!pmap_tlb_intersecting_onproc_p(pm, ti));
 			pmap_tlb_pai_reset(ti, pai, PAI_PMAP(pai, ti));
 		}
 		break;
 	}
 	case TLBINV_ALLUSER:
 		/*
 		 * Flush all user TLB entries.
 		 */
 		pmap_tlb_asid_reinitialize(ti, TLBINV_ALLUSER);
 		break;
 	case TLBINV_ALLKERNEL:
 		/*
 		 * We need to invalidate all global TLB entries.