Sun Jan 2 16:03:30 2022 UTC
fix KASSERTMSG issue


(christos)
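The change itself is one line: in pmap_tlb_asid_reinitialize(), the DIAGNOSTIC-only
variable asids_count was declared inside an #ifdef DIAGNOSTIC block that closed
before the KASSERTMSG consuming it; the last hunk below moves the #endif past the
assertion so the declaration and its only use are compiled in and out together.
A minimal standalone sketch of the before/after guard pattern (illustration only:
count_items() and the plain assert() stand in for pmap_tlb_asid_count() and
KASSERTMSG; build with -DDIAGNOSTIC to enable the check):

/* Sketch of the guard pattern, not the kernel source. */
#include <assert.h>
#include <stdio.h>

static unsigned
count_items(void)
{
	return 42;	/* hypothetical recount, used only for checking */
}

int
main(void)
{
	unsigned found = 42;

#ifdef DIAGNOSTIC
	/*
	 * Declaration and assertion now live under one guard, so a
	 * !DIAGNOSTIC build never references the undeclared variable.
	 */
	unsigned count = count_items();
	assert(found == count);
#endif
	printf("found %u\n", found);
	return 0;
}

That reading of the commit message is inferred from the hunk: NetBSD's KASSERTMSG
normally discards its arguments when DIAGNOSTIC is off, so the unguarded assertion
would only bite in configurations (lint and similar) that still expand them.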
diff -r1.50 -r1.51 src/sys/uvm/pmap/pmap_tlb.c

cvs diff -r1.50 -r1.51 src/sys/uvm/pmap/pmap_tlb.c

--- src/sys/uvm/pmap/pmap_tlb.c 2021/12/29 12:53:38 1.50
+++ src/sys/uvm/pmap/pmap_tlb.c 2022/01/02 16:03:30 1.51
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap_tlb.c,v 1.50 2021/12/29 12:53:38 skrll Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.51 2022/01/02 16:03:30 christos Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas at 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -21,27 +21,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.50 2021/12/29 12:53:38 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.51 2022/01/02 16:03:30 christos Exp $");
 
 /*
  * Manages address spaces in a TLB.
  *
  * Normally there is a 1:1 mapping between a TLB and a CPU.  However, some
  * implementations may share a TLB between multiple CPUs (really CPU thread
  * contexts).  This requires the TLB abstraction to be separated from the
  * CPU abstraction.  It also requires that the TLB be locked while doing
  * TLB activities.
  *
  * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
  * that have a valid ASID.
  *
@@ -461,29 +461,29 @@ pmap_tlb_asid_reinitialize(struct pmap_t
 	 * semi-expensive operation, we don't want to do it too often.
 	 * So if more half of the ASIDs are in use, we don't have
 	 * enough free ASIDs so invalidate the TLB entries with ASIDs
 	 * and clear the ASID bitmap.  That will force everyone to
 	 * allocate a new ASID.
 	 */
 #if !defined(MULTIPROCESSOR) || defined(PMAP_TLB_NEED_SHOOTDOWN)
 	pmap_tlb_asid_check();
 	const u_int asids_found = tlb_record_asids(
 	    ti->ti_asid_bitmap._b, ti->ti_asid_max);
 	pmap_tlb_asid_check();
 #ifdef DIAGNOSTIC
 	const u_int asids_count = pmap_tlb_asid_count(ti);
-#endif
 	KASSERTMSG(asids_found == asids_count,
 	    "found %u != count %u", asids_found, asids_count);
+#endif
 	if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
 		tlb_invalidate_asids(KERNEL_PID + 1, ti->ti_asid_max);
 #else /* MULTIPROCESSOR && !PMAP_TLB_NEED_SHOOTDOWN */
 		/*
 		 * For those systems (PowerPC) that don't require
 		 * cross cpu TLB shootdowns, we have to invalidate the
 		 * entire TLB because we can't record the ASIDs in use
 		 * on the other CPUs.  This is hopefully cheaper than
 		 * than trying to use an IPI to record all the ASIDs
 		 * on all the CPUs (which would be a synchronization
 		 * nightmare).
 		 */
 		tlb_invalidate_all();
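For context, the surrounding code implements a common ASID-exhaustion strategy:
rescan which ASIDs the TLB still holds and, once more than half the ASID space is
claimed, invalidate the user entries so every pmap reallocates from a clean bitmap.
A rough standalone sketch of that heuristic (asid_bitmap, ASID_MAX, and
flush_user_tlb() are invented names; the real code also deals with KERNEL_PID,
per-CPU tlb_info state, and shootdowns):

#include <stdint.h>
#include <stdio.h>

#define ASID_MAX	255			/* hypothetical 8-bit ASID space */

static uint32_t asid_bitmap[(ASID_MAX + 1) / 32];

/* Stand-in for tlb_record_asids(): count ASIDs marked in use. */
static unsigned
asids_found(void)
{
	unsigned n = 0;

	for (unsigned i = 0; i < (ASID_MAX + 1) / 32; i++)
		n += (unsigned)__builtin_popcount(asid_bitmap[i]);
	return n;
}

/* Stand-in for tlb_invalidate_asids(): drop user entries, clear bitmap. */
static void
flush_user_tlb(void)
{
	for (unsigned i = 0; i < (ASID_MAX + 1) / 32; i++)
		asid_bitmap[i] = 0;
}

int
main(void)
{
	/* Pretend 200 of 255 ASIDs are live. */
	for (unsigned a = 1; a <= 200; a++)
		asid_bitmap[a / 32] |= 1u << (a % 32);

	/*
	 * Reclaiming is semi-expensive, so only flush when more than
	 * half the space is claimed; everyone then gets a fresh ASID.
	 */
	if (asids_found() >= ASID_MAX / 2)
		flush_user_tlb();

	printf("asids in use after reinit: %u\n", asids_found());
	return 0;
}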