Wed Jun 29 05:53:45 2011 UTC
Multiprocessing fixes.


(matt)
diff -r1.8 -r1.9 src/sys/common/pmap/tlb/pmap.c
diff -r1.9 -r1.10 src/sys/common/pmap/tlb/pmap_tlb.c

cvs diff -r1.8 -r1.9 src/sys/common/pmap/tlb/Attic/pmap.c

--- src/sys/common/pmap/tlb/Attic/pmap.c 2011/06/23 20:46:15 1.8
+++ src/sys/common/pmap/tlb/Attic/pmap.c 2011/06/29 05:53:44 1.9
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.8 2011/06/23 20:46:15 matt Exp $ */
+/* $NetBSD: pmap.c,v 1.9 2011/06/29 05:53:44 matt Exp $ */
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -57,27 +57,27 @@
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * @(#)pmap.c 8.4 (Berkeley) 1/26/94
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.8 2011/06/23 20:46:15 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.9 2011/06/29 05:53:44 matt Exp $");
 
 /*
  * Manages physical address maps.
  *
  * In addition to hardware address maps, this
  * module is called upon to provide software-use-only
  * maps which may or may not be stored in the same
  * form as hardware maps.  These pseudo-maps are
  * used to store intermediate results from copy
  * operations to and from address spaces.
  *
  * Since the information managed by this module is
  * also stored by the logical address mapping module,
@@ -314,27 +314,27 @@ static void
 pmap_page_syncicache(struct vm_page *pg)
 {
 #ifndef MULTIPROCESSOR
     struct pmap * const curpmap = curcpu()->ci_curpm;
 #endif
     struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
     pv_entry_t pv = &mdpg->mdpg_first;
     __cpuset_t onproc = CPUSET_NULLSET;
     (void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
     if (pv->pv_pmap != NULL) {
         for (; pv != NULL; pv = pv->pv_next) {
 #ifdef MULTIPROCESSOR
             CPUSET_MERGE(onproc, pv->pv_pmap->pm_onproc);
-            if (CPUSET_EQUAL_P(onproc, cpus_running)) {
+            if (CPUSET_EQUAL_P(onproc, cpuset_info.cpus_running)) {
                 break;
             }
 #else
             if (pv->pv_pmap == curpmap) {
                 onproc = CPUSET_SINGLE(0);
                 break;
             }
 #endif
         }
     }
     VM_PAGEMD_PVLIST_UNLOCK(mdpg);
     kpreempt_disable();
     pmap_md_page_syncicache(pg, onproc);
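
The loop above builds the set of CPUs that may hold stale icache lines: it merges the pm_onproc set of every pmap mapping the page and stops early once the set already covers every running CPU. A minimal userland sketch of the same merge-until-full pattern, assuming a cpuset is a plain uint32_t bitmask (the kernel's __cpuset_t and its CPUSET_* macros are opaque and more general; the fake_pv type is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel's CPUSET_* macros. */
    #define CPUSET_NULLSET        0u
    #define CPUSET_MERGE(a, b)    ((a) |= (b))
    #define CPUSET_EQUAL_P(a, b)  ((a) == (b))

    struct fake_pv { uint32_t pm_onproc; struct fake_pv *next; };

    /*
     * Merge the onproc sets of every pmap mapping the page; stop
     * early once every running CPU is already covered.
     */
    static uint32_t
    collect_onproc(struct fake_pv *pv, uint32_t cpus_running)
    {
        uint32_t onproc = CPUSET_NULLSET;
        for (; pv != NULL; pv = pv->next) {
            CPUSET_MERGE(onproc, pv->pm_onproc);
            if (CPUSET_EQUAL_P(onproc, cpus_running))
                break;              /* no point scanning further */
        }
        return onproc;
    }

    int
    main(void)
    {
        struct fake_pv c = { 0x4, NULL }, b = { 0x1, &c }, a = { 0x2, &b };
        printf("onproc = %#x\n", collect_onproc(&a, 0x7)); /* 0x7 */
        return 0;
    }
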
@@ -575,27 +575,27 @@ pmap_deactivate(struct lwp *l)
 
     UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
 }
 
 void
 pmap_update(struct pmap *pmap)
 {
 
     UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
     UVMHIST_LOG(pmaphist, "(pmap=%p)", pmap, 0,0,0);
     PMAP_COUNT(update);
 
     kpreempt_disable();
-#ifdef MULTIPROCESSOR
+#if defined(MULTIPROCESSOR) && defined(PMAP_NEED_TLB_SHOOTDOWN)
     u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
     if (pending && pmap_tlb_shootdown_bystanders(pmap))
         PMAP_COUNT(shootdown_ipis);
 #endif
 #ifdef DEBUG
     pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
 #endif /* DEBUG */
 
     /*
      * If pmap_remove_all was called, we deactivated ourselves and nuked
      * our ASID.  Now we have to reactivate ourselves.
      */
     if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
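
The tightened guard compiles the shootdown bookkeeping only where the port defines PMAP_NEED_TLB_SHOOTDOWN. The body depends on swapping pm_shootdown_pending to zero in a single atomic step, so two concurrent callers of pmap_update() can never both claim the same pending requests. A standalone sketch of that claim-and-process pattern, with NetBSD's atomic_swap_uint modeled by C11 atomic_exchange (the toy_* names are hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of a pmap with a pending-shootdown count. */
    struct toy_pmap {
        atomic_uint shootdown_pending;
    };

    static bool
    toy_shootdown_bystanders(struct toy_pmap *pm)
    {
        (void)pm;
        return true;                    /* pretend an IPI was sent */
    }

    /*
     * Claim all pending shootdown requests in one atomic step so
     * that concurrent callers cannot both process the same request.
     */
    static void
    toy_pmap_update(struct toy_pmap *pm)
    {
        unsigned int pending = atomic_exchange(&pm->shootdown_pending, 0);
        if (pending && toy_shootdown_bystanders(pm))
            printf("sent shootdown IPIs (%u requests coalesced)\n", pending);
    }

    int
    main(void)
    {
        struct toy_pmap pm;
        atomic_init(&pm.shootdown_pending, 3);  /* three requests queued */
        toy_pmap_update(&pm);                   /* processes all three */
        toy_pmap_update(&pm);                   /* nothing left to do */
        return 0;
    }
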

cvs diff -r1.9 -r1.10 src/sys/common/pmap/tlb/Attic/pmap_tlb.c

--- src/sys/common/pmap/tlb/Attic/pmap_tlb.c 2011/06/23 08:11:56 1.9
+++ src/sys/common/pmap/tlb/Attic/pmap_tlb.c 2011/06/29 05:53:44 1.10
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap_tlb.c,v 1.9 2011/06/23 08:11:56 matt Exp $ */
+/* $NetBSD: pmap_tlb.c,v 1.10 2011/06/29 05:53:44 matt Exp $ */
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas at 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -21,27 +21,27 @@
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.9 2011/06/23 08:11:56 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.10 2011/06/29 05:53:44 matt Exp $");
 
 /*
  * Manages address spaces in a TLB.
  *
  * Normally there is a 1:1 mapping between a TLB and a CPU.  However, some
  * implementations may share a TLB between multiple CPUs (really CPU thread
  * contexts).  This requires the TLB abstraction to be separated from the
  * CPU abstraction.  It also requires that the TLB be locked while doing
  * TLB activities.
  *
  * For each TLB, we track the ASIDs in use in a bitmap and a list of pmaps
  * that have a valid ASID.
 
@@ -152,30 +152,30 @@ struct pmap_tlb_info pmap_tlb0_info = {
     .ti_wired = PMAP_TLB_WIRED_UPAGES,
 #endif
     .ti_lock = &pmap_tlb0_mutex,
     .ti_pais = LIST_HEAD_INITIALIZER(pmap_tlb0_info.ti_pais),
 #if defined(MULTIPROCESSOR)
     .ti_cpu_mask = 1,
     .ti_tlbinvop = TLBINV_NOBODY,
 #endif
 };
 
 #undef IFCONSTANT
 
 #if defined(MULTIPROCESSOR)
-static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
+struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
     [0] = &pmap_tlb0_info,
 };
-static u_int pmap_ntlbs = 1;
+u_int pmap_ntlbs = 1;
 #endif
 
 #define __BITMAP_SET(bm, n) \
     ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
 #define __BITMAP_CLR(bm, n) \
     ((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0])))))
 #define __BITMAP_ISSET_P(bm, n) \
     (((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0)
 
 #define TLBINFO_ASID_MARK_USED(ti, asid) \
     __BITMAP_SET((ti)->ti_asid_bitmap, (asid))
 #define TLBINFO_ASID_INUSE_P(ti, asid) \
     __BITMAP_ISSET_P((ti)->ti_asid_bitmap, (asid))
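
These __BITMAP_* macros implement a multi-word bitmap indexed by ASID, and TLBINFO_ASID_MARK_USED / TLBINFO_ASID_INUSE_P are thin wrappers over it. A small standalone exercise of the macros (copied verbatim from the hunk above) tracking ASID use, with a hypothetical 256-entry ASID space:

    #include <stdio.h>

    #define __BITMAP_SET(bm, n) \
        ((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
    #define __BITMAP_CLR(bm, n) \
        ((bm)[(n) / (8*sizeof(bm[0]))] &= ~(1LU << ((n) % (8*sizeof(bm[0])))))
    #define __BITMAP_ISSET_P(bm, n) \
        (((bm)[(n) / (8*sizeof(bm[0]))] & (1LU << ((n) % (8*sizeof(bm[0]))))) != 0)

    int
    main(void)
    {
        /* Hypothetical sizing: 256 ASIDs packed into unsigned longs. */
        unsigned long asid_bitmap[256 / (8 * sizeof(unsigned long))] = { 0 };

        __BITMAP_SET(asid_bitmap, 0);     /* ASID 0 is always reserved */
        __BITMAP_SET(asid_bitmap, 42);
        printf("asid 42 in use: %d\n", __BITMAP_ISSET_P(asid_bitmap, 42));
        __BITMAP_CLR(asid_bitmap, 42);
        printf("asid 42 in use: %d\n", __BITMAP_ISSET_P(asid_bitmap, 42));
        return 0;
    }
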
@@ -453,27 +453,27 @@ pmap_tlb_shootdown_process(void)
     KASSERTMSG(ci->ci_cpl >= IPL_SCHED,
         ("%s: cpl (%d) < IPL_SCHED (%d)",
         __func__, ci->ci_cpl, IPL_SCHED));
 
     TLBINFO_LOCK(ti);
 
     switch (ti->ti_tlbinvop) {
     case TLBINV_ONE: {
         /*
          * We only need to invalidate one user ASID.
          */
         struct pmap_asid_info * const pai = PMAP_PAI(ti->ti_victim, ti);
         KASSERT(ti->ti_victim != pmap_kernel());
-        if (!CPU_EMPTY_P(CPUSET_SUBSET(ti->ti_victim->pm_onproc, ti->ti_cpu_mask))) {
+        if (!CPUSET_EMPTY_P(CPUSET_SUBSET(ti->ti_victim->pm_onproc, ti->ti_cpu_mask))) {
             /*
              * The victim is an active pmap so we will just
              * invalidate its TLB entries.
              */
             KASSERT(pai->pai_asid > KERNEL_PID);
             pmap_tlb_asid_check();
             tlb_invalidate_asids(pai->pai_asid, pai->pai_asid);
             pmap_tlb_asid_check();
         } else if (pai->pai_asid) {
             /*
              * The victim is no longer an active pmap for this TLB.
              * So simply clear its ASID and when pmap_activate is
              * next called for this pmap, it will allocate a new
@@ -592,27 +592,27 @@ pmap_tlb_shootdown_bystanders(pmap_t pm)
                 ti->ti_victim = NULL;
             }
         }
         TLBINFO_UNLOCK(ti);
         /*
          * Now we can send out the shootdown IPIs to a CPU
          * that shares this TLB and is currently using this
          * pmap.  That CPU will process the IPI and do the
          * all the work.  Any other CPUs sharing that TLB
          * will take advantage of that work.  pm_onproc might
          * change now that we have released the lock but we
          * can tolerate spurious shootdowns.
          */
-        KASSERT(!CPU_EMPTY_P(onproc));
+        KASSERT(!CPUSET_EMPTY_P(onproc));
         u_int j = CPUSET_NEXT(onproc);
         cpu_send_ipi(cpu_lookup(j), IPI_SHOOTDOWN);
         ipi_sent = true;
         continue;
     }
     if (!CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) {
         /*
          * If this pmap has an ASID assigned but it's not
          * currently running, nuke its ASID.  Next time the
          * pmap is activated, it will allocate a new ASID.
          * And best of all, we avoid an IPI.
          */
         KASSERT(!kernel_p);
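
Note the single-IPI strategy in the hunk above: rather than interrupting every CPU in onproc, one member is picked with CPUSET_NEXT and only that CPU is sent IPI_SHOOTDOWN; the other CPUs sharing the TLB benefit from its work. A sketch under the assumption that picking a member can be modeled as find-first-set on a bitmask (cpuset_next here is hypothetical, not the kernel's implementation):

    #include <stdint.h>
    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    /*
     * Hypothetical model: return the index of one member of a
     * non-empty set (here, the lowest set bit).
     */
    static unsigned
    cpuset_next(uint32_t set)
    {
        return (unsigned)(ffs((int)set) - 1);
    }

    int
    main(void)
    {
        uint32_t onproc = 0x28;     /* CPUs 3 and 5 share this TLB */
        unsigned j = cpuset_next(onproc);
        printf("send IPI_SHOOTDOWN to cpu %u\n", j);    /* cpu 3 */
        return 0;
    }
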
@@ -672,28 +672,28 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd
 
 static inline void
 pmap_tlb_asid_alloc(struct pmap_tlb_info *ti, pmap_t pm,
     struct pmap_asid_info *pai)
 {
     /*
      * We shouldn't have an ASID assigned, and thusly must not be onproc
      * nor active.
      */
     KASSERT(pm != pmap_kernel());
     KASSERT(pai->pai_asid == 0);
     KASSERT(pai->pai_link.le_prev == NULL);
 #if defined(MULTIPROCESSOR)
-    KASSERT(CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_onproc, ti->ti_cpu_mask)));
-    KASSERT(CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask)));
+    KASSERT(CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_onproc, ti->ti_cpu_mask)));
+    KASSERT(CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask)));
 #endif
     KASSERT(ti->ti_asids_free > 0);
     KASSERT(ti->ti_asid_hint <= ti->ti_asid_max);
 
     /*
      * Let's see if the hinted ASID is free.  If not search for
      * a new one.
      */
     if (__predict_false(TLBINFO_ASID_INUSE_P(ti, ti->ti_asid_hint))) {
 #ifdef DIAGNOSTIC
         const size_t words = __arraycount(ti->ti_asid_bitmap);
 #endif
         const size_t nbpw = 8 * sizeof(ti->ti_asid_bitmap[0]);
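
pmap_tlb_asid_alloc() tries ti_asid_hint first and only falls back to scanning ti_asid_bitmap when the hint is taken. A simplified sketch of that hint-first search, assuming a single 64-bit bitmap word with ASID 0 reserved for the kernel (the real code scans multiple words and reinitializes the whole ASID space on exhaustion):

    #include <stdint.h>
    #include <stdio.h>

    /* Find a free ASID, trying the hint first; 0 means none free. */
    static unsigned
    asid_alloc(uint64_t *bitmap, unsigned asid_max, unsigned *hint)
    {
        unsigned asid = *hint;

        if (*bitmap & ((uint64_t)1 << asid)) {          /* hint in use? */
            for (asid = 1; asid <= asid_max; asid++) {  /* linear fallback */
                if (!(*bitmap & ((uint64_t)1 << asid)))
                    break;
            }
            if (asid > asid_max)
                return 0;   /* exhausted: the real code reinitializes */
        }
        *bitmap |= (uint64_t)1 << asid;                 /* mark it used */
        *hint = (asid < asid_max) ? asid + 1 : 1;       /* advance hint */
        return asid;
    }

    int
    main(void)
    {
        uint64_t bitmap = 1;    /* ASID 0 (kernel) always marked used */
        unsigned hint = 1;
        for (int i = 0; i < 4; i++)
            printf("allocated ASID %u\n", asid_alloc(&bitmap, 63, &hint));
        return 0;
    }
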
@@ -780,27 +780,27 @@ pmap_tlb_asid_acquire(pmap_t pm, struct
         /*
          * Get an ASID.
          */
         pmap_tlb_asid_alloc(ti, pm, pai);
     }
 
     if (l == curlwp) {
 #if defined(MULTIPROCESSOR)
         /*
          * The bits in pm_onproc belonging to this TLB can only
          * be changed while this TLBs lock is held unless atomic
          * operations are used.
          */
-        CPUSET_ADD(&pm->pm_onproc, cpu_index(ci));
+        CPUSET_ADD(pm->pm_onproc, cpu_index(ci));
 #endif
         ci->ci_pmap_asid_cur = pai->pai_asid;
         tlb_set_asid(pai->pai_asid);
         pmap_tlb_asid_check();
     } else {
         printf("%s: l (%p) != curlwp %p\n", __func__, l, curlwp);
     }
     TLBINFO_UNLOCK(ti);
 }
 
 void
 pmap_tlb_asid_deactivate(pmap_t pm)
 {
@@ -827,31 +827,31 @@ pmap_tlb_asid_deactivate(pmap_t pm)
     }
 #elif defined(DEBUG)
     curcpu()->ci_pmap_asid_cur = 0;
     tlb_set_asid(0);
     pmap_tlb_asid_check();
 #endif
 }
 
 void
 pmap_tlb_asid_release_all(struct pmap *pm)
 {
     KASSERT(pm != pmap_kernel());
 #if defined(MULTIPROCESSOR)
-    KASSERT(CPUSET_EMPTY(pm->pm_onproc));
-    for (u_int i = 0; !CPUSET_EMPTY(pm->pm_active); i++) {
+    KASSERT(CPUSET_EMPTY_P(pm->pm_onproc));
+    for (u_int i = 0; !CPUSET_EMPTY_P(pm->pm_active); i++) {
         KASSERT(i < pmap_ntlbs);
         struct pmap_tlb_info * const ti = pmap_tlbs[i];
-        if (!CPU_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) {
+        if (!CPUSET_EMPTY_P(CPUSET_SUBSET(pm->pm_active, ti->ti_cpu_mask))) {
             struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
             TLBINFO_LOCK(ti);
             KASSERT(ti->ti_victim != pm);
             pmap_pai_reset(ti, pai, pm);
             TLBINFO_UNLOCK(ti);
         }
     }
 #else
     /*
      * Handle the case of an UP kernel which only has, at most, one ASID.
      * If the pmap has an ASID allocated, free it.
      */
     struct pmap_tlb_info * const ti = &pmap_tlb0_info;