Fri Apr 17 08:17:06 2020 UTC
Use UVMHIST_CALLARGS


(skrll)
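
For context: with UVMHIST enabled, UVMHIST_CALLED(maphist) logs a bare
function-entry record and a following UVMHIST_LOG logs the arguments as a
second record; UVMHIST_CALLARGS folds both into a single record. A minimal
sketch of the pattern this commit applies throughout pmap.c (foo() and its
arguments are illustrative, not from pmap.c; as the diff shows, UVMHIST_LOG
and UVMHIST_CALLARGS always take four value arguments, zero-padded when
unused):

	/* Before: separate entry record and argument record. */
	void
	foo(vaddr_t va, paddr_t pa)
	{
		UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
		UVMHIST_LOG(maphist, "va %#jx pa %#jx", va, pa, 0, 0);
		/* ... */
	}

	/* After: one macro both marks the call and logs the arguments. */
	void
	foo(vaddr_t va, paddr_t pa)
	{
		UVMHIST_FUNC(__func__);
		UVMHIST_CALLARGS(maphist, "va %#jx pa %#jx", va, pa, 0, 0);
		/* ... */
	}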
diff -r1.405 -r1.406 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.405 -r1.406 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2020/04/16 21:20:43 1.405
+++ src/sys/arch/arm/arm32/pmap.c 2020/04/17 08:17:06 1.406
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.405 2020/04/16 21:20:43 ad Exp $	*/
+/*	$NetBSD: pmap.c,v 1.406 2020/04/17 08:17:06 skrll Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -188,27 +188,27 @@
 
 #include "opt_arm_debug.h"
 #include "opt_cpuoptions.h"
 #include "opt_pmap_debug.h"
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
 #include "opt_multiprocessor.h"
 
 #ifdef MULTIPROCESSOR
 #define _INTR_PRIVATE
 #endif
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.405 2020/04/16 21:20:43 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.406 2020/04/17 08:17:06 skrll Exp $");
 
 #include <sys/atomic.h>
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/atomic.h>
 #include <sys/bus.h>
 #include <sys/cpu.h>
 #include <sys/intr.h>
 #include <sys/kernel.h>
 #include <sys/kernhist.h>
 #include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
@@ -3123,29 +3123,28 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 	u_int nflags;
 	u_int oflags;
 	const bool kpm_p = (pm == pmap_kernel());
 #ifdef ARM_HAS_VBAR
 	const bool vector_page_p = false;
 #else
 	const bool vector_page_p = (va == vector_page);
 #endif
 	struct pmap_page *pp = pmap_pv_tracked(pa);
 	struct pv_entry *new_pv = NULL;
 	struct pv_entry *old_pv = NULL;
 	int error = 0;
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, " (pm %#jx va %#jx pa %#jx prot %#jx",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx pa %#jx prot %#jx",
 	    (uintptr_t)pm, va, pa, prot);
 	UVMHIST_LOG(maphist, " flag %#jx", flags, 0, 0, 0);
 
 	KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
 	KDASSERT(((va | pa) & PGOFSET) == 0);
 
 	/*
 	 * Get a pointer to the page.  Later on in this function, we
 	 * test for a managed page by checking pg != NULL.
 	 */
 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
 	/*
 	 * if we may need a new pv entry allocate if now, as we can't do it
@@ -3493,28 +3492,28 @@ free_pv:
  * all invalidations until pmap_update(), since pmap_remove_all() has
  * already flushed the cache.
  * 4. Maybe later fast-case a single page, but I don't think this is
  *    going to make _that_ much difference overall.
  */
 
 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
 
 void
 pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
 {
 	SLIST_HEAD(,pv_entry) opv_list;
 	struct pv_entry *pv, *npv;
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)",
 	    (uintptr_t)pm, sva, eva, 0);
 
 #ifdef PMAP_FAULTINFO
 	curpcb->pcb_faultinfo.pfi_faultaddr = 0;
 	curpcb->pcb_faultinfo.pfi_repeats = 0;
 	curpcb->pcb_faultinfo.pfi_faultptep = NULL;
 #endif
 
 	SLIST_INIT(&opv_list);
 	/*
 	 * we lock in the pmap => pv_head direction
 	 */
 	pmap_acquire_pmap_lock(pm);
@@ -3741,29 +3740,29 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 #endif
 #ifdef PMAP_CACHE_VIPT
 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
 	struct vm_page *opg;
 #ifndef ARM_MMU_EXTENDED
 	struct pv_entry *pv = NULL;
 #endif
 #endif
 	struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
 
 	UVMHIST_FUNC(__func__);
 
 	if (pmap_initialized) {
-		UVMHIST_CALLED(maphist);
-		UVMHIST_LOG(maphist, " (va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx",
-		    va, pa, prot, flags);
+		UVMHIST_CALLARGS(maphist,
+		    "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot,
+		    flags);
 	}
 
 	pmap_t kpm = pmap_kernel();
 	pmap_acquire_pmap_lock(kpm);
 	struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
 	const size_t l1slot __diagused = l1pte_index(va);
 	KASSERTMSG(l2b != NULL,
 	    "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p",
 	    va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)],
 	    kpm->pm_l2[L2_IDX(l1slot)]
 	    ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)]
 	    : NULL,
 	    kpm->pm_l2[L2_IDX(l1slot)]
@@ -3898,29 +3897,28 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	}
 
 }
 
 void
 pmap_kremove(vaddr_t va, vsize_t len)
 {
 #ifdef UVMHIST
 	u_int total_mappings = 0;
 #endif
 
 	PMAPCOUNT(kenter_unmappings);
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0);
 
 	const vaddr_t eva = va + len;
 	pmap_t kpm = pmap_kernel();
 
 	pmap_acquire_pmap_lock(kpm);
 
 	while (va < eva) {
 		vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va);
 		if (next_bucket > eva)
 			next_bucket = eva;
 
 		struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
 		KDASSERT(l2b != NULL);
@@ -4415,34 +4413,34 @@ pmap_prefetchabt_fixup(void *v)
 	return rv;
 }
 #endif
 
 int
 pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
 {
 	struct l2_dtable *l2;
 	struct l2_bucket *l2b;
 	paddr_t pa;
 	const size_t l1slot = l1pte_index(va);
 	int rv = 0;
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
+	    (uintptr_t)pm, va, ftype, user);
 
 	va = trunc_page(va);
 
 	KASSERT(!user || (pm != pmap_kernel()));
 
-	UVMHIST_LOG(maphist, " (pm=%#jx, va=%#jx, ftype=%#jx, user=%jd)",
-	    (uintptr_t)pm, va, ftype, user);
 #ifdef ARM_MMU_EXTENDED
 	UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx",
 	    (uintptr_t)cpu_tlb_info(curcpu()),
 	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
 	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
 #endif
 
 	pmap_acquire_pmap_lock(pm);
 
 	/*
 	 * If there is no l2_dtable for this address, then the process
 	 * has no business accessing it.
 	 *
@@ -4887,69 +4885,72 @@ pmap_unwire(pmap_t pm, vaddr_t va)
 
 		pmap_acquire_page_lock(md);
 		(void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
 		pmap_release_page_lock(md);
 	}
 
 	pmap_release_pmap_lock(pm);
 }
 
 #ifdef ARM_MMU_EXTENDED
 void
 pmap_md_pdetab_activate(pmap_t pm, struct lwp *l)
 {
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__);
+	struct cpu_info * const ci = curcpu();
+	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
+
+	UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)",
+	    (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0);
 
 	/*
 	 * Assume that TTBR1 has only global mappings and TTBR0 only
 	 * has non-global mappings.  To prevent speculation from doing
 	 * evil things we disable translation table walks using TTBR0
 	 * before setting the CONTEXTIDR (ASID) or new TTBR0 value.
 	 * Once both are set, table walks are reenabled.
 	 */
 	const uint32_t old_ttbcr = armreg_ttbcr_read();
 	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
 	arm_isb();
 
 	pmap_tlb_asid_acquire(pm, l);
 
-	struct cpu_info * const ci = curcpu();
-	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
-
 	cpu_setttb(pm->pm_l1_pa, pai->pai_asid);
 	/*
 	 * Now we can reenable tablewalks since the CONTEXTIDR and TTRB0
 	 * have been updated.
 	 */
 	arm_isb();
 
 	if (pm != pmap_kernel()) {
 		armreg_ttbcr_write(old_ttbcr & ~TTBCR_S_PD0);
 	}
 	cpu_cpwait();
 
 	UVMHIST_LOG(maphist, " pm %#jx pm->pm_l1_pa %08jx asid %ju... done",
 	    (uintptr_t)pm, pm->pm_l1_pa, pai->pai_asid, 0);
 
 	KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u",
 	    ci->ci_pmap_asid_cur, pai->pai_asid);
 	ci->ci_pmap_cur = pm;
 }
 
 void
 pmap_md_pdetab_deactivate(pmap_t pm)
 {
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "pm %#jx", (uintptr_t)pm, 0, 0, 0);
 
 	kpreempt_disable();
 	struct cpu_info * const ci = curcpu();
 	/*
 	 * Disable translation table walks from TTBR0 while no pmap has been
 	 * activated.
 	 */
 	const uint32_t old_ttbcr = armreg_ttbcr_read();
 	armreg_ttbcr_write(old_ttbcr | TTBCR_S_PD0);
 	arm_isb();
 	pmap_tlb_asid_deactivate(pm);
 	cpu_setttb(pmap_kernel()->pm_l1_pa, KERNEL_PID);
 	arm_isb();
@@ -4957,30 +4958,29 @@ pmap_md_pdetab_deactivate(pmap_t pm)
 	ci->ci_pmap_cur = pmap_kernel();
 	KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u",
 	    ci->ci_pmap_asid_cur);
 	kpreempt_enable();
 }
 #endif
 
 void
 pmap_activate(struct lwp *l)
 {
 	extern int block_userspace_access;
 	pmap_t npm = l->l_proc->p_vmspace->vm_map.pmap;
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(l=%#jx) pm=%#jx", (uintptr_t)l, (uintptr_t)npm,
-	    0, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "l=%#jx pm=%#jx", (uintptr_t)l,
+	    (uintptr_t)npm, 0, 0);
 
 	struct cpu_info * const ci = curcpu();
 
 	/*
 	 * If activating a non-current lwp or the current lwp is
 	 * already active, just return.
 	 */
 	if (false
 	    || l != curlwp
 #ifdef ARM_MMU_EXTENDED
 	    || (ci->ci_pmap_cur == npm &&
 		(npm == pmap_kernel()
 		 /* || PMAP_PAI_ASIDVALID_P(pai, cpu_tlb_info(ci)) */))
@@ -5115,54 +5115,52 @@ pmap_activate(struct lwp *l)
 		/* But the new one is */
 		npm->pm_activated = true;
 	}
 	ci->ci_pmap_cur = npm;
 #endif
 	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
 }
 
 void
 pmap_deactivate(struct lwp *l)
 {
 	pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap;
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(l=%#jx) pm=%#jx", (uintptr_t)l, (uintptr_t)pm,
-	    0, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "l=%#jx (pm=%#jx)", (uintptr_t)l,
+	    (uintptr_t)pm, 0, 0);
 
 #ifdef ARM_MMU_EXTENDED
 	pmap_md_pdetab_deactivate(pm);
 #else
 	/*
 	 * If the process is exiting, make sure pmap_activate() does
 	 * a full MMU context-switch and cache flush, which we might
 	 * otherwise skip.  See PR port-arm/38950.
 	 */
 	if (l->l_proc->p_sflag & PS_WEXIT)
 		curcpu()->ci_lastlwp = NULL;
 
 	pm->pm_activated = false;
 #endif
 	UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
 }
 
 void
 pmap_update(pmap_t pm)
 {
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm,
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm,
 	    pm->pm_remove_all, 0, 0);
 
 #ifndef ARM_MMU_EXTENDED
 	if (pm->pm_remove_all) {
 		/*
 		 * Finish up the pmap_remove_all() optimisation by flushing
 		 * the TLB.
 		 */
 		pmap_tlb_flushID(pm);
 		pm->pm_remove_all = false;
 	}
 
 	if (pmap_is_current(pm)) {
@@ -5253,34 +5251,33 @@ pmap_remove_all(pmap_t pm)
 	pmap_tlb_asid_release_all(pm);
 #endif
 	pm->pm_remove_all = true;
 	return false;
 }
 
 /*
  * Retire the given physical map from service.
  * Should only be called if the map contains no valid mappings.
  */
 void
 pmap_destroy(pmap_t pm)
 {
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm,
+	    pm ? pm->pm_remove_all : 0, 0, 0);
 
 	if (pm == NULL)
 		return;
 
-	UVMHIST_LOG(maphist, "pm=%#jx remove_all %jd", (uintptr_t)pm,
-	    pm->pm_remove_all, 0, 0);
-
 	if (pm->pm_remove_all) {
 #ifdef ARM_MMU_EXTENDED
 		pmap_tlb_asid_release_all(pm);
 #else
 		pmap_tlb_flushID(pm);
 #endif
 		pm->pm_remove_all = false;
 	}
 
 	/*
 	 * Drop reference count
 	 */
 	if (atomic_dec_uint_nv(&pm->pm_refs) > 0) {