Sun Mar 14 10:36:46 2021 UTC
Sprinkle kpreempt_{dis,en}able calls, ready for when preemption gets turned on.


(skrll)
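
The change is mechanical and uniform: kernel preemption is disabled immediately before a pmap or page lock is taken and re-enabled once the lock has been dropped, on every exit path, including the early returns in pmap_page_remove(), pmap_enter() and pmap_extract_coherency(). A minimal sketch of that bracketing follows; it assumes the interfaces visible in the diff below (kpreempt_disable()/kpreempt_enable() from <sys/cpu.h>, the pmap lock helpers and pmap_get_l2_bucket()), and the function example_pmap_op() and its body are illustrative, not code from pmap.c:

	#include <sys/param.h>
	#include <sys/cpu.h>	/* kpreempt_disable() / kpreempt_enable() */

	/*
	 * Illustrative sketch: keep the LWP on its CPU while per-pmap
	 * state is held, and make sure every return path re-enables
	 * preemption in the reverse order of acquisition.
	 */
	static int
	example_pmap_op(pmap_t pm, vaddr_t va)
	{
		kpreempt_disable();
		pmap_acquire_pmap_lock(pm);

		struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
		if (l2b == NULL) {
			/* Early-return path: unlock, then re-enable. */
			pmap_release_pmap_lock(pm);
			kpreempt_enable();
			return ENOMEM;
		}

		/* ... modify PTEs, flush TLB entries ... */

		pmap_release_pmap_lock(pm);
		kpreempt_enable();
		return 0;
	}

Helpers that are only ever reached with preemption already off assert it rather than disabling it again, as pmap_pv_remove() now does with KASSERT(kpreempt_disabled()).
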
diff -r1.425 -r1.426 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.425 -r1.426 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2021/02/01 19:02:28 1.425
+++ src/sys/arch/arm/arm32/pmap.c 2021/03/14 10:36:46 1.426
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.425 2021/02/01 19:02:28 skrll Exp $ */ 1/* $NetBSD: pmap.c,v 1.426 2021/03/14 10:36:46 skrll Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -182,27 +182,27 @@ @@ -182,27 +182,27 @@
182/* Include header files */ 182/* Include header files */
183 183
184#include "opt_arm_debug.h" 184#include "opt_arm_debug.h"
185#include "opt_cpuoptions.h" 185#include "opt_cpuoptions.h"
186#include "opt_ddb.h" 186#include "opt_ddb.h"
187#include "opt_lockdebug.h" 187#include "opt_lockdebug.h"
188#include "opt_multiprocessor.h" 188#include "opt_multiprocessor.h"
189 189
190#ifdef MULTIPROCESSOR 190#ifdef MULTIPROCESSOR
191#define _INTR_PRIVATE 191#define _INTR_PRIVATE
192#endif 192#endif
193 193
194#include <sys/cdefs.h> 194#include <sys/cdefs.h>
195__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.425 2021/02/01 19:02:28 skrll Exp $"); 195__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.426 2021/03/14 10:36:46 skrll Exp $");
196 196
197#include <sys/param.h> 197#include <sys/param.h>
198#include <sys/types.h> 198#include <sys/types.h>
199 199
200#include <sys/asan.h> 200#include <sys/asan.h>
201#include <sys/atomic.h> 201#include <sys/atomic.h>
202#include <sys/bus.h> 202#include <sys/bus.h>
203#include <sys/cpu.h> 203#include <sys/cpu.h>
204#include <sys/intr.h> 204#include <sys/intr.h>
205#include <sys/kernel.h> 205#include <sys/kernel.h>
206#include <sys/kernhist.h> 206#include <sys/kernhist.h>
207#include <sys/kmem.h> 207#include <sys/kmem.h>
208#include <sys/pool.h> 208#include <sys/pool.h>
@@ -2827,40 +2827,43 @@ static void @@ -2827,40 +2827,43 @@ static void
2827pmap_page_remove(struct vm_page_md *md, paddr_t pa) 2827pmap_page_remove(struct vm_page_md *md, paddr_t pa)
2828{ 2828{
2829 struct l2_bucket *l2b; 2829 struct l2_bucket *l2b;
2830 struct pv_entry *pv; 2830 struct pv_entry *pv;
2831 pt_entry_t *ptep; 2831 pt_entry_t *ptep;
2832#ifndef ARM_MMU_EXTENDED 2832#ifndef ARM_MMU_EXTENDED
2833 bool flush = false; 2833 bool flush = false;
2834#endif 2834#endif
2835 u_int flags = 0; 2835 u_int flags = 0;
2836 2836
2837 UVMHIST_FUNC(__func__); 2837 UVMHIST_FUNC(__func__);
2838 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0); 2838 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx", (uintptr_t)md, pa, 0, 0);
2839 2839
 2840 kpreempt_disable();
2840 pmap_acquire_page_lock(md); 2841 pmap_acquire_page_lock(md);
2841 struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list); 2842 struct pv_entry **pvp = &SLIST_FIRST(&md->pvh_list);
2842 if (*pvp == NULL) { 2843 if (*pvp == NULL) {
2843#ifdef PMAP_CACHE_VIPT 2844#ifdef PMAP_CACHE_VIPT
2844 /* 2845 /*
2845 * We *know* the page contents are about to be replaced. 2846 * We *know* the page contents are about to be replaced.
2846 * Discard the exec contents 2847 * Discard the exec contents
2847 */ 2848 */
2848 if (PV_IS_EXEC_P(md->pvh_attrs)) 2849 if (PV_IS_EXEC_P(md->pvh_attrs))
2849 PMAPCOUNT(exec_discarded_page_protect); 2850 PMAPCOUNT(exec_discarded_page_protect);
2850 md->pvh_attrs &= ~PVF_EXEC; 2851 md->pvh_attrs &= ~PVF_EXEC;
2851 PMAP_VALIDATE_MD_PAGE(md); 2852 PMAP_VALIDATE_MD_PAGE(md);
2852#endif 2853#endif
2853 pmap_release_page_lock(md); 2854 pmap_release_page_lock(md);
 2855 kpreempt_enable();
 2856
2854 return; 2857 return;
2855 } 2858 }
2856#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 2859#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
2857 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); 2860 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
2858#endif 2861#endif
2859 2862
2860 /* 2863 /*
2861 * Clear alias counts 2864 * Clear alias counts
2862 */ 2865 */
2863#ifdef PMAP_CACHE_VIVT 2866#ifdef PMAP_CACHE_VIVT
2864 md->k_mappings = 0; 2867 md->k_mappings = 0;
2865#endif 2868#endif
2866 md->urw_mappings = md->uro_mappings = 0; 2869 md->urw_mappings = md->uro_mappings = 0;
@@ -2994,26 +2997,28 @@ pmap_page_remove(struct vm_page_md *md,  @@ -2994,26 +2997,28 @@ pmap_page_remove(struct vm_page_md *md,
2994 * Note: We can't use pmap_tlb_flush{I,D}() here since that 2997 * Note: We can't use pmap_tlb_flush{I,D}() here since that
2995 * would need a subsequent call to pmap_update() to ensure 2998 * would need a subsequent call to pmap_update() to ensure
2996 * curpm->pm_cstate.cs_all is reset. Our callers are not 2999 * curpm->pm_cstate.cs_all is reset. Our callers are not
2997 * required to do that (see pmap(9)), so we can't modify 3000 * required to do that (see pmap(9)), so we can't modify
2998 * the current pmap's state. 3001 * the current pmap's state.
2999 */ 3002 */
3000 if (PV_BEEN_EXECD(flags)) 3003 if (PV_BEEN_EXECD(flags))
3001 cpu_tlb_flushID(); 3004 cpu_tlb_flushID();
3002 else 3005 else
3003 cpu_tlb_flushD(); 3006 cpu_tlb_flushD();
3004 } 3007 }
3005 cpu_cpwait(); 3008 cpu_cpwait();
3006#endif /* ARM_MMU_EXTENDED */ 3009#endif /* ARM_MMU_EXTENDED */
 3010
 3011 kpreempt_enable();
3007} 3012}
3008 3013
3009/* 3014/*
3010 * pmap_t pmap_create(void) 3015 * pmap_t pmap_create(void)
3011 * 3016 *
3012 * Create a new pmap structure from scratch. 3017 * Create a new pmap structure from scratch.
3013 */ 3018 */
3014pmap_t 3019pmap_t
3015pmap_create(void) 3020pmap_create(void)
3016{ 3021{
3017 pmap_t pm; 3022 pmap_t pm;
3018 3023
3019 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 3024 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
@@ -3101,40 +3106,43 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_ @@ -3101,40 +3106,43 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
3101 * with the kernel_pmap locked 3106 * with the kernel_pmap locked
3102 */ 3107 */
3103 if (pg || pp) 3108 if (pg || pp)
3104 new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT); 3109 new_pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3105 3110
3106 nflags = 0; 3111 nflags = 0;
3107 if (prot & VM_PROT_WRITE) 3112 if (prot & VM_PROT_WRITE)
3108 nflags |= PVF_WRITE; 3113 nflags |= PVF_WRITE;
3109 if (prot & VM_PROT_EXECUTE) 3114 if (prot & VM_PROT_EXECUTE)
3110 nflags |= PVF_EXEC; 3115 nflags |= PVF_EXEC;
3111 if (flags & PMAP_WIRED) 3116 if (flags & PMAP_WIRED)
3112 nflags |= PVF_WIRED; 3117 nflags |= PVF_WIRED;
3113 3118
 3119 kpreempt_disable();
3114 pmap_acquire_pmap_lock(pm); 3120 pmap_acquire_pmap_lock(pm);
3115 3121
3116 /* 3122 /*
3117 * Fetch the L2 bucket which maps this page, allocating one if 3123 * Fetch the L2 bucket which maps this page, allocating one if
3118 * necessary for user pmaps. 3124 * necessary for user pmaps.
3119 */ 3125 */
3120 if (kpm_p) { 3126 if (kpm_p) {
3121 l2b = pmap_get_l2_bucket(pm, va); 3127 l2b = pmap_get_l2_bucket(pm, va);
3122 } else { 3128 } else {
3123 l2b = pmap_alloc_l2_bucket(pm, va); 3129 l2b = pmap_alloc_l2_bucket(pm, va);
3124 } 3130 }
3125 if (l2b == NULL) { 3131 if (l2b == NULL) {
3126 if (flags & PMAP_CANFAIL) { 3132 if (flags & PMAP_CANFAIL) {
3127 pmap_release_pmap_lock(pm); 3133 pmap_release_pmap_lock(pm);
 3134 kpreempt_enable();
 3135
3128 error = ENOMEM; 3136 error = ENOMEM;
3129 goto free_pv; 3137 goto free_pv;
3130 } 3138 }
3131 panic("pmap_enter: failed to allocate L2 bucket"); 3139 panic("pmap_enter: failed to allocate L2 bucket");
3132 } 3140 }
3133 pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)]; 3141 pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
3134 const pt_entry_t opte = *ptep; 3142 const pt_entry_t opte = *ptep;
3135 pt_entry_t npte = pa; 3143 pt_entry_t npte = pa;
3136 oflags = 0; 3144 oflags = 0;
3137 3145
3138 if (opte) { 3146 if (opte) {
3139 /* 3147 /*
3140 * There is already a mapping at this address. 3148 * There is already a mapping at this address.
@@ -3409,33 +3417,34 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_ @@ -3409,33 +3417,34 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
3409 if (pg) { 3417 if (pg) {
3410 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3418 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3411 3419
3412 pmap_acquire_page_lock(md); 3420 pmap_acquire_page_lock(md);
3413#ifndef ARM_MMU_EXTENDED 3421#ifndef ARM_MMU_EXTENDED
3414 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3422 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3415#endif 3423#endif
3416 PMAP_VALIDATE_MD_PAGE(md); 3424 PMAP_VALIDATE_MD_PAGE(md);
3417 pmap_release_page_lock(md); 3425 pmap_release_page_lock(md);
3418 } 3426 }
3419#endif 3427#endif
3420 3428
3421 pmap_release_pmap_lock(pm); 3429 pmap_release_pmap_lock(pm);
3422 3430 kpreempt_enable();
3423 3431
3424 if (old_pv) 3432 if (old_pv)
3425 pool_put(&pmap_pv_pool, old_pv); 3433 pool_put(&pmap_pv_pool, old_pv);
3426free_pv: 3434free_pv:
3427 if (new_pv) 3435 if (new_pv)
3428 pool_put(&pmap_pv_pool, new_pv); 3436 pool_put(&pmap_pv_pool, new_pv);
 3437
3429 return error; 3438 return error;
3430} 3439}
3431 3440
3432/* 3441/*
3433 * pmap_remove() 3442 * pmap_remove()
3434 * 3443 *
3435 * pmap_remove is responsible for nuking a number of mappings for a range 3444 * pmap_remove is responsible for nuking a number of mappings for a range
3436 * of virtual address space in the current pmap. To do this efficiently 3445 * of virtual address space in the current pmap. To do this efficiently
3437 * is interesting, because in a number of cases a wide virtual address 3446 * is interesting, because in a number of cases a wide virtual address
3438 * range may be supplied that contains few actual mappings. So, the 3447 * range may be supplied that contains few actual mappings. So, the
3439 * optimisations are: 3448 * optimisations are:
3440 * 1. Skip over hunks of address space for which no L1 or L2 entry exists. 3449 * 1. Skip over hunks of address space for which no L1 or L2 entry exists.
3441 * 2. Build up a list of pages we've hit, up to a maximum, so we can 3450 * 2. Build up a list of pages we've hit, up to a maximum, so we can
@@ -3460,26 +3469,27 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd @@ -3460,26 +3469,27 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
3460 UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)", 3469 UVMHIST_CALLARGS(maphist, " (pm=%#jx, sva=%#jx, eva=%#jx)",
3461 (uintptr_t)pm, sva, eva, 0); 3470 (uintptr_t)pm, sva, eva, 0);
3462 3471
3463#ifdef PMAP_FAULTINFO 3472#ifdef PMAP_FAULTINFO
3464 curpcb->pcb_faultinfo.pfi_faultaddr = 0; 3473 curpcb->pcb_faultinfo.pfi_faultaddr = 0;
3465 curpcb->pcb_faultinfo.pfi_repeats = 0; 3474 curpcb->pcb_faultinfo.pfi_repeats = 0;
3466 curpcb->pcb_faultinfo.pfi_faultptep = NULL; 3475 curpcb->pcb_faultinfo.pfi_faultptep = NULL;
3467#endif 3476#endif
3468 3477
3469 SLIST_INIT(&opv_list); 3478 SLIST_INIT(&opv_list);
3470 /* 3479 /*
3471 * we lock in the pmap => pv_head direction 3480 * we lock in the pmap => pv_head direction
3472 */ 3481 */
 3482 kpreempt_disable();
3473 pmap_acquire_pmap_lock(pm); 3483 pmap_acquire_pmap_lock(pm);
3474 3484
3475#ifndef ARM_MMU_EXTENDED 3485#ifndef ARM_MMU_EXTENDED
3476 u_int cleanlist_idx, total, cnt; 3486 u_int cleanlist_idx, total, cnt;
3477 struct { 3487 struct {
3478 vaddr_t va; 3488 vaddr_t va;
3479 pt_entry_t *ptep; 3489 pt_entry_t *ptep;
3480 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3490 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
3481 3491
3482 if (pm->pm_remove_all || !pmap_is_cached(pm)) { 3492 if (pm->pm_remove_all || !pmap_is_cached(pm)) {
3483 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3493 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3484 if (pm->pm_cstate.cs_tlb == 0) 3494 if (pm->pm_cstate.cs_tlb == 0)
3485 pm->pm_remove_all = true; 3495 pm->pm_remove_all = true;
@@ -3627,26 +3637,28 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd @@ -3627,26 +3637,28 @@ pmap_remove(pmap_t pm, vaddr_t sva, vadd
3627#ifdef PMAP_CACHE_VIVT 3637#ifdef PMAP_CACHE_VIVT
3628 pmap_cache_wbinv_all(pm, PVF_EXEC); 3638 pmap_cache_wbinv_all(pm, PVF_EXEC);
3629#endif 3639#endif
3630 pm->pm_remove_all = true; 3640 pm->pm_remove_all = true;
3631 } 3641 }
3632 } 3642 }
3633#endif /* ARM_MMU_EXTENDED */ 3643#endif /* ARM_MMU_EXTENDED */
3634 3644
3635 pmap_free_l2_bucket(pm, l2b, mappings); 3645 pmap_free_l2_bucket(pm, l2b, mappings);
3636 pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE); 3646 pm->pm_stats.resident_count -= mappings / (PAGE_SIZE/L2_S_SIZE);
3637 } 3647 }
3638 3648
3639 pmap_release_pmap_lock(pm); 3649 pmap_release_pmap_lock(pm);
 3650 kpreempt_enable();
 3651
3640 SLIST_FOREACH_SAFE(pv, &opv_list, pv_link, npv) { 3652 SLIST_FOREACH_SAFE(pv, &opv_list, pv_link, npv) {
3641 pool_put(&pmap_pv_pool, pv); 3653 pool_put(&pmap_pv_pool, pv);
3642 } 3654 }
3643} 3655}
3644 3656
3645#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3657#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3646static struct pv_entry * 3658static struct pv_entry *
3647pmap_kremove_pg(struct vm_page *pg, vaddr_t va) 3659pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
3648{ 3660{
3649 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3661 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3650 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3662 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3651 struct pv_entry *pv; 3663 struct pv_entry *pv;
3652 3664
@@ -3699,26 +3711,27 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v @@ -3699,26 +3711,27 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
3699 struct pv_entry *pv = NULL; 3711 struct pv_entry *pv = NULL;
3700#endif 3712#endif
3701#endif 3713#endif
3702 struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL; 3714 struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
3703 3715
3704 UVMHIST_FUNC(__func__); 3716 UVMHIST_FUNC(__func__);
3705 3717
3706 if (pmap_initialized) { 3718 if (pmap_initialized) {
3707 UVMHIST_CALLARGS(maphist, 3719 UVMHIST_CALLARGS(maphist,
3708 "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot, 3720 "va=%#jx, pa=%#jx, prot=%#jx, flags=%#jx", va, pa, prot,
3709 flags); 3721 flags);
3710 } 3722 }
3711 3723
 3724 kpreempt_disable();
3712 pmap_t kpm = pmap_kernel(); 3725 pmap_t kpm = pmap_kernel();
3713 pmap_acquire_pmap_lock(kpm); 3726 pmap_acquire_pmap_lock(kpm);
3714 struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); 3727 struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
3715 const size_t l1slot __diagused = l1pte_index(va); 3728 const size_t l1slot __diagused = l1pte_index(va);
3716 KASSERTMSG(l2b != NULL, 3729 KASSERTMSG(l2b != NULL,
3717 "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p", 3730 "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p",
3718 va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)], 3731 va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)],
3719 kpm->pm_l2[L2_IDX(l1slot)] 3732 kpm->pm_l2[L2_IDX(l1slot)]
3720 ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)] 3733 ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)]
3721 : NULL, 3734 : NULL,
3722 kpm->pm_l2[L2_IDX(l1slot)] 3735 kpm->pm_l2[L2_IDX(l1slot)]
3723 ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva 3736 ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva
3724 : NULL); 3737 : NULL);
@@ -3835,48 +3848,51 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v @@ -3835,48 +3848,51 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
3835 && !(md->pvh_attrs & PVF_NC)) 3848 && !(md->pvh_attrs & PVF_NC))
3836 md->pvh_attrs |= PVF_DIRTY; 3849 md->pvh_attrs |= PVF_DIRTY;
3837 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3850 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3838 pmap_vac_me_harder(md, pa, pmap_kernel(), va); 3851 pmap_vac_me_harder(md, pa, pmap_kernel(), va);
3839 pmap_release_page_lock(md); 3852 pmap_release_page_lock(md);
3840#endif 3853#endif
3841 } 3854 }
3842#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED) 3855#if defined(PMAP_CACHE_VIPT) && !defined(ARM_MMU_EXTENDED)
3843 } else { 3856 } else {
3844 if (pv != NULL) 3857 if (pv != NULL)
3845 pool_put(&pmap_pv_pool, pv); 3858 pool_put(&pmap_pv_pool, pv);
3846#endif 3859#endif
3847 } 3860 }
 3861 kpreempt_enable();
 3862
3848 if (pmap_initialized) { 3863 if (pmap_initialized) {
3849 UVMHIST_LOG(maphist, " <-- done (ptep %#jx: %#jx -> %#jx)", 3864 UVMHIST_LOG(maphist, " <-- done (ptep %#jx: %#jx -> %#jx)",
3850 (uintptr_t)ptep, opte, npte, 0); 3865 (uintptr_t)ptep, opte, npte, 0);
3851 } 3866 }
3852 3867
3853} 3868}
3854 3869
3855void 3870void
3856pmap_kremove(vaddr_t va, vsize_t len) 3871pmap_kremove(vaddr_t va, vsize_t len)
3857{ 3872{
3858#ifdef UVMHIST 3873#ifdef UVMHIST
3859 u_int total_mappings = 0; 3874 u_int total_mappings = 0;
3860#endif 3875#endif
3861 3876
3862 PMAPCOUNT(kenter_unmappings); 3877 PMAPCOUNT(kenter_unmappings);
3863 3878
3864 UVMHIST_FUNC(__func__); 3879 UVMHIST_FUNC(__func__);
3865 UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0); 3880 UVMHIST_CALLARGS(maphist, " (va=%#jx, len=%#jx)", va, len, 0, 0);
3866 3881
3867 const vaddr_t eva = va + len; 3882 const vaddr_t eva = va + len;
3868 pmap_t kpm = pmap_kernel(); 3883 pmap_t kpm = pmap_kernel();
3869 3884
 3885 kpreempt_disable();
3870 pmap_acquire_pmap_lock(kpm); 3886 pmap_acquire_pmap_lock(kpm);
3871 3887
3872 while (va < eva) { 3888 while (va < eva) {
3873 vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va); 3889 vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va);
3874 if (next_bucket > eva) 3890 if (next_bucket > eva)
3875 next_bucket = eva; 3891 next_bucket = eva;
3876 3892
3877 struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va); 3893 struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
3878 KDASSERT(l2b != NULL); 3894 KDASSERT(l2b != NULL);
3879 3895
3880 pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)]; 3896 pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
3881 pt_entry_t *ptep = sptep; 3897 pt_entry_t *ptep = sptep;
3882 u_int mappings = 0; 3898 u_int mappings = 0;
@@ -3921,47 +3937,50 @@ pmap_kremove(vaddr_t va, vsize_t len) @@ -3921,47 +3937,50 @@ pmap_kremove(vaddr_t va, vsize_t len)
3921 va += PAGE_SIZE; 3937 va += PAGE_SIZE;
3922 ptep += PAGE_SIZE / L2_S_SIZE; 3938 ptep += PAGE_SIZE / L2_S_SIZE;
3923 } 3939 }
3924 KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u", 3940 KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u",
3925 mappings, l2b->l2b_occupancy); 3941 mappings, l2b->l2b_occupancy);
3926 l2b->l2b_occupancy -= mappings; 3942 l2b->l2b_occupancy -= mappings;
3927 //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 3943 //PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
3928#ifdef UVMHIST 3944#ifdef UVMHIST
3929 total_mappings += mappings; 3945 total_mappings += mappings;
3930#endif 3946#endif
3931 } 3947 }
3932 pmap_release_pmap_lock(kpm); 3948 pmap_release_pmap_lock(kpm);
3933 cpu_cpwait(); 3949 cpu_cpwait();
 3950 kpreempt_enable();
 3951
3934 UVMHIST_LOG(maphist, " <--- done (%ju mappings removed)", 3952 UVMHIST_LOG(maphist, " <--- done (%ju mappings removed)",
3935 total_mappings, 0, 0, 0); 3953 total_mappings, 0, 0, 0);
3936} 3954}
3937 3955
3938bool 3956bool
3939pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 3957pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
3940{ 3958{
3941 3959
3942 return pmap_extract_coherency(pm, va, pap, NULL); 3960 return pmap_extract_coherency(pm, va, pap, NULL);
3943} 3961}
3944 3962
3945bool 3963bool
3946pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp) 3964pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp)
3947{ 3965{
3948 struct l2_dtable *l2; 3966 struct l2_dtable *l2;
3949 pd_entry_t *pdep, pde; 3967 pd_entry_t *pdep, pde;
3950 pt_entry_t *ptep, pte; 3968 pt_entry_t *ptep, pte;
3951 paddr_t pa; 3969 paddr_t pa;
3952 u_int l1slot; 3970 u_int l1slot;
3953 bool coherent; 3971 bool coherent;
3954 3972
 3973 kpreempt_disable();
3955 pmap_acquire_pmap_lock(pm); 3974 pmap_acquire_pmap_lock(pm);
3956 3975
3957 l1slot = l1pte_index(va); 3976 l1slot = l1pte_index(va);
3958 pdep = pmap_l1_kva(pm) + l1slot; 3977 pdep = pmap_l1_kva(pm) + l1slot;
3959 pde = *pdep; 3978 pde = *pdep;
3960 3979
3961 if (l1pte_section_p(pde)) { 3980 if (l1pte_section_p(pde)) {
3962 /* 3981 /*
3963 * These should only happen for pmap_kernel() 3982 * These should only happen for pmap_kernel()
3964 */ 3983 */
3965 KDASSERT(pm == pmap_kernel()); 3984 KDASSERT(pm == pmap_kernel());
3966 pmap_release_pmap_lock(pm); 3985 pmap_release_pmap_lock(pm);
3967#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 3986#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
@@ -3972,31 +3991,34 @@ pmap_extract_coherency(pmap_t pm, vaddr_ @@ -3972,31 +3991,34 @@ pmap_extract_coherency(pmap_t pm, vaddr_
3972 pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET); 3991 pa = (pde & L1_S_FRAME) | (va & L1_S_OFFSET);
3973 coherent = (pde & L1_S_CACHE_MASK) == 0; 3992 coherent = (pde & L1_S_CACHE_MASK) == 0;
3974 } else { 3993 } else {
3975 /* 3994 /*
3976 * Note that we can't rely on the validity of the L1 3995 * Note that we can't rely on the validity of the L1
3977 * descriptor as an indication that a mapping exists. 3996 * descriptor as an indication that a mapping exists.
3978 * We have to look it up in the L2 dtable. 3997 * We have to look it up in the L2 dtable.
3979 */ 3998 */
3980 l2 = pm->pm_l2[L2_IDX(l1slot)]; 3999 l2 = pm->pm_l2[L2_IDX(l1slot)];
3981 4000
3982 if (l2 == NULL || 4001 if (l2 == NULL ||
3983 (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) { 4002 (ptep = l2->l2_bucket[L2_BUCKET(l1slot)].l2b_kva) == NULL) {
3984 pmap_release_pmap_lock(pm); 4003 pmap_release_pmap_lock(pm);
 4004 kpreempt_enable();
 4005
3985 return false; 4006 return false;
3986 } 4007 }
3987 4008
3988 pte = ptep[l2pte_index(va)]; 4009 pte = ptep[l2pte_index(va)];
3989 pmap_release_pmap_lock(pm); 4010 pmap_release_pmap_lock(pm);
 4011 kpreempt_enable();
3990 4012
3991 if (pte == 0) 4013 if (pte == 0)
3992 return false; 4014 return false;
3993 4015
3994 switch (pte & L2_TYPE_MASK) { 4016 switch (pte & L2_TYPE_MASK) {
3995 case L2_TYPE_L: 4017 case L2_TYPE_L:
3996 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 4018 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3997 coherent = (pte & L2_L_CACHE_MASK) == 0; 4019 coherent = (pte & L2_L_CACHE_MASK) == 0;
3998 break; 4020 break;
3999 4021
4000 default: 4022 default:
4001 pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK); 4023 pa = (pte & ~PAGE_MASK) | (va & PAGE_MASK);
4002 coherent = (pte & L2_S_CACHE_MASK) == 0; 4024 coherent = (pte & L2_S_CACHE_MASK) == 0;
@@ -4013,26 +4035,27 @@ pmap_extract_coherency(pmap_t pm, vaddr_ @@ -4013,26 +4035,27 @@ pmap_extract_coherency(pmap_t pm, vaddr_
4013 return true; 4035 return true;
4014} 4036}
4015 4037
4016/* 4038/*
4017 * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps 4039 * pmap_pv_remove: remove an unmanaged pv-tracked page from all pmaps
4018 * that map it 4040 * that map it
4019 */ 4041 */
4020 4042
4021static void 4043static void
4022pmap_pv_remove(paddr_t pa) 4044pmap_pv_remove(paddr_t pa)
4023{ 4045{
4024 struct pmap_page *pp; 4046 struct pmap_page *pp;
4025 4047
 4048 KASSERT(kpreempt_disabled());
4026 pp = pmap_pv_tracked(pa); 4049 pp = pmap_pv_tracked(pa);
4027 if (pp == NULL) 4050 if (pp == NULL)
4028 panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR, 4051 panic("pmap_pv_protect: page not pv-tracked: 0x%"PRIxPADDR,
4029 pa); 4052 pa);
4030 4053
4031 struct vm_page_md *md = PMAP_PAGE_TO_MD(pp); 4054 struct vm_page_md *md = PMAP_PAGE_TO_MD(pp);
4032 pmap_page_remove(md, pa); 4055 pmap_page_remove(md, pa);
4033} 4056}
4034 4057
4035void 4058void
4036pmap_pv_protect(paddr_t pa, vm_prot_t prot) 4059pmap_pv_protect(paddr_t pa, vm_prot_t prot)
4037{ 4060{
4038 4061
@@ -4054,26 +4077,27 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad @@ -4054,26 +4077,27 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
4054 if ((prot & VM_PROT_READ) == 0) { 4077 if ((prot & VM_PROT_READ) == 0) {
4055 pmap_remove(pm, sva, eva); 4078 pmap_remove(pm, sva, eva);
4056 return; 4079 return;
4057 } 4080 }
4058 4081
4059 if (prot & VM_PROT_WRITE) { 4082 if (prot & VM_PROT_WRITE) {
4060 /* 4083 /*
4061 * If this is a read->write transition, just ignore it and let 4084 * If this is a read->write transition, just ignore it and let
4062 * uvm_fault() take care of it later. 4085 * uvm_fault() take care of it later.
4063 */ 4086 */
4064 return; 4087 return;
4065 } 4088 }
4066 4089
 4090 kpreempt_disable();
4067 pmap_acquire_pmap_lock(pm); 4091 pmap_acquire_pmap_lock(pm);
4068 4092
4069#ifndef ARM_MMU_EXTENDED 4093#ifndef ARM_MMU_EXTENDED
4070 const bool flush = eva - sva >= PAGE_SIZE * 4; 4094 const bool flush = eva - sva >= PAGE_SIZE * 4;
4071 u_int flags = 0; 4095 u_int flags = 0;
4072#endif 4096#endif
4073 u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); 4097 u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
4074 4098
4075 while (sva < eva) { 4099 while (sva < eva) {
4076 next_bucket = L2_NEXT_BUCKET_VA(sva); 4100 next_bucket = L2_NEXT_BUCKET_VA(sva);
4077 if (next_bucket > eva) 4101 if (next_bucket > eva)
4078 next_bucket = eva; 4102 next_bucket = eva;
4079 4103
@@ -4144,26 +4168,27 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad @@ -4144,26 +4168,27 @@ pmap_protect(pmap_t pm, vaddr_t sva, vad
4144 } 4168 }
4145 4169
4146#ifndef ARM_MMU_EXTENDED 4170#ifndef ARM_MMU_EXTENDED
4147 if (flush) { 4171 if (flush) {
4148 if (PV_BEEN_EXECD(flags)) { 4172 if (PV_BEEN_EXECD(flags)) {
4149 pmap_tlb_flushID(pm); 4173 pmap_tlb_flushID(pm);
4150 } else if (PV_BEEN_REFD(flags)) { 4174 } else if (PV_BEEN_REFD(flags)) {
4151 pmap_tlb_flushD(pm); 4175 pmap_tlb_flushD(pm);
4152 } 4176 }
4153 } 4177 }
4154#endif 4178#endif
4155 4179
4156 pmap_release_pmap_lock(pm); 4180 pmap_release_pmap_lock(pm);
 4181 kpreempt_enable();
4157} 4182}
4158 4183
4159void 4184void
4160pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) 4185pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
4161{ 4186{
4162 struct l2_bucket *l2b; 4187 struct l2_bucket *l2b;
4163 pt_entry_t *ptep; 4188 pt_entry_t *ptep;
4164 vaddr_t next_bucket; 4189 vaddr_t next_bucket;
4165 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; 4190 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
4166 4191
4167 UVMHIST_FUNC(__func__); 4192 UVMHIST_FUNC(__func__);
4168 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx", 4193 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx...#%jx",
4169 (uintptr_t)pm, sva, eva, 0); 4194 (uintptr_t)pm, sva, eva, 0);
@@ -4354,26 +4379,27 @@ pmap_prefetchabt_fixup(void *v) @@ -4354,26 +4379,27 @@ pmap_prefetchabt_fixup(void *v)
4354 PTE_SYNC(ptep); 4379 PTE_SYNC(ptep);
4355 pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF); 4380 pmap_tlb_flush_SE(pm, va, PVF_EXEC | PVF_REF);
4356 if (!PV_IS_EXEC_P(md->pvh_attrs)) { 4381 if (!PV_IS_EXEC_P(md->pvh_attrs)) {
4357 pmap_syncicache_page(md, pa); 4382 pmap_syncicache_page(md, pa);
4358 } 4383 }
4359 rv = ABORT_FIXUP_RETURN; 4384 rv = ABORT_FIXUP_RETURN;
4360 l2pte_set(ptep, opte & ~L2_XS_XN, 0); 4385 l2pte_set(ptep, opte & ~L2_XS_XN, 0);
4361 PTE_SYNC(ptep); 4386 PTE_SYNC(ptep);
4362 } 4387 }
4363 pmap_release_page_lock(md); 4388 pmap_release_page_lock(md);
4364 4389
4365 out: 4390 out:
4366 kpreempt_enable(); 4391 kpreempt_enable();
 4392
4367 return rv; 4393 return rv;
4368} 4394}
4369#endif 4395#endif
4370 4396
4371int 4397int
4372pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) 4398pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
4373{ 4399{
4374 struct l2_dtable *l2; 4400 struct l2_dtable *l2;
4375 struct l2_bucket *l2b; 4401 struct l2_bucket *l2b;
4376 paddr_t pa; 4402 paddr_t pa;
4377 const size_t l1slot = l1pte_index(va); 4403 const size_t l1slot = l1pte_index(va);
4378 int rv = 0; 4404 int rv = 0;
4379 4405
@@ -4382,26 +4408,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,  @@ -4382,26 +4408,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
4382 (uintptr_t)pm, va, ftype, user); 4408 (uintptr_t)pm, va, ftype, user);
4383 4409
4384 va = trunc_page(va); 4410 va = trunc_page(va);
4385 4411
4386 KASSERT(!user || (pm != pmap_kernel())); 4412 KASSERT(!user || (pm != pmap_kernel()));
4387 4413
4388#ifdef ARM_MMU_EXTENDED 4414#ifdef ARM_MMU_EXTENDED
4389 UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx", 4415 UVMHIST_LOG(maphist, " ti=%#jx pai=%#jx asid=%#jx",
4390 (uintptr_t)cpu_tlb_info(curcpu()), 4416 (uintptr_t)cpu_tlb_info(curcpu()),
4391 (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())), 4417 (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
4392 (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0); 4418 (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
4393#endif 4419#endif
4394 4420
 4421 kpreempt_disable();
4395 pmap_acquire_pmap_lock(pm); 4422 pmap_acquire_pmap_lock(pm);
4396 4423
4397 /* 4424 /*
4398 * If there is no l2_dtable for this address, then the process 4425 * If there is no l2_dtable for this address, then the process
4399 * has no business accessing it. 4426 * has no business accessing it.
4400 * 4427 *
4401 * Note: This will catch userland processes trying to access 4428 * Note: This will catch userland processes trying to access
4402 * kernel addresses. 4429 * kernel addresses.
4403 */ 4430 */
4404 l2 = pm->pm_l2[L2_IDX(l1slot)]; 4431 l2 = pm->pm_l2[L2_IDX(l1slot)];
4405 if (l2 == NULL) { 4432 if (l2 == NULL) {
4406 UVMHIST_LOG(maphist, " no l2 for l1slot %#jx", l1slot, 0, 0, 0); 4433 UVMHIST_LOG(maphist, " no l2 for l1slot %#jx", l1slot, 0, 0, 0);
4407 goto out; 4434 goto out;
@@ -4775,26 +4802,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,  @@ -4775,26 +4802,27 @@ pmap_fault_fixup(pmap_t pm, vaddr_t va,
4775#endif 4802#endif
4776#endif 4803#endif
4777 4804
4778#ifndef ARM_MMU_EXTENDED 4805#ifndef ARM_MMU_EXTENDED
4779 /* Flush the TLB in the shared L1 case - see comment above */ 4806 /* Flush the TLB in the shared L1 case - see comment above */
4780 pmap_tlb_flush_SE(pm, va, 4807 pmap_tlb_flush_SE(pm, va,
4781 (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF); 4808 (ftype & VM_PROT_EXECUTE) ? PVF_EXEC | PVF_REF : PVF_REF);
4782#endif 4809#endif
4783 4810
4784 rv = 1; 4811 rv = 1;
4785 4812
4786out: 4813out:
4787 pmap_release_pmap_lock(pm); 4814 pmap_release_pmap_lock(pm);
 4815 kpreempt_enable();
4788 4816
4789 return rv; 4817 return rv;
4790} 4818}
4791 4819
4792/* 4820/*
4793 * Routine: pmap_procwr 4821 * Routine: pmap_procwr
4794 * 4822 *
4795 * Function: 4823 * Function:
4796 * Synchronize caches corresponding to [addr, addr+len) in p. 4824 * Synchronize caches corresponding to [addr, addr+len) in p.
4797 * 4825 *
4798 */ 4826 */
4799void 4827void
4800pmap_procwr(struct proc *p, vaddr_t va, int len) 4828pmap_procwr(struct proc *p, vaddr_t va, int len)
@@ -4815,47 +4843,49 @@ pmap_procwr(struct proc *p, vaddr_t va,  @@ -4815,47 +4843,49 @@ pmap_procwr(struct proc *p, vaddr_t va,
4815 * The mapping must already exist in the pmap. 4843 * The mapping must already exist in the pmap.
4816 */ 4844 */
4817void 4845void
4818pmap_unwire(pmap_t pm, vaddr_t va) 4846pmap_unwire(pmap_t pm, vaddr_t va)
4819{ 4847{
4820 struct l2_bucket *l2b; 4848 struct l2_bucket *l2b;
4821 pt_entry_t *ptep, pte; 4849 pt_entry_t *ptep, pte;
4822 struct vm_page *pg; 4850 struct vm_page *pg;
4823 paddr_t pa; 4851 paddr_t pa;
4824 4852
4825 UVMHIST_FUNC(__func__); 4853 UVMHIST_FUNC(__func__);
4826 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0); 4854 UVMHIST_CALLARGS(maphist, "pm %#jx va %#jx", (uintptr_t)pm, va, 0, 0);
4827 4855
 4856 kpreempt_disable();
4828 pmap_acquire_pmap_lock(pm); 4857 pmap_acquire_pmap_lock(pm);
4829 4858
4830 l2b = pmap_get_l2_bucket(pm, va); 4859 l2b = pmap_get_l2_bucket(pm, va);
4831 KDASSERT(l2b != NULL); 4860 KDASSERT(l2b != NULL);
4832 4861
4833 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4862 ptep = &l2b->l2b_kva[l2pte_index(va)];
4834 pte = *ptep; 4863 pte = *ptep;
4835 4864
4836 /* Extract the physical address of the page */ 4865 /* Extract the physical address of the page */
4837 pa = l2pte_pa(pte); 4866 pa = l2pte_pa(pte);
4838 4867
4839 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 4868 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
4840 /* Update the wired bit in the pv entry for this page. */ 4869 /* Update the wired bit in the pv entry for this page. */
4841 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4870 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4842 4871
4843 pmap_acquire_page_lock(md); 4872 pmap_acquire_page_lock(md);
4844 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); 4873 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
4845 pmap_release_page_lock(md); 4874 pmap_release_page_lock(md);
4846 } 4875 }
4847 4876
4848 pmap_release_pmap_lock(pm); 4877 pmap_release_pmap_lock(pm);
 4878 kpreempt_enable();
4849 4879
4850 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 4880 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
4851} 4881}
4852 4882
4853#ifdef ARM_MMU_EXTENDED 4883#ifdef ARM_MMU_EXTENDED
4854void 4884void
4855pmap_md_pdetab_activate(pmap_t pm, struct lwp *l) 4885pmap_md_pdetab_activate(pmap_t pm, struct lwp *l)
4856{ 4886{
4857 UVMHIST_FUNC(__func__); 4887 UVMHIST_FUNC(__func__);
4858 struct cpu_info * const ci = curcpu(); 4888 struct cpu_info * const ci = curcpu();
4859 struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci)); 4889 struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));
4860 4890
4861 UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)", 4891 UVMHIST_CALLARGS(maphist, "pm %#jx (pm->pm_l1_pa %08jx asid %ju)",
@@ -5178,50 +5208,52 @@ pmap_update(pmap_t pm) @@ -5178,50 +5208,52 @@ pmap_update(pmap_t pm)
5178 cpu_cpwait(); 5208 cpu_cpwait();
5179 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5209 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
5180} 5210}
5181 5211
5182bool 5212bool
5183pmap_remove_all(pmap_t pm) 5213pmap_remove_all(pmap_t pm)
5184{ 5214{
5185 5215
5186 UVMHIST_FUNC(__func__); 5216 UVMHIST_FUNC(__func__);
5187 UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0); 5217 UVMHIST_CALLARGS(maphist, "(pm=%#jx)", (uintptr_t)pm, 0, 0, 0);
5188 5218
5189 KASSERT(pm != pmap_kernel()); 5219 KASSERT(pm != pmap_kernel());
5190 5220
 5221 kpreempt_disable();
5191 /* 5222 /*
5192 * The vmspace described by this pmap is about to be torn down. 5223 * The vmspace described by this pmap is about to be torn down.
5193 * Until pmap_update() is called, UVM will only make calls 5224 * Until pmap_update() is called, UVM will only make calls
5194 * to pmap_remove(). We can make life much simpler by flushing 5225 * to pmap_remove(). We can make life much simpler by flushing
5195 * the cache now, and deferring TLB invalidation to pmap_update(). 5226 * the cache now, and deferring TLB invalidation to pmap_update().
5196 */ 5227 */
5197#ifdef PMAP_CACHE_VIVT 5228#ifdef PMAP_CACHE_VIVT
5198 pmap_cache_wbinv_all(pm, PVF_EXEC); 5229 pmap_cache_wbinv_all(pm, PVF_EXEC);
5199#endif 5230#endif
5200#ifdef ARM_MMU_EXTENDED 5231#ifdef ARM_MMU_EXTENDED
5201#ifdef MULTIPROCESSOR 5232#ifdef MULTIPROCESSOR
5202 struct cpu_info * const ci = curcpu(); 5233 struct cpu_info * const ci = curcpu();
5203 // This should be the last CPU with this pmap onproc 5234 // This should be the last CPU with this pmap onproc
5204 KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci))); 5235 KASSERT(!kcpuset_isotherset(pm->pm_onproc, cpu_index(ci)));
5205 if (kcpuset_isset(pm->pm_onproc, cpu_index(ci))) 5236 if (kcpuset_isset(pm->pm_onproc, cpu_index(ci)))
5206#endif 5237#endif
5207 pmap_tlb_asid_deactivate(pm); 5238 pmap_tlb_asid_deactivate(pm);
5208#ifdef MULTIPROCESSOR 5239#ifdef MULTIPROCESSOR
5209 KASSERT(kcpuset_iszero(pm->pm_onproc)); 5240 KASSERT(kcpuset_iszero(pm->pm_onproc));
5210#endif 5241#endif
5211 5242
5212 pmap_tlb_asid_release_all(pm); 5243 pmap_tlb_asid_release_all(pm);
5213#endif 5244#endif
5214 pm->pm_remove_all = true; 5245 pm->pm_remove_all = true;
 5246 kpreempt_enable();
5215 5247
5216 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0); 5248 UVMHIST_LOG(maphist, " <-- done", 0, 0, 0, 0);
5217 return false; 5249 return false;
5218} 5250}
5219 5251
5220/* 5252/*
5221 * Retire the given physical map from service. 5253 * Retire the given physical map from service.
5222 * Should only be called if the map contains no valid mappings. 5254 * Should only be called if the map contains no valid mappings.
5223 */ 5255 */
5224void 5256void
5225pmap_destroy(pmap_t pm) 5257pmap_destroy(pmap_t pm)
5226{ 5258{
5227 UVMHIST_FUNC(__func__); 5259 UVMHIST_FUNC(__func__);