Mon Apr 13 16:19:42 2015 UTC (matt)

Add pmap locking to pmap_kenter_pa/kremove
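The change takes the kernel pmap's lock around the L2 bucket lookup and the teardown of any existing PTE in pmap_kenter_pa(), and around the whole removal loop in pmap_kremove(), via pmap_acquire_pmap_lock()/pmap_release_pmap_lock(), as shown in the diff below. As a rough sketch of the locking shape only: the following user-space model uses a pthread mutex in place of the pmap lock, and kenter_model(), kremove_model(), struct kpm_model and NPTE are names invented for this illustration, not NetBSD interfaces.

/*
 * Toy user-space model of the lock bracketing this commit adds.
 * A pthread mutex stands in for pmap_acquire_pmap_lock()/
 * pmap_release_pmap_lock(); the pte[] array stands in for the kernel
 * L2 page tables.  All names below are invented for illustration.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NPTE	16

struct kpm_model {
	pthread_mutex_t	lock;		/* the "kernel pmap lock" */
	uint32_t	pte[NPTE];	/* the "L2 PTEs" */
};

static struct kpm_model kpm = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Mirrors pmap_kenter_pa(): lock held over lookup + old-PTE teardown. */
static void
kenter_model(unsigned slot, uint32_t newpte)
{
	pthread_mutex_lock(&kpm.lock);		/* pmap_acquire_pmap_lock(kpm) */
	if (kpm.pte[slot] != 0)
		kpm.pte[slot] = 0;		/* l2pte_reset() + TLB flush */
	pthread_mutex_unlock(&kpm.lock);	/* pmap_release_pmap_lock(kpm) */

	kpm.pte[slot] = newpte;			/* l2pte_set() after the lock is dropped */
}

/* Mirrors pmap_kremove(): lock held across the whole removal loop. */
static void
kremove_model(unsigned first, unsigned n)
{
	pthread_mutex_lock(&kpm.lock);
	for (unsigned i = first; i < first + n && i < NPTE; i++)
		kpm.pte[i] = 0;
	pthread_mutex_unlock(&kpm.lock);
}

int
main(void)
{
	kenter_model(3, 0x1234);
	kremove_model(0, NPTE);
	printf("pte[3] after kremove: %#x\n", (unsigned)kpm.pte[3]);
	return 0;
}

Note that, as in the diff, pmap_kenter_pa() drops the lock before the new PTE is written, while pmap_kremove() holds it across every bucket it walks.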
diff -r1.319 -r1.320 src/sys/arch/arm/arm32/pmap.c


--- src/sys/arch/arm/arm32/pmap.c 2015/04/11 15:21:33 1.319
+++ src/sys/arch/arm/arm32/pmap.c 2015/04/13 16:19:42 1.320
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.319 2015/04/11 15:21:33 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.320 2015/04/13 16:19:42 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -206,27 +206,27 @@
 #include <sys/pool.h>
 #include <sys/kmem.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 #include <sys/bus.h>
 #include <sys/atomic.h>
 #include <sys/kernhist.h>
 
 #include <uvm/uvm.h>
 
 #include <arm/locore.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.319 2015/04/11 15:21:33 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.320 2015/04/13 16:19:42 matt Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
 int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
  */
 #define PDB_FOLLOW	0x0001
 #define PDB_INIT	0x0002
 #define PDB_ENTER	0x0004
@@ -3661,26 +3661,27 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 #endif
 #endif
 	struct vm_page_md *md = pg != NULL ? VM_PAGE_TO_MD(pg) : NULL;
 
 	UVMHIST_FUNC(__func__);
 
 	if (pmap_initialized) {
 		UVMHIST_CALLED(maphist);
 		UVMHIST_LOG(maphist, " (va=%#x, pa=%#x, prot=%#x, flags=%#x",
 		    va, pa, prot, flags);
 	}
 
 	pmap_t kpm = pmap_kernel();
+	pmap_acquire_pmap_lock(kpm);
 	struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
 	const size_t l1slot __diagused = l1pte_index(va);
 	KASSERTMSG(l2b != NULL,
 	    "va %#lx pa %#lx prot %d maxkvaddr %#lx: l2 %p l2b %p kva %p",
 	    va, pa, prot, pmap_curmaxkvaddr, kpm->pm_l2[L2_IDX(l1slot)],
 	    kpm->pm_l2[L2_IDX(l1slot)]
 	    ? &kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)]
 	    : NULL,
 	    kpm->pm_l2[L2_IDX(l1slot)]
 	    ? kpm->pm_l2[L2_IDX(l1slot)]->l2_bucket[L2_BUCKET(l1slot)].l2b_kva
 	    : NULL);
 	KASSERT(l2b->l2b_kva != NULL);
 
@@ -3708,26 +3709,27 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 #endif
 		}
 #endif
 		if (l2pte_valid_p(opte)) {
 			l2pte_reset(ptep);
 			PTE_SYNC(ptep);
 #ifdef PMAP_CACHE_VIVT
 			cpu_dcache_wbinv_range(va, PAGE_SIZE);
 #endif
 			cpu_tlb_flushD_SE(va);
 			cpu_cpwait();
 		}
 	}
+	pmap_release_pmap_lock(kpm);
 
 	pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
 	    | ((flags & PMAP_NOCACHE)
 		? 0
 		: ((flags & PMAP_PTE)
 		    ? pte_l2_s_cache_mode_pt : pte_l2_s_cache_mode));
 #ifdef ARM_MMU_EXTENDED
 	if (prot & VM_PROT_EXECUTE)
 		npte &= ~L2_XS_XN;
 #endif
 	l2pte_set(ptep, npte, 0);
 	PTE_SYNC(ptep);
 
@@ -3804,26 +3806,28 @@ pmap_kremove(vaddr_t va, vsize_t len)
 {
 #ifdef UVMHIST
 	u_int total_mappings = 0;
 #endif
 
 	PMAPCOUNT(kenter_unmappings);
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	UVMHIST_LOG(maphist, " (va=%#x, len=%#x)", va, len, 0, 0);
 
 	const vaddr_t eva = va + len;
 
+	pmap_acquire_pmap_lock(pmap_kernel());
+
 	while (va < eva) {
 		vaddr_t next_bucket = L2_NEXT_BUCKET_VA(va);
 		if (next_bucket > eva)
 			next_bucket = eva;
 
 		pmap_t kpm = pmap_kernel();
 		struct l2_bucket * const l2b = pmap_get_l2_bucket(kpm, va);
 		KDASSERT(l2b != NULL);
 
 		pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
 		pt_entry_t *ptep = sptep;
 		u_int mappings = 0;
 
@@ -3865,26 +3869,27 @@ pmap_kremove(vaddr_t va, vsize_t len)
 				mappings += PAGE_SIZE / L2_S_SIZE;
 			}
 			va += PAGE_SIZE;
 			ptep += PAGE_SIZE / L2_S_SIZE;
 		}
 		KDASSERTMSG(mappings <= l2b->l2b_occupancy, "%u %u",
 		    mappings, l2b->l2b_occupancy);
 		l2b->l2b_occupancy -= mappings;
 		//PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
 #ifdef UVMHIST
 		total_mappings += mappings;
 #endif
 	}
+	pmap_release_pmap_lock(pmap_kernel());
 	cpu_cpwait();
 	UVMHIST_LOG(maphist, " <--- done (%u mappings removed)",
 	    total_mappings, 0, 0, 0);
 }
 
 bool
 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
 {
 	struct l2_dtable *l2;
 	pd_entry_t *pdep, pde;
 	pt_entry_t *ptep, pte;
 	paddr_t pa;
 	u_int l1slot;