Fri Jul 25 15:09:43 2014 UTC
PTE_SYNC_RANGE a newly allocated L1 page for ARM_MMU_EXTENDED


(matt)
diff -r1.294 -r1.295 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.294 -r1.295 src/sys/arch/arm/arm32/pmap.c
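Background for the change below: on ARM configurations where the hardware table walker does not snoop the data cache, page-table entries written through a cacheable mapping must be cleaned out to memory before the MMU can observe them, and PTE_SYNC_RANGE is the arm32 pmap's macro for doing that over a run of PTEs. The following is a minimal sketch of the idea only, assuming the ARM cpu_dcache_wb_range() cache primitive; the helper name sync_new_ptes() is hypothetical and this is not the actual PTE_SYNC_RANGE definition.

#include <sys/types.h>
#include <arm/arm32/pte.h>	/* pt_entry_t */
#include <arm/cpufunc.h>	/* cpu_dcache_wb_range() */

/*
 * Illustrative sketch: clean (write back) the D-cache lines covering a
 * freshly written run of page-table entries so a non-snooping table
 * walker reads current data rather than stale memory behind dirty
 * cache lines.
 */
static inline void
sync_new_ptes(pt_entry_t *ptep, size_t npte)
{
	cpu_dcache_wb_range((vaddr_t)ptep, npte * sizeof(*ptep));
}

In the new code the call covers the whole freshly zeroed L1 page, PTE_SYNC_RANGE(pm->pm_l1, PAGE_SIZE / sizeof(pt_entry_t)), so no entry of the new translation table is left dirty in the cache when the table is first walked.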

--- src/sys/arch/arm/arm32/pmap.c 2014/06/15 04:04:01 1.294
+++ src/sys/arch/arm/arm32/pmap.c 2014/07/25 15:09:43 1.295
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.294 2014/06/15 04:04:01 ozaki-r Exp $ */
+/* $NetBSD: pmap.c,v 1.295 2014/07/25 15:09:43 matt Exp $ */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -206,27 +206,27 @@
 #include <sys/kmem.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 #include <sys/bus.h>
 #include <sys/atomic.h>
 #include <sys/kernhist.h>
 
 #include <uvm/uvm.h>
 
 #include <arm/locore.h>
 //#include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.294 2014/06/15 04:04:01 ozaki-r Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.295 2014/07/25 15:09:43 matt Exp $");
 
 //#define PMAP_DEBUG
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
 int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
  */
 #define PDB_FOLLOW 0x0001
 #define PDB_INIT 0x0002
 #define PDB_ENTER 0x0004
@@ -1294,38 +1294,39 @@ pmap_modify_pv(struct vm_page_md *md, pa
 static void
 pmap_alloc_l1(pmap_t pm)
 {
 #ifdef ARM_MMU_EXTENDED
 #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
 #ifdef PMAP_NEED_ALLOC_POOLPAGE
 	struct vm_page *pg = arm_pmap_alloc_poolpage(UVM_PGA_ZERO);
 #else
 	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
 #endif
 	bool ok __diagused;
 	KASSERT(pg != NULL);
 	pm->pm_l1_pa = VM_PAGE_TO_PHYS(pg);
-	vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0xdeadbeef);
+	vaddr_t va = pmap_direct_mapped_phys(pm->pm_l1_pa, &ok, 0);
 	KASSERT(ok);
 	KASSERT(va >= KERNEL_BASE);
 
 #else
 	KASSERTMSG(kernel_map != NULL, "pm %p", pm);
 	vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 	    UVM_KMF_WIRED|UVM_KMF_ZERO);
 	KASSERT(va);
 	pmap_extract(pmap_kernel(), va, &pm->pm_l1_pa);
 #endif
 	pm->pm_l1 = (pd_entry_t *)va;
+	PTE_SYNC_RANGE(pm->pm_l1, PAGE_SIZE / sizeof(pt_entry_t));
 #else
 	struct l1_ttable *l1;
 	uint8_t domain;
 
 	/*
 	 * Remove the L1 at the head of the LRU list
 	 */
 	mutex_spin_enter(&l1_lru_lock);
 	l1 = TAILQ_FIRST(&l1_lru_list);
 	KDASSERT(l1 != NULL);
 	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
 
 	/*