Thu Dec 3 09:27:00 2009 UTC
Pull up the following revision(s) (requested by scw in ticket #1168):
	sys/arch/arm/arm32/pmap.c:	revision 1.203

Work around a possible process-exit corner case that can leave stale
data in the cache after a context switch. Addresses kern/41058.


(sborrill)
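
In effect, the pulled-up change widens pmap_activate()'s cache-flush
condition: instead of flushing only when switching between two distinct,
potentially cache-live user pmaps, it now flushes whenever any recent user
pmap exists. The following is a minimal userland sketch of the before/after
predicates; struct pmap, the cs_cache field, and pmap_kernel() here are
illustrative stand-ins, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's pmap definitions. */
struct pmap {
	int cs_cache;	/* non-zero: pmap potentially live in the cache */
};
typedef struct pmap *pmap_t;

static struct pmap kernel_pmap_store;
#define pmap_kernel()	(&kernel_pmap_store)

/*
 * Old behaviour: flush only when switching to a different user pmap
 * while the most recent user pmap may still be live in the cache.
 */
static bool
must_flush_old(pmap_t npm, pmap_t rpm)
{
	return npm != pmap_kernel() && rpm != NULL && npm != rpm &&
	    rpm->cs_cache != 0;
}

/*
 * New behaviour: flush whenever a recent user pmap exists at all,
 * side-stepping the exit-time corner case from kern/41058.
 */
static bool
must_flush_new(pmap_t rpm)
{
	return rpm != NULL;
}

int
main(void)
{
	struct pmap p = { .cs_cache = 1 };

	/* Re-activating the same pmap: the old code skipped the flush. */
	printf("same pmap: old=%d new=%d\n",
	    must_flush_old(&p, &p), must_flush_new(&p));

	/* Switching to the kernel pmap: the old code skipped it too. */
	printf("to kernel: old=%d new=%d\n",
	    must_flush_old(pmap_kernel(), &p), must_flush_new(&p));
	return 0;
}

The two cases the old predicate skipped (re-activating the same pmap, and
switching to the kernel pmap) are exactly where the workaround now forces a
write-back/invalidate, at the cost of some redundant flushes.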
diff -r1.187 -r1.187.4.1 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2008/09/28 21:27:11 1.187
+++ src/sys/arch/arm/arm32/pmap.c 2009/12/03 09:26:59 1.187.4.1
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.187 2008/09/28 21:27:11 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.187.4.1 2009/12/03 09:26:59 sborrill Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -202,27 +202,27 @@
 #include <sys/pool.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/bus.h>
 #include <machine/pmap.h>
 #include <machine/pcb.h>
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187 2008/09/28 21:27:11 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187.4.1 2009/12/03 09:26:59 sborrill Exp $");
 
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
 int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
  */
 #define PDB_FOLLOW	0x0001
 #define PDB_INIT	0x0002
 #define PDB_ENTER	0x0004
 #define PDB_REMOVE	0x0008
@@ -3635,27 +3635,26 @@ pmap_icache_sync_range(pmap_t pm, vaddr_
 	pmap_release_pmap_lock(pm);
 	PMAP_MAP_TO_HEAD_UNLOCK();
 }
 
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
 
 	NPDEBUG(PDB_PROTECT,
 	    printf("pmap_page_protect: pg %p (0x%08lx), prot 0x%x\n",
 	    pg, VM_PAGE_TO_PHYS(pg), prot));
 
 	switch(prot) {
-	return;
 	case VM_PROT_READ|VM_PROT_WRITE:
 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
 		pmap_clearbit(pg, PVF_EXEC);
 		break;
 #endif
 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
 		break;
 
 	case VM_PROT_READ:
 #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
 		pmap_clearbit(pg, PVF_WRITE|PVF_EXEC);
 		break;
 #endif
@@ -4066,33 +4065,52 @@ pmap_activate(struct lwp *l)
 	} else
 		opm = NULL;
 
 	PMAPCOUNT(activations);
 	block_userspace_access = 1;
 
 	/*
 	 * If switching to a user vmspace which is different to the
 	 * most recent one, and the most recent one is potentially
 	 * live in the cache, we must write-back and invalidate the
 	 * entire cache.
 	 */
 	rpm = pmap_recent_user;
+
+/*
+ * XXXSCW: There's a corner case here which can leave turds in the cache as
+ * reported in kern/41058. They're probably left over during tear-down and
+ * switching away from an exiting process. Until the root cause is identified
+ * and fixed, zap the cache when switching pmaps. This will result in a few
+ * unnecessary cache flushes, but that's better than silently corrupting data.
+ */
+#if 0
 	if (npm != pmap_kernel() && rpm && npm != rpm &&
 	    rpm->pm_cstate.cs_cache) {
 		rpm->pm_cstate.cs_cache = 0;
 #ifdef PMAP_CACHE_VIVT
 		cpu_idcache_wbinv_all();
 #endif
 	}
+#else
+	if (rpm) {
+		rpm->pm_cstate.cs_cache = 0;
+		if (npm == pmap_kernel())
+			pmap_recent_user = NULL;
+#ifdef PMAP_CACHE_VIVT
+		cpu_idcache_wbinv_all();
+#endif
+	}
+#endif
 
 	/* No interrupts while we frob the TTB/DACR */
 	oldirqstate = disable_interrupts(IF32_bits);
 
 	/*
 	 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
 	 * entry corresponding to 'vector_page' in the incoming L1 table
 	 * before switching to it otherwise subsequent interrupts/exceptions
 	 * (including domain faults!) will jump into hyperspace.
 	 */
 	if (npm->pm_pl1vec != NULL) {
 		cpu_tlb_flushID_SE((u_int)vector_page);
 		cpu_cpwait();
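
Note: the cpu_idcache_wbinv_all() calls above are conditional on
PMAP_CACHE_VIVT because a virtually-indexed, virtually-tagged cache tags
its lines by virtual address, so lines left behind by an exiting address
space can be mistaken for valid data at the same virtual addresses in the
next one. Physically-tagged caches do not suffer this particular form of
stale data across a context switch.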