Wed Jun 12 07:13:18 2013 UTC
If the vector_page is not ARM_VECTORS_{LOW,HIGH}, assume it's in kernel
text and don't do anything special to map it.


(matt)
diff -r1.255 -r1.256 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.255 -r1.256 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2013/05/11 10:15:43 1.255
+++ src/sys/arch/arm/arm32/pmap.c 2013/06/12 07:13:18 1.256
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.255 2013/05/11 10:15:43 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $	*/
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
  * All rights reserved.
  *
  * Written by Steve C. Woodford for Wasabi Systems, Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
@@ -152,28 +152,28 @@
  *   - There are some unresolved issues for MP systems:
  *
  *     o The L1 metadata needs a lock, or more specifically, some places
  *       need to acquire an exclusive lock when modifying L1 translation
  *       table entries.
  *
  *     o When one cpu modifies an L1 entry, and that L1 table is also
  *       being used by another cpu, then the latter will need to be told
  *       that a tlb invalidation may be necessary. (But only if the old
  *       domain number in the L1 entry being over-written is currently
  *       the active domain on that cpu). I guess there are lots more tlb
  *       shootdown issues too...
  *
- *     o If the vector_page is at 0x00000000 instead of 0xffff0000, then
- *       MP systems will lose big-time because of the MMU domain hack.
+ *     o If the vector_page is at 0x00000000 instead of in kernel VA space,
+ *       then MP systems will lose big-time because of the MMU domain hack.
  *       The only way this can be solved (apart from moving the vector
  *       page to 0xffff0000) is to reserve the first 1MB of user address
  *       space for kernel use only. This would require re-linking all
  *       applications so that the text section starts above this 1MB
  *       boundary.
  *
  *     o Tracking which VM space is resident in the cache/tlb has not yet
  *       been implemented for MP systems.
  *
  *     o Finally, there is a pathological condition where two cpus running
  *       two separate processes (not lwps) which happen to share an L1
  *       can get into a fight over one or more L1 entries. This will result
  *       in a significant slow-down if both processes are in tight loops.
@@ -202,27 +202,27 @@
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
 
 #include <uvm/uvm.h>
 
 #include <sys/bus.h>
 #include <machine/pmap.h>
 #include <machine/pcb.h>
 #include <machine/param.h>
 #include <arm/cpuconf.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.255 2013/05/11 10:15:43 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
 /* XXX need to get rid of all refs to this */
 int pmap_debug_level = 0;
 
 /*
  * for switching to potentially finer grained debugging
  */
 #define PDB_FOLLOW	0x0001
 #define PDB_INIT	0x0002
 #define PDB_ENTER	0x0004
 #define PDB_REMOVE	0x0008
@@ -5125,26 +5125,37 @@ out:
 /************************ Utility routines ****************************/
 
 /*
  * vector_page_setprot:
  *
  *	Manipulate the protection of the vector page.
  */
 void
 vector_page_setprot(int prot)
 {
 	struct l2_bucket *l2b;
 	pt_entry_t *ptep;
 
+#if defined(CPU_ARMV7) || defined(CPU_ARM11)
+	/*
+	 * If we are using VBAR to use the vectors in the kernel, then it's
+	 * already mapped in the kernel text so no need to anything here.
+	 */
+	if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) {
+		KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);
+		return;
+	}
+#endif
+
 	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
 	KDASSERT(l2b != NULL);
 
 	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
 
 	*ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
 	PTE_SYNC(ptep);
 	cpu_tlb_flushD_SE(vector_page);
 	cpu_cpwait();
 }
 
 /*
  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
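
For quick reference, the behavioural change above reduces to the guard sketched below. This is a minimal annotated sketch, not a drop-in replacement: it reuses the kernel-internal names that appear in the diff (vector_page, ARM_VECTORS_LOW, ARM_VECTORS_HIGH, armreg_pfr1_read(), ARM_PFR1_SEC_MASK), which are only visible inside the arm32 kernel sources.

#if defined(CPU_ARMV7) || defined(CPU_ARM11)
	/*
	 * vector_page is neither 0x00000000 (ARM_VECTORS_LOW) nor
	 * 0xffff0000 (ARM_VECTORS_HIGH), so the vectors are assumed to sit
	 * in kernel text and be reached through VBAR.  VBAR is provided by
	 * the Security Extensions, so sanity-check ID_PFR1 and skip the
	 * vector-page PTE manipulation that the rest of the function does.
	 */
	if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) {
		KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);
		return;
	}
#endif

In other words, on ARMv7/ARM11 kernels that relocate the vectors with VBAR there is no separate vector-page mapping to adjust, which is exactly what the commit message states.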