Wed Jun 12 21:34:12 2013 UTC
Add an ARM_HAS_VBAR option which forces the use of the VBAR register.  This
allows much of the code dealing with vector_page mappings to be eliminated.
On a BEAGLEBONE kernel, this saves 8KB of text and instructions that never
have to be executed.  (The PJ4B has VBAR but doesn't implement the security
extensions it is part of, so a method was needed to allow it to use VBAR
without relying on the default test for the security extensions.)


(matt)
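
As a reader's sketch of the mechanism (hedged: vbar_vector_init() is an
illustrative name, but page0rel, armreg_vbar_write(), KASSERT() and
cpu_control() are all taken from the diffs below), forcing VBAR boils down
to pointing the vector base at the relative-branch table already sitting in
the kernel's .text segment, instead of mapping and populating a vector page:

    extern const uint32_t page0rel[];   /* branch table in .text (vectors.S) */

    static void
    vbar_vector_init(void)              /* hypothetical wrapper name */
    {
        vaddr_t va = (vaddr_t)page0rel;

        KASSERT((va & 0x1f) == 0);      /* VBAR requires 32-byte alignment */
        armreg_vbar_write(va);          /* exceptions now vector into .text */
        cpu_control(CPU_CONTROL_VECRELOC, 0);  /* SCTLR V=0 so VBAR is used */
    }
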
diff -r1.6 -r1.7 src/sys/arch/arm/arm/fiq.c
diff -r1.6 -r1.7 src/sys/arch/arm/arm/vectors.S
diff -r1.19 -r1.20 src/sys/arch/arm/arm32/arm32_kvminit.c
diff -r1.93 -r1.94 src/sys/arch/arm/arm32/arm32_machdep.c
diff -r1.256 -r1.257 src/sys/arch/arm/arm32/pmap.c
diff -r1.118 -r1.119 src/sys/arch/arm/conf/files.arm
diff -r1.119 -r1.120 src/sys/arch/arm/include/arm32/pmap.h

cvs diff -r1.6 -r1.7 src/sys/arch/arm/arm/fiq.c

--- src/sys/arch/arm/arm/fiq.c 2008/11/19 06:29:48 1.6
+++ src/sys/arch/arm/arm/fiq.c 2013/06/12 21:34:12 1.7
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $ */ 1/* $NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -26,52 +26,53 @@ @@ -26,52 +26,53 @@
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43 43
44#include <arm/cpufunc.h> 44#include <arm/cpufunc.h>
45#include <arm/fiq.h> 45#include <arm/fiq.h>
46 46
47#ifdef __PROG32 47#ifdef __PROG32
48#include <uvm/uvm.h> 48#include <uvm/uvm.h>
49#endif 49#endif
50 50
51TAILQ_HEAD(, fiqhandler) fiqhandler_stack = 51TAILQ_HEAD(, fiqhandler) fiqhandler_stack =
52 TAILQ_HEAD_INITIALIZER(fiqhandler_stack); 52 TAILQ_HEAD_INITIALIZER(fiqhandler_stack);
53 53
54extern char fiqvector[]; 54extern char fiqvector[];
55extern char fiq_nullhandler[], fiq_nullhandler_end[]; 55extern char fiq_nullhandler[], fiq_nullhandler_end[];
56 56
57#ifdef __PROG32 57#ifdef __PROG32
58#define IRQ_BIT I32_bit 58#define IRQ_BIT I32_bit
59#define FIQ_BIT F32_bit 59#define FIQ_BIT F32_bit
60#else 60#else
61#define IRQ_BIT R15_IRQ_DISABLE 61#define IRQ_BIT R15_IRQ_DISABLE
62#define FIQ_BIT R15_FIQ_DISABLE 62#define FIQ_BIT R15_FIQ_DISABLE
63#endif /* __PROG32 */ 63#endif /* __PROG32 */
64 64
 65#ifndef ARM_HAS_VBAR
65/* 66/*
66 * fiq_installhandler: 67 * fiq_installhandler:
67 * 68 *
68 * Actually install the FIQ handler down at the FIQ vector. 69 * Actually install the FIQ handler down at the FIQ vector.
69 * 70 *
70 * Note: If the FIQ is invoked via an extra layer of 71 * Note: If the FIQ is invoked via an extra layer of
71 * indirection, the actual FIQ code store lives in the 72 * indirection, the actual FIQ code store lives in the
72 * data segment, so there is no need to manipulate 73 * data segment, so there is no need to manipulate
73 * the vector page's protection. 74 * the vector page's protection.
74 */ 75 */
75static void 76static void
76fiq_installhandler(void *func, size_t size) 77fiq_installhandler(void *func, size_t size)
77{ 78{
@@ -166,13 +167,14 @@ fiq_release(struct fiqhandler *fh) @@ -166,13 +167,14 @@ fiq_release(struct fiqhandler *fh)
166 167
167 if (TAILQ_FIRST(&fiqhandler_stack) == NULL) { 168 if (TAILQ_FIRST(&fiqhandler_stack) == NULL) {
168 /* Copy the NULL handler back down into the vector. */ 169 /* Copy the NULL handler back down into the vector. */
169 fiq_installhandler(fiq_nullhandler, 170 fiq_installhandler(fiq_nullhandler,
170 (size_t)(fiq_nullhandler_end - fiq_nullhandler)); 171 (size_t)(fiq_nullhandler_end - fiq_nullhandler));
171 172
172 /* Make sure FIQs are disabled when we return. */ 173 /* Make sure FIQs are disabled when we return. */
173 oldirqstate |= FIQ_BIT; 174 oldirqstate |= FIQ_BIT;
174 } 175 }
175 176
176 oldirqstate &= ~FIQ_BIT; 177 oldirqstate &= ~FIQ_BIT;
177 restore_interrupts(oldirqstate); 178 restore_interrupts(oldirqstate);
178} 179}
 180#endif /* !ARM_HAS_VBAR */
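
Context for the #ifndef ARM_HAS_VBAR bracket above: fiq_installhandler()
exists to copy FIQ handler code into the low vector page.  In the direct
(!__ARM_FIQ_INDIRECT) case it is roughly the following sketch, and this is
exactly the work that has no meaning once VBAR makes the vector page go away:

    static void
    fiq_installhandler(void *func, size_t size)
    {
        /* make the vector page writable and copy the handler in */
        vector_page_setprot(VM_PROT_READ | VM_PROT_WRITE);
        memcpy(fiqvector, func, size);
        vector_page_setprot(VM_PROT_READ);

        /* the new instructions must be visible to instruction fetch */
        cpu_icache_sync_range((vaddr_t)fiqvector, size);
    }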

cvs diff -r1.6 -r1.7 src/sys/arch/arm/arm/vectors.S

--- src/sys/arch/arm/arm/vectors.S 2013/06/12 15:10:13 1.6
+++ src/sys/arch/arm/arm/vectors.S 2013/06/12 21:34:12 1.7
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: vectors.S,v 1.6 2013/06/12 15:10:13 matt Exp $ */ 1/* $NetBSD: vectors.S,v 1.7 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (C) 1994-1997 Mark Brinicombe 4 * Copyright (C) 1994-1997 Mark Brinicombe
5 * Copyright (C) 1994 Brini 5 * Copyright (C) 1994 Brini
6 * All rights reserved.  6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
@@ -23,82 +23,91 @@ @@ -23,82 +23,91 @@
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include "assym.h" 34#include "assym.h"
35#include "opt_cputypes.h" 35#include "opt_cputypes.h"
 36#include "opt_cpuoptions.h"
36#include <machine/asm.h> 37#include <machine/asm.h>
37 38
38/* 39/*
39 * These are the exception vectors copied down to page 0. 40 * These are the exception vectors copied down to page 0.
40 * 41 *
41 * Note that FIQs are special; rather than using a level of 42 * Note that FIQs are special; rather than using a level of
42 * indirection, we actually copy the FIQ code down into the 43 * indirection, we actually copy the FIQ code down into the
43 * vector page. 44 * vector page.
44 */ 45 */
45 46
46 .text 47 .text
47 .align 0 
48 .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end) 
49 .global _C_LABEL(fiqvector) 48 .global _C_LABEL(fiqvector)
50 49
51#if defined(CPU_ARMV7) || defined(CPU_ARM11) 50#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
52 /* 51 /*
53 * ARMv[67] processors with the Security Extension have the VBAR 52 * ARMv[67] processors with the Security Extension have the VBAR
54 * which redirects the low vector to any 32-byte aligned address.  53 * which redirects the low vector to any 32-byte aligned address.
55 * Since we are in kernel, we can just do a relative branch to the 54 * Since we are in kernel, we can just do a relative branch to the
56 * exception code and avoid the intermediate load. 55 * exception code and avoid the intermediate load.
57 */ 56 */
58 .global _C_LABEL(page0rel) 57 .global _C_LABEL(page0rel)
59 .p2align 5 58 .p2align 5
60_C_LABEL(page0rel): 59_C_LABEL(page0rel):
61 b reset_entry 60 b reset_entry
62 b undefined_entry 61 b undefined_entry
63 b swi_entry 62 b swi_entry
64 b prefetch_abort_entry 63 b prefetch_abort_entry
65 b data_abort_entry 64 b data_abort_entry
66 b address_exception_entry 65 b address_exception_entry
67 b irq_entry 66 b irq_entry
68#ifdef __ARM_FIQ_INDIRECT 67#ifdef __ARM_FIQ_INDIRECT
69 b _C_LABEL(fiqvector) 68 b _C_LABEL(fiqvector)
70#else 69#elif !defined(ARM_HAS_VBAR)
71 b .Lfiqvector 70 b .Lfiqvector
72#endif 71#endif
73#endif 72#endif /* CPU_ARMV7 || CPU_ARM11 || ARM_HAS_VBAR */
74 73
 74#ifndef ARM_HAS_VBAR
 75 .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
 76 .align 0
75_C_LABEL(page0): 77_C_LABEL(page0):
76 ldr pc, .Lreset_target 78 ldr pc, .Lreset_target
77 ldr pc, .Lundefined_target 79 ldr pc, .Lundefined_target
78 ldr pc, .Lswi_target 80 ldr pc, .Lswi_target
79 ldr pc, .Lprefetch_abort_target 81 ldr pc, .Lprefetch_abort_target
80 ldr pc, .Ldata_abort_target 82 ldr pc, .Ldata_abort_target
81 ldr pc, .Laddress_exception_target 83 ldr pc, .Laddress_exception_target
82 ldr pc, .Lirq_target 84 ldr pc, .Lirq_target
83#ifdef __ARM_FIQ_INDIRECT 85#ifdef __ARM_FIQ_INDIRECT
84 ldr pc, .Lfiq_target 86 ldr pc, .Lfiq_target
85#else 87#endif
 88#endif /* !ARM_HAS_VBAR */
 89#ifndef __ARM_FIQ_INDIRECT
86.Lfiqvector: 90.Lfiqvector:
 91#ifdef ARM_HAS_VBAR
 92 .set _C_LABEL(fiqvector), . - _C_LABEL(page0rel)
 93#else
87 .set _C_LABEL(fiqvector), . - _C_LABEL(page0) 94 .set _C_LABEL(fiqvector), . - _C_LABEL(page0)
 95#endif
88 subs pc, lr, #4 96 subs pc, lr, #4
89 .org .Lfiqvector + 0x100 97 .org .Lfiqvector + 0x100
90#endif 98#endif
91 99
 100#ifndef ARM_HAS_VBAR
92_C_LABEL(page0_data): 101_C_LABEL(page0_data):
93.Lreset_target: 102.Lreset_target:
94 .word reset_entry 103 .word reset_entry
95 104
96.Lundefined_target: 105.Lundefined_target:
97 .word undefined_entry 106 .word undefined_entry
98 107
99.Lswi_target: 108.Lswi_target:
100 .word swi_entry 109 .word swi_entry
101 110
102.Lprefetch_abort_target: 111.Lprefetch_abort_target:
103 .word prefetch_abort_entry 112 .word prefetch_abort_entry
104 113
@@ -108,21 +117,22 @@ _C_LABEL(page0_data): @@ -108,21 +117,22 @@ _C_LABEL(page0_data):
108.Laddress_exception_target: 117.Laddress_exception_target:
109 .word address_exception_entry 118 .word address_exception_entry
110 119
111.Lirq_target: 120.Lirq_target:
112 .word irq_entry 121 .word irq_entry
113 122
114#ifdef __ARM_FIQ_INDIRECT 123#ifdef __ARM_FIQ_INDIRECT
115.Lfiq_target: 124.Lfiq_target:
116 .word _C_LABEL(fiqvector) 125 .word _C_LABEL(fiqvector)
117#else 126#else
118 .word 0 /* pad it out */ 127 .word 0 /* pad it out */
119#endif 128#endif
120_C_LABEL(page0_end): 129_C_LABEL(page0_end):
 130#endif /* ARM_HAS_VBAR */
121 131
122#ifdef __ARM_FIQ_INDIRECT 132#ifdef __ARM_FIQ_INDIRECT
123 .data 133 .data
124 .align 0 134 .align 0
125_C_LABEL(fiqvector): 135_C_LABEL(fiqvector):
126 subs pc, lr, #4 136 subs pc, lr, #4
127 .org _C_LABEL(fiqvector) + 0x100 137 .org _C_LABEL(fiqvector) + 0x100
128#endif 138#endif
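
The split above is easier to follow with the install step in mind: page0 is
a table of "ldr pc, <literal>" instructions whose targets live in page0_data,
so taking over a vector means copying one instruction word and one literal
word into the mapped vector page.  A sketch of that loop, based on the
arm32_vector_init() hunk in the next diff (the two copy statements fall
outside the hunk shown, so they are an assumption):

    for (vec = 0; vec < ARM_NVEC; vec++) {
        if ((which & (1 << vec)) == 0) {
            /* Don't want to take over this vector. */
            continue;
        }
        vectors[vec] = page0[vec];                /* the ldr pc insn */
        vectors_data[vec] = page0_data[vec];      /* its literal target */
    }

page0rel needs none of this: its entries are plain relative branches that
work wherever the table was linked, which is why VBAR can use it in place.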

cvs diff -r1.19 -r1.20 src/sys/arch/arm/arm32/arm32_kvminit.c

--- src/sys/arch/arm/arm32/arm32_kvminit.c 2013/06/12 17:13:05 1.19
+++ src/sys/arch/arm/arm32/arm32_kvminit.c 2013/06/12 21:34:12 1.20
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $ */ 1/* $NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved. 4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
5 * Written by Hiroyuki Bessho for Genetec Corporation. 5 * Written by Hiroyuki Bessho for Genetec Corporation.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -112,27 +112,27 @@ @@ -112,27 +112,27 @@
112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT, 114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
121 * SUCH DAMAGE. 121 * SUCH DAMAGE.
122 */ 122 */
123 123
124#include <sys/cdefs.h> 124#include <sys/cdefs.h>
125__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $"); 125__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $");
126 126
127#include <sys/param.h> 127#include <sys/param.h>
128#include <sys/device.h> 128#include <sys/device.h>
129#include <sys/kernel.h> 129#include <sys/kernel.h>
130#include <sys/reboot.h> 130#include <sys/reboot.h>
131#include <sys/bus.h> 131#include <sys/bus.h>
132 132
133#include <dev/cons.h> 133#include <dev/cons.h>
134 134
135#include <uvm/uvm_extern.h> 135#include <uvm/uvm_extern.h>
136 136
137#include <arm/db_machdep.h> 137#include <arm/db_machdep.h>
138#include <arm/undefined.h> 138#include <arm/undefined.h>
@@ -362,27 +362,29 @@ valloc_pages(struct bootmem_info *bmi, p @@ -362,27 +362,29 @@ valloc_pages(struct bootmem_info *bmi, p
362 memset((void *)pv->pv_va, 0, nbytes); 362 memset((void *)pv->pv_va, 0, nbytes);
363} 363}
364 364
365void 365void
366arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase, 366arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
367 const struct pmap_devmap *devmap, bool mapallmem_p) 367 const struct pmap_devmap *devmap, bool mapallmem_p)
368{ 368{
369 struct bootmem_info * const bmi = &bootmem_info; 369 struct bootmem_info * const bmi = &bootmem_info;
370#ifdef MULTIPROCESSOR 370#ifdef MULTIPROCESSOR
371 const size_t cpu_num = arm_cpu_max + 1; 371 const size_t cpu_num = arm_cpu_max + 1;
372#else 372#else
373 const size_t cpu_num = 1; 373 const size_t cpu_num = 1;
374#endif 374#endif
375#if defined(CPU_ARMV7) || defined(CPU_ARM11) 375#ifdef ARM_HAS_VBAR
 376 const bool map_vectors_p = false;
 377#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
376 const bool map_vectors_p = vectors == ARM_VECTORS_LOW 378 const bool map_vectors_p = vectors == ARM_VECTORS_LOW
377 && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK); 379 && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK);
378#else 380#else
379 const bool map_vectors_p = true; 381 const bool map_vectors_p = true;
380#endif 382#endif
381 383
382#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 384#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
383 KASSERT(mapallmem_p); 385 KASSERT(mapallmem_p);
384#endif 386#endif
385 387
386 /* 388 /*
387 * Calculate the number of L2 pages needed for mapping the 389 * Calculate the number of L2 pages needed for mapping the
388 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors, 390 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors,
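
The three-way map_vectors_p choice above hinges on a single probe.  A hedged
helper showing it (the function name is invented; armreg_pfr1_read() and
ARM_PFR1_SEC_MASK are taken from the diff):

    static inline bool
    cpu_has_security_ext(void)      /* hypothetical name */
    {
        /* a nonzero Security Extension field in ID_PFR1 implies VBAR */
        return (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0;
    }

With ARM_HAS_VBAR the probe is moot: map_vectors_p is compile-time false and
the vector page is never mapped at all.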

cvs diff -r1.93 -r1.94 src/sys/arch/arm/arm32/arm32_machdep.c

--- src/sys/arch/arm/arm32/arm32_machdep.c 2013/06/12 17:13:05 1.93
+++ src/sys/arch/arm/arm32/arm32_machdep.c 2013/06/12 21:34:12 1.94
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $ */ 1/* $NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994-1998 Mark Brinicombe. 4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini. 5 * Copyright (c) 1994 Brini.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software written for Brini by Mark Brinicombe 8 * This code is derived from software written for Brini by Mark Brinicombe
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -32,27 +32,27 @@ @@ -32,27 +32,27 @@
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE. 36 * SUCH DAMAGE.
37 * 37 *
38 * Machine dependent functions for kernel setup 38 * Machine dependent functions for kernel setup
39 * 39 *
40 * Created : 17/09/94 40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons 41 * Updated : 18/04/01 updated for new wscons
42 */ 42 */
43 43
44#include <sys/cdefs.h> 44#include <sys/cdefs.h>
45__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $"); 45__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $");
46 46
47#include "opt_modular.h" 47#include "opt_modular.h"
48#include "opt_md.h" 48#include "opt_md.h"
49#include "opt_pmap_debug.h" 49#include "opt_pmap_debug.h"
50 50
51#include <sys/param.h> 51#include <sys/param.h>
52#include <sys/systm.h> 52#include <sys/systm.h>
53#include <sys/reboot.h> 53#include <sys/reboot.h>
54#include <sys/proc.h> 54#include <sys/proc.h>
55#include <sys/kauth.h> 55#include <sys/kauth.h>
56#include <sys/kernel.h> 56#include <sys/kernel.h>
57#include <sys/mbuf.h> 57#include <sys/mbuf.h>
58#include <sys/mount.h> 58#include <sys/mount.h>
@@ -120,45 +120,50 @@ extern void configure(void); @@ -120,45 +120,50 @@ extern void configure(void);
120 120
121/* 121/*
122 * arm32_vector_init: 122 * arm32_vector_init:
123 * 123 *
124 * Initialize the vector page, and select whether or not to 124 * Initialize the vector page, and select whether or not to
125 * relocate the vectors. 125 * relocate the vectors.
126 * 126 *
127 * NOTE: We expect the vector page to be mapped at its expected 127 * NOTE: We expect the vector page to be mapped at its expected
128 * destination. 128 * destination.
129 */ 129 */
130void 130void
131arm32_vector_init(vaddr_t va, int which) 131arm32_vector_init(vaddr_t va, int which)
132{ 132{
133#if defined(CPU_ARMV7) || defined(CPU_ARM11) 133#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
134 /* 134 /*
135 * If this processor has the security extension, don't bother 135 * If this processor has the security extension, don't bother
136 * to move/map the vector page. Simply point VBAR to the copy 136 * to move/map the vector page. Simply point VBAR to the copy
137 * that exists in the .text segment. 137 * that exists in the .text segment.
138 */ 138 */
 139#ifndef ARM_HAS_VBAR
139 if (va == ARM_VECTORS_LOW 140 if (va == ARM_VECTORS_LOW
140 && (armreg_pfr1_read() && ARM_PFR1_SEC_MASK) != 0) { 141 && (armreg_pfr1_read() && ARM_PFR1_SEC_MASK) != 0) {
 142#endif
141 extern const uint32_t page0rel[]; 143 extern const uint32_t page0rel[];
142 vector_page = (vaddr_t)page0rel; 144 vector_page = (vaddr_t)page0rel;
143 KASSERT((vector_page & 0x1f) == 0); 145 KASSERT((vector_page & 0x1f) == 0);
144 armreg_vbar_write(vector_page); 146 armreg_vbar_write(vector_page);
145#ifdef VERBOSE_INIT_ARM 147#ifdef VERBOSE_INIT_ARM
146 printf(" vbar=%p", page0rel); 148 printf(" vbar=%p", page0rel);
147#endif 149#endif
148 cpu_control(CPU_CONTROL_VECRELOC, 0); 150 cpu_control(CPU_CONTROL_VECRELOC, 0);
149 return; 151 return;
 152#ifndef ARM_HAS_VBAR
150 } 153 }
151#endif 154#endif
 155#endif
 156#ifndef ARM_HAS_VBAR
152 if (CPU_IS_PRIMARY(curcpu())) { 157 if (CPU_IS_PRIMARY(curcpu())) {
153 extern unsigned int page0[], page0_data[]; 158 extern unsigned int page0[], page0_data[];
154 unsigned int *vectors = (int *) va; 159 unsigned int *vectors = (int *) va;
155 unsigned int *vectors_data = vectors + (page0_data - page0); 160 unsigned int *vectors_data = vectors + (page0_data - page0);
156 int vec; 161 int vec;
157 162
158 /* 163 /*
159 * Loop through the vectors we're taking over, and copy the 164 * Loop through the vectors we're taking over, and copy the
160 * vector's insn and data word. 165 * vector's insn and data word.
161 */ 166 */
162 for (vec = 0; vec < ARM_NVEC; vec++) { 167 for (vec = 0; vec < ARM_NVEC; vec++) {
163 if ((which & (1 << vec)) == 0) { 168 if ((which & (1 << vec)) == 0) {
164 /* Don't want to take over this vector. */ 169 /* Don't want to take over this vector. */
@@ -183,26 +188,27 @@ arm32_vector_init(vaddr_t va, int which) @@ -183,26 +188,27 @@ arm32_vector_init(vaddr_t va, int which)
183 * cpu_setup()) because the vector page needs to be 188 * cpu_setup()) because the vector page needs to be
184 * accessible *before* cpu_startup() is called. 189 * accessible *before* cpu_startup() is called.
185 * Think ddb(9) ... 190 * Think ddb(9) ...
186 * 191 *
187 * NOTE: If the CPU control register is not readable, 192 * NOTE: If the CPU control register is not readable,
188 * this will totally fail! We'll just assume that 193 * this will totally fail! We'll just assume that
189 * any system that has high vector support has a 194 * any system that has high vector support has a
190 * readable CPU control register, for now. If we 195 * readable CPU control register, for now. If we
191 * ever encounter one that does not, we'll have to 196 * ever encounter one that does not, we'll have to
192 * rethink this. 197 * rethink this.
193 */ 198 */
194 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); 199 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
195 } 200 }
 201#endif
196} 202}
197 203
198/* 204/*
199 * Debug function just to park the CPU 205 * Debug function just to park the CPU
200 */ 206 */
201 207
202void 208void
203halt(void) 209halt(void)
204{ 210{
205 while (1) 211 while (1)
206 cpu_sleep(0); 212 cpu_sleep(0);
207} 213}
208 214
@@ -247,28 +253,30 @@ cpu_startup(void) @@ -247,28 +253,30 @@ cpu_startup(void)
247 vaddr_t minaddr; 253 vaddr_t minaddr;
248 vaddr_t maxaddr; 254 vaddr_t maxaddr;
249 u_int loop; 255 u_int loop;
250 char pbuf[9]; 256 char pbuf[9];
251 257
252 /* 258 /*
253 * Until we better locking, we have to live under the kernel lock. 259 * Until we better locking, we have to live under the kernel lock.
254 */ 260 */
255 //KERNEL_LOCK(1, NULL); 261 //KERNEL_LOCK(1, NULL);
256 262
257 /* Set the CPU control register */ 263 /* Set the CPU control register */
258 cpu_setup(boot_args); 264 cpu_setup(boot_args);
259 265
 266#ifndef ARM_HAS_VBAR
260 /* Lock down zero page */ 267 /* Lock down zero page */
261 vector_page_setprot(VM_PROT_READ); 268 vector_page_setprot(VM_PROT_READ);
 269#endif
262 270
263 /* 271 /*
264 * Give pmap a chance to set up a few more things now the vm 272 * Give pmap a chance to set up a few more things now the vm
265 * is initialised 273 * is initialised
266 */ 274 */
267 pmap_postinit(); 275 pmap_postinit();
268 276
269 /* 277 /*
270 * Initialize error message buffer (at end of core). 278 * Initialize error message buffer (at end of core).
271 */ 279 */
272 280
273 /* msgbufphys was setup during the secondary boot strap */ 281 /* msgbufphys was setup during the secondary boot strap */
274 for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop) 282 for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
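
One ordering detail worth noting in arm32_vector_init() above: VBAR is only
consulted while SCTLR.V (CPU_CONTROL_VECRELOC in NetBSD's naming) is clear,
because V=1 hardwires the legacy high-vector base 0xffff0000.  Hence the
pairing, as sketched from the hunk above:

    armreg_vbar_write(vector_page);        /* set the new vector base */
    cpu_control(CPU_CONTROL_VECRELOC, 0);  /* V=0: fetch vectors via VBAR */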

cvs diff -r1.256 -r1.257 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2013/06/12 07:13:18 1.256
+++ src/sys/arch/arm/arm32/pmap.c 2013/06/12 21:34:12 1.257
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $ */ 1/* $NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -202,27 +202,27 @@ @@ -202,27 +202,27 @@
202#include <sys/cdefs.h> 202#include <sys/cdefs.h>
203#include <sys/cpu.h> 203#include <sys/cpu.h>
204#include <sys/sysctl.h> 204#include <sys/sysctl.h>
205 205
206#include <uvm/uvm.h> 206#include <uvm/uvm.h>
207 207
208#include <sys/bus.h> 208#include <sys/bus.h>
209#include <machine/pmap.h> 209#include <machine/pmap.h>
210#include <machine/pcb.h> 210#include <machine/pcb.h>
211#include <machine/param.h> 211#include <machine/param.h>
212#include <arm/cpuconf.h> 212#include <arm/cpuconf.h>
213#include <arm/arm32/katelib.h> 213#include <arm/arm32/katelib.h>
214 214
215__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $"); 215__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $");
216 216
217#ifdef PMAP_DEBUG 217#ifdef PMAP_DEBUG
218 218
219/* XXX need to get rid of all refs to this */ 219/* XXX need to get rid of all refs to this */
220int pmap_debug_level = 0; 220int pmap_debug_level = 0;
221 221
222/* 222/*
223 * for switching to potentially finer grained debugging 223 * for switching to potentially finer grained debugging
224 */ 224 */
225#define PDB_FOLLOW 0x0001 225#define PDB_FOLLOW 0x0001
226#define PDB_INIT 0x0002 226#define PDB_INIT 0x0002
227#define PDB_ENTER 0x0004 227#define PDB_ENTER 0x0004
228#define PDB_REMOVE 0x0008 228#define PDB_REMOVE 0x0008
@@ -1544,43 +1544,45 @@ pmap_l2dtable_ctor(void *arg, void *v, i @@ -1544,43 +1544,45 @@ pmap_l2dtable_ctor(void *arg, void *v, i
1544} 1544}
1545 1545
1546static int 1546static int
1547pmap_pmap_ctor(void *arg, void *v, int flags) 1547pmap_pmap_ctor(void *arg, void *v, int flags)
1548{ 1548{
1549 1549
1550 memset(v, 0, sizeof(struct pmap)); 1550 memset(v, 0, sizeof(struct pmap));
1551 return (0); 1551 return (0);
1552} 1552}
1553 1553
1554static void 1554static void
1555pmap_pinit(pmap_t pm) 1555pmap_pinit(pmap_t pm)
1556{ 1556{
 1557#ifndef ARM_HAS_VBAR
1557 struct l2_bucket *l2b; 1558 struct l2_bucket *l2b;
1558 1559
1559 if (vector_page < KERNEL_BASE) { 1560 if (vector_page < KERNEL_BASE) {
1560 /* 1561 /*
1561 * Map the vector page. 1562 * Map the vector page.
1562 */ 1563 */
1563 pmap_enter(pm, vector_page, systempage.pv_pa, 1564 pmap_enter(pm, vector_page, systempage.pv_pa,
1564 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); 1565 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1565 pmap_update(pm); 1566 pmap_update(pm);
1566 1567
1567 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 1568 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
1568 l2b = pmap_get_l2_bucket(pm, vector_page); 1569 l2b = pmap_get_l2_bucket(pm, vector_page);
1569 KDASSERT(l2b != NULL); 1570 KDASSERT(l2b != NULL);
1570 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 1571 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
1571 L1_C_DOM(pm->pm_domain); 1572 L1_C_DOM(pm->pm_domain);
1572 } else 1573 } else
1573 pm->pm_pl1vec = NULL; 1574 pm->pm_pl1vec = NULL;
 1575#endif
1574} 1576}
1575 1577
1576#ifdef PMAP_CACHE_VIVT 1578#ifdef PMAP_CACHE_VIVT
1577/* 1579/*
1578 * Since we have a virtually indexed cache, we may need to inhibit caching if 1580 * Since we have a virtually indexed cache, we may need to inhibit caching if
1579 * there is more than one mapping and at least one of them is writable. 1581 * there is more than one mapping and at least one of them is writable.
1580 * Since we purge the cache on every context switch, we only need to check for 1582 * Since we purge the cache on every context switch, we only need to check for
1581 * other mappings within the same pmap, or kernel_pmap. 1583 * other mappings within the same pmap, or kernel_pmap.
1582 * This function is also called when a page is unmapped, to possibly reenable 1584 * This function is also called when a page is unmapped, to possibly reenable
1583 * caching on any remaining mappings. 1585 * caching on any remaining mappings.
1584 * 1586 *
1585 * The code implements the following logic, where: 1587 * The code implements the following logic, where:
1586 * 1588 *
@@ -2813,26 +2815,31 @@ arm32_mmap_flags(paddr_t pa) @@ -2813,26 +2815,31 @@ arm32_mmap_flags(paddr_t pa)
2813 * NB: This is the only routine which MAY NOT lazy-evaluate 2815 * NB: This is the only routine which MAY NOT lazy-evaluate
2814 * or lose information. That is, this routine must actually 2816 * or lose information. That is, this routine must actually
2815 * insert this page into the given map NOW. 2817 * insert this page into the given map NOW.
2816 */ 2818 */
2817int 2819int
2818pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2820pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2819{ 2821{
2820 struct l2_bucket *l2b; 2822 struct l2_bucket *l2b;
2821 struct vm_page *pg, *opg; 2823 struct vm_page *pg, *opg;
2822 struct pv_entry *pv; 2824 struct pv_entry *pv;
2823 pt_entry_t *ptep, npte, opte; 2825 pt_entry_t *ptep, npte, opte;
2824 u_int nflags; 2826 u_int nflags;
2825 u_int oflags; 2827 u_int oflags;
 2828#ifdef ARM_HAS_VBAR
 2829 const bool vector_page_p = false;
 2830#else
 2831 const bool vector_page_p = (va == vector_page);
 2832#endif
2826 2833
2827 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); 2834 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
2828 2835
2829 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); 2836 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
2830 KDASSERT(((va | pa) & PGOFSET) == 0); 2837 KDASSERT(((va | pa) & PGOFSET) == 0);
2831 2838
2832 /* 2839 /*
2833 * Get a pointer to the page. Later on in this function, we 2840 * Get a pointer to the page. Later on in this function, we
2834 * test for a managed page by checking pg != NULL. 2841 * test for a managed page by checking pg != NULL.
2835 */ 2842 */
2836 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; 2843 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2837 2844
2838 nflags = 0; 2845 nflags = 0;
@@ -3004,28 +3011,28 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_ @@ -3004,28 +3011,28 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
3004 } else { 3011 } else {
3005 /* 3012 /*
3006 * We're mapping an unmanaged page. 3013 * We're mapping an unmanaged page.
3007 * These are always readable, and possibly writable, from 3014 * These are always readable, and possibly writable, from
3008 * the get go as we don't need to track ref/mod status. 3015 * the get go as we don't need to track ref/mod status.
3009 */ 3016 */
3010 npte |= l2pte_set_readonly(L2_S_PROTO); 3017 npte |= l2pte_set_readonly(L2_S_PROTO);
3011 if (prot & VM_PROT_WRITE) 3018 if (prot & VM_PROT_WRITE)
3012 npte = l2pte_set_writable(npte); 3019 npte = l2pte_set_writable(npte);
3013 3020
3014 /* 3021 /*
3015 * Make sure the vector table is mapped cacheable 3022 * Make sure the vector table is mapped cacheable
3016 */ 3023 */
3017 if ((pm != pmap_kernel() && va == vector_page) || 3024 if ((vector_page_p && pm != pmap_kernel())
3018 (flags & ARM32_MMAP_CACHEABLE)) { 3025 || (flags & ARM32_MMAP_CACHEABLE)) {
3019 npte |= pte_l2_s_cache_mode; 3026 npte |= pte_l2_s_cache_mode;
3020 } else if (flags & ARM32_MMAP_WRITECOMBINE) { 3027 } else if (flags & ARM32_MMAP_WRITECOMBINE) {
3021 npte |= pte_l2_s_wc_mode; 3028 npte |= pte_l2_s_wc_mode;
3022 } 3029 }
3023 if (opg) { 3030 if (opg) {
3024 /* 3031 /*
3025 * Looks like there's an existing 'managed' mapping 3032 * Looks like there's an existing 'managed' mapping
3026 * at this address. 3033 * at this address.
3027 */ 3034 */
3028 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3035 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3029 paddr_t opa = VM_PAGE_TO_PHYS(opg); 3036 paddr_t opa = VM_PAGE_TO_PHYS(opg);
3030 3037
3031#ifdef MULTIPROCESSOR 3038#ifdef MULTIPROCESSOR
@@ -3043,55 +3050,56 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_ @@ -3043,55 +3050,56 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
3043 else 3050 else
3044 if (PV_BEEN_REFD(oflags)) 3051 if (PV_BEEN_REFD(oflags))
3045 pmap_dcache_wb_range(pm, va, PAGE_SIZE, 3052 pmap_dcache_wb_range(pm, va, PAGE_SIZE,
3046 true, (oflags & PVF_WRITE) == 0); 3053 true, (oflags & PVF_WRITE) == 0);
3047 } 3054 }
3048#endif 3055#endif
3049 pool_put(&pmap_pv_pool, pv); 3056 pool_put(&pmap_pv_pool, pv);
3050 } 3057 }
3051 } 3058 }
3052 3059
3053 /* 3060 /*
3054 * Make sure userland mappings get the right permissions 3061 * Make sure userland mappings get the right permissions
3055 */ 3062 */
3056 if (pm != pmap_kernel() && va != vector_page) 3063 if (!vector_page_p && pm != pmap_kernel()) {
3057 npte |= L2_S_PROT_U; 3064 npte |= L2_S_PROT_U;
 3065 }
3058 3066
3059 /* 3067 /*
3060 * Keep the stats up to date 3068 * Keep the stats up to date
3061 */ 3069 */
3062 if (opte == 0) { 3070 if (opte == 0) {
3063 l2b->l2b_occupancy++; 3071 l2b->l2b_occupancy++;
3064 pm->pm_stats.resident_count++; 3072 pm->pm_stats.resident_count++;
3065 }  3073 }
3066 3074
3067 NPDEBUG(PDB_ENTER, 3075 NPDEBUG(PDB_ENTER,
3068 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); 3076 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
3069 3077
3070 /* 3078 /*
3071 * If this is just a wiring change, the two PTEs will be 3079 * If this is just a wiring change, the two PTEs will be
3072 * identical, so there's no need to update the page table. 3080 * identical, so there's no need to update the page table.
3073 */ 3081 */
3074 if (npte != opte) { 3082 if (npte != opte) {
3075 bool is_cached = pmap_is_cached(pm); 3083 bool is_cached = pmap_is_cached(pm);
3076 3084
3077 *ptep = npte; 3085 *ptep = npte;
3078 PTE_SYNC(ptep); 3086 PTE_SYNC(ptep);
3079 if (is_cached) { 3087 if (is_cached) {
3080 /* 3088 /*
3081 * We only need to frob the cache/tlb if this pmap 3089 * We only need to frob the cache/tlb if this pmap
3082 * is current 3090 * is current
3083 */ 3091 */
3084 if (va != vector_page && l2pte_valid(npte)) { 3092 if (!vector_page_p && l2pte_valid(npte)) {
3085 /* 3093 /*
3086 * This mapping is likely to be accessed as 3094 * This mapping is likely to be accessed as
3087 * soon as we return to userland. Fix up the 3095 * soon as we return to userland. Fix up the
3088 * L1 entry to avoid taking another 3096 * L1 entry to avoid taking another
3089 * page/domain fault. 3097 * page/domain fault.
3090 */ 3098 */
3091 pd_entry_t *pl1pd, l1pd; 3099 pd_entry_t *pl1pd, l1pd;
3092 3100
3093 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; 3101 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
3094 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | 3102 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) |
3095 L1_C_PROTO; 3103 L1_C_PROTO;
3096 if (*pl1pd != l1pd) { 3104 if (*pl1pd != l1pd) {
3097 *pl1pd = l1pd; 3105 *pl1pd = l1pd;
@@ -4272,38 +4280,40 @@ pmap_activate(struct lwp *l) @@ -4272,38 +4280,40 @@ pmap_activate(struct lwp *l)
4272 if (rpm) { 4280 if (rpm) {
4273 rpm->pm_cstate.cs_cache = 0; 4281 rpm->pm_cstate.cs_cache = 0;
4274 if (npm == pmap_kernel()) 4282 if (npm == pmap_kernel())
4275 pmap_recent_user = NULL; 4283 pmap_recent_user = NULL;
4276#ifdef PMAP_CACHE_VIVT 4284#ifdef PMAP_CACHE_VIVT
4277 cpu_idcache_wbinv_all(); 4285 cpu_idcache_wbinv_all();
4278#endif 4286#endif
4279 } 4287 }
4280#endif 4288#endif
4281 4289
4282 /* No interrupts while we frob the TTB/DACR */ 4290 /* No interrupts while we frob the TTB/DACR */
4283 oldirqstate = disable_interrupts(IF32_bits); 4291 oldirqstate = disable_interrupts(IF32_bits);
4284 4292
 4293#ifndef ARM_HAS_VBAR
4285 /* 4294 /*
4286 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 4295 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
4287 * entry corresponding to 'vector_page' in the incoming L1 table 4296 * entry corresponding to 'vector_page' in the incoming L1 table
4288 * before switching to it otherwise subsequent interrupts/exceptions 4297 * before switching to it otherwise subsequent interrupts/exceptions
4289 * (including domain faults!) will jump into hyperspace. 4298 * (including domain faults!) will jump into hyperspace.
4290 */ 4299 */
4291 if (npm->pm_pl1vec != NULL) { 4300 if (npm->pm_pl1vec != NULL) {
4292 cpu_tlb_flushID_SE((u_int)vector_page); 4301 cpu_tlb_flushID_SE((u_int)vector_page);
4293 cpu_cpwait(); 4302 cpu_cpwait();
4294 *npm->pm_pl1vec = npm->pm_l1vec; 4303 *npm->pm_pl1vec = npm->pm_l1vec;
4295 PTE_SYNC(npm->pm_pl1vec); 4304 PTE_SYNC(npm->pm_pl1vec);
4296 } 4305 }
 4306#endif
4297 4307
4298 cpu_domains(ndacr); 4308 cpu_domains(ndacr);
4299 4309
4300 if (npm == pmap_kernel() || npm == rpm) { 4310 if (npm == pmap_kernel() || npm == rpm) {
4301 /* 4311 /*
4302 * Switching to a kernel thread, or back to the 4312 * Switching to a kernel thread, or back to the
4303 * same user vmspace as before... Simply update 4313 * same user vmspace as before... Simply update
4304 * the TTB (no TLB flush required) 4314 * the TTB (no TLB flush required)
4305 */ 4315 */
4306 cpu_setttb(npm->pm_l1->l1_physaddr, false); 4316 cpu_setttb(npm->pm_l1->l1_physaddr, false);
4307 cpu_cpwait(); 4317 cpu_cpwait();
4308 } else { 4318 } else {
4309 /* 4319 /*
@@ -4429,33 +4439,35 @@ pmap_destroy(pmap_t pm) @@ -4429,33 +4439,35 @@ pmap_destroy(pmap_t pm)
4429 if (count > 0) { 4439 if (count > 0) {
4430 if (pmap_is_current(pm)) { 4440 if (pmap_is_current(pm)) {
4431 if (pm != pmap_kernel()) 4441 if (pm != pmap_kernel())
4432 pmap_use_l1(pm); 4442 pmap_use_l1(pm);
4433 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4443 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4434 } 4444 }
4435 return; 4445 return;
4436 } 4446 }
4437 4447
4438 /* 4448 /*
4439 * reference count is zero, free pmap resources and then free pmap. 4449 * reference count is zero, free pmap resources and then free pmap.
4440 */ 4450 */
4441 4451
 4452#ifndef ARM_HAS_VBAR
4442 if (vector_page < KERNEL_BASE) { 4453 if (vector_page < KERNEL_BASE) {
4443 KDASSERT(!pmap_is_current(pm)); 4454 KDASSERT(!pmap_is_current(pm));
4444 4455
4445 /* Remove the vector page mapping */ 4456 /* Remove the vector page mapping */
4446 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); 4457 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
4447 pmap_update(pm); 4458 pmap_update(pm);
4448 } 4459 }
 4460#endif
4449 4461
4450 LIST_REMOVE(pm, pm_list); 4462 LIST_REMOVE(pm, pm_list);
4451 4463
4452 pmap_free_l1(pm); 4464 pmap_free_l1(pm);
4453 4465
4454 if (pmap_recent_user == pm) 4466 if (pmap_recent_user == pm)
4455 pmap_recent_user = NULL; 4467 pmap_recent_user = NULL;
4456 4468
4457 uvm_obj_destroy(&pm->pm_obj, false); 4469 uvm_obj_destroy(&pm->pm_obj, false);
4458 mutex_destroy(&pm->pm_obj_lock); 4470 mutex_destroy(&pm->pm_obj_lock);
4459 pool_cache_put(&pmap_cache, pm); 4471 pool_cache_put(&pmap_cache, pm);
4460} 4472}
4461 4473
@@ -5114,26 +5126,27 @@ pmap_growkernel(vaddr_t maxkvaddr) @@ -5114,26 +5126,27 @@ pmap_growkernel(vaddr_t maxkvaddr)
5114 cpu_dcache_wbinv_all(); 5126 cpu_dcache_wbinv_all();
5115 cpu_tlb_flushD(); 5127 cpu_tlb_flushD();
5116 cpu_cpwait(); 5128 cpu_cpwait();
5117 5129
5118 mutex_exit(kpm->pm_lock); 5130 mutex_exit(kpm->pm_lock);
5119 splx(s); 5131 splx(s);
5120 5132
5121out: 5133out:
5122 return (pmap_curmaxkvaddr); 5134 return (pmap_curmaxkvaddr);
5123} 5135}
5124 5136
5125/************************ Utility routines ****************************/ 5137/************************ Utility routines ****************************/
5126 5138
 5139#ifndef ARM_HAS_VBAR
5127/* 5140/*
5128 * vector_page_setprot: 5141 * vector_page_setprot:
5129 * 5142 *
5130 * Manipulate the protection of the vector page. 5143 * Manipulate the protection of the vector page.
5131 */ 5144 */
5132void 5145void
5133vector_page_setprot(int prot) 5146vector_page_setprot(int prot)
5134{ 5147{
5135 struct l2_bucket *l2b; 5148 struct l2_bucket *l2b;
5136 pt_entry_t *ptep; 5149 pt_entry_t *ptep;
5137 5150
5138#if defined(CPU_ARMV7) || defined(CPU_ARM11) 5151#if defined(CPU_ARMV7) || defined(CPU_ARM11)
5139 /* 5152 /*
@@ -5146,26 +5159,27 @@ vector_page_setprot(int prot) @@ -5146,26 +5159,27 @@ vector_page_setprot(int prot)
5146 } 5159 }
5147#endif 5160#endif
5148 5161
5149 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); 5162 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
5150 KDASSERT(l2b != NULL); 5163 KDASSERT(l2b != NULL);
5151 5164
5152 ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 5165 ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
5153 5166
5154 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 5167 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
5155 PTE_SYNC(ptep); 5168 PTE_SYNC(ptep);
5156 cpu_tlb_flushD_SE(vector_page); 5169 cpu_tlb_flushD_SE(vector_page);
5157 cpu_cpwait(); 5170 cpu_cpwait();
5158} 5171}
 5172#endif
5159 5173
5160/* 5174/*
5161 * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 5175 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
5162 * Returns true if the mapping exists, else false. 5176 * Returns true if the mapping exists, else false.
5163 * 5177 *
5164 * NOTE: This function is only used by a couple of arm-specific modules. 5178 * NOTE: This function is only used by a couple of arm-specific modules.
5165 * It is not safe to take any pmap locks here, since we could be right 5179 * It is not safe to take any pmap locks here, since we could be right
5166 * in the middle of debugging the pmap anyway... 5180 * in the middle of debugging the pmap anyway...
5167 * 5181 *
5168 * It is possible for this routine to return false even though a valid 5182 * It is possible for this routine to return false even though a valid
5169 * mapping does exist. This is because we don't lock, so the metadata 5183 * mapping does exist. This is because we don't lock, so the metadata
5170 * state may be inconsistent. 5184 * state may be inconsistent.
5171 * 5185 *
@@ -5428,35 +5442,37 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v @@ -5428,35 +5442,37 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
5428 5442
5429 /* 5443 /*
5430 * init the static-global locks and global pmap list. 5444 * init the static-global locks and global pmap list.
5431 */ 5445 */
5432 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM); 5446 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
5433 5447
5434 /* 5448 /*
5435 * We can now initialise the first L1's metadata. 5449 * We can now initialise the first L1's metadata.
5436 */ 5450 */
5437 SLIST_INIT(&l1_list); 5451 SLIST_INIT(&l1_list);
5438 TAILQ_INIT(&l1_lru_list); 5452 TAILQ_INIT(&l1_lru_list);
5439 pmap_init_l1(l1, l1pt); 5453 pmap_init_l1(l1, l1pt);
5440 5454
 5455#ifndef ARM_HAS_VBAR
5441 /* Set up vector page L1 details, if necessary */ 5456 /* Set up vector page L1 details, if necessary */
5442 if (vector_page < KERNEL_BASE) { 5457 if (vector_page < KERNEL_BASE) {
5443 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 5458 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
5444 l2b = pmap_get_l2_bucket(pm, vector_page); 5459 l2b = pmap_get_l2_bucket(pm, vector_page);
5445 KDASSERT(l2b != NULL); 5460 KDASSERT(l2b != NULL);
5446 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 5461 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
5447 L1_C_DOM(pm->pm_domain); 5462 L1_C_DOM(pm->pm_domain);
5448 } else 5463 } else
5449 pm->pm_pl1vec = NULL; 5464 pm->pm_pl1vec = NULL;
 5465#endif
5450 5466
5451 /* 5467 /*
5452 * Initialize the pmap cache 5468 * Initialize the pmap cache
5453 */ 5469 */
5454 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 5470 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
5455 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL); 5471 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
5456 LIST_INIT(&pmap_pmaps); 5472 LIST_INIT(&pmap_pmaps);
5457 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); 5473 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
5458 5474
5459 /* 5475 /*
5460 * Initialize the pv pool. 5476 * Initialize the pv pool.
5461 */ 5477 */
5462 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl", 5478 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
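
The pmap_enter() change above is where most of the saved text comes from:
under ARM_HAS_VBAR, vector_page_p is the compile-time constant false, so
every vector-page special case in the hot mapping path folds away.  For
example, in this condition from the hunk above, the first conjunct becomes
constant false and the compiler drops it entirely:

    if ((vector_page_p && pm != pmap_kernel())  /* false under ARM_HAS_VBAR */
        || (flags & ARM32_MMAP_CACHEABLE)) {
        npte |= pte_l2_s_cache_mode;
    }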

cvs diff -r1.118 -r1.119 src/sys/arch/arm/conf/files.arm

--- src/sys/arch/arm/conf/files.arm 2013/06/12 07:12:10 1.118
+++ src/sys/arch/arm/conf/files.arm 2013/06/12 21:34:12 1.119
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1# $NetBSD: files.arm,v 1.118 2013/06/12 07:12:10 matt Exp $ 1# $NetBSD: files.arm,v 1.119 2013/06/12 21:34:12 matt Exp $
2 2
3# temporary define to allow easy moving to ../arch/arm/arm32 3# temporary define to allow easy moving to ../arch/arm/arm32
4defflag ARM32 4defflag ARM32
5 5
6# CPU types. Make sure to update <arm/cpuconf.h> if you change this list. 6# CPU types. Make sure to update <arm/cpuconf.h> if you change this list.
7defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3 7defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3
8defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8 8defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8
9 CPU_ARM9 CPU_ARM9E CPU_ARM10 CPU_ARM11 CPU_ARMV7 9 CPU_ARM9 CPU_ARM9E CPU_ARM10 CPU_ARM11 CPU_ARMV7
10 CPU_SA110 CPU_SA1100 CPU_SA1110 CPU_IXP12X0 10 CPU_SA110 CPU_SA1100 CPU_SA1110 CPU_IXP12X0
11 CPU_FA526 CPU_XSCALE_80200 CPU_XSCALE_80321 11 CPU_FA526 CPU_XSCALE_80200 CPU_XSCALE_80321
12 CPU_XSCALE_PXA250 CPU_XSCALE_PXA270 12 CPU_XSCALE_PXA250 CPU_XSCALE_PXA270
13 CPU_XSCALE_IXP425  13 CPU_XSCALE_IXP425
14 CPU_SHEEVA 14 CPU_SHEEVA
@@ -26,26 +26,27 @@ defflag opt_cputypes.h FPU_VFP @@ -26,26 +26,27 @@ defflag opt_cputypes.h FPU_VFP
26 26
27defparam opt_cpuoptions.h XSCALE_CCLKCFG 27defparam opt_cpuoptions.h XSCALE_CCLKCFG
28defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH 28defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH
29defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_BACK 29defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_BACK
30defflag opt_cpuoptions.h XSCALE_NO_COALESCE_WRITES 30defflag opt_cpuoptions.h XSCALE_NO_COALESCE_WRITES
31defflag opt_cpuoptions.h XSCALE_CACHE_READ_WRITE_ALLOCATE 31defflag opt_cpuoptions.h XSCALE_CACHE_READ_WRITE_ALLOCATE
32defflag opt_cpuoptions.h ARM32_DISABLE_ALIGNMENT_FAULTS 32defflag opt_cpuoptions.h ARM32_DISABLE_ALIGNMENT_FAULTS
33defflag opt_cpuoptions.h ARM9_CACHE_WRITE_THROUGH 33defflag opt_cpuoptions.h ARM9_CACHE_WRITE_THROUGH
34defflag opt_cpuoptions.h TPIDRPRW_IS_CURLWP 34defflag opt_cpuoptions.h TPIDRPRW_IS_CURLWP
35defflag opt_cpuoptions.h TPIDRPRW_IS_CURCPU 35defflag opt_cpuoptions.h TPIDRPRW_IS_CURCPU
36defflag opt_cpuoptions.h ARM11_PMC CORTEX_PMC 36defflag opt_cpuoptions.h ARM11_PMC CORTEX_PMC
37defflag opt_cpuoptions.h ARM11_CACHE_WRITE_THROUGH 37defflag opt_cpuoptions.h ARM11_CACHE_WRITE_THROUGH
38defflag opt_cpuoptions.h ARM11MPCORE_COMPAT_MMU 38defflag opt_cpuoptions.h ARM11MPCORE_COMPAT_MMU
 39defflag opt_cpuoptions.h ARM_HAS_VBAR
39# use extended small page in compatible MMU mode for ARMv6 40# use extended small page in compatible MMU mode for ARMv6
40defflag opt_cpuoptions.h ARMV6_EXTENDED_SMALL_PAGE 41defflag opt_cpuoptions.h ARMV6_EXTENDED_SMALL_PAGE
41 42
42# Interrupt implementation header definition. 43# Interrupt implementation header definition.
43defparam opt_arm_intr_impl.h ARM_INTR_IMPL 44defparam opt_arm_intr_impl.h ARM_INTR_IMPL
44 45
45# ARM-specific debug options 46# ARM-specific debug options
46defflag opt_arm_debug.h ARM_LOCK_CAS_DEBUG 47defflag opt_arm_debug.h ARM_LOCK_CAS_DEBUG
47 48
48# Board-specific bus_space(9)/bus_dma(9) definitions 49# Board-specific bus_space(9)/bus_dma(9) definitions
49defflag opt_arm_bus_space.h __BUS_SPACE_HAS_STREAM_METHODS 50defflag opt_arm_bus_space.h __BUS_SPACE_HAS_STREAM_METHODS
50 _ARM32_NEED_BUS_DMA_BOUNCE 51 _ARM32_NEED_BUS_DMA_BOUNCE
51 BUSDMA_COUNTERS 52 BUSDMA_COUNTERS

cvs diff -r1.119 -r1.120 src/sys/arch/arm/include/arm32/pmap.h

--- src/sys/arch/arm/include/arm32/pmap.h 2012/12/12 15:09:37 1.119
+++ src/sys/arch/arm/include/arm32/pmap.h 2013/06/12 21:34:12 1.120
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.h,v 1.119 2012/12/12 15:09:37 matt Exp $ */ 1/* $NetBSD: pmap.h,v 1.120 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc. 4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -176,27 +176,29 @@ struct pmap_devmap { @@ -176,27 +176,29 @@ struct pmap_devmap {
176 psize_t pd_size; /* size of region */ 176 psize_t pd_size; /* size of region */
177 vm_prot_t pd_prot; /* protection code */ 177 vm_prot_t pd_prot; /* protection code */
178 int pd_cache; /* cache attributes */ 178 int pd_cache; /* cache attributes */
179}; 179};
180 180
181/* 181/*
182 * The pmap structure itself 182 * The pmap structure itself
183 */ 183 */
184struct pmap { 184struct pmap {
185 uint8_t pm_domain; 185 uint8_t pm_domain;
186 bool pm_remove_all; 186 bool pm_remove_all;
187 bool pm_activated; 187 bool pm_activated;
188 struct l1_ttable *pm_l1; 188 struct l1_ttable *pm_l1;
 189#ifndef ARM_HAS_VBAR
189 pd_entry_t *pm_pl1vec; 190 pd_entry_t *pm_pl1vec;
 191#endif
190 pd_entry_t pm_l1vec; 192 pd_entry_t pm_l1vec;
191 union pmap_cache_state pm_cstate; 193 union pmap_cache_state pm_cstate;
192 struct uvm_object pm_obj; 194 struct uvm_object pm_obj;
193 kmutex_t pm_obj_lock; 195 kmutex_t pm_obj_lock;
194#define pm_lock pm_obj.vmobjlock 196#define pm_lock pm_obj.vmobjlock
195 struct l2_dtable *pm_l2[L2_SIZE]; 197 struct l2_dtable *pm_l2[L2_SIZE];
196 struct pmap_statistics pm_stats; 198 struct pmap_statistics pm_stats;
197 LIST_ENTRY(pmap) pm_list; 199 LIST_ENTRY(pmap) pm_list;
198}; 200};
199 201
200/* 202/*
201 * Physical / virtual address structure. In a number of places (particularly 203 * Physical / virtual address structure. In a number of places (particularly
202 * during bootstrapping) we need to keep track of the physical and virtual 204 * during bootstrapping) we need to keep track of the physical and virtual