Wed Jun 12 21:34:12 2013 UTC
Add an ARM_HAS_VBAR option which forces the use of the VBAR register.  This
allows much of the code that deals with vector_page mappings to be eliminated.
On a BEAGLEBONE kernel, this saves 8KB of text (instructions that never have
to be executed).  (The PJ4B has VBAR but doesn't implement the Security
Extensions it is part of, so a method was needed to allow it to use VBAR
without relying on the default test for the security extensions.)
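
For context, a minimal C sketch of the run-time test that this option
short-circuits.  armreg_pfr1_read() and ARM_PFR1_SEC_MASK are taken from the
arm32_kvminit.c hunk further down; the wrapper function and its name are
purely illustrative and assume the usual <arm/armreg.h> accessors are in
scope.

	static inline bool
	cpu_uses_vbar(void)
	{
	#if defined(ARM_HAS_VBAR)
		/* The option forces VBAR use, e.g. for PJ4B, which has VBAR
		 * but does not advertise the Security Extensions. */
		return true;
	#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
		/* Default test: VBAR is part of the Security Extensions, so
		 * probe the Security field of ID_PFR1. */
		return (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0;
	#else
		return false;
	#endif
	}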


(matt)
diff -r1.6 -r1.7 src/sys/arch/arm/arm/fiq.c
diff -r1.6 -r1.7 src/sys/arch/arm/arm/vectors.S
diff -r1.19 -r1.20 src/sys/arch/arm/arm32/arm32_kvminit.c
diff -r1.93 -r1.94 src/sys/arch/arm/arm32/arm32_machdep.c
diff -r1.256 -r1.257 src/sys/arch/arm/arm32/pmap.c
diff -r1.118 -r1.119 src/sys/arch/arm/conf/files.arm
diff -r1.119 -r1.120 src/sys/arch/arm/include/arm32/pmap.h

cvs diff -r1.6 -r1.7 src/sys/arch/arm/arm/fiq.c

--- src/sys/arch/arm/arm/fiq.c 2008/11/19 06:29:48 1.6
+++ src/sys/arch/arm/arm/fiq.c 2013/06/12 21:34:12 1.7
@@ -1,178 +1,180 @@ @@ -1,178 +1,180 @@
1/* $NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $ */ 1/* $NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc. 4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38#include <sys/cdefs.h> 38#include <sys/cdefs.h>
39__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.6 2008/11/19 06:29:48 matt Exp $"); 39__KERNEL_RCSID(0, "$NetBSD: fiq.c,v 1.7 2013/06/12 21:34:12 matt Exp $");
40 40
41#include <sys/param.h> 41#include <sys/param.h>
42#include <sys/systm.h> 42#include <sys/systm.h>
43 43
44#include <arm/cpufunc.h> 44#include <arm/cpufunc.h>
45#include <arm/fiq.h> 45#include <arm/fiq.h>
46 46
47#ifdef __PROG32 47#ifdef __PROG32
48#include <uvm/uvm.h> 48#include <uvm/uvm.h>
49#endif 49#endif
50 50
51TAILQ_HEAD(, fiqhandler) fiqhandler_stack = 51TAILQ_HEAD(, fiqhandler) fiqhandler_stack =
52 TAILQ_HEAD_INITIALIZER(fiqhandler_stack); 52 TAILQ_HEAD_INITIALIZER(fiqhandler_stack);
53 53
54extern char fiqvector[]; 54extern char fiqvector[];
55extern char fiq_nullhandler[], fiq_nullhandler_end[]; 55extern char fiq_nullhandler[], fiq_nullhandler_end[];
56 56
57#ifdef __PROG32 57#ifdef __PROG32
58#define IRQ_BIT I32_bit 58#define IRQ_BIT I32_bit
59#define FIQ_BIT F32_bit 59#define FIQ_BIT F32_bit
60#else 60#else
61#define IRQ_BIT R15_IRQ_DISABLE 61#define IRQ_BIT R15_IRQ_DISABLE
62#define FIQ_BIT R15_FIQ_DISABLE 62#define FIQ_BIT R15_FIQ_DISABLE
63#endif /* __PROG32 */ 63#endif /* __PROG32 */
64 64
 65#ifndef ARM_HAS_VBAR
65/* 66/*
66 * fiq_installhandler: 67 * fiq_installhandler:
67 * 68 *
68 * Actually install the FIQ handler down at the FIQ vector. 69 * Actually install the FIQ handler down at the FIQ vector.
69 * 70 *
70 * Note: If the FIQ is invoked via an extra layer of 71 * Note: If the FIQ is invoked via an extra layer of
71 * indirection, the actual FIQ code store lives in the 72 * indirection, the actual FIQ code store lives in the
72 * data segment, so there is no need to manipulate 73 * data segment, so there is no need to manipulate
73 * the vector page's protection. 74 * the vector page's protection.
74 */ 75 */
75static void 76static void
76fiq_installhandler(void *func, size_t size) 77fiq_installhandler(void *func, size_t size)
77{ 78{
78#if defined(__PROG32) && !defined(__ARM_FIQ_INDIRECT) 79#if defined(__PROG32) && !defined(__ARM_FIQ_INDIRECT)
79 vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE); 80 vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
80#endif 81#endif
81 82
82 memcpy(fiqvector, func, size); 83 memcpy(fiqvector, func, size);
83 84
84#ifdef __PROG32 85#ifdef __PROG32
85#if !defined(__ARM_FIQ_INDIRECT) 86#if !defined(__ARM_FIQ_INDIRECT)
86 vector_page_setprot(VM_PROT_READ); 87 vector_page_setprot(VM_PROT_READ);
87#endif 88#endif
88 cpu_icache_sync_range((vaddr_t) fiqvector, size); 89 cpu_icache_sync_range((vaddr_t) fiqvector, size);
89#endif 90#endif
90} 91}
91 92
92/* 93/*
93 * fiq_claim: 94 * fiq_claim:
94 * 95 *
95 * Claim the FIQ vector. 96 * Claim the FIQ vector.
96 */ 97 */
97int 98int
98fiq_claim(struct fiqhandler *fh) 99fiq_claim(struct fiqhandler *fh)
99{ 100{
100 struct fiqhandler *ofh; 101 struct fiqhandler *ofh;
101 u_int oldirqstate; 102 u_int oldirqstate;
102 int error = 0; 103 int error = 0;
103 104
104 if (fh->fh_size > 0x100) 105 if (fh->fh_size > 0x100)
105 return (EFBIG); 106 return (EFBIG);
106 107
107 oldirqstate = disable_interrupts(FIQ_BIT); 108 oldirqstate = disable_interrupts(FIQ_BIT);
108 109
109 if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) { 110 if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
110 if ((ofh->fh_flags & FH_CANPUSH) == 0) { 111 if ((ofh->fh_flags & FH_CANPUSH) == 0) {
111 error = EBUSY; 112 error = EBUSY;
112 goto out; 113 goto out;
113 } 114 }
114 115
115 /* Save the previous FIQ handler's registers. */ 116 /* Save the previous FIQ handler's registers. */
116 if (ofh->fh_regs != NULL) 117 if (ofh->fh_regs != NULL)
117 fiq_getregs(ofh->fh_regs); 118 fiq_getregs(ofh->fh_regs);
118 } 119 }
119 120
120 /* Set FIQ mode registers to ours. */ 121 /* Set FIQ mode registers to ours. */
121 if (fh->fh_regs != NULL) 122 if (fh->fh_regs != NULL)
122 fiq_setregs(fh->fh_regs); 123 fiq_setregs(fh->fh_regs);
123 124
124 TAILQ_INSERT_HEAD(&fiqhandler_stack, fh, fh_list); 125 TAILQ_INSERT_HEAD(&fiqhandler_stack, fh, fh_list);
125 126
126 /* Now copy the actual handler into place. */ 127 /* Now copy the actual handler into place. */
127 fiq_installhandler(fh->fh_func, fh->fh_size); 128 fiq_installhandler(fh->fh_func, fh->fh_size);
128 129
129 /* Make sure FIQs are enabled when we return. */ 130 /* Make sure FIQs are enabled when we return. */
130 oldirqstate &= ~FIQ_BIT; 131 oldirqstate &= ~FIQ_BIT;
131 132
132 out: 133 out:
133 restore_interrupts(oldirqstate); 134 restore_interrupts(oldirqstate);
134 return (error); 135 return (error);
135} 136}
136 137
137/* 138/*
138 * fiq_release: 139 * fiq_release:
139 * 140 *
140 * Release the FIQ vector. 141 * Release the FIQ vector.
141 */ 142 */
142void 143void
143fiq_release(struct fiqhandler *fh) 144fiq_release(struct fiqhandler *fh)
144{ 145{
145 u_int oldirqstate; 146 u_int oldirqstate;
146 struct fiqhandler *ofh; 147 struct fiqhandler *ofh;
147 148
148 oldirqstate = disable_interrupts(FIQ_BIT); 149 oldirqstate = disable_interrupts(FIQ_BIT);
149 150
150 /* 151 /*
151 * If we are the currently active FIQ handler, then we 152 * If we are the currently active FIQ handler, then we
152 * need to save our registers and pop the next one back 153 * need to save our registers and pop the next one back
153 * into the vector. 154 * into the vector.
154 */ 155 */
155 if (fh == TAILQ_FIRST(&fiqhandler_stack)) { 156 if (fh == TAILQ_FIRST(&fiqhandler_stack)) {
156 if (fh->fh_regs != NULL) 157 if (fh->fh_regs != NULL)
157 fiq_getregs(fh->fh_regs); 158 fiq_getregs(fh->fh_regs);
158 TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list); 159 TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
159 if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) { 160 if ((ofh = TAILQ_FIRST(&fiqhandler_stack)) != NULL) {
160 if (ofh->fh_regs != NULL) 161 if (ofh->fh_regs != NULL)
161 fiq_setregs(ofh->fh_regs); 162 fiq_setregs(ofh->fh_regs);
162 fiq_installhandler(ofh->fh_func, ofh->fh_size); 163 fiq_installhandler(ofh->fh_func, ofh->fh_size);
163 } 164 }
164 } else 165 } else
165 TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list); 166 TAILQ_REMOVE(&fiqhandler_stack, fh, fh_list);
166 167
167 if (TAILQ_FIRST(&fiqhandler_stack) == NULL) { 168 if (TAILQ_FIRST(&fiqhandler_stack) == NULL) {
168 /* Copy the NULL handler back down into the vector. */ 169 /* Copy the NULL handler back down into the vector. */
169 fiq_installhandler(fiq_nullhandler, 170 fiq_installhandler(fiq_nullhandler,
170 (size_t)(fiq_nullhandler_end - fiq_nullhandler)); 171 (size_t)(fiq_nullhandler_end - fiq_nullhandler));
171 172
172 /* Make sure FIQs are disabled when we return. */ 173 /* Make sure FIQs are disabled when we return. */
173 oldirqstate |= FIQ_BIT; 174 oldirqstate |= FIQ_BIT;
174 } 175 }
175 176
176 oldirqstate &= ~FIQ_BIT; 177 oldirqstate &= ~FIQ_BIT;
177 restore_interrupts(oldirqstate); 178 restore_interrupts(oldirqstate);
178} 179}
 180#endif /* !ARM_HAS_VBAR */
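
As a usage note, here is a hedged sketch of how a driver might call the
fiq_claim()/fiq_release() interface shown above.  The field names (fh_func,
fh_size, fh_flags, fh_regs) and the FH_CANPUSH flag are taken from the code
above; the handler symbols and the attach/detach functions are hypothetical.

	#include <arm/fiq.h>

	/* Hypothetical FIQ handler written in assembly, at most 0x100 bytes. */
	extern char my_fiq_handler[], my_fiq_handler_end[];

	static struct fiqhandler my_fh;

	static int
	my_fiq_attach(void)
	{
		my_fh.fh_func  = my_fiq_handler;
		my_fh.fh_size  = my_fiq_handler_end - my_fiq_handler;
		my_fh.fh_flags = 0;	/* set FH_CANPUSH to allow preemption */
		my_fh.fh_regs  = NULL;	/* no saved FIQ-mode register image */

		/* Copies the handler into the FIQ vector and enables FIQs. */
		return fiq_claim(&my_fh);
	}

	static void
	my_fiq_detach(void)
	{
		/* Restores the previous handler (or the null handler). */
		fiq_release(&my_fh);
	}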

cvs diff -r1.6 -r1.7 src/sys/arch/arm/arm/vectors.S

--- src/sys/arch/arm/arm/vectors.S 2013/06/12 15:10:13 1.6
+++ src/sys/arch/arm/arm/vectors.S 2013/06/12 21:34:12 1.7
@@ -1,128 +1,138 @@ @@ -1,128 +1,138 @@
1/* $NetBSD: vectors.S,v 1.6 2013/06/12 15:10:13 matt Exp $ */ 1/* $NetBSD: vectors.S,v 1.7 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (C) 1994-1997 Mark Brinicombe 4 * Copyright (C) 1994-1997 Mark Brinicombe
5 * Copyright (C) 1994 Brini 5 * Copyright (C) 1994 Brini
6 * All rights reserved.  6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions 9 * modification, are permitted provided that the following conditions
10 * are met: 10 * are met:
11 * 1. Redistributions of source code must retain the above copyright 11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer. 12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright 13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the 14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution. 15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software 16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement: 17 * must display the following acknowledgement:
18 * This product includes software developed by Brini. 18 * This product includes software developed by Brini.
19 * 4. The name of Brini may not be used to endorse or promote products 19 * 4. The name of Brini may not be used to endorse or promote products
20 * derived from this software without specific prior written permission. 20 * derived from this software without specific prior written permission.
21 *  21 *
22 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR 22 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */ 32 */
33 33
34#include "assym.h" 34#include "assym.h"
35#include "opt_cputypes.h" 35#include "opt_cputypes.h"
 36#include "opt_cpuoptions.h"
36#include <machine/asm.h> 37#include <machine/asm.h>
37 38
38/* 39/*
39 * These are the exception vectors copied down to page 0. 40 * These are the exception vectors copied down to page 0.
40 * 41 *
41 * Note that FIQs are special; rather than using a level of 42 * Note that FIQs are special; rather than using a level of
42 * indirection, we actually copy the FIQ code down into the 43 * indirection, we actually copy the FIQ code down into the
43 * vector page. 44 * vector page.
44 */ 45 */
45 46
46 .text 47 .text
47 .align 0 
48 .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end) 
49 .global _C_LABEL(fiqvector) 48 .global _C_LABEL(fiqvector)
50 49
51#if defined(CPU_ARMV7) || defined(CPU_ARM11) 50#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
52 /* 51 /*
53 * ARMv[67] processors with the Security Extension have the VBAR 52 * ARMv[67] processors with the Security Extension have the VBAR
54 * which redirects the low vector to any 32-byte aligned address.  53 * which redirects the low vector to any 32-byte aligned address.
55 * Since we are in kernel, we can just do a relative branch to the 54 * Since we are in kernel, we can just do a relative branch to the
56 * exception code and avoid the intermediate load. 55 * exception code and avoid the intermediate load.
57 */ 56 */
58 .global _C_LABEL(page0rel) 57 .global _C_LABEL(page0rel)
59 .p2align 5 58 .p2align 5
60_C_LABEL(page0rel): 59_C_LABEL(page0rel):
61 b reset_entry 60 b reset_entry
62 b undefined_entry 61 b undefined_entry
63 b swi_entry 62 b swi_entry
64 b prefetch_abort_entry 63 b prefetch_abort_entry
65 b data_abort_entry 64 b data_abort_entry
66 b address_exception_entry 65 b address_exception_entry
67 b irq_entry 66 b irq_entry
68#ifdef __ARM_FIQ_INDIRECT 67#ifdef __ARM_FIQ_INDIRECT
69 b _C_LABEL(fiqvector) 68 b _C_LABEL(fiqvector)
70#else 69#elif !defined(ARM_HAS_VBAR)
71 b .Lfiqvector 70 b .Lfiqvector
72#endif 71#endif
73#endif 72#endif /* CPU_ARMV7 || CPU_ARM11 || ARM_HAS_VBAR */
74 73
 74#ifndef ARM_HAS_VBAR
 75 .global _C_LABEL(page0), _C_LABEL(page0_data), _C_LABEL(page0_end)
 76 .align 0
75_C_LABEL(page0): 77_C_LABEL(page0):
76 ldr pc, .Lreset_target 78 ldr pc, .Lreset_target
77 ldr pc, .Lundefined_target 79 ldr pc, .Lundefined_target
78 ldr pc, .Lswi_target 80 ldr pc, .Lswi_target
79 ldr pc, .Lprefetch_abort_target 81 ldr pc, .Lprefetch_abort_target
80 ldr pc, .Ldata_abort_target 82 ldr pc, .Ldata_abort_target
81 ldr pc, .Laddress_exception_target 83 ldr pc, .Laddress_exception_target
82 ldr pc, .Lirq_target 84 ldr pc, .Lirq_target
83#ifdef __ARM_FIQ_INDIRECT 85#ifdef __ARM_FIQ_INDIRECT
84 ldr pc, .Lfiq_target 86 ldr pc, .Lfiq_target
85#else 87#endif
 88#endif /* !ARM_HAS_VBAR */
 89#ifndef __ARM_FIQ_INDIRECT
86.Lfiqvector: 90.Lfiqvector:
 91#ifdef ARM_HAS_VBAR
 92 .set _C_LABEL(fiqvector), . - _C_LABEL(page0rel)
 93#else
87 .set _C_LABEL(fiqvector), . - _C_LABEL(page0) 94 .set _C_LABEL(fiqvector), . - _C_LABEL(page0)
 95#endif
88 subs pc, lr, #4 96 subs pc, lr, #4
89 .org .Lfiqvector + 0x100 97 .org .Lfiqvector + 0x100
90#endif 98#endif
91 99
 100#ifndef ARM_HAS_VBAR
92_C_LABEL(page0_data): 101_C_LABEL(page0_data):
93.Lreset_target: 102.Lreset_target:
94 .word reset_entry 103 .word reset_entry
95 104
96.Lundefined_target: 105.Lundefined_target:
97 .word undefined_entry 106 .word undefined_entry
98 107
99.Lswi_target: 108.Lswi_target:
100 .word swi_entry 109 .word swi_entry
101 110
102.Lprefetch_abort_target: 111.Lprefetch_abort_target:
103 .word prefetch_abort_entry 112 .word prefetch_abort_entry
104 113
105.Ldata_abort_target: 114.Ldata_abort_target:
106 .word data_abort_entry 115 .word data_abort_entry
107 116
108.Laddress_exception_target: 117.Laddress_exception_target:
109 .word address_exception_entry 118 .word address_exception_entry
110 119
111.Lirq_target: 120.Lirq_target:
112 .word irq_entry 121 .word irq_entry
113 122
114#ifdef __ARM_FIQ_INDIRECT 123#ifdef __ARM_FIQ_INDIRECT
115.Lfiq_target: 124.Lfiq_target:
116 .word _C_LABEL(fiqvector) 125 .word _C_LABEL(fiqvector)
117#else 126#else
118 .word 0 /* pad it out */ 127 .word 0 /* pad it out */
119#endif 128#endif
120_C_LABEL(page0_end): 129_C_LABEL(page0_end):
 130#endif /* ARM_HAS_VBAR */
121 131
122#ifdef __ARM_FIQ_INDIRECT 132#ifdef __ARM_FIQ_INDIRECT
123 .data 133 .data
124 .align 0 134 .align 0
125_C_LABEL(fiqvector): 135_C_LABEL(fiqvector):
126 subs pc, lr, #4 136 subs pc, lr, #4
127 .org _C_LABEL(fiqvector) + 0x100 137 .org _C_LABEL(fiqvector) + 0x100
128#endif 138#endif
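
To connect this to the log message: with ARM_HAS_VBAR the kernel no longer
copies page0 down to the vector page; instead VBAR is pointed at the page0rel
branch table above.  A minimal sketch of such a write follows, assuming the
architectural CP15 encoding for VBAR (c12, c0, 0); the actual machine-dependent
setup lives in the arm32 files changed by this commit but is not shown in
these hunks.

	extern char page0rel[];	/* the relative-branch table from vectors.S */

	static inline void
	set_vbar_to_page0rel(void)
	{
		/* page0rel is .p2align 5, satisfying VBAR's 32-byte alignment;
		 * a suitable barrier may be needed after the write. */
		__asm volatile("mcr p15, 0, %0, c12, c0, 0" :: "r"(page0rel));
	}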

cvs diff -r1.19 -r1.20 src/sys/arch/arm/arm32/arm32_kvminit.c

--- src/sys/arch/arm/arm32/arm32_kvminit.c 2013/06/12 17:13:05 1.19
+++ src/sys/arch/arm/arm32/arm32_kvminit.c 2013/06/12 21:34:12 1.20
@@ -1,917 +1,919 @@ @@ -1,917 +1,919 @@
1/* $NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $ */ 1/* $NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved. 4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
5 * Written by Hiroyuki Bessho for Genetec Corporation. 5 * Written by Hiroyuki Bessho for Genetec Corporation.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of Genetec Corporation may not be used to endorse or 15 * 3. The name of Genetec Corporation may not be used to endorse or
16 * promote products derived from this software without specific prior 16 * promote products derived from this software without specific prior
17 * written permission. 17 * written permission.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND 19 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 * 30 *
31 * Copyright (c) 2001 Wasabi Systems, Inc. 31 * Copyright (c) 2001 Wasabi Systems, Inc.
32 * All rights reserved. 32 * All rights reserved.
33 * 33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions 37 * modification, are permitted provided that the following conditions
38 * are met: 38 * are met:
39 * 1. Redistributions of source code must retain the above copyright 39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer. 40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright 41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the 42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution. 43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software 44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement: 45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by 46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc. 47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior 49 * or promote products derived from this software without specific prior
50 * written permission. 50 * written permission.
51 * 51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE. 62 * POSSIBILITY OF SUCH DAMAGE.
63 * 63 *
64 * Copyright (c) 1997,1998 Mark Brinicombe. 64 * Copyright (c) 1997,1998 Mark Brinicombe.
65 * Copyright (c) 1997,1998 Causality Limited. 65 * Copyright (c) 1997,1998 Causality Limited.
66 * All rights reserved. 66 * All rights reserved.
67 * 67 *
68 * Redistribution and use in source and binary forms, with or without 68 * Redistribution and use in source and binary forms, with or without
69 * modification, are permitted provided that the following conditions 69 * modification, are permitted provided that the following conditions
70 * are met: 70 * are met:
71 * 1. Redistributions of source code must retain the above copyright 71 * 1. Redistributions of source code must retain the above copyright
72 * notice, this list of conditions and the following disclaimer. 72 * notice, this list of conditions and the following disclaimer.
73 * 2. Redistributions in binary form must reproduce the above copyright 73 * 2. Redistributions in binary form must reproduce the above copyright
74 * notice, this list of conditions and the following disclaimer in the 74 * notice, this list of conditions and the following disclaimer in the
75 * documentation and/or other materials provided with the distribution. 75 * documentation and/or other materials provided with the distribution.
76 * 3. All advertising materials mentioning features or use of this software 76 * 3. All advertising materials mentioning features or use of this software
77 * must display the following acknowledgement: 77 * must display the following acknowledgement:
78 * This product includes software developed by Mark Brinicombe 78 * This product includes software developed by Mark Brinicombe
79 * for the NetBSD Project. 79 * for the NetBSD Project.
80 * 4. The name of the company nor the name of the author may be used to 80 * 4. The name of the company nor the name of the author may be used to
81 * endorse or promote products derived from this software without specific 81 * endorse or promote products derived from this software without specific
82 * prior written permission. 82 * prior written permission.
83 * 83 *
84 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 84 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
85 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 85 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
86 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 86 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
87 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 87 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
88 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 88 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
89 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 89 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
90 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 90 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
94 * SUCH DAMAGE. 94 * SUCH DAMAGE.
95 * 95 *
96 * Copyright (c) 2007 Microsoft 96 * Copyright (c) 2007 Microsoft
97 * All rights reserved. 97 * All rights reserved.
98 * 98 *
99 * Redistribution and use in source and binary forms, with or without 99 * Redistribution and use in source and binary forms, with or without
100 * modification, are permitted provided that the following conditions 100 * modification, are permitted provided that the following conditions
101 * are met: 101 * are met:
102 * 1. Redistributions of source code must retain the above copyright 102 * 1. Redistributions of source code must retain the above copyright
103 * notice, this list of conditions and the following disclaimer. 103 * notice, this list of conditions and the following disclaimer.
104 * 2. Redistributions in binary form must reproduce the above copyright 104 * 2. Redistributions in binary form must reproduce the above copyright
105 * notice, this list of conditions and the following disclaimer in the 105 * notice, this list of conditions and the following disclaimer in the
106 * documentation and/or other materials provided with the distribution. 106 * documentation and/or other materials provided with the distribution.
107 * 3. All advertising materials mentioning features or use of this software 107 * 3. All advertising materials mentioning features or use of this software
108 * must display the following acknowledgement: 108 * must display the following acknowledgement:
109 * This product includes software developed by Microsoft 109 * This product includes software developed by Microsoft
110 * 110 *
111 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 111 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT, 114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
121 * SUCH DAMAGE. 121 * SUCH DAMAGE.
122 */ 122 */
123 123
124#include <sys/cdefs.h> 124#include <sys/cdefs.h>
125__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.19 2013/06/12 17:13:05 matt Exp $"); 125__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.20 2013/06/12 21:34:12 matt Exp $");
126 126
127#include <sys/param.h> 127#include <sys/param.h>
128#include <sys/device.h> 128#include <sys/device.h>
129#include <sys/kernel.h> 129#include <sys/kernel.h>
130#include <sys/reboot.h> 130#include <sys/reboot.h>
131#include <sys/bus.h> 131#include <sys/bus.h>
132 132
133#include <dev/cons.h> 133#include <dev/cons.h>
134 134
135#include <uvm/uvm_extern.h> 135#include <uvm/uvm_extern.h>
136 136
137#include <arm/db_machdep.h> 137#include <arm/db_machdep.h>
138#include <arm/undefined.h> 138#include <arm/undefined.h>
139#include <arm/bootconfig.h> 139#include <arm/bootconfig.h>
140#include <arm/arm32/machdep.h> 140#include <arm/arm32/machdep.h>
141 141
142#include "ksyms.h" 142#include "ksyms.h"
143 143
144struct bootmem_info bootmem_info; 144struct bootmem_info bootmem_info;
145 145
146paddr_t msgbufphys; 146paddr_t msgbufphys;
147paddr_t physical_start; 147paddr_t physical_start;
148paddr_t physical_end; 148paddr_t physical_end;
149 149
150extern char etext[]; 150extern char etext[];
151extern char __data_start[], _edata[]; 151extern char __data_start[], _edata[];
152extern char __bss_start[], __bss_end__[]; 152extern char __bss_start[], __bss_end__[];
153extern char _end[]; 153extern char _end[];
154 154
155/* Page tables for mapping kernel VM */ 155/* Page tables for mapping kernel VM */
156#define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */ 156#define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */
157 157
158/* 158/*
159 * Macros to translate between physical and virtual for a subset of the 159 * Macros to translate between physical and virtual for a subset of the
160 * kernel address space. *Not* for general use. 160 * kernel address space. *Not* for general use.
161 */ 161 */
162#define KERN_VTOPHYS(bmi, va) \ 162#define KERN_VTOPHYS(bmi, va) \
163 ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start)) 163 ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + (bmi)->bmi_start))
164#define KERN_PHYSTOV(bmi, pa) \ 164#define KERN_PHYSTOV(bmi, pa) \
165 ((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE)) 165 ((vaddr_t)((paddr_t)(pa) - (bmi)->bmi_start + KERNEL_BASE))
166 166
167void 167void
168arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart) 168arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
169{ 169{
170 struct bootmem_info * const bmi = &bootmem_info; 170 struct bootmem_info * const bmi = &bootmem_info;
171 pv_addr_t *pv = bmi->bmi_freeblocks; 171 pv_addr_t *pv = bmi->bmi_freeblocks;
172 172
173#ifdef VERBOSE_INIT_ARM 173#ifdef VERBOSE_INIT_ARM
174 printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n", 174 printf("%s: memstart=%#lx, memsize=%#lx, kernelstart=%#lx\n",
175 __func__, memstart, memsize, kernelstart); 175 __func__, memstart, memsize, kernelstart);
176#endif 176#endif
177 177
178 physical_start = bmi->bmi_start = memstart; 178 physical_start = bmi->bmi_start = memstart;
179 physical_end = bmi->bmi_end = memstart + memsize; 179 physical_end = bmi->bmi_end = memstart + memsize;
180 physmem = memsize / PAGE_SIZE; 180 physmem = memsize / PAGE_SIZE;
181 181
182 /* 182 /*
183 * Let's record where the kernel lives. 183 * Let's record where the kernel lives.
184 */ 184 */
185 bmi->bmi_kernelstart = kernelstart; 185 bmi->bmi_kernelstart = kernelstart;
186 bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end)); 186 bmi->bmi_kernelend = KERN_VTOPHYS(bmi, round_page((vaddr_t)_end));
187 187
188#ifdef VERBOSE_INIT_ARM 188#ifdef VERBOSE_INIT_ARM
189 printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend); 189 printf("%s: kernelend=%#lx\n", __func__, bmi->bmi_kernelend);
190#endif 190#endif
191 191
192 /* 192 /*
193 * Now the rest of the free memory must be after the kernel. 193 * Now the rest of the free memory must be after the kernel.
194 */ 194 */
195 pv->pv_pa = bmi->bmi_kernelend; 195 pv->pv_pa = bmi->bmi_kernelend;
196 pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa); 196 pv->pv_va = KERN_PHYSTOV(bmi, pv->pv_pa);
197 pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend; 197 pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
198 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 198 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
199#ifdef VERBOSE_INIT_ARM 199#ifdef VERBOSE_INIT_ARM
200 printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 200 printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
201 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 201 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
202 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 202 pv->pv_pa + pv->pv_size - 1, pv->pv_va);
203#endif 203#endif
204 pv++; 204 pv++;
205 205
206 /* 206 /*
207 * Add a free block for any memory before the kernel. 207 * Add a free block for any memory before the kernel.
208 */ 208 */
209 if (bmi->bmi_start < bmi->bmi_kernelstart) { 209 if (bmi->bmi_start < bmi->bmi_kernelstart) {
210 pv->pv_pa = bmi->bmi_start; 210 pv->pv_pa = bmi->bmi_start;
211 pv->pv_va = KERNEL_BASE; 211 pv->pv_va = KERNEL_BASE;
212 pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start; 212 pv->pv_size = bmi->bmi_kernelstart - bmi->bmi_start;
213 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 213 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
214#ifdef VERBOSE_INIT_ARM 214#ifdef VERBOSE_INIT_ARM
215 printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 215 printf("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
216 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 216 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
217 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 217 pv->pv_pa + pv->pv_size - 1, pv->pv_va);
218#endif 218#endif
219 pv++; 219 pv++;
220 } 220 }
221 221
222 bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks; 222 bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;
223  223
224 SLIST_INIT(&bmi->bmi_freechunks); 224 SLIST_INIT(&bmi->bmi_freechunks);
225 SLIST_INIT(&bmi->bmi_chunks); 225 SLIST_INIT(&bmi->bmi_chunks);
226} 226}
227 227
228static bool 228static bool
229concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv) 229concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
230{ 230{
231 if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa 231 if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
232 && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va 232 && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
233 && acc_pv->pv_prot == pv->pv_prot 233 && acc_pv->pv_prot == pv->pv_prot
234 && acc_pv->pv_cache == pv->pv_cache) { 234 && acc_pv->pv_cache == pv->pv_cache) {
235#ifdef VERBOSE_INIT_ARMX 235#ifdef VERBOSE_INIT_ARMX
236 printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n", 236 printf("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
237 __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size + 1, 237 __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size + 1,
238 acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size + 1); 238 acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size + 1);
239#endif 239#endif
240 acc_pv->pv_size += pv->pv_size; 240 acc_pv->pv_size += pv->pv_size;
241 return true; 241 return true;
242 } 242 }
243 243
244 return false; 244 return false;
245} 245}
246 246
247static void 247static void
248add_pages(struct bootmem_info *bmi, pv_addr_t *pv) 248add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
249{ 249{
250 pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks); 250 pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
251 while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) { 251 while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
252 pv_addr_t * const pv0 = (*pvp); 252 pv_addr_t * const pv0 = (*pvp);
253 KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa); 253 KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
254 if (concat_pvaddr(pv0, pv)) { 254 if (concat_pvaddr(pv0, pv)) {
255#ifdef VERBOSE_INIT_ARM 255#ifdef VERBOSE_INIT_ARM
256 printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 256 printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
257 __func__, "appending", pv, 257 __func__, "appending", pv,
258 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 258 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
259 pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 259 pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
260#endif 260#endif
261 pv = SLIST_NEXT(pv0, pv_list); 261 pv = SLIST_NEXT(pv0, pv_list);
262 if (pv != NULL && concat_pvaddr(pv0, pv)) { 262 if (pv != NULL && concat_pvaddr(pv0, pv)) {
263#ifdef VERBOSE_INIT_ARM 263#ifdef VERBOSE_INIT_ARM
264 printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 264 printf("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
265 __func__, "merging", pv, 265 __func__, "merging", pv,
266 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 266 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
267 pv0->pv_pa, 267 pv0->pv_pa,
268 pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 268 pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
269#endif 269#endif
270 SLIST_REMOVE_AFTER(pv0, pv_list); 270 SLIST_REMOVE_AFTER(pv0, pv_list);
271 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list); 271 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
272 } 272 }
273 return; 273 return;
274 } 274 }
275 KASSERT(pv->pv_va != (*pvp)->pv_va); 275 KASSERT(pv->pv_va != (*pvp)->pv_va);
276 pvp = &SLIST_NEXT(*pvp, pv_list); 276 pvp = &SLIST_NEXT(*pvp, pv_list);
277 } 277 }
278 KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va); 278 KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
279 pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks); 279 pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
280 KASSERT(new_pv != NULL); 280 KASSERT(new_pv != NULL);
281 SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list); 281 SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
282 *new_pv = *pv; 282 *new_pv = *pv;
283 SLIST_NEXT(new_pv, pv_list) = *pvp; 283 SLIST_NEXT(new_pv, pv_list) = *pvp;
284 (*pvp) = new_pv; 284 (*pvp) = new_pv;
285#ifdef VERBOSE_INIT_ARM 285#ifdef VERBOSE_INIT_ARM
286 printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ", 286 printf("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
287 __func__, new_pv, new_pv->pv_pa, new_pv->pv_va, 287 __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
288 new_pv->pv_size / PAGE_SIZE); 288 new_pv->pv_size / PAGE_SIZE);
289 if (SLIST_NEXT(new_pv, pv_list)) 289 if (SLIST_NEXT(new_pv, pv_list))
290 printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa); 290 printf("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
291 else 291 else
292 printf("at tail\n"); 292 printf("at tail\n");
293#endif 293#endif
294} 294}
295 295
296static void 296static void
297valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages, 297valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
298 int prot, int cache, bool zero_p) 298 int prot, int cache, bool zero_p)
299{ 299{
300 size_t nbytes = npages * PAGE_SIZE; 300 size_t nbytes = npages * PAGE_SIZE;
301 pv_addr_t *free_pv = bmi->bmi_freeblocks; 301 pv_addr_t *free_pv = bmi->bmi_freeblocks;
302 size_t free_idx = 0; 302 size_t free_idx = 0;
303 static bool l1pt_found; 303 static bool l1pt_found;
304 304
305 /* 305 /*
306 * If we haven't allocated the kernel L1 page table and we are aligned 306 * If we haven't allocated the kernel L1 page table and we are aligned
307 * at a L1 table boundary, alloc the memory for it. 307 * at a L1 table boundary, alloc the memory for it.
308 */ 308 */
309 if (!l1pt_found 309 if (!l1pt_found
310 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0 310 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
311 && free_pv->pv_size >= L1_TABLE_SIZE) { 311 && free_pv->pv_size >= L1_TABLE_SIZE) {
312 l1pt_found = true; 312 l1pt_found = true;
313 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE, 313 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
314 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 314 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
315 add_pages(bmi, &kernel_l1pt); 315 add_pages(bmi, &kernel_l1pt);
316 } 316 }
317 317
318 while (nbytes > free_pv->pv_size) { 318 while (nbytes > free_pv->pv_size) {
319 free_pv++; 319 free_pv++;
320 free_idx++; 320 free_idx++;
321 if (free_idx == bmi->bmi_nfreeblocks) { 321 if (free_idx == bmi->bmi_nfreeblocks) {
322 panic("%s: could not allocate %zu bytes", 322 panic("%s: could not allocate %zu bytes",
323 __func__, nbytes); 323 __func__, nbytes);
324 } 324 }
325 } 325 }
326 326
327 /* 327 /*
328 * As we allocate the memory, make sure that we don't walk over 328 * As we allocate the memory, make sure that we don't walk over
329 * our current first level translation table. 329 * our current first level translation table.
330 */ 330 */
331 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa); 331 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);
332 332
333 pv->pv_pa = free_pv->pv_pa; 333 pv->pv_pa = free_pv->pv_pa;
334 pv->pv_va = free_pv->pv_va; 334 pv->pv_va = free_pv->pv_va;
335 pv->pv_size = nbytes; 335 pv->pv_size = nbytes;
336 pv->pv_prot = prot; 336 pv->pv_prot = prot;
337 pv->pv_cache = cache; 337 pv->pv_cache = cache;
338 338
339 /* 339 /*
340 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE 340 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
341 * just use PTE_CACHE. 341 * just use PTE_CACHE.
342 */ 342 */
343 if (cache == PTE_PAGETABLE 343 if (cache == PTE_PAGETABLE
344 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt 344 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
345 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt 345 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
346 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt) 346 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
347 pv->pv_cache = PTE_CACHE; 347 pv->pv_cache = PTE_CACHE;
348 348
349 free_pv->pv_pa += nbytes; 349 free_pv->pv_pa += nbytes;
350 free_pv->pv_va += nbytes; 350 free_pv->pv_va += nbytes;
351 free_pv->pv_size -= nbytes; 351 free_pv->pv_size -= nbytes;
352 if (free_pv->pv_size == 0) { 352 if (free_pv->pv_size == 0) {
353 --bmi->bmi_nfreeblocks; 353 --bmi->bmi_nfreeblocks;
354 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) { 354 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
355 free_pv[0] = free_pv[1]; 355 free_pv[0] = free_pv[1];
356 } 356 }
357 } 357 }
358 358
359 bmi->bmi_freepages -= npages; 359 bmi->bmi_freepages -= npages;
360 360
361 if (zero_p) 361 if (zero_p)
362 memset((void *)pv->pv_va, 0, nbytes); 362 memset((void *)pv->pv_va, 0, nbytes);
363} 363}
364 364
365void 365void
366arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase, 366arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
367 const struct pmap_devmap *devmap, bool mapallmem_p) 367 const struct pmap_devmap *devmap, bool mapallmem_p)
368{ 368{
369 struct bootmem_info * const bmi = &bootmem_info; 369 struct bootmem_info * const bmi = &bootmem_info;
370#ifdef MULTIPROCESSOR 370#ifdef MULTIPROCESSOR
371 const size_t cpu_num = arm_cpu_max + 1; 371 const size_t cpu_num = arm_cpu_max + 1;
372#else 372#else
373 const size_t cpu_num = 1; 373 const size_t cpu_num = 1;
374#endif 374#endif
375#if defined(CPU_ARMV7) || defined(CPU_ARM11) 375#ifdef ARM_HAS_VBAR
 376 const bool map_vectors_p = false;
 377#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
376 const bool map_vectors_p = vectors == ARM_VECTORS_LOW 378 const bool map_vectors_p = vectors == ARM_VECTORS_LOW
377 && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK); 379 && !(armreg_pfr1_read() & ARM_PFR1_SEC_MASK);
378#else 380#else
379 const bool map_vectors_p = true; 381 const bool map_vectors_p = true;
380#endif 382#endif
381 383
382#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 384#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
383 KASSERT(mapallmem_p); 385 KASSERT(mapallmem_p);
384#endif 386#endif
385 387
386 /* 388 /*
387 * Calculate the number of L2 pages needed for mapping the 389 * Calculate the number of L2 pages needed for mapping the
388 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors, 390 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors,
389 * and 1 for IO 391 * and 1 for IO
390 */ 392 */
391 size_t kernel_size = bmi->bmi_kernelend; 393 size_t kernel_size = bmi->bmi_kernelend;
392 kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE); 394 kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
393 kernel_size += L1_TABLE_SIZE; 395 kernel_size += L1_TABLE_SIZE;
394 kernel_size += L2_TABLE_SIZE * (2 + 1 + KERNEL_L2PT_VMDATA_NUM + 1); 396 kernel_size += L2_TABLE_SIZE * (2 + 1 + KERNEL_L2PT_VMDATA_NUM + 1);
395 kernel_size += 397 kernel_size +=
396 cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE 398 cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
397 + UND_STACK_SIZE + UPAGES) * PAGE_SIZE; 399 + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
398 kernel_size += round_page(MSGBUFSIZE); 400 kernel_size += round_page(MSGBUFSIZE);
399 kernel_size += 0x10000; /* slop */ 401 kernel_size += 0x10000; /* slop */
400 kernel_size += PAGE_SIZE * (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE; 402 kernel_size += PAGE_SIZE * (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
401 kernel_size = round_page(kernel_size); 403 kernel_size = round_page(kernel_size);
402 404
403 /* 405 /*
404 * Now we know how many L2 pages it will take. 406 * Now we know how many L2 pages it will take.
405 */ 407 */
406 const size_t KERNEL_L2PT_KERNEL_NUM = 408 const size_t KERNEL_L2PT_KERNEL_NUM =
407 (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE; 409 (kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
408 410
409#ifdef VERBOSE_INIT_ARM 411#ifdef VERBOSE_INIT_ARM
410 printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n", 412 printf("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
411 __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size); 413 __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
412#endif 414#endif
413 415
414 KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts)); 416 KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
415 pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts; 417 pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
416 pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM; 418 pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
417 pv_addr_t msgbuf; 419 pv_addr_t msgbuf;
418 pv_addr_t text; 420 pv_addr_t text;
419 pv_addr_t data; 421 pv_addr_t data;
420 pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM+KERNEL_L2PT_VMDATA_NUM+11]; 422 pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM+KERNEL_L2PT_VMDATA_NUM+11];
421#if ARM_MMU_XSCALE == 1 423#if ARM_MMU_XSCALE == 1
422 pv_addr_t minidataclean; 424 pv_addr_t minidataclean;
423#endif 425#endif
424 426
425 /* 427 /*
426 * We need to allocate some fixed page tables to get the kernel going. 428 * We need to allocate some fixed page tables to get the kernel going.
427 * 429 *
428 * We are going to allocate our bootstrap pages from the beginning of 430 * We are going to allocate our bootstrap pages from the beginning of
429 * the free space that we just calculated. We allocate one page 431 * the free space that we just calculated. We allocate one page
430 * directory and a number of page tables and store the physical 432 * directory and a number of page tables and store the physical
431 * addresses in the bmi_l2pts array in bootmem_info. 433 * addresses in the bmi_l2pts array in bootmem_info.
432 * 434 *
433 * The kernel page directory must be on a 16K boundary. The page 435 * The kernel page directory must be on a 16K boundary. The page
434 * tables must be on 4K boundaries. What we do is allocate the 436 * tables must be on 4K boundaries. What we do is allocate the
435 * page directory on the first 16K boundary that we encounter, and 437 * page directory on the first 16K boundary that we encounter, and
436 * the page tables on 4K boundaries otherwise. Since we allocate 438 * the page tables on 4K boundaries otherwise. Since we allocate
437 * at least 3 L2 page tables, we are guaranteed to encounter at 439 * at least 3 L2 page tables, we are guaranteed to encounter at
438 * least one 16K aligned region. 440 * least one 16K aligned region.
439 */ 441 */
440 442
441#ifdef VERBOSE_INIT_ARM 443#ifdef VERBOSE_INIT_ARM
442 printf("%s: allocating page tables for", __func__); 444 printf("%s: allocating page tables for", __func__);
443#endif 445#endif
444 for (size_t i = 0; i < __arraycount(chunks); i++) { 446 for (size_t i = 0; i < __arraycount(chunks); i++) {
445 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list); 447 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
446 } 448 }
447 449
448 kernel_l1pt.pv_pa = 0; 450 kernel_l1pt.pv_pa = 0;
449 kernel_l1pt.pv_va = 0; 451 kernel_l1pt.pv_va = 0;
450 452
451 /* 453 /*
452 * Allocate the L2 pages, but if we get to a page that is aligned for 454 * Allocate the L2 pages, but if we get to a page that is aligned for
453 * an L1 page table, we will allocate the pages for it first and then 455 * an L1 page table, we will allocate the pages for it first and then
454 * allocate the L2 page. 456 * allocate the L2 page.
455 */ 457 */
456 458
457 if (map_vectors_p) { 459 if (map_vectors_p) {
458 /* 460 /*
459 * First allocate L2 page for the vectors. 461 * First allocate L2 page for the vectors.
460 */ 462 */
461#ifdef VERBOSE_INIT_ARM 463#ifdef VERBOSE_INIT_ARM
462 printf(" vector"); 464 printf(" vector");
463#endif 465#endif
464 valloc_pages(bmi, &bmi->bmi_vector_l2pt, 466 valloc_pages(bmi, &bmi->bmi_vector_l2pt,
465 L2_TABLE_SIZE / PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, 467 L2_TABLE_SIZE / PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE,
466 PTE_PAGETABLE, true); 468 PTE_PAGETABLE, true);
467 add_pages(bmi, &bmi->bmi_vector_l2pt); 469 add_pages(bmi, &bmi->bmi_vector_l2pt);
468 } 470 }
469 471
470 /* 472 /*
471 * Now allocate L2 pages for the kernel 473 * Now allocate L2 pages for the kernel
472 */ 474 */
473#ifdef VERBOSE_INIT_ARM 475#ifdef VERBOSE_INIT_ARM
474 printf(" kernel"); 476 printf(" kernel");
475#endif 477#endif
476 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) { 478 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
477 valloc_pages(bmi, &kernel_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE, 479 valloc_pages(bmi, &kernel_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
478 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 480 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
479 add_pages(bmi, &kernel_l2pt[idx]); 481 add_pages(bmi, &kernel_l2pt[idx]);
480 } 482 }
481 483
482 /* 484 /*
483 * Now allocate L2 pages for the initial kernel VA space. 485 * Now allocate L2 pages for the initial kernel VA space.
484 */ 486 */
485#ifdef VERBOSE_INIT_ARM 487#ifdef VERBOSE_INIT_ARM
486 printf(" vm"); 488 printf(" vm");
487#endif 489#endif
488 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) { 490 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
489 valloc_pages(bmi, &vmdata_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE, 491 valloc_pages(bmi, &vmdata_l2pt[idx], L2_TABLE_SIZE / PAGE_SIZE,
490 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 492 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
491 add_pages(bmi, &vmdata_l2pt[idx]); 493 add_pages(bmi, &vmdata_l2pt[idx]);
492 } 494 }
493 495
494 /* 496 /*
 495 * If someone wanted an L2 page for I/O, allocate it now. 497 * If someone wanted an L2 page for I/O, allocate it now.
496 */ 498 */
497 if (iovbase != 0) { 499 if (iovbase != 0) {
498#ifdef VERBOSE_INIT_ARM 500#ifdef VERBOSE_INIT_ARM
499 printf(" io"); 501 printf(" io");
500#endif 502#endif
501 valloc_pages(bmi, &bmi->bmi_io_l2pt, L2_TABLE_SIZE / PAGE_SIZE, 503 valloc_pages(bmi, &bmi->bmi_io_l2pt, L2_TABLE_SIZE / PAGE_SIZE,
502 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 504 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
503 add_pages(bmi, &bmi->bmi_io_l2pt); 505 add_pages(bmi, &bmi->bmi_io_l2pt);
504 } 506 }
505 507
 506#ifdef VERBOSE_INIT_ARM 508#ifdef VERBOSE_INIT_ARM
507 printf("%s: allocating stacks\n", __func__); 509 printf("%s: allocating stacks\n", __func__);
508#endif 510#endif
509 511
510 /* Allocate stacks for all modes and CPUs */ 512 /* Allocate stacks for all modes and CPUs */
511 valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num, 513 valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
512 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 514 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
513 add_pages(bmi, &abtstack); 515 add_pages(bmi, &abtstack);
514 valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num, 516 valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
515 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 517 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
516 add_pages(bmi, &fiqstack); 518 add_pages(bmi, &fiqstack);
517 valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num, 519 valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
518 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 520 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
519 add_pages(bmi, &irqstack); 521 add_pages(bmi, &irqstack);
520 valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num, 522 valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
521 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 523 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
522 add_pages(bmi, &undstack); 524 add_pages(bmi, &undstack);
523 valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */ 525 valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */
524 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 526 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
525 add_pages(bmi, &idlestack); 527 add_pages(bmi, &idlestack);
526 valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */ 528 valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */
527 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 529 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
528 add_pages(bmi, &kernelstack); 530 add_pages(bmi, &kernelstack);
529 531
530 /* Allocate the message buffer from the end of memory. */ 532 /* Allocate the message buffer from the end of memory. */
531 const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE; 533 const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
532 valloc_pages(bmi, &msgbuf, msgbuf_pgs, 534 valloc_pages(bmi, &msgbuf, msgbuf_pgs,
533 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false); 535 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
534 add_pages(bmi, &msgbuf); 536 add_pages(bmi, &msgbuf);
535 msgbufphys = msgbuf.pv_pa; 537 msgbufphys = msgbuf.pv_pa;
536 538
537 if (map_vectors_p) { 539 if (map_vectors_p) {
538 /* 540 /*
539 * Allocate a page for the system vector page. 541 * Allocate a page for the system vector page.
540 * This page will just contain the system vectors and can be 542 * This page will just contain the system vectors and can be
541 * shared by all processes. 543 * shared by all processes.
542 */ 544 */
543 valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE, 545 valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE,
544 PTE_CACHE, true); 546 PTE_CACHE, true);
545 } 547 }
546 systempage.pv_va = vectors; 548 systempage.pv_va = vectors;
547 549
548 /* 550 /*
549 * If the caller needed a few extra pages for some reason, allocate 551 * If the caller needed a few extra pages for some reason, allocate
550 * them now. 552 * them now.
551 */ 553 */
552#if ARM_MMU_XSCALE == 1 554#if ARM_MMU_XSCALE == 1
553#if (ARM_NMMUS > 1) 555#if (ARM_NMMUS > 1)
554 if (xscale_use_minidata) 556 if (xscale_use_minidata)
555#endif  557#endif
556 valloc_pages(bmi, extrapv, nextrapages, 558 valloc_pages(bmi, extrapv, nextrapages,
557 VM_PROT_READ|VM_PROT_WRITE, 0, true); 559 VM_PROT_READ|VM_PROT_WRITE, 0, true);
558#endif 560#endif
559 561
560 /* 562 /*
 561 * OK, we have allocated physical pages for the primary kernel 563 * OK, we have allocated physical pages for the primary kernel
562 * page tables and stacks. Let's just confirm that. 564 * page tables and stacks. Let's just confirm that.
563 */ 565 */
564 if (kernel_l1pt.pv_va == 0 566 if (kernel_l1pt.pv_va == 0
565 && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)) 567 && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
566 panic("%s: Failed to allocate or align the kernel " 568 panic("%s: Failed to allocate or align the kernel "
567 "page directory", __func__); 569 "page directory", __func__);
568 570
569 571
570#ifdef VERBOSE_INIT_ARM 572#ifdef VERBOSE_INIT_ARM
571 printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa); 573 printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
572#endif 574#endif
573 575
574 /* 576 /*
575 * Now we start construction of the L1 page table 577 * Now we start construction of the L1 page table
576 * We start by mapping the L2 page tables into the L1. 578 * We start by mapping the L2 page tables into the L1.
577 * This means that we can replace L1 mappings later on if necessary 579 * This means that we can replace L1 mappings later on if necessary
578 */ 580 */
579 vaddr_t l1pt_va = kernel_l1pt.pv_va; 581 vaddr_t l1pt_va = kernel_l1pt.pv_va;
580 paddr_t l1pt_pa = kernel_l1pt.pv_pa; 582 paddr_t l1pt_pa = kernel_l1pt.pv_pa;
581 583
582 if (map_vectors_p) { 584 if (map_vectors_p) {
583 /* Map the L2 pages tables in the L1 page table */ 585 /* Map the L2 pages tables in the L1 page table */
584 pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE, 586 pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
585 &bmi->bmi_vector_l2pt); 587 &bmi->bmi_vector_l2pt);
586#ifdef VERBOSE_INIT_ARM 588#ifdef VERBOSE_INIT_ARM
587 printf("%s: adding L2 pt (VA %#lx, PA %#lx) " 589 printf("%s: adding L2 pt (VA %#lx, PA %#lx) "
588 "for VA %#lx\n (vectors)", 590 "for VA %#lx\n (vectors)",
589 __func__, bmi->bmi_vector_l2pt.pv_va, 591 __func__, bmi->bmi_vector_l2pt.pv_va,
590 bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va); 592 bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
591#endif 593#endif
592 } 594 }
593 595
594 const vaddr_t kernel_base = 596 const vaddr_t kernel_base =
595 KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE); 597 KERN_PHYSTOV(bmi, bmi->bmi_kernelstart & -L2_S_SEGSIZE);
596 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) { 598 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
597 pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE, 599 pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
598 &kernel_l2pt[idx]); 600 &kernel_l2pt[idx]);
599#ifdef VERBOSE_INIT_ARM 601#ifdef VERBOSE_INIT_ARM
600 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n", 602 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
601 __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa, 603 __func__, kernel_l2pt[idx].pv_va, kernel_l2pt[idx].pv_pa,
602 kernel_base + idx * L2_S_SEGSIZE); 604 kernel_base + idx * L2_S_SEGSIZE);
603#endif 605#endif
604 } 606 }
605 607
606 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) { 608 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
607 pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE, 609 pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
608 &vmdata_l2pt[idx]); 610 &vmdata_l2pt[idx]);
609#ifdef VERBOSE_INIT_ARM 611#ifdef VERBOSE_INIT_ARM
610 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n", 612 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
611 __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa, 613 __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
612 kernel_vm_base + idx * L2_S_SEGSIZE); 614 kernel_vm_base + idx * L2_S_SEGSIZE);
613#endif 615#endif
614 } 616 }
615 if (iovbase) { 617 if (iovbase) {
616 pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt); 618 pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
617#ifdef VERBOSE_INIT_ARM 619#ifdef VERBOSE_INIT_ARM
618 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n", 620 printf("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
619 __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa, 621 __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
620 iovbase & -L2_S_SEGSIZE); 622 iovbase & -L2_S_SEGSIZE);
621#endif 623#endif
622 } 624 }
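For orientation, the sketch below shows roughly what linking one of these L2 pages into the L1 table involves on a 32-bit ARM MMU: each L1 "coarse table" descriptor covers 1MiB and points at a 1KiB L2 table, so a 4KiB L2 page spanning L2_S_SEGSIZE (4MiB) fills four consecutive L1 slots. This is an illustration only, with simplified constants, and is not NetBSD's pmap_link_l2pt(), which also sets domain and protection bits.

	#include <stdint.h>

	#define L1_S_SHIFT	20	/* each L1 entry maps 1MiB */
	#define L1_TYPE_C	0x01	/* "coarse L2 table" descriptor type */

	/* va is assumed 4MiB-aligned, as the callers ensure with & -L2_S_SEGSIZE */
	static void
	link_l2pt(uint32_t *l1, uint32_t va, uint32_t l2_pa)
	{
		uint32_t slot = va >> L1_S_SHIFT;

		for (uint32_t i = 0; i < 4; i++) {
			/* each 1KiB quarter of the L2 page maps one 1MiB section */
			l1[slot + i] = (l2_pa + i * 0x400u) | L1_TYPE_C;
		}
	}

	int
	main(void)
	{
		static uint32_t l1[4096];	/* a 16KiB L1 table, zero-filled */

		link_l2pt(l1, 0xc0000000u, 0x80004000u);
		return l1[0xc00] != 0 ? 0 : 1;
	}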
623 625
624 /* update the top of the kernel VM */ 626 /* update the top of the kernel VM */
625 pmap_curmaxkvaddr = 627 pmap_curmaxkvaddr =
626 kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE); 628 kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);
627 629
628#ifdef VERBOSE_INIT_ARM 630#ifdef VERBOSE_INIT_ARM
629 printf("Mapping kernel\n"); 631 printf("Mapping kernel\n");
630#endif 632#endif
631 633
632 extern char etext[], _end[]; 634 extern char etext[], _end[];
633 size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart; 635 size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
634 size_t textsize = KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart; 636 size_t textsize = KERN_VTOPHYS(bmi, (uintptr_t)etext) - bmi->bmi_kernelstart;
635 637
636 textsize = (textsize + PGOFSET) & ~PGOFSET; 638 textsize = (textsize + PGOFSET) & ~PGOFSET;
637 639
638 /* start at offset of kernel in RAM */ 640 /* start at offset of kernel in RAM */
639 641
640 text.pv_pa = bmi->bmi_kernelstart; 642 text.pv_pa = bmi->bmi_kernelstart;
641 text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart); 643 text.pv_va = KERN_PHYSTOV(bmi, bmi->bmi_kernelstart);
642 text.pv_size = textsize; 644 text.pv_size = textsize;
643 text.pv_prot = VM_PROT_READ|VM_PROT_WRITE; /* XXX VM_PROT_EXECUTE */ 645 text.pv_prot = VM_PROT_READ|VM_PROT_WRITE; /* XXX VM_PROT_EXECUTE */
644 text.pv_cache = PTE_CACHE; 646 text.pv_cache = PTE_CACHE;
645 647
646#ifdef VERBOSE_INIT_ARM 648#ifdef VERBOSE_INIT_ARM
647 printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n", 649 printf("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
648 __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va); 650 __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
649#endif 651#endif
650 652
651 add_pages(bmi, &text); 653 add_pages(bmi, &text);
652 654
653 data.pv_pa = text.pv_pa + textsize; 655 data.pv_pa = text.pv_pa + textsize;
654 data.pv_va = text.pv_va + textsize; 656 data.pv_va = text.pv_va + textsize;
655 data.pv_size = totalsize - textsize; 657 data.pv_size = totalsize - textsize;
656 data.pv_prot = VM_PROT_READ|VM_PROT_WRITE; 658 data.pv_prot = VM_PROT_READ|VM_PROT_WRITE;
657 data.pv_cache = PTE_CACHE; 659 data.pv_cache = PTE_CACHE;
658 660
659#ifdef VERBOSE_INIT_ARM 661#ifdef VERBOSE_INIT_ARM
660 printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n", 662 printf("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
661 __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va); 663 __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
662#endif 664#endif
663 665
664 add_pages(bmi, &data); 666 add_pages(bmi, &data);
665 667
666#ifdef VERBOSE_INIT_ARM 668#ifdef VERBOSE_INIT_ARM
667 printf("Listing Chunks\n"); 669 printf("Listing Chunks\n");
668 { 670 {
669 pv_addr_t *pv; 671 pv_addr_t *pv;
670 SLIST_FOREACH(pv, &bmi->bmi_chunks, pv_list) { 672 SLIST_FOREACH(pv, &bmi->bmi_chunks, pv_list) {
671 printf("%s: pv %p: chunk VA %#lx..%#lx " 673 printf("%s: pv %p: chunk VA %#lx..%#lx "
672 "(PA %#lx, prot %d, cache %d)\n", 674 "(PA %#lx, prot %d, cache %d)\n",
673 __func__, pv, pv->pv_va, pv->pv_va + pv->pv_size - 1, 675 __func__, pv, pv->pv_va, pv->pv_va + pv->pv_size - 1,
674 pv->pv_pa, pv->pv_prot, pv->pv_cache); 676 pv->pv_pa, pv->pv_prot, pv->pv_cache);
675 } 677 }
676 } 678 }
677 printf("\nMapping Chunks\n"); 679 printf("\nMapping Chunks\n");
678#endif 680#endif
679 681
680 pv_addr_t cur_pv; 682 pv_addr_t cur_pv;
681 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks); 683 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
682 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) { 684 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
683 cur_pv = *pv; 685 cur_pv = *pv;
684 pv = SLIST_NEXT(pv, pv_list); 686 pv = SLIST_NEXT(pv, pv_list);
685 } else { 687 } else {
686 cur_pv.pv_va = KERNEL_BASE; 688 cur_pv.pv_va = KERNEL_BASE;
687 cur_pv.pv_pa = bmi->bmi_start; 689 cur_pv.pv_pa = bmi->bmi_start;
688 cur_pv.pv_size = pv->pv_pa - bmi->bmi_start; 690 cur_pv.pv_size = pv->pv_pa - bmi->bmi_start;
689 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 691 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
690 cur_pv.pv_cache = PTE_CACHE; 692 cur_pv.pv_cache = PTE_CACHE;
691 } 693 }
692 while (pv != NULL) { 694 while (pv != NULL) {
693 if (mapallmem_p) { 695 if (mapallmem_p) {
694 if (concat_pvaddr(&cur_pv, pv)) { 696 if (concat_pvaddr(&cur_pv, pv)) {
695 pv = SLIST_NEXT(pv, pv_list); 697 pv = SLIST_NEXT(pv, pv_list);
696 continue; 698 continue;
697 } 699 }
698 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) { 700 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
699 /* 701 /*
 700 * See if we can extend the current pv to encompass the 702 * See if we can extend the current pv to encompass the
701 * hole, and if so do it and retry the concatenation. 703 * hole, and if so do it and retry the concatenation.
702 */ 704 */
703 if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE) 705 if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
704 && cur_pv.pv_cache == PTE_CACHE) { 706 && cur_pv.pv_cache == PTE_CACHE) {
705 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 707 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
706 continue; 708 continue;
707 } 709 }
708 710
709 /* 711 /*
 710 * We couldn't, so emit the current chunk and then continue with the hole. 712 * We couldn't, so emit the current chunk and then continue with the hole.
711 */ 713 */
712#ifdef VERBOSE_INIT_ARM 714#ifdef VERBOSE_INIT_ARM
713 printf("%s: mapping chunk VA %#lx..%#lx " 715 printf("%s: mapping chunk VA %#lx..%#lx "
714 "(PA %#lx, prot %d, cache %d)\n", 716 "(PA %#lx, prot %d, cache %d)\n",
715 __func__, 717 __func__,
716 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 718 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
717 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 719 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
718#endif 720#endif
719 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 721 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
720 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 722 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
721 723
722 /* 724 /*
723 * set the current chunk to the hole and try again. 725 * set the current chunk to the hole and try again.
724 */ 726 */
725 cur_pv.pv_pa += cur_pv.pv_size; 727 cur_pv.pv_pa += cur_pv.pv_size;
726 cur_pv.pv_va += cur_pv.pv_size; 728 cur_pv.pv_va += cur_pv.pv_size;
727 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 729 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
728 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 730 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
729 cur_pv.pv_cache = PTE_CACHE; 731 cur_pv.pv_cache = PTE_CACHE;
730 continue; 732 continue;
731 } 733 }
732 } 734 }
733 735
734 /* 736 /*
 735 * The new pv didn't concatenate, so emit the current one 737 * The new pv didn't concatenate, so emit the current one
736 * and use the new pv as the current pv. 738 * and use the new pv as the current pv.
737 */ 739 */
738#ifdef VERBOSE_INIT_ARM 740#ifdef VERBOSE_INIT_ARM
739 printf("%s: mapping chunk VA %#lx..%#lx " 741 printf("%s: mapping chunk VA %#lx..%#lx "
740 "(PA %#lx, prot %d, cache %d)\n", 742 "(PA %#lx, prot %d, cache %d)\n",
741 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 743 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
742 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 744 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
743#endif 745#endif
744 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 746 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
745 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 747 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
746 cur_pv = *pv; 748 cur_pv = *pv;
747 pv = SLIST_NEXT(pv, pv_list); 749 pv = SLIST_NEXT(pv, pv_list);
748 } 750 }
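The loop above coalesces adjacent chunks before mapping them; the merge condition itself (physically and virtually contiguous, identical protection and cache attributes) is small enough to restate as a standalone sketch. The struct below is a simplified stand-in, not the kernel's pv_addr_t, and the function is not the actual concat_pvaddr().

	#include <stdbool.h>
	#include <stdint.h>

	struct chunk {
		uint32_t pa, va, size;
		int prot, cache;
	};

	/* Merge pv into acc when the two form one homogeneous region. */
	static bool
	concat_chunk(struct chunk *acc, const struct chunk *pv)
	{
		if (acc->pa + acc->size != pv->pa ||
		    acc->va + acc->size != pv->va ||
		    acc->prot != pv->prot || acc->cache != pv->cache)
			return false;
		acc->size += pv->size;		/* absorb the following chunk */
		return true;
	}

	int
	main(void)
	{
		struct chunk a = { 0x80000000u, 0xc0000000u, 0x1000, 3, 1 };
		struct chunk b = { 0x80001000u, 0xc0001000u, 0x2000, 3, 1 };

		return concat_chunk(&a, &b) && a.size == 0x3000 ? 0 : 1;
	}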
749 751
750 /* 752 /*
751 * If we are mapping all of memory, let's map the rest of memory. 753 * If we are mapping all of memory, let's map the rest of memory.
752 */ 754 */
753 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) { 755 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
754 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE) 756 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
755 && cur_pv.pv_cache == PTE_CACHE) { 757 && cur_pv.pv_cache == PTE_CACHE) {
756 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 758 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
757 } else { 759 } else {
758#ifdef VERBOSE_INIT_ARM 760#ifdef VERBOSE_INIT_ARM
759 printf("%s: mapping chunk VA %#lx..%#lx " 761 printf("%s: mapping chunk VA %#lx..%#lx "
760 "(PA %#lx, prot %d, cache %d)\n", 762 "(PA %#lx, prot %d, cache %d)\n",
761 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 763 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
762 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 764 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
763#endif 765#endif
764 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 766 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
765 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 767 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
766 cur_pv.pv_pa += cur_pv.pv_size; 768 cur_pv.pv_pa += cur_pv.pv_size;
767 cur_pv.pv_va += cur_pv.pv_size; 769 cur_pv.pv_va += cur_pv.pv_size;
768 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 770 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
769 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 771 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
770 cur_pv.pv_cache = PTE_CACHE; 772 cur_pv.pv_cache = PTE_CACHE;
771 } 773 }
772 } 774 }
773 775
774 /* 776 /*
775 * Now we map the final chunk. 777 * Now we map the final chunk.
776 */ 778 */
777#ifdef VERBOSE_INIT_ARM 779#ifdef VERBOSE_INIT_ARM
778 printf("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n", 780 printf("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
779 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 781 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
780 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 782 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
781#endif 783#endif
782 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 784 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
783 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 785 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
784 786
785 /* 787 /*
786 * Now we map the stuff that isn't directly after the kernel 788 * Now we map the stuff that isn't directly after the kernel
787 */ 789 */
788 790
789 if (map_vectors_p) { 791 if (map_vectors_p) {
790 /* Map the vector page. */ 792 /* Map the vector page. */
791 pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa, 793 pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
792 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); 794 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
793 } 795 }
794 796
795 /* Map the Mini-Data cache clean area. */  797 /* Map the Mini-Data cache clean area. */
796#if ARM_MMU_XSCALE == 1 798#if ARM_MMU_XSCALE == 1
797#if (ARM_NMMUS > 1) 799#if (ARM_NMMUS > 1)
798 if (xscale_use_minidata) 800 if (xscale_use_minidata)
799#endif  801#endif
 800 xscale_setup_minidata(l1pt_va, minidataclean.pv_va, 802 xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
801 minidataclean.pv_pa);  803 minidataclean.pv_pa);
802#endif 804#endif
803 805
804 /* 806 /*
 805 * Map integrated peripherals at the same address in the first level 807 * Map integrated peripherals at the same address in the first level
 806 * page table so that we can continue to use the console. 808 * page table so that we can continue to use the console.
807 */ 809 */
808 if (devmap) 810 if (devmap)
809 pmap_devmap_bootstrap(l1pt_va, devmap); 811 pmap_devmap_bootstrap(l1pt_va, devmap);
810 812
811#ifdef VERBOSE_INIT_ARM 813#ifdef VERBOSE_INIT_ARM
812 /* Tell the user about where all the bits and pieces live. */ 814 /* Tell the user about where all the bits and pieces live. */
813 printf("%22s Physical Virtual Num\n", " "); 815 printf("%22s Physical Virtual Num\n", " ");
814 printf("%22s Starting Ending Starting Ending Pages\n", " "); 816 printf("%22s Starting Ending Starting Ending Pages\n", " ");
815 817
816 static const char mem_fmt[] = 818 static const char mem_fmt[] =
817 "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n"; 819 "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
818 static const char mem_fmt_nov[] = 820 static const char mem_fmt_nov[] =
819 "%20s: 0x%08lx 0x%08lx %zu\n"; 821 "%20s: 0x%08lx 0x%08lx %zu\n";
820 822
821 printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1, 823 printf(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
822 KERN_PHYSTOV(bmi, bmi->bmi_start), KERN_PHYSTOV(bmi, bmi->bmi_end - 1), 824 KERN_PHYSTOV(bmi, bmi->bmi_start), KERN_PHYSTOV(bmi, bmi->bmi_end - 1),
823 physmem); 825 physmem);
824 printf(mem_fmt, "text section", 826 printf(mem_fmt, "text section",
825 text.pv_pa, text.pv_pa + text.pv_size - 1, 827 text.pv_pa, text.pv_pa + text.pv_size - 1,
826 text.pv_va, text.pv_va + text.pv_size - 1, 828 text.pv_va, text.pv_va + text.pv_size - 1,
827 (int)(text.pv_size / PAGE_SIZE)); 829 (int)(text.pv_size / PAGE_SIZE));
828 printf(mem_fmt, "data section", 830 printf(mem_fmt, "data section",
829 KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata), 831 KERN_VTOPHYS(bmi, __data_start), KERN_VTOPHYS(bmi, _edata),
830 (vaddr_t)__data_start, (vaddr_t)_edata, 832 (vaddr_t)__data_start, (vaddr_t)_edata,
831 (int)((round_page((vaddr_t)_edata) 833 (int)((round_page((vaddr_t)_edata)
832 - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE)); 834 - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
833 printf(mem_fmt, "bss section", 835 printf(mem_fmt, "bss section",
834 KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__), 836 KERN_VTOPHYS(bmi, __bss_start), KERN_VTOPHYS(bmi, __bss_end__),
835 (vaddr_t)__bss_start, (vaddr_t)__bss_end__, 837 (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
836 (int)((round_page((vaddr_t)__bss_end__) 838 (int)((round_page((vaddr_t)__bss_end__)
837 - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE)); 839 - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
838 printf(mem_fmt, "L1 page directory", 840 printf(mem_fmt, "L1 page directory",
839 kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1, 841 kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
840 kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1, 842 kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
841 L1_TABLE_SIZE / PAGE_SIZE); 843 L1_TABLE_SIZE / PAGE_SIZE);
842 printf(mem_fmt, "ABT stack (CPU 0)", 844 printf(mem_fmt, "ABT stack (CPU 0)",
843 abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 845 abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
844 abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 846 abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
845 ABT_STACK_SIZE); 847 ABT_STACK_SIZE);
846 printf(mem_fmt, "FIQ stack (CPU 0)", 848 printf(mem_fmt, "FIQ stack (CPU 0)",
847 fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 849 fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
848 fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 850 fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
849 FIQ_STACK_SIZE); 851 FIQ_STACK_SIZE);
850 printf(mem_fmt, "IRQ stack (CPU 0)", 852 printf(mem_fmt, "IRQ stack (CPU 0)",
851 irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 853 irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
852 irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 854 irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
853 IRQ_STACK_SIZE); 855 IRQ_STACK_SIZE);
854 printf(mem_fmt, "UND stack (CPU 0)", 856 printf(mem_fmt, "UND stack (CPU 0)",
855 undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1, 857 undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
856 undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1, 858 undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
857 UND_STACK_SIZE); 859 UND_STACK_SIZE);
858 printf(mem_fmt, "IDLE stack (CPU 0)", 860 printf(mem_fmt, "IDLE stack (CPU 0)",
859 idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 861 idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
860 idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1, 862 idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
861 UPAGES); 863 UPAGES);
862 printf(mem_fmt, "SVC stack", 864 printf(mem_fmt, "SVC stack",
863 kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 865 kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
864 kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1, 866 kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
865 UPAGES); 867 UPAGES);
866 printf(mem_fmt, "Message Buffer", 868 printf(mem_fmt, "Message Buffer",
867 msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1, 869 msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
868 msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1, 870 msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
869 (int)msgbuf_pgs); 871 (int)msgbuf_pgs);
870 if (map_vectors_p) { 872 if (map_vectors_p) {
871 printf(mem_fmt, "Exception Vectors", 873 printf(mem_fmt, "Exception Vectors",
872 systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1, 874 systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
873 systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1, 875 systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
874 1); 876 1);
875 } 877 }
876 for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) { 878 for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
877 pv = &bmi->bmi_freeblocks[i]; 879 pv = &bmi->bmi_freeblocks[i];
878 880
879 printf(mem_fmt_nov, "Free Memory", 881 printf(mem_fmt_nov, "Free Memory",
880 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 882 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
881 pv->pv_size / PAGE_SIZE); 883 pv->pv_size / PAGE_SIZE);
882 } 884 }
883#endif 885#endif
884 /* 886 /*
885 * Now we have the real page tables in place so we can switch to them. 887 * Now we have the real page tables in place so we can switch to them.
886 * Once this is done we will be running with the REAL kernel page 888 * Once this is done we will be running with the REAL kernel page
887 * tables. 889 * tables.
888 */ 890 */
889 891
890#if defined(VERBOSE_INIT_ARM) && 0 892#if defined(VERBOSE_INIT_ARM) && 0
891 printf("TTBR0=%#x", armreg_ttbr_read()); 893 printf("TTBR0=%#x", armreg_ttbr_read());
892#ifdef _ARM_ARCH_6 894#ifdef _ARM_ARCH_6
893 printf(" TTBR1=%#x TTBCR=%#x", 895 printf(" TTBR1=%#x TTBCR=%#x",
894 armreg_ttbr1_read(), armreg_ttbcr_read()); 896 armreg_ttbr1_read(), armreg_ttbcr_read());
895#endif 897#endif
896 printf("\n"); 898 printf("\n");
897#endif 899#endif
898 900
899 /* Switch tables */ 901 /* Switch tables */
900#ifdef VERBOSE_INIT_ARM 902#ifdef VERBOSE_INIT_ARM
901 printf("switching to new L1 page table @%#lx...", l1pt_pa); 903 printf("switching to new L1 page table @%#lx...", l1pt_pa);
902#endif 904#endif
903 905
904 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT); 906 cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
905 cpu_idcache_wbinv_all(); 907 cpu_idcache_wbinv_all();
906#ifdef ARM_MMU_EXTENDED 908#ifdef ARM_MMU_EXTENDED
907 cpu_setttb(l1pt_pa, KERNEL_PID); 909 cpu_setttb(l1pt_pa, KERNEL_PID);
908#else 910#else
909 cpu_setttb(l1pt_pa, true); 911 cpu_setttb(l1pt_pa, true);
910#endif 912#endif
911 cpu_tlb_flushID(); 913 cpu_tlb_flushID();
912 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)); 914 cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
913 915
914#ifdef VERBOSE_INIT_ARM 916#ifdef VERBOSE_INIT_ARM
915 printf("TTBR0=%#x OK\n", armreg_ttbr_read()); 917 printf("TTBR0=%#x OK\n", armreg_ttbr_read());
916#endif 918#endif
917} 919}
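Throughout the function above, KERN_PHYSTOV()/KERN_VTOPHYS() convert between physical and virtual addresses using the fixed offset between the start of RAM and KERNEL_BASE that the bootstrap mapping establishes. A minimal sketch of that relationship, with a simplified stand-in for the bootmem_info structure and macro names:

	#include <stdint.h>

	struct bootmem {
		uint32_t start;		/* physical start of RAM */
		uint32_t kernel_base;	/* KERNEL_BASE: VA where RAM is mapped */
	};

	static inline uint32_t
	phystov(const struct bootmem *bmi, uint32_t pa)
	{
		return pa - bmi->start + bmi->kernel_base;
	}

	static inline uint32_t
	vtophys(const struct bootmem *bmi, uint32_t va)
	{
		return va - bmi->kernel_base + bmi->start;
	}

	int
	main(void)
	{
		struct bootmem bmi = { 0x80000000u, 0xc0000000u };

		/* the two translations are inverses of each other */
		return vtophys(&bmi, phystov(&bmi, 0x80123000u)) == 0x80123000u ? 0 : 1;
	}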

cvs diff -r1.93 -r1.94 src/sys/arch/arm/arm32/arm32_machdep.c (switch to unified diff)

--- src/sys/arch/arm/arm32/arm32_machdep.c 2013/06/12 17:13:05 1.93
+++ src/sys/arch/arm/arm32/arm32_machdep.c 2013/06/12 21:34:12 1.94
@@ -1,683 +1,691 @@ @@ -1,683 +1,691 @@
1/* $NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $ */ 1/* $NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1994-1998 Mark Brinicombe. 4 * Copyright (c) 1994-1998 Mark Brinicombe.
5 * Copyright (c) 1994 Brini. 5 * Copyright (c) 1994 Brini.
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * This code is derived from software written for Brini by Mark Brinicombe 8 * This code is derived from software written for Brini by Mark Brinicombe
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software 18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement: 19 * must display the following acknowledgement:
20 * This product includes software developed by Mark Brinicombe 20 * This product includes software developed by Mark Brinicombe
21 * for the NetBSD Project. 21 * for the NetBSD Project.
22 * 4. The name of the company nor the name of the author may be used to 22 * 4. The name of the company nor the name of the author may be used to
23 * endorse or promote products derived from this software without specific 23 * endorse or promote products derived from this software without specific
24 * prior written permission. 24 * prior written permission.
25 * 25 *
26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 26 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 27 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 28 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 29 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 30 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 31 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE. 36 * SUCH DAMAGE.
37 * 37 *
38 * Machine dependent functions for kernel setup 38 * Machine dependent functions for kernel setup
39 * 39 *
40 * Created : 17/09/94 40 * Created : 17/09/94
41 * Updated : 18/04/01 updated for new wscons 41 * Updated : 18/04/01 updated for new wscons
42 */ 42 */
43 43
44#include <sys/cdefs.h> 44#include <sys/cdefs.h>
45__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.93 2013/06/12 17:13:05 matt Exp $"); 45__KERNEL_RCSID(0, "$NetBSD: arm32_machdep.c,v 1.94 2013/06/12 21:34:12 matt Exp $");
46 46
47#include "opt_modular.h" 47#include "opt_modular.h"
48#include "opt_md.h" 48#include "opt_md.h"
49#include "opt_pmap_debug.h" 49#include "opt_pmap_debug.h"
50 50
51#include <sys/param.h> 51#include <sys/param.h>
52#include <sys/systm.h> 52#include <sys/systm.h>
53#include <sys/reboot.h> 53#include <sys/reboot.h>
54#include <sys/proc.h> 54#include <sys/proc.h>
55#include <sys/kauth.h> 55#include <sys/kauth.h>
56#include <sys/kernel.h> 56#include <sys/kernel.h>
57#include <sys/mbuf.h> 57#include <sys/mbuf.h>
58#include <sys/mount.h> 58#include <sys/mount.h>
59#include <sys/buf.h> 59#include <sys/buf.h>
60#include <sys/msgbuf.h> 60#include <sys/msgbuf.h>
61#include <sys/device.h> 61#include <sys/device.h>
62#include <sys/sysctl.h> 62#include <sys/sysctl.h>
63#include <sys/cpu.h> 63#include <sys/cpu.h>
64#include <sys/intr.h> 64#include <sys/intr.h>
65#include <sys/module.h> 65#include <sys/module.h>
66#include <sys/atomic.h> 66#include <sys/atomic.h>
67#include <sys/xcall.h> 67#include <sys/xcall.h>
68 68
69#include <uvm/uvm_extern.h> 69#include <uvm/uvm_extern.h>
70 70
71#include <dev/cons.h> 71#include <dev/cons.h>
72#include <dev/mm.h> 72#include <dev/mm.h>
73 73
74#include <arm/arm32/katelib.h> 74#include <arm/arm32/katelib.h>
75#include <arm/arm32/machdep.h> 75#include <arm/arm32/machdep.h>
76 76
77#include <machine/bootconfig.h> 77#include <machine/bootconfig.h>
78#include <machine/pcb.h> 78#include <machine/pcb.h>
79 79
80void (*cpu_reset_address)(void); /* Used by locore */ 80void (*cpu_reset_address)(void); /* Used by locore */
81paddr_t cpu_reset_address_paddr; /* Used by locore */ 81paddr_t cpu_reset_address_paddr; /* Used by locore */
82 82
83struct vm_map *phys_map = NULL; 83struct vm_map *phys_map = NULL;
84 84
85#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE) 85#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
86extern size_t md_root_size; /* Memory disc size */ 86extern size_t md_root_size; /* Memory disc size */
87#endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */ 87#endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
88 88
89pv_addr_t kernelstack; 89pv_addr_t kernelstack;
90pv_addr_t abtstack; 90pv_addr_t abtstack;
91pv_addr_t fiqstack; 91pv_addr_t fiqstack;
92pv_addr_t irqstack; 92pv_addr_t irqstack;
93pv_addr_t undstack; 93pv_addr_t undstack;
94pv_addr_t idlestack; 94pv_addr_t idlestack;
95 95
96void * msgbufaddr; 96void * msgbufaddr;
97extern paddr_t msgbufphys; 97extern paddr_t msgbufphys;
98 98
99int kernel_debug = 0; 99int kernel_debug = 0;
100int cpu_fpu_present; 100int cpu_fpu_present;
101int cpu_neon_present; 101int cpu_neon_present;
102int cpu_simd_present; 102int cpu_simd_present;
103int cpu_simdex_present; 103int cpu_simdex_present;
104int cpu_umull_present; 104int cpu_umull_present;
105const char *cpu_arch = ""; 105const char *cpu_arch = "";
106 106
107int cpu_instruction_set_attributes[6]; 107int cpu_instruction_set_attributes[6];
108int cpu_memory_model_features[4]; 108int cpu_memory_model_features[4];
109int cpu_processor_features[2]; 109int cpu_processor_features[2];
110int cpu_media_and_vfp_features[2]; 110int cpu_media_and_vfp_features[2];
111 111
112/* exported variable to be filled in by the bootloaders */ 112/* exported variable to be filled in by the bootloaders */
113char *booted_kernel; 113char *booted_kernel;
114 114
115/* Prototypes */ 115/* Prototypes */
116 116
117void data_abort_handler(trapframe_t *frame); 117void data_abort_handler(trapframe_t *frame);
118void prefetch_abort_handler(trapframe_t *frame); 118void prefetch_abort_handler(trapframe_t *frame);
119extern void configure(void); 119extern void configure(void);
120 120
121/* 121/*
122 * arm32_vector_init: 122 * arm32_vector_init:
123 * 123 *
124 * Initialize the vector page, and select whether or not to 124 * Initialize the vector page, and select whether or not to
125 * relocate the vectors. 125 * relocate the vectors.
126 * 126 *
127 * NOTE: We expect the vector page to be mapped at its expected 127 * NOTE: We expect the vector page to be mapped at its expected
128 * destination. 128 * destination.
129 */ 129 */
130void 130void
131arm32_vector_init(vaddr_t va, int which) 131arm32_vector_init(vaddr_t va, int which)
132{ 132{
133#if defined(CPU_ARMV7) || defined(CPU_ARM11) 133#if defined(CPU_ARMV7) || defined(CPU_ARM11) || defined(ARM_HAS_VBAR)
134 /* 134 /*
135 * If this processor has the security extension, don't bother 135 * If this processor has the security extension, don't bother
136 * to move/map the vector page. Simply point VBAR to the copy 136 * to move/map the vector page. Simply point VBAR to the copy
137 * that exists in the .text segment. 137 * that exists in the .text segment.
138 */ 138 */
 139#ifndef ARM_HAS_VBAR
139 if (va == ARM_VECTORS_LOW 140 if (va == ARM_VECTORS_LOW
 140 && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) { 141 && (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0) {
 142#endif
141 extern const uint32_t page0rel[]; 143 extern const uint32_t page0rel[];
142 vector_page = (vaddr_t)page0rel; 144 vector_page = (vaddr_t)page0rel;
143 KASSERT((vector_page & 0x1f) == 0); 145 KASSERT((vector_page & 0x1f) == 0);
144 armreg_vbar_write(vector_page); 146 armreg_vbar_write(vector_page);
145#ifdef VERBOSE_INIT_ARM 147#ifdef VERBOSE_INIT_ARM
146 printf(" vbar=%p", page0rel); 148 printf(" vbar=%p", page0rel);
147#endif 149#endif
148 cpu_control(CPU_CONTROL_VECRELOC, 0); 150 cpu_control(CPU_CONTROL_VECRELOC, 0);
149 return; 151 return;
 152#ifndef ARM_HAS_VBAR
150 } 153 }
151#endif 154#endif
 155#endif
 156#ifndef ARM_HAS_VBAR
152 if (CPU_IS_PRIMARY(curcpu())) { 157 if (CPU_IS_PRIMARY(curcpu())) {
153 extern unsigned int page0[], page0_data[]; 158 extern unsigned int page0[], page0_data[];
154 unsigned int *vectors = (int *) va; 159 unsigned int *vectors = (int *) va;
155 unsigned int *vectors_data = vectors + (page0_data - page0); 160 unsigned int *vectors_data = vectors + (page0_data - page0);
156 int vec; 161 int vec;
157 162
158 /* 163 /*
159 * Loop through the vectors we're taking over, and copy the 164 * Loop through the vectors we're taking over, and copy the
160 * vector's insn and data word. 165 * vector's insn and data word.
161 */ 166 */
162 for (vec = 0; vec < ARM_NVEC; vec++) { 167 for (vec = 0; vec < ARM_NVEC; vec++) {
163 if ((which & (1 << vec)) == 0) { 168 if ((which & (1 << vec)) == 0) {
164 /* Don't want to take over this vector. */ 169 /* Don't want to take over this vector. */
165 continue; 170 continue;
166 } 171 }
167 vectors[vec] = page0[vec]; 172 vectors[vec] = page0[vec];
168 vectors_data[vec] = page0_data[vec]; 173 vectors_data[vec] = page0_data[vec];
169 } 174 }
170 175
171 /* Now sync the vectors. */ 176 /* Now sync the vectors. */
172 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int)); 177 cpu_icache_sync_range(va, (ARM_NVEC * 2) * sizeof(u_int));
173 178
174 vector_page = va; 179 vector_page = va;
175 } 180 }
176 181
177 if (va == ARM_VECTORS_HIGH) { 182 if (va == ARM_VECTORS_HIGH) {
178 /* 183 /*
179 * Assume the MD caller knows what it's doing here, and 184 * Assume the MD caller knows what it's doing here, and
180 * really does want the vector page relocated. 185 * really does want the vector page relocated.
181 * 186 *
182 * Note: This has to be done here (and not just in 187 * Note: This has to be done here (and not just in
183 * cpu_setup()) because the vector page needs to be 188 * cpu_setup()) because the vector page needs to be
184 * accessible *before* cpu_startup() is called. 189 * accessible *before* cpu_startup() is called.
185 * Think ddb(9) ... 190 * Think ddb(9) ...
186 * 191 *
187 * NOTE: If the CPU control register is not readable, 192 * NOTE: If the CPU control register is not readable,
188 * this will totally fail! We'll just assume that 193 * this will totally fail! We'll just assume that
189 * any system that has high vector support has a 194 * any system that has high vector support has a
190 * readable CPU control register, for now. If we 195 * readable CPU control register, for now. If we
191 * ever encounter one that does not, we'll have to 196 * ever encounter one that does not, we'll have to
192 * rethink this. 197 * rethink this.
193 */ 198 */
194 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); 199 cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC);
195 } 200 }
 201#endif
196} 202}
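The new ARM_HAS_VBAR blocks above reduce arm32_vector_init() to the VBAR path unconditionally; on CPUs without the forced option, the run-time test keys off the Security Extensions field of ID_PFR1. Below is a small sketch of that field test, using a sample register value because reading ID_PFR1 requires a privileged CP15 access (armreg_pfr1_read() in the kernel); the mask constant is an illustrative stand-in for ARM_PFR1_SEC_MASK.

	#include <stdint.h>

	#define PFR1_SEC_MASK	0x000000f0u	/* ID_PFR1[7:4]: Security Extensions */

	/* Nonzero field => Security Extensions, and therefore VBAR, present. */
	static int
	has_security_extensions(uint32_t pfr1)
	{
		return (pfr1 & PFR1_SEC_MASK) != 0;
	}

	int
	main(void)
	{
		return has_security_extensions(0x00000011u) ? 0 : 1;
	}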
197 203
198/* 204/*
199 * Debug function just to park the CPU 205 * Debug function just to park the CPU
200 */ 206 */
201 207
202void 208void
203halt(void) 209halt(void)
204{ 210{
205 while (1) 211 while (1)
206 cpu_sleep(0); 212 cpu_sleep(0);
207} 213}
208 214
209 215
210/* Sync the discs, unmount the filesystems, and adjust the todr */ 216/* Sync the discs, unmount the filesystems, and adjust the todr */
211 217
212void 218void
213bootsync(void) 219bootsync(void)
214{ 220{
215 static bool bootsyncdone = false; 221 static bool bootsyncdone = false;
216 222
217 if (bootsyncdone) return; 223 if (bootsyncdone) return;
218 224
219 bootsyncdone = true; 225 bootsyncdone = true;
220 226
221 /* Make sure we can still manage to do things */ 227 /* Make sure we can still manage to do things */
222 if (GetCPSR() & I32_bit) { 228 if (GetCPSR() & I32_bit) {
223 /* 229 /*
224 * If we get here then boot has been called without RB_NOSYNC 230 * If we get here then boot has been called without RB_NOSYNC
225 * and interrupts were disabled. This means the boot() call 231 * and interrupts were disabled. This means the boot() call
226 * did not come from a user process e.g. shutdown, but must 232 * did not come from a user process e.g. shutdown, but must
227 * have come from somewhere in the kernel. 233 * have come from somewhere in the kernel.
228 */ 234 */
229 IRQenable; 235 IRQenable;
230 printf("Warning IRQ's disabled during boot()\n"); 236 printf("Warning IRQ's disabled during boot()\n");
231 } 237 }
232 238
233 vfs_shutdown(); 239 vfs_shutdown();
234 240
235 resettodr(); 241 resettodr();
236} 242}
237 243
238/* 244/*
239 * void cpu_startup(void) 245 * void cpu_startup(void)
240 * 246 *
241 * Machine dependent startup code.  247 * Machine dependent startup code.
242 * 248 *
243 */ 249 */
244void 250void
245cpu_startup(void) 251cpu_startup(void)
246{ 252{
247 vaddr_t minaddr; 253 vaddr_t minaddr;
248 vaddr_t maxaddr; 254 vaddr_t maxaddr;
249 u_int loop; 255 u_int loop;
250 char pbuf[9]; 256 char pbuf[9];
251 257
252 /* 258 /*
 253 * Until we have better locking, we have to live under the kernel lock. 259 * Until we have better locking, we have to live under the kernel lock.
254 */ 260 */
255 //KERNEL_LOCK(1, NULL); 261 //KERNEL_LOCK(1, NULL);
256 262
257 /* Set the CPU control register */ 263 /* Set the CPU control register */
258 cpu_setup(boot_args); 264 cpu_setup(boot_args);
259 265
 266#ifndef ARM_HAS_VBAR
260 /* Lock down zero page */ 267 /* Lock down zero page */
261 vector_page_setprot(VM_PROT_READ); 268 vector_page_setprot(VM_PROT_READ);
 269#endif
262 270
263 /* 271 /*
264 * Give pmap a chance to set up a few more things now the vm 272 * Give pmap a chance to set up a few more things now the vm
265 * is initialised 273 * is initialised
266 */ 274 */
267 pmap_postinit(); 275 pmap_postinit();
268 276
269 /* 277 /*
270 * Initialize error message buffer (at end of core). 278 * Initialize error message buffer (at end of core).
271 */ 279 */
272 280
273 /* msgbufphys was setup during the secondary boot strap */ 281 /* msgbufphys was setup during the secondary boot strap */
274 for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop) 282 for (loop = 0; loop < btoc(MSGBUFSIZE); ++loop)
275 pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE, 283 pmap_kenter_pa((vaddr_t)msgbufaddr + loop * PAGE_SIZE,
276 msgbufphys + loop * PAGE_SIZE, 284 msgbufphys + loop * PAGE_SIZE,
277 VM_PROT_READ|VM_PROT_WRITE, 0); 285 VM_PROT_READ|VM_PROT_WRITE, 0);
278 pmap_update(pmap_kernel()); 286 pmap_update(pmap_kernel());
279 initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE)); 287 initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
280 288
281 /* 289 /*
282 * Identify ourselves for the msgbuf (everything printed earlier will 290 * Identify ourselves for the msgbuf (everything printed earlier will
283 * not be buffered). 291 * not be buffered).
284 */ 292 */
285 printf("%s%s", copyright, version); 293 printf("%s%s", copyright, version);
286 294
287 format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem)); 295 format_bytes(pbuf, sizeof(pbuf), arm_ptob(physmem));
288 printf("total memory = %s\n", pbuf); 296 printf("total memory = %s\n", pbuf);
289 297
290 minaddr = 0; 298 minaddr = 0;
291 299
292 /* 300 /*
293 * Allocate a submap for physio 301 * Allocate a submap for physio
294 */ 302 */
295 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 303 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
296 VM_PHYS_SIZE, 0, false, NULL); 304 VM_PHYS_SIZE, 0, false, NULL);
297 305
298 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 306 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
299 printf("avail memory = %s\n", pbuf); 307 printf("avail memory = %s\n", pbuf);
300 308
301 struct lwp * const l = &lwp0; 309 struct lwp * const l = &lwp0;
302 struct pcb * const pcb = lwp_getpcb(l); 310 struct pcb * const pcb = lwp_getpcb(l);
303 pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP; 311 pcb->pcb_ksp = uvm_lwp_getuarea(l) + USPACE_SVC_STACK_TOP;
304 lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1); 312 lwp_settrapframe(l, (struct trapframe *)pcb->pcb_ksp - 1);
305} 313}
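The last two lines of cpu_startup() place lwp0's initial trapframe immediately below the top of its kernel stack; the pointer arithmetic is worth seeing in isolation. The sizes and the trapframe layout below are made up for illustration and are not the real ARM USPACE or struct trapframe.

	#include <stdio.h>
	#include <stdint.h>

	struct trapframe {
		uint32_t tf_regs[17];		/* stand-in layout, not the real one */
	};

	int
	main(void)
	{
		static uint8_t uarea[8192];	/* pretend USPACE-sized uarea */
		uintptr_t stacktop = (uintptr_t)uarea + sizeof(uarea);

		/* "(struct trapframe *)sp - 1" reserves one frame below the top */
		struct trapframe *tf = (struct trapframe *)stacktop - 1;

		printf("stack top %p, trapframe at %p (%zu bytes reserved)\n",
		    (void *)stacktop, (void *)tf, sizeof(*tf));
		return 0;
	}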
306 314
307/* 315/*
308 * machine dependent system variables. 316 * machine dependent system variables.
309 */ 317 */
310static int 318static int
311sysctl_machdep_booted_device(SYSCTLFN_ARGS) 319sysctl_machdep_booted_device(SYSCTLFN_ARGS)
312{ 320{
313 struct sysctlnode node; 321 struct sysctlnode node;
314 322
315 if (booted_device == NULL) 323 if (booted_device == NULL)
316 return (EOPNOTSUPP); 324 return (EOPNOTSUPP);
317 325
318 node = *rnode; 326 node = *rnode;
319 node.sysctl_data = __UNCONST(device_xname(booted_device)); 327 node.sysctl_data = __UNCONST(device_xname(booted_device));
320 node.sysctl_size = strlen(device_xname(booted_device)) + 1; 328 node.sysctl_size = strlen(device_xname(booted_device)) + 1;
321 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 329 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
322} 330}
323 331
324static int 332static int
325sysctl_machdep_booted_kernel(SYSCTLFN_ARGS) 333sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
326{ 334{
327 struct sysctlnode node; 335 struct sysctlnode node;
328 336
329 if (booted_kernel == NULL || booted_kernel[0] == '\0') 337 if (booted_kernel == NULL || booted_kernel[0] == '\0')
330 return (EOPNOTSUPP); 338 return (EOPNOTSUPP);
331 339
332 node = *rnode; 340 node = *rnode;
333 node.sysctl_data = booted_kernel; 341 node.sysctl_data = booted_kernel;
334 node.sysctl_size = strlen(booted_kernel) + 1; 342 node.sysctl_size = strlen(booted_kernel) + 1;
335 return (sysctl_lookup(SYSCTLFN_CALL(&node))); 343 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
336} 344}
337 345
338static int 346static int
339sysctl_machdep_cpu_arch(SYSCTLFN_ARGS) 347sysctl_machdep_cpu_arch(SYSCTLFN_ARGS)
340{ 348{
341 struct sysctlnode node = *rnode; 349 struct sysctlnode node = *rnode;
342 node.sysctl_data = __UNCONST(cpu_arch); 350 node.sysctl_data = __UNCONST(cpu_arch);
343 node.sysctl_size = strlen(cpu_arch) + 1; 351 node.sysctl_size = strlen(cpu_arch) + 1;
344 return sysctl_lookup(SYSCTLFN_CALL(&node)); 352 return sysctl_lookup(SYSCTLFN_CALL(&node));
345} 353}
346 354
347static int 355static int
348sysctl_machdep_powersave(SYSCTLFN_ARGS) 356sysctl_machdep_powersave(SYSCTLFN_ARGS)
349{ 357{
350 struct sysctlnode node = *rnode; 358 struct sysctlnode node = *rnode;
351 int error, newval; 359 int error, newval;
352 360
353 newval = cpu_do_powersave; 361 newval = cpu_do_powersave;
354 node.sysctl_data = &newval; 362 node.sysctl_data = &newval;
355 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop) 363 if (cpufuncs.cf_sleep == (void *) cpufunc_nullop)
356 node.sysctl_flags &= ~CTLFLAG_READWRITE; 364 node.sysctl_flags &= ~CTLFLAG_READWRITE;
357 error = sysctl_lookup(SYSCTLFN_CALL(&node)); 365 error = sysctl_lookup(SYSCTLFN_CALL(&node));
358 if (error || newp == NULL || newval == cpu_do_powersave) 366 if (error || newp == NULL || newval == cpu_do_powersave)
359 return (error); 367 return (error);
360 368
361 if (newval < 0 || newval > 1) 369 if (newval < 0 || newval > 1)
362 return (EINVAL); 370 return (EINVAL);
363 cpu_do_powersave = newval; 371 cpu_do_powersave = newval;
364 372
365 return (0); 373 return (0);
366} 374}
367 375
368SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") 376SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
369{ 377{
370 378
371 sysctl_createv(clog, 0, NULL, NULL, 379 sysctl_createv(clog, 0, NULL, NULL,
372 CTLFLAG_PERMANENT, 380 CTLFLAG_PERMANENT,
373 CTLTYPE_NODE, "machdep", NULL, 381 CTLTYPE_NODE, "machdep", NULL,
374 NULL, 0, NULL, 0, 382 NULL, 0, NULL, 0,
375 CTL_MACHDEP, CTL_EOL); 383 CTL_MACHDEP, CTL_EOL);
376 384
377 sysctl_createv(clog, 0, NULL, NULL, 385 sysctl_createv(clog, 0, NULL, NULL,
378 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 386 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
379 CTLTYPE_INT, "debug", NULL, 387 CTLTYPE_INT, "debug", NULL,
380 NULL, 0, &kernel_debug, 0, 388 NULL, 0, &kernel_debug, 0,
381 CTL_MACHDEP, CPU_DEBUG, CTL_EOL); 389 CTL_MACHDEP, CPU_DEBUG, CTL_EOL);
382 sysctl_createv(clog, 0, NULL, NULL, 390 sysctl_createv(clog, 0, NULL, NULL,
383 CTLFLAG_PERMANENT, 391 CTLFLAG_PERMANENT,
384 CTLTYPE_STRING, "booted_device", NULL, 392 CTLTYPE_STRING, "booted_device", NULL,
385 sysctl_machdep_booted_device, 0, NULL, 0, 393 sysctl_machdep_booted_device, 0, NULL, 0,
386 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL); 394 CTL_MACHDEP, CPU_BOOTED_DEVICE, CTL_EOL);
387 sysctl_createv(clog, 0, NULL, NULL, 395 sysctl_createv(clog, 0, NULL, NULL,
388 CTLFLAG_PERMANENT, 396 CTLFLAG_PERMANENT,
389 CTLTYPE_STRING, "booted_kernel", NULL, 397 CTLTYPE_STRING, "booted_kernel", NULL,
390 sysctl_machdep_booted_kernel, 0, NULL, 0, 398 sysctl_machdep_booted_kernel, 0, NULL, 0,
391 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL); 399 CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
392 sysctl_createv(clog, 0, NULL, NULL, 400 sysctl_createv(clog, 0, NULL, NULL,
393 CTLFLAG_PERMANENT, 401 CTLFLAG_PERMANENT,
394 CTLTYPE_STRUCT, "console_device", NULL, 402 CTLTYPE_STRUCT, "console_device", NULL,
395 sysctl_consdev, 0, NULL, sizeof(dev_t), 403 sysctl_consdev, 0, NULL, sizeof(dev_t),
396 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL); 404 CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
397 sysctl_createv(clog, 0, NULL, NULL, 405 sysctl_createv(clog, 0, NULL, NULL,
398 CTLFLAG_PERMANENT, 406 CTLFLAG_PERMANENT,
399 CTLTYPE_STRING, "cpu_arch", NULL, 407 CTLTYPE_STRING, "cpu_arch", NULL,
400 sysctl_machdep_cpu_arch, 0, NULL, 0, 408 sysctl_machdep_cpu_arch, 0, NULL, 0,
401 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 409 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
402 sysctl_createv(clog, 0, NULL, NULL, 410 sysctl_createv(clog, 0, NULL, NULL,
403 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 411 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
404 CTLTYPE_INT, "powersave", NULL, 412 CTLTYPE_INT, "powersave", NULL,
405 sysctl_machdep_powersave, 0, &cpu_do_powersave, 0, 413 sysctl_machdep_powersave, 0, &cpu_do_powersave, 0,
406 CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL); 414 CTL_MACHDEP, CPU_POWERSAVE, CTL_EOL);
407 sysctl_createv(clog, 0, NULL, NULL, 415 sysctl_createv(clog, 0, NULL, NULL,
408 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE, 416 CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
409 CTLTYPE_INT, "cpu_id", NULL, 417 CTLTYPE_INT, "cpu_id", NULL,
410 NULL, curcpu()->ci_arm_cpuid, NULL, 0, 418 NULL, curcpu()->ci_arm_cpuid, NULL, 0,
411 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 419 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
412#ifdef FPU_VFP 420#ifdef FPU_VFP
413 sysctl_createv(clog, 0, NULL, NULL, 421 sysctl_createv(clog, 0, NULL, NULL,
414 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 422 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
415 CTLTYPE_INT, "fpu_id", NULL, 423 CTLTYPE_INT, "fpu_id", NULL,
416 NULL, 0, &cpu_info_store.ci_vfp_id, 0, 424 NULL, 0, &cpu_info_store.ci_vfp_id, 0,
417 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 425 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
418#endif 426#endif
419 sysctl_createv(clog, 0, NULL, NULL, 427 sysctl_createv(clog, 0, NULL, NULL,
420 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 428 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
421 CTLTYPE_INT, "fpu_present", NULL, 429 CTLTYPE_INT, "fpu_present", NULL,
422 NULL, 0, &cpu_fpu_present, 0, 430 NULL, 0, &cpu_fpu_present, 0,
423 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 431 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
424 sysctl_createv(clog, 0, NULL, NULL, 432 sysctl_createv(clog, 0, NULL, NULL,
425 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 433 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
426 CTLTYPE_INT, "neon_present", NULL, 434 CTLTYPE_INT, "neon_present", NULL,
427 NULL, 0, &cpu_neon_present, 0, 435 NULL, 0, &cpu_neon_present, 0,
428 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 436 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
429 sysctl_createv(clog, 0, NULL, NULL, 437 sysctl_createv(clog, 0, NULL, NULL,
430 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 438 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
431 CTLTYPE_STRUCT, "id_isar", NULL, 439 CTLTYPE_STRUCT, "id_isar", NULL,
432 NULL, 0, 440 NULL, 0,
433 cpu_instruction_set_attributes, 441 cpu_instruction_set_attributes,
434 sizeof(cpu_instruction_set_attributes), 442 sizeof(cpu_instruction_set_attributes),
435 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 443 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
436 sysctl_createv(clog, 0, NULL, NULL, 444 sysctl_createv(clog, 0, NULL, NULL,
437 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 445 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
438 CTLTYPE_STRUCT, "id_mmfr", NULL, 446 CTLTYPE_STRUCT, "id_mmfr", NULL,
439 NULL, 0, 447 NULL, 0,
440 cpu_memory_model_features, 448 cpu_memory_model_features,
441 sizeof(cpu_memory_model_features), 449 sizeof(cpu_memory_model_features),
442 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 450 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
443 sysctl_createv(clog, 0, NULL, NULL, 451 sysctl_createv(clog, 0, NULL, NULL,
444 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 452 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
445 CTLTYPE_STRUCT, "id_pfr", NULL, 453 CTLTYPE_STRUCT, "id_pfr", NULL,
446 NULL, 0, 454 NULL, 0,
447 cpu_processor_features, 455 cpu_processor_features,
448 sizeof(cpu_processor_features), 456 sizeof(cpu_processor_features),
449 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 457 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
450 sysctl_createv(clog, 0, NULL, NULL, 458 sysctl_createv(clog, 0, NULL, NULL,
451 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 459 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
452 CTLTYPE_STRUCT, "id_mvfr", NULL, 460 CTLTYPE_STRUCT, "id_mvfr", NULL,
453 NULL, 0, 461 NULL, 0,
454 cpu_media_and_vfp_features, 462 cpu_media_and_vfp_features,
455 sizeof(cpu_media_and_vfp_features), 463 sizeof(cpu_media_and_vfp_features),
456 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 464 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
457 sysctl_createv(clog, 0, NULL, NULL, 465 sysctl_createv(clog, 0, NULL, NULL,
458 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 466 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
459 CTLTYPE_INT, "simd_present", NULL, 467 CTLTYPE_INT, "simd_present", NULL,
460 NULL, 0, &cpu_simd_present, 0, 468 NULL, 0, &cpu_simd_present, 0,
461 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 469 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
462 sysctl_createv(clog, 0, NULL, NULL, 470 sysctl_createv(clog, 0, NULL, NULL,
463 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 471 CTLFLAG_PERMANENT|CTLFLAG_READONLY,
464 CTLTYPE_INT, "simdex_present", NULL, 472 CTLTYPE_INT, "simdex_present", NULL,
465 NULL, 0, &cpu_simdex_present, 0, 473 NULL, 0, &cpu_simdex_present, 0,
466 CTL_MACHDEP, CTL_CREATE, CTL_EOL); 474 CTL_MACHDEP, CTL_CREATE, CTL_EOL);
467} 475}
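
    [Editor's note: the machdep sysctl nodes created above are readable from userland
    once the kernel is up. A minimal sketch using sysctlbyname(3); the node name
    "machdep.cpu_id" is an assumption based on the "cpu_id" string passed to
    sysctl_createv() above.]

    /* Illustrative userland reader; "machdep.cpu_id" is an assumed node name. */
    #include <sys/param.h>
    #include <sys/sysctl.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint32_t cpuid;
    	size_t len = sizeof(cpuid);

    	if (sysctlbyname("machdep.cpu_id", &cpuid, &len, NULL, 0) == -1) {
    		perror("sysctlbyname");
    		return 1;
    	}
    	printf("CPU ID register: 0x%08x\n", cpuid);
    	return 0;
    }
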
468 476
469void 477void
470parse_mi_bootargs(char *args) 478parse_mi_bootargs(char *args)
471{ 479{
472 int integer; 480 int integer;
473 481
474 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer) 482 if (get_bootconf_option(args, "single", BOOTOPT_TYPE_BOOLEAN, &integer)
475 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer)) 483 || get_bootconf_option(args, "-s", BOOTOPT_TYPE_BOOLEAN, &integer))
476 if (integer) 484 if (integer)
477 boothowto |= RB_SINGLE; 485 boothowto |= RB_SINGLE;
478 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer) 486 if (get_bootconf_option(args, "kdb", BOOTOPT_TYPE_BOOLEAN, &integer)
479 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer) 487 || get_bootconf_option(args, "-k", BOOTOPT_TYPE_BOOLEAN, &integer)
480 || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer)) 488 || get_bootconf_option(args, "-d", BOOTOPT_TYPE_BOOLEAN, &integer))
481 if (integer) 489 if (integer)
482 boothowto |= RB_KDB; 490 boothowto |= RB_KDB;
483 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer) 491 if (get_bootconf_option(args, "ask", BOOTOPT_TYPE_BOOLEAN, &integer)
484 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer)) 492 || get_bootconf_option(args, "-a", BOOTOPT_TYPE_BOOLEAN, &integer))
485 if (integer) 493 if (integer)
486 boothowto |= RB_ASKNAME; 494 boothowto |= RB_ASKNAME;
487 495
488#ifdef PMAP_DEBUG 496#ifdef PMAP_DEBUG
489 if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) { 497 if (get_bootconf_option(args, "pmapdebug", BOOTOPT_TYPE_INT, &integer)) {
490 pmap_debug_level = integer; 498 pmap_debug_level = integer;
491 pmap_debug(pmap_debug_level); 499 pmap_debug(pmap_debug_level);
492 } 500 }
493#endif /* PMAP_DEBUG */ 501#endif /* PMAP_DEBUG */
494 502
495/* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer)) 503/* if (get_bootconf_option(args, "nbuf", BOOTOPT_TYPE_INT, &integer))
496 bufpages = integer;*/ 504 bufpages = integer;*/
497 505
498#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE) 506#if defined(MEMORY_DISK_HOOKS) && !defined(MEMORY_DISK_ROOT_SIZE)
499 if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer) 507 if (get_bootconf_option(args, "memorydisc", BOOTOPT_TYPE_INT, &integer)
500 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) { 508 || get_bootconf_option(args, "memorydisk", BOOTOPT_TYPE_INT, &integer)) {
501 md_root_size = integer; 509 md_root_size = integer;
502 md_root_size *= 1024; 510 md_root_size *= 1024;
503 if (md_root_size < 32*1024) 511 if (md_root_size < 32*1024)
504 md_root_size = 32*1024; 512 md_root_size = 32*1024;
505 if (md_root_size > 2048*1024) 513 if (md_root_size > 2048*1024)
506 md_root_size = 2048*1024; 514 md_root_size = 2048*1024;
507 } 515 }
508#endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */ 516#endif /* MEMORY_DISK_HOOKS && !MEMORY_DISK_ROOT_SIZE */
509 517
510 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer) 518 if (get_bootconf_option(args, "quiet", BOOTOPT_TYPE_BOOLEAN, &integer)
511 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer)) 519 || get_bootconf_option(args, "-q", BOOTOPT_TYPE_BOOLEAN, &integer))
512 if (integer) 520 if (integer)
513 boothowto |= AB_QUIET; 521 boothowto |= AB_QUIET;
514 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer) 522 if (get_bootconf_option(args, "verbose", BOOTOPT_TYPE_BOOLEAN, &integer)
515 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer)) 523 || get_bootconf_option(args, "-v", BOOTOPT_TYPE_BOOLEAN, &integer))
516 if (integer) 524 if (integer)
517 boothowto |= AB_VERBOSE; 525 boothowto |= AB_VERBOSE;
518} 526}
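
    [Editor's note: the memorydisk/memorydisc option above is given in kilobytes and
    clamped to the range 32KB..2048KB. A small standalone sketch of that clamping
    arithmetic (illustrative only, not kernel code).]

    #include <stdio.h>

    /* Mirrors the md_root_size clamping in parse_mi_bootargs() above. */
    static unsigned long
    clamp_md_root_size(unsigned long kilobytes)
    {
    	unsigned long md_root_size = kilobytes * 1024;

    	if (md_root_size < 32 * 1024)		/* floor: 32KB */
    		md_root_size = 32 * 1024;
    	if (md_root_size > 2048 * 1024)		/* ceiling: 2MB */
    		md_root_size = 2048 * 1024;
    	return md_root_size;
    }

    int
    main(void)
    {
    	printf("memorydisk=8    -> %lu bytes\n", clamp_md_root_size(8));    /* 32768 */
    	printf("memorydisk=512  -> %lu bytes\n", clamp_md_root_size(512));  /* 524288 */
    	printf("memorydisk=4096 -> %lu bytes\n", clamp_md_root_size(4096)); /* 2097152 */
    	return 0;
    }
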
519 527
520#ifdef __HAVE_FAST_SOFTINTS 528#ifdef __HAVE_FAST_SOFTINTS
521#if IPL_SOFTSERIAL != IPL_SOFTNET + 1 529#if IPL_SOFTSERIAL != IPL_SOFTNET + 1
522#error IPLs are screwed up 530#error IPLs are screwed up
523#elif IPL_SOFTNET != IPL_SOFTBIO + 1 531#elif IPL_SOFTNET != IPL_SOFTBIO + 1
524#error IPLs are screwed up 532#error IPLs are screwed up
525#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1 533#elif IPL_SOFTBIO != IPL_SOFTCLOCK + 1
526#error IPLs are screwed up 534#error IPLs are screwed up
527#elif !(IPL_SOFTCLOCK > IPL_NONE) 535#elif !(IPL_SOFTCLOCK > IPL_NONE)
528#error IPLs are screwed up 536#error IPLs are screwed up
529#elif (IPL_NONE != 0) 537#elif (IPL_NONE != 0)
530#error IPLs are screwed up 538#error IPLs are screwed up
531#endif 539#endif
532 540
533#ifndef __HAVE_PIC_FAST_SOFTINTS 541#ifndef __HAVE_PIC_FAST_SOFTINTS
534#define SOFTINT2IPLMAP \ 542#define SOFTINT2IPLMAP \
535 (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \ 543 (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
536 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \ 544 ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
537 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \ 545 ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
538 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4))) 546 ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
539#define SOFTINT2IPL(l) ((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f) 547#define SOFTINT2IPL(l) ((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
540 548
541/* 549/*
542 * This returns a mask of softint IPLs that can be dispatched at <ipl> 550 * This returns a mask of softint IPLs that can be dispatched at <ipl>
543 * SOFTIPLMASK(IPL_NONE) = 0x0000000f 551 * SOFTIPLMASK(IPL_NONE) = 0x0000000f
544 * SOFTIPLMASK(IPL_SOFTCLOCK) = 0x0000000e 552 * SOFTIPLMASK(IPL_SOFTCLOCK) = 0x0000000e
545 * SOFTIPLMASK(IPL_SOFTBIO) = 0x0000000c 553 * SOFTIPLMASK(IPL_SOFTBIO) = 0x0000000c
546 * SOFTIPLMASK(IPL_SOFTNET) = 0x00000008 554 * SOFTIPLMASK(IPL_SOFTNET) = 0x00000008
547 * SOFTIPLMASK(IPL_SOFTSERIAL) = 0x00000000 555 * SOFTIPLMASK(IPL_SOFTSERIAL) = 0x00000000
548 */ 556 */
549#define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f) 557#define SOFTIPLMASK(ipl) ((0x0f << (ipl)) & 0x0f)
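
    [Editor's note: the SOFTIPLMASK/SOFTINT2IPL tables in the comment can be checked
    with a small standalone program. The numeric IPL and SOFTINT values below are
    assumptions (IPL_NONE=0 upward, SOFTINT_CLOCK=0 upward); only the relative
    ordering is actually enforced by the #error checks above.]

    #include <stdio.h>

    enum { IPL_NONE, IPL_SOFTCLOCK, IPL_SOFTBIO, IPL_SOFTNET, IPL_SOFTSERIAL };
    enum { SOFTINT_CLOCK, SOFTINT_BIO, SOFTINT_NET, SOFTINT_SERIAL };

    #define SOFTINT2IPLMAP \
        (((IPL_SOFTSERIAL - IPL_SOFTCLOCK) << (SOFTINT_SERIAL * 4)) | \
         ((IPL_SOFTNET - IPL_SOFTCLOCK) << (SOFTINT_NET * 4)) | \
         ((IPL_SOFTBIO - IPL_SOFTCLOCK) << (SOFTINT_BIO * 4)) | \
         ((IPL_SOFTCLOCK - IPL_SOFTCLOCK) << (SOFTINT_CLOCK * 4)))
    #define SOFTINT2IPL(l)	((SOFTINT2IPLMAP >> ((l) * 4)) & 0x0f)
    #define SOFTIPLMASK(ipl)	((0x0f << (ipl)) & 0x0f)

    int
    main(void)
    {
    	/* Matches the comment's table: 0x0f, 0x0e, 0x0c, 0x08, 0x00 */
    	for (int ipl = IPL_NONE; ipl <= IPL_SOFTSERIAL; ipl++)
    		printf("SOFTIPLMASK(%d) = 0x%08x\n", ipl, SOFTIPLMASK(ipl));
    	/* SOFTINT2IPL maps a softint level to its IPL offset above IPL_SOFTCLOCK */
    	for (int l = SOFTINT_CLOCK; l <= SOFTINT_SERIAL; l++)
    		printf("SOFTINT2IPL(%d) = %d\n", l, SOFTINT2IPL(l));
    	return 0;
    }
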
550 558
551void softint_switch(lwp_t *, int); 559void softint_switch(lwp_t *, int);
552 560
553void 561void
554softint_trigger(uintptr_t mask) 562softint_trigger(uintptr_t mask)
555{ 563{
556 curcpu()->ci_softints |= mask; 564 curcpu()->ci_softints |= mask;
557} 565}
558 566
559void 567void
560softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep) 568softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
561{ 569{
562 lwp_t ** lp = &l->l_cpu->ci_softlwps[level]; 570 lwp_t ** lp = &l->l_cpu->ci_softlwps[level];
563 KASSERT(*lp == NULL || *lp == l); 571 KASSERT(*lp == NULL || *lp == l);
564 *lp = l; 572 *lp = l;
565 *machdep = 1 << SOFTINT2IPL(level); 573 *machdep = 1 << SOFTINT2IPL(level);
566 KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK))); 574 KASSERT(level != SOFTINT_CLOCK || *machdep == (1 << (IPL_SOFTCLOCK - IPL_SOFTCLOCK)));
567 KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK))); 575 KASSERT(level != SOFTINT_BIO || *machdep == (1 << (IPL_SOFTBIO - IPL_SOFTCLOCK)));
568 KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK))); 576 KASSERT(level != SOFTINT_NET || *machdep == (1 << (IPL_SOFTNET - IPL_SOFTCLOCK)));
569 KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK))); 577 KASSERT(level != SOFTINT_SERIAL || *machdep == (1 << (IPL_SOFTSERIAL - IPL_SOFTCLOCK)));
570} 578}
571 579
572void 580void
573dosoftints(void) 581dosoftints(void)
574{ 582{
575 struct cpu_info * const ci = curcpu(); 583 struct cpu_info * const ci = curcpu();
576 const int opl = ci->ci_cpl; 584 const int opl = ci->ci_cpl;
577 const uint32_t softiplmask = SOFTIPLMASK(opl); 585 const uint32_t softiplmask = SOFTIPLMASK(opl);
578 586
579 splhigh(); 587 splhigh();
580 for (;;) { 588 for (;;) {
581 u_int softints = ci->ci_softints & softiplmask; 589 u_int softints = ci->ci_softints & softiplmask;
582 KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0)); 590 KASSERT((softints != 0) == ((ci->ci_softints >> opl) != 0));
583 KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0); 591 KASSERT(opl == IPL_NONE || (softints & (1 << (opl - IPL_SOFTCLOCK))) == 0);
584 if (softints == 0) { 592 if (softints == 0) {
585 splx(opl); 593 splx(opl);
586 return; 594 return;
587 } 595 }
588#define DOSOFTINT(n) \ 596#define DOSOFTINT(n) \
589 if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \ 597 if (ci->ci_softints & (1 << (IPL_SOFT ## n - IPL_SOFTCLOCK))) { \
590 ci->ci_softints &= \ 598 ci->ci_softints &= \
591 ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \ 599 ~(1 << (IPL_SOFT ## n - IPL_SOFTCLOCK)); \
592 softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \ 600 softint_switch(ci->ci_softlwps[SOFTINT_ ## n], \
593 IPL_SOFT ## n); \ 601 IPL_SOFT ## n); \
594 continue; \ 602 continue; \
595 } 603 }
596 DOSOFTINT(SERIAL); 604 DOSOFTINT(SERIAL);
597 DOSOFTINT(NET); 605 DOSOFTINT(NET);
598 DOSOFTINT(BIO); 606 DOSOFTINT(BIO);
599 DOSOFTINT(CLOCK); 607 DOSOFTINT(CLOCK);
600 panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl); 608 panic("dosoftints wtf (softints=%u?, ipl=%d)", softints, opl);
601 } 609 }
602} 610}
603#endif /* !__HAVE_PIC_FAST_SOFTINTS */ 611#endif /* !__HAVE_PIC_FAST_SOFTINTS */
604#endif /* __HAVE_FAST_SOFTINTS */ 612#endif /* __HAVE_FAST_SOFTINTS */
605 613
606#ifdef MODULAR 614#ifdef MODULAR
607/* 615/*
608 * Push any modules loaded by the boot loader. 616 * Push any modules loaded by the boot loader.
609 */ 617 */
610void 618void
611module_init_md(void) 619module_init_md(void)
612{ 620{
613} 621}
614#endif /* MODULAR */ 622#endif /* MODULAR */
615 623
616int 624int
617mm_md_physacc(paddr_t pa, vm_prot_t prot) 625mm_md_physacc(paddr_t pa, vm_prot_t prot)
618{ 626{
619 627
620 return (pa < ctob(physmem)) ? 0 : EFAULT; 628 return (pa < ctob(physmem)) ? 0 : EFAULT;
621} 629}
622 630
623#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP 631#ifdef __HAVE_CPU_UAREA_ALLOC_IDLELWP
624vaddr_t 632vaddr_t
625cpu_uarea_alloc_idlelwp(struct cpu_info *ci) 633cpu_uarea_alloc_idlelwp(struct cpu_info *ci)
626{ 634{
627 const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE; 635 const vaddr_t va = idlestack.pv_va + ci->ci_cpuid * USPACE;
628 // printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va); 636 // printf("%s: %s: va=%lx\n", __func__, ci->ci_data.cpu_name, va);
629 return va; 637 return va;
630} 638}
631#endif 639#endif
632 640
633#ifdef MULTIPROCESSOR 641#ifdef MULTIPROCESSOR
634void 642void
635cpu_boot_secondary_processors(void) 643cpu_boot_secondary_processors(void)
636{ 644{
637 uint32_t mbox; 645 uint32_t mbox;
638 kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox)); 646 kcpuset_export_u32(kcpuset_attached, &mbox, sizeof(mbox));
639 atomic_swap_32(&arm_cpu_mbox, mbox); 647 atomic_swap_32(&arm_cpu_mbox, mbox);
640 membar_producer(); 648 membar_producer();
641#ifdef _ARM_ARCH_7 649#ifdef _ARM_ARCH_7
642 __asm __volatile("sev; sev; sev"); 650 __asm __volatile("sev; sev; sev");
643#endif 651#endif
644} 652}
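
    [Editor's note: a hypothetical sketch of the other half of this handshake. A
    secondary CPU would spin until its bit shows up in arm_cpu_mbox, and on ARMv7
    could sleep in "wfe" between checks so the primary's "sev" wakes it. This is
    not the actual NetBSD hatch code, only an illustration of the mailbox/sev
    pattern used above.]

    #include <stdint.h>

    extern volatile uint32_t arm_cpu_mbox;	/* set by the primary CPU above */

    static inline void
    secondary_wait_for_kick(unsigned int cpuindex)
    {
    	while ((arm_cpu_mbox & (1u << cpuindex)) == 0) {
    		/* On ARMv7, a "wfe" here sleeps until the primary's "sev". */
    	}
    }
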
645 653
646void 654void
647xc_send_ipi(struct cpu_info *ci) 655xc_send_ipi(struct cpu_info *ci)
648{ 656{
649 KASSERT(kpreempt_disabled()); 657 KASSERT(kpreempt_disabled());
650 KASSERT(curcpu() != ci); 658 KASSERT(curcpu() != ci);
651 659
652 660
653 if (ci) { 661 if (ci) {
654 /* Unicast, remote CPU */ 662 /* Unicast, remote CPU */
655 printf("%s: -> %s", __func__, ci->ci_data.cpu_name); 663 printf("%s: -> %s", __func__, ci->ci_data.cpu_name);
656 intr_ipi_send(ci->ci_kcpuset, IPI_XCALL); 664 intr_ipi_send(ci->ci_kcpuset, IPI_XCALL);
657 } else { 665 } else {
658 printf("%s: -> !%s", __func__, ci->ci_data.cpu_name); 666 printf("%s: -> !%s", __func__, ci->ci_data.cpu_name);
659 /* Broadcast to all but ourselves */ 667 /* Broadcast to all but ourselves */
660 kcpuset_t *kcp; 668 kcpuset_t *kcp;
661 kcpuset_create(&kcp, (ci != NULL)); 669 kcpuset_create(&kcp, (ci != NULL));
662 KASSERT(kcp != NULL); 670 KASSERT(kcp != NULL);
663 kcpuset_copy(kcp, kcpuset_running); 671 kcpuset_copy(kcp, kcpuset_running);
664 kcpuset_clear(kcp, cpu_index(ci)); 672 kcpuset_clear(kcp, cpu_index(ci));
665 intr_ipi_send(kcp, IPI_XCALL); 673 intr_ipi_send(kcp, IPI_XCALL);
666 kcpuset_destroy(kcp); 674 kcpuset_destroy(kcp);
667 } 675 }
668 printf("\n"); 676 printf("\n");
669} 677}
670#endif /* MULTIPROCESSOR */ 678#endif /* MULTIPROCESSOR */
671 679
672#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 680#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
673bool 681bool
674mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap) 682mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
675{ 683{
676 if (physical_start <= pa && pa < physical_end) { 684 if (physical_start <= pa && pa < physical_end) {
677 *vap = KERNEL_BASE + (pa - physical_start); 685 *vap = KERNEL_BASE + (pa - physical_start);
678 return true; 686 return true;
679 } 687 }
680 688
681 return false; 689 return false;
682} 690}
683#endif 691#endif
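
    [Editor's note: a worked example of the direct-map translation above, with
    made-up constants that do not correspond to any particular board.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PHYSICAL_START	0x80000000u	/* assumed for the example */
    #define PHYSICAL_END	0x90000000u	/* 256MB of RAM, for the example */
    #define KERNEL_BASE	0xc0000000u	/* assumed for the example */

    /* Same linear pa -> va translation as mm_md_direct_mapped_phys() above. */
    static bool
    direct_mapped(uint32_t pa, uint32_t *vap)
    {
    	if (PHYSICAL_START <= pa && pa < PHYSICAL_END) {
    		*vap = KERNEL_BASE + (pa - PHYSICAL_START);
    		return true;
    	}
    	return false;	/* no direct mapping outside RAM */
    }

    int
    main(void)
    {
    	uint32_t va;

    	if (direct_mapped(0x80123000u, &va))
    		printf("pa 0x80123000 -> va 0x%08x\n", (unsigned)va);	/* 0xc0123000 */
    	return 0;
    }
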

cvs diff -r1.256 -r1.257 src/sys/arch/arm/arm32/pmap.c

--- src/sys/arch/arm/arm32/pmap.c 2013/06/12 07:13:18 1.256
+++ src/sys/arch/arm/arm32/pmap.c 2013/06/12 21:34:12 1.257
@@ -1,6448 +1,6464 @@ @@ -1,6448 +1,6464 @@
1/* $NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $ */ 1/* $NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Copyright (c) 2002-2003 Wasabi Systems, Inc. 39 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
40 * Copyright (c) 2001 Richard Earnshaw 40 * Copyright (c) 2001 Richard Earnshaw
41 * Copyright (c) 2001-2002 Christopher Gilbert 41 * Copyright (c) 2001-2002 Christopher Gilbert
42 * All rights reserved. 42 * All rights reserved.
43 * 43 *
44 * 1. Redistributions of source code must retain the above copyright 44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer. 45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright 46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the 47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution. 48 * documentation and/or other materials provided with the distribution.
49 * 3. The name of the company nor the name of the author may be used to 49 * 3. The name of the company nor the name of the author may be used to
50 * endorse or promote products derived from this software without specific 50 * endorse or promote products derived from this software without specific
51 * prior written permission. 51 * prior written permission.
52 * 52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE. 63 * SUCH DAMAGE.
64 */ 64 */
65 65
66/*- 66/*-
67 * Copyright (c) 1999 The NetBSD Foundation, Inc. 67 * Copyright (c) 1999 The NetBSD Foundation, Inc.
68 * All rights reserved. 68 * All rights reserved.
69 * 69 *
70 * This code is derived from software contributed to The NetBSD Foundation 70 * This code is derived from software contributed to The NetBSD Foundation
71 * by Charles M. Hannum. 71 * by Charles M. Hannum.
72 * 72 *
73 * Redistribution and use in source and binary forms, with or without 73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions 74 * modification, are permitted provided that the following conditions
75 * are met: 75 * are met:
76 * 1. Redistributions of source code must retain the above copyright 76 * 1. Redistributions of source code must retain the above copyright
77 * notice, this list of conditions and the following disclaimer. 77 * notice, this list of conditions and the following disclaimer.
78 * 2. Redistributions in binary form must reproduce the above copyright 78 * 2. Redistributions in binary form must reproduce the above copyright
79 * notice, this list of conditions and the following disclaimer in the 79 * notice, this list of conditions and the following disclaimer in the
80 * documentation and/or other materials provided with the distribution. 80 * documentation and/or other materials provided with the distribution.
81 * 81 *
82 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 82 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
83 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 83 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
84 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 84 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
85 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 85 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
86 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
87 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
88 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
89 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
90 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
91 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 91 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
92 * POSSIBILITY OF SUCH DAMAGE. 92 * POSSIBILITY OF SUCH DAMAGE.
93 */ 93 */
94 94
95/* 95/*
96 * Copyright (c) 1994-1998 Mark Brinicombe. 96 * Copyright (c) 1994-1998 Mark Brinicombe.
97 * Copyright (c) 1994 Brini. 97 * Copyright (c) 1994 Brini.
98 * All rights reserved. 98 * All rights reserved.
99 * 99 *
100 * This code is derived from software written for Brini by Mark Brinicombe 100 * This code is derived from software written for Brini by Mark Brinicombe
101 * 101 *
102 * Redistribution and use in source and binary forms, with or without 102 * Redistribution and use in source and binary forms, with or without
103 * modification, are permitted provided that the following conditions 103 * modification, are permitted provided that the following conditions
104 * are met: 104 * are met:
105 * 1. Redistributions of source code must retain the above copyright 105 * 1. Redistributions of source code must retain the above copyright
106 * notice, this list of conditions and the following disclaimer. 106 * notice, this list of conditions and the following disclaimer.
107 * 2. Redistributions in binary form must reproduce the above copyright 107 * 2. Redistributions in binary form must reproduce the above copyright
108 * notice, this list of conditions and the following disclaimer in the 108 * notice, this list of conditions and the following disclaimer in the
109 * documentation and/or other materials provided with the distribution. 109 * documentation and/or other materials provided with the distribution.
110 * 3. All advertising materials mentioning features or use of this software 110 * 3. All advertising materials mentioning features or use of this software
111 * must display the following acknowledgement: 111 * must display the following acknowledgement:
112 * This product includes software developed by Mark Brinicombe. 112 * This product includes software developed by Mark Brinicombe.
113 * 4. The name of the author may not be used to endorse or promote products 113 * 4. The name of the author may not be used to endorse or promote products
114 * derived from this software without specific prior written permission. 114 * derived from this software without specific prior written permission.
115 * 115 *
116 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 116 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
117 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 117 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
118 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 118 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
119 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 119 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
120 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 120 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
121 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 121 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
122 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 122 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
123 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 123 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
124 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 124 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
125 * 125 *
126 * RiscBSD kernel project 126 * RiscBSD kernel project
127 * 127 *
128 * pmap.c 128 * pmap.c
129 * 129 *
130 * Machine dependent vm stuff 130 * Machine dependent vm stuff
131 * 131 *
132 * Created : 20/09/94 132 * Created : 20/09/94
133 */ 133 */
134 134
135/* 135/*
136 * armv6 and VIPT cache support by 3am Software Foundry, 136 * armv6 and VIPT cache support by 3am Software Foundry,
137 * Copyright (c) 2007 Microsoft 137 * Copyright (c) 2007 Microsoft
138 */ 138 */
139 139
140/* 140/*
141 * Performance improvements, UVM changes, overhauls and part-rewrites 141 * Performance improvements, UVM changes, overhauls and part-rewrites
142 * were contributed by Neil A. Carson <neil@causality.com>. 142 * were contributed by Neil A. Carson <neil@causality.com>.
143 */ 143 */
144 144
145/* 145/*
146 * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables 146 * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables
147 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi 147 * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
148 * Systems, Inc. 148 * Systems, Inc.
149 * 149 *
150 * There are still a few things outstanding at this time: 150 * There are still a few things outstanding at this time:
151 * 151 *
152 * - There are some unresolved issues for MP systems: 152 * - There are some unresolved issues for MP systems:
153 * 153 *
154 * o The L1 metadata needs a lock, or more specifically, some places 154 * o The L1 metadata needs a lock, or more specifically, some places
155 * need to acquire an exclusive lock when modifying L1 translation 155 * need to acquire an exclusive lock when modifying L1 translation
156 * table entries. 156 * table entries.
157 * 157 *
158 * o When one cpu modifies an L1 entry, and that L1 table is also 158 * o When one cpu modifies an L1 entry, and that L1 table is also
159 * being used by another cpu, then the latter will need to be told 159 * being used by another cpu, then the latter will need to be told
160 * that a tlb invalidation may be necessary. (But only if the old 160 * that a tlb invalidation may be necessary. (But only if the old
161 * domain number in the L1 entry being over-written is currently 161 * domain number in the L1 entry being over-written is currently
162 * the active domain on that cpu). I guess there are lots more tlb 162 * the active domain on that cpu). I guess there are lots more tlb
163 * shootdown issues too... 163 * shootdown issues too...
164 * 164 *
165 * o If the vector_page is at 0x00000000 instead of in kernel VA space, 165 * o If the vector_page is at 0x00000000 instead of in kernel VA space,
166 * then MP systems will lose big-time because of the MMU domain hack. 166 * then MP systems will lose big-time because of the MMU domain hack.
167 * The only way this can be solved (apart from moving the vector 167 * The only way this can be solved (apart from moving the vector
168 * page to 0xffff0000) is to reserve the first 1MB of user address 168 * page to 0xffff0000) is to reserve the first 1MB of user address
169 * space for kernel use only. This would require re-linking all 169 * space for kernel use only. This would require re-linking all
170 * applications so that the text section starts above this 1MB 170 * applications so that the text section starts above this 1MB
171 * boundary. 171 * boundary.
172 * 172 *
173 * o Tracking which VM space is resident in the cache/tlb has not yet 173 * o Tracking which VM space is resident in the cache/tlb has not yet
174 * been implemented for MP systems. 174 * been implemented for MP systems.
175 * 175 *
176 * o Finally, there is a pathological condition where two cpus running 176 * o Finally, there is a pathological condition where two cpus running
177 * two separate processes (not lwps) which happen to share an L1 177 * two separate processes (not lwps) which happen to share an L1
178 * can get into a fight over one or more L1 entries. This will result 178 * can get into a fight over one or more L1 entries. This will result
179 * in a significant slow-down if both processes are in tight loops. 179 * in a significant slow-down if both processes are in tight loops.
180 */ 180 */
181 181
182/* 182/*
183 * Special compilation symbols 183 * Special compilation symbols
184 * PMAP_DEBUG - Build in pmap_debug_level code 184 * PMAP_DEBUG - Build in pmap_debug_level code
185 */ 185 */
186 186
187/* Include header files */ 187/* Include header files */
188 188
189#include "opt_cpuoptions.h" 189#include "opt_cpuoptions.h"
190#include "opt_pmap_debug.h" 190#include "opt_pmap_debug.h"
191#include "opt_ddb.h" 191#include "opt_ddb.h"
192#include "opt_lockdebug.h" 192#include "opt_lockdebug.h"
193#include "opt_multiprocessor.h" 193#include "opt_multiprocessor.h"
194 194
195#include <sys/param.h> 195#include <sys/param.h>
196#include <sys/types.h> 196#include <sys/types.h>
197#include <sys/kernel.h> 197#include <sys/kernel.h>
198#include <sys/systm.h> 198#include <sys/systm.h>
199#include <sys/proc.h> 199#include <sys/proc.h>
200#include <sys/pool.h> 200#include <sys/pool.h>
201#include <sys/kmem.h> 201#include <sys/kmem.h>
202#include <sys/cdefs.h> 202#include <sys/cdefs.h>
203#include <sys/cpu.h> 203#include <sys/cpu.h>
204#include <sys/sysctl.h> 204#include <sys/sysctl.h>
205 205
206#include <uvm/uvm.h> 206#include <uvm/uvm.h>
207 207
208#include <sys/bus.h> 208#include <sys/bus.h>
209#include <machine/pmap.h> 209#include <machine/pmap.h>
210#include <machine/pcb.h> 210#include <machine/pcb.h>
211#include <machine/param.h> 211#include <machine/param.h>
212#include <arm/cpuconf.h> 212#include <arm/cpuconf.h>
213#include <arm/arm32/katelib.h> 213#include <arm/arm32/katelib.h>
214 214
215__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.256 2013/06/12 07:13:18 matt Exp $"); 215__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.257 2013/06/12 21:34:12 matt Exp $");
216 216
217#ifdef PMAP_DEBUG 217#ifdef PMAP_DEBUG
218 218
219/* XXX need to get rid of all refs to this */ 219/* XXX need to get rid of all refs to this */
220int pmap_debug_level = 0; 220int pmap_debug_level = 0;
221 221
222/* 222/*
223 * for switching to potentially finer grained debugging 223 * for switching to potentially finer grained debugging
224 */ 224 */
225#define PDB_FOLLOW 0x0001 225#define PDB_FOLLOW 0x0001
226#define PDB_INIT 0x0002 226#define PDB_INIT 0x0002
227#define PDB_ENTER 0x0004 227#define PDB_ENTER 0x0004
228#define PDB_REMOVE 0x0008 228#define PDB_REMOVE 0x0008
229#define PDB_CREATE 0x0010 229#define PDB_CREATE 0x0010
230#define PDB_PTPAGE 0x0020 230#define PDB_PTPAGE 0x0020
231#define PDB_GROWKERN 0x0040 231#define PDB_GROWKERN 0x0040
232#define PDB_BITS 0x0080 232#define PDB_BITS 0x0080
233#define PDB_COLLECT 0x0100 233#define PDB_COLLECT 0x0100
234#define PDB_PROTECT 0x0200 234#define PDB_PROTECT 0x0200
235#define PDB_MAP_L1 0x0400 235#define PDB_MAP_L1 0x0400
236#define PDB_BOOTSTRAP 0x1000 236#define PDB_BOOTSTRAP 0x1000
237#define PDB_PARANOIA 0x2000 237#define PDB_PARANOIA 0x2000
238#define PDB_WIRING 0x4000 238#define PDB_WIRING 0x4000
239#define PDB_PVDUMP 0x8000 239#define PDB_PVDUMP 0x8000
240#define PDB_VAC 0x10000 240#define PDB_VAC 0x10000
241#define PDB_KENTER 0x20000 241#define PDB_KENTER 0x20000
242#define PDB_KREMOVE 0x40000 242#define PDB_KREMOVE 0x40000
243#define PDB_EXEC 0x80000 243#define PDB_EXEC 0x80000
244 244
245int debugmap = 1; 245int debugmap = 1;
246int pmapdebug = 0;  246int pmapdebug = 0;
247#define NPDEBUG(_lev_,_stat_) \ 247#define NPDEBUG(_lev_,_stat_) \
248 if (pmapdebug & (_lev_)) \ 248 if (pmapdebug & (_lev_)) \
249 ((_stat_)) 249 ((_stat_))
250  250
251#else /* PMAP_DEBUG */ 251#else /* PMAP_DEBUG */
252#define NPDEBUG(_lev_,_stat_) /* Nothing */ 252#define NPDEBUG(_lev_,_stat_) /* Nothing */
253#endif /* PMAP_DEBUG */ 253#endif /* PMAP_DEBUG */
254 254
255/* 255/*
256 * pmap_kernel() points here 256 * pmap_kernel() points here
257 */ 257 */
258static struct pmap kernel_pmap_store; 258static struct pmap kernel_pmap_store;
259struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; 259struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
260#ifdef PMAP_NEED_ALLOC_POOLPAGE 260#ifdef PMAP_NEED_ALLOC_POOLPAGE
261int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT; 261int arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
262#endif 262#endif
263 263
264/* 264/*
265 * Which pmap is currently 'live' in the cache 265 * Which pmap is currently 'live' in the cache
266 * 266 *
267 * XXXSCW: Fix for SMP ... 267 * XXXSCW: Fix for SMP ...
268 */ 268 */
269static pmap_t pmap_recent_user; 269static pmap_t pmap_recent_user;
270 270
271/* 271/*
272 * Pointer to last active lwp, or NULL if it exited. 272 * Pointer to last active lwp, or NULL if it exited.
273 */ 273 */
274struct lwp *pmap_previous_active_lwp; 274struct lwp *pmap_previous_active_lwp;
275 275
276/* 276/*
277 * Pool and cache that pmap structures are allocated from. 277 * Pool and cache that pmap structures are allocated from.
278 * We use a cache to avoid clearing the pm_l2[] array (1KB) 278 * We use a cache to avoid clearing the pm_l2[] array (1KB)
279 * in pmap_create(). 279 * in pmap_create().
280 */ 280 */
281static struct pool_cache pmap_cache; 281static struct pool_cache pmap_cache;
282static LIST_HEAD(, pmap) pmap_pmaps; 282static LIST_HEAD(, pmap) pmap_pmaps;
283 283
284/* 284/*
285 * Pool of PV structures 285 * Pool of PV structures
286 */ 286 */
287static struct pool pmap_pv_pool; 287static struct pool pmap_pv_pool;
288static void *pmap_bootstrap_pv_page_alloc(struct pool *, int); 288static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
289static void pmap_bootstrap_pv_page_free(struct pool *, void *); 289static void pmap_bootstrap_pv_page_free(struct pool *, void *);
290static struct pool_allocator pmap_bootstrap_pv_allocator = { 290static struct pool_allocator pmap_bootstrap_pv_allocator = {
291 pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free 291 pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
292}; 292};
293 293
294/* 294/*
295 * Pool and cache of l2_dtable structures. 295 * Pool and cache of l2_dtable structures.
296 * We use a cache to avoid clearing the structures when they're 296 * We use a cache to avoid clearing the structures when they're
297 * allocated. (196 bytes) 297 * allocated. (196 bytes)
298 */ 298 */
299static struct pool_cache pmap_l2dtable_cache; 299static struct pool_cache pmap_l2dtable_cache;
300static vaddr_t pmap_kernel_l2dtable_kva; 300static vaddr_t pmap_kernel_l2dtable_kva;
301 301
302/* 302/*
303 * Pool and cache of L2 page descriptors. 303 * Pool and cache of L2 page descriptors.
304 * We use a cache to avoid clearing the descriptor table 304 * We use a cache to avoid clearing the descriptor table
305 * when they're allocated. (1KB) 305 * when they're allocated. (1KB)
306 */ 306 */
307static struct pool_cache pmap_l2ptp_cache; 307static struct pool_cache pmap_l2ptp_cache;
308static vaddr_t pmap_kernel_l2ptp_kva; 308static vaddr_t pmap_kernel_l2ptp_kva;
309static paddr_t pmap_kernel_l2ptp_phys; 309static paddr_t pmap_kernel_l2ptp_phys;
310 310
311#ifdef PMAPCOUNTERS 311#ifdef PMAPCOUNTERS
312#define PMAP_EVCNT_INITIALIZER(name) \ 312#define PMAP_EVCNT_INITIALIZER(name) \
313 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name) 313 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
314 314
315#ifdef PMAP_CACHE_VIPT 315#ifdef PMAP_CACHE_VIPT
316static struct evcnt pmap_ev_vac_clean_one = 316static struct evcnt pmap_ev_vac_clean_one =
317 PMAP_EVCNT_INITIALIZER("clean page (1 color)"); 317 PMAP_EVCNT_INITIALIZER("clean page (1 color)");
318static struct evcnt pmap_ev_vac_flush_one = 318static struct evcnt pmap_ev_vac_flush_one =
319 PMAP_EVCNT_INITIALIZER("flush page (1 color)"); 319 PMAP_EVCNT_INITIALIZER("flush page (1 color)");
320static struct evcnt pmap_ev_vac_flush_lots = 320static struct evcnt pmap_ev_vac_flush_lots =
321 PMAP_EVCNT_INITIALIZER("flush page (2+ colors)"); 321 PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
322static struct evcnt pmap_ev_vac_flush_lots2 = 322static struct evcnt pmap_ev_vac_flush_lots2 =
323 PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)"); 323 PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
324EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one); 324EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
325EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one); 325EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
326EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots); 326EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
327EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2); 327EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);
328 328
329static struct evcnt pmap_ev_vac_color_new = 329static struct evcnt pmap_ev_vac_color_new =
330 PMAP_EVCNT_INITIALIZER("new page color"); 330 PMAP_EVCNT_INITIALIZER("new page color");
331static struct evcnt pmap_ev_vac_color_reuse = 331static struct evcnt pmap_ev_vac_color_reuse =
332 PMAP_EVCNT_INITIALIZER("ok first page color"); 332 PMAP_EVCNT_INITIALIZER("ok first page color");
333static struct evcnt pmap_ev_vac_color_ok = 333static struct evcnt pmap_ev_vac_color_ok =
334 PMAP_EVCNT_INITIALIZER("ok page color"); 334 PMAP_EVCNT_INITIALIZER("ok page color");
335static struct evcnt pmap_ev_vac_color_blind = 335static struct evcnt pmap_ev_vac_color_blind =
336 PMAP_EVCNT_INITIALIZER("blind page color"); 336 PMAP_EVCNT_INITIALIZER("blind page color");
337static struct evcnt pmap_ev_vac_color_change = 337static struct evcnt pmap_ev_vac_color_change =
338 PMAP_EVCNT_INITIALIZER("change page color"); 338 PMAP_EVCNT_INITIALIZER("change page color");
339static struct evcnt pmap_ev_vac_color_erase = 339static struct evcnt pmap_ev_vac_color_erase =
340 PMAP_EVCNT_INITIALIZER("erase page color"); 340 PMAP_EVCNT_INITIALIZER("erase page color");
341static struct evcnt pmap_ev_vac_color_none = 341static struct evcnt pmap_ev_vac_color_none =
342 PMAP_EVCNT_INITIALIZER("no page color"); 342 PMAP_EVCNT_INITIALIZER("no page color");
343static struct evcnt pmap_ev_vac_color_restore = 343static struct evcnt pmap_ev_vac_color_restore =
344 PMAP_EVCNT_INITIALIZER("restore page color"); 344 PMAP_EVCNT_INITIALIZER("restore page color");
345 345
346EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new); 346EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
347EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse); 347EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
348EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok); 348EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
349EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind); 349EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
350EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change); 350EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
351EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase); 351EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
352EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none); 352EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
353EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore); 353EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
354#endif 354#endif
355 355
356static struct evcnt pmap_ev_mappings = 356static struct evcnt pmap_ev_mappings =
357 PMAP_EVCNT_INITIALIZER("pages mapped"); 357 PMAP_EVCNT_INITIALIZER("pages mapped");
358static struct evcnt pmap_ev_unmappings = 358static struct evcnt pmap_ev_unmappings =
359 PMAP_EVCNT_INITIALIZER("pages unmapped"); 359 PMAP_EVCNT_INITIALIZER("pages unmapped");
360static struct evcnt pmap_ev_remappings = 360static struct evcnt pmap_ev_remappings =
361 PMAP_EVCNT_INITIALIZER("pages remapped"); 361 PMAP_EVCNT_INITIALIZER("pages remapped");
362 362
363EVCNT_ATTACH_STATIC(pmap_ev_mappings); 363EVCNT_ATTACH_STATIC(pmap_ev_mappings);
364EVCNT_ATTACH_STATIC(pmap_ev_unmappings); 364EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
365EVCNT_ATTACH_STATIC(pmap_ev_remappings); 365EVCNT_ATTACH_STATIC(pmap_ev_remappings);
366 366
367static struct evcnt pmap_ev_kernel_mappings = 367static struct evcnt pmap_ev_kernel_mappings =
368 PMAP_EVCNT_INITIALIZER("kernel pages mapped"); 368 PMAP_EVCNT_INITIALIZER("kernel pages mapped");
369static struct evcnt pmap_ev_kernel_unmappings = 369static struct evcnt pmap_ev_kernel_unmappings =
370 PMAP_EVCNT_INITIALIZER("kernel pages unmapped"); 370 PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
371static struct evcnt pmap_ev_kernel_remappings = 371static struct evcnt pmap_ev_kernel_remappings =
372 PMAP_EVCNT_INITIALIZER("kernel pages remapped"); 372 PMAP_EVCNT_INITIALIZER("kernel pages remapped");
373 373
374EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings); 374EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
375EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings); 375EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
376EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings); 376EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);
377 377
378static struct evcnt pmap_ev_kenter_mappings = 378static struct evcnt pmap_ev_kenter_mappings =
379 PMAP_EVCNT_INITIALIZER("kenter pages mapped"); 379 PMAP_EVCNT_INITIALIZER("kenter pages mapped");
380static struct evcnt pmap_ev_kenter_unmappings = 380static struct evcnt pmap_ev_kenter_unmappings =
381 PMAP_EVCNT_INITIALIZER("kenter pages unmapped"); 381 PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
382static struct evcnt pmap_ev_kenter_remappings = 382static struct evcnt pmap_ev_kenter_remappings =
383 PMAP_EVCNT_INITIALIZER("kenter pages remapped"); 383 PMAP_EVCNT_INITIALIZER("kenter pages remapped");
384static struct evcnt pmap_ev_pt_mappings = 384static struct evcnt pmap_ev_pt_mappings =
385 PMAP_EVCNT_INITIALIZER("page table pages mapped"); 385 PMAP_EVCNT_INITIALIZER("page table pages mapped");
386 386
387EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings); 387EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
388EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings); 388EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
389EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings); 389EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
390EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings); 390EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);
391 391
392#ifdef PMAP_CACHE_VIPT 392#ifdef PMAP_CACHE_VIPT
393static struct evcnt pmap_ev_exec_mappings = 393static struct evcnt pmap_ev_exec_mappings =
394 PMAP_EVCNT_INITIALIZER("exec pages mapped"); 394 PMAP_EVCNT_INITIALIZER("exec pages mapped");
395static struct evcnt pmap_ev_exec_cached = 395static struct evcnt pmap_ev_exec_cached =
396 PMAP_EVCNT_INITIALIZER("exec pages cached"); 396 PMAP_EVCNT_INITIALIZER("exec pages cached");
397 397
398EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings); 398EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
399EVCNT_ATTACH_STATIC(pmap_ev_exec_cached); 399EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);
400 400
401static struct evcnt pmap_ev_exec_synced = 401static struct evcnt pmap_ev_exec_synced =
402 PMAP_EVCNT_INITIALIZER("exec pages synced"); 402 PMAP_EVCNT_INITIALIZER("exec pages synced");
403static struct evcnt pmap_ev_exec_synced_map = 403static struct evcnt pmap_ev_exec_synced_map =
404 PMAP_EVCNT_INITIALIZER("exec pages synced (MP)"); 404 PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
405static struct evcnt pmap_ev_exec_synced_unmap = 405static struct evcnt pmap_ev_exec_synced_unmap =
406 PMAP_EVCNT_INITIALIZER("exec pages synced (UM)"); 406 PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
407static struct evcnt pmap_ev_exec_synced_remap = 407static struct evcnt pmap_ev_exec_synced_remap =
408 PMAP_EVCNT_INITIALIZER("exec pages synced (RM)"); 408 PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
409static struct evcnt pmap_ev_exec_synced_clearbit = 409static struct evcnt pmap_ev_exec_synced_clearbit =
410 PMAP_EVCNT_INITIALIZER("exec pages synced (DG)"); 410 PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
411static struct evcnt pmap_ev_exec_synced_kremove = 411static struct evcnt pmap_ev_exec_synced_kremove =
412 PMAP_EVCNT_INITIALIZER("exec pages synced (KU)"); 412 PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
413 413
414EVCNT_ATTACH_STATIC(pmap_ev_exec_synced); 414EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
415EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map); 415EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
416EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap); 416EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
417EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap); 417EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
418EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit); 418EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
419EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove); 419EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
420 420
421static struct evcnt pmap_ev_exec_discarded_unmap = 421static struct evcnt pmap_ev_exec_discarded_unmap =
422 PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)"); 422 PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
423static struct evcnt pmap_ev_exec_discarded_zero = 423static struct evcnt pmap_ev_exec_discarded_zero =
424 PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)"); 424 PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
425static struct evcnt pmap_ev_exec_discarded_copy = 425static struct evcnt pmap_ev_exec_discarded_copy =
426 PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)"); 426 PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
427static struct evcnt pmap_ev_exec_discarded_page_protect = 427static struct evcnt pmap_ev_exec_discarded_page_protect =
428 PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)"); 428 PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
429static struct evcnt pmap_ev_exec_discarded_clearbit = 429static struct evcnt pmap_ev_exec_discarded_clearbit =
430 PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)"); 430 PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
431static struct evcnt pmap_ev_exec_discarded_kremove = 431static struct evcnt pmap_ev_exec_discarded_kremove =
432 PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)"); 432 PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
433 433
434EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap); 434EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
435EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero); 435EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
436EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy); 436EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
437EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect); 437EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
438EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit); 438EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
439EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove); 439EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
440#endif /* PMAP_CACHE_VIPT */ 440#endif /* PMAP_CACHE_VIPT */
441 441
442static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates"); 442static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
443static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects"); 443static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
444static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations"); 444static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");
445 445
446EVCNT_ATTACH_STATIC(pmap_ev_updates); 446EVCNT_ATTACH_STATIC(pmap_ev_updates);
447EVCNT_ATTACH_STATIC(pmap_ev_collects); 447EVCNT_ATTACH_STATIC(pmap_ev_collects);
448EVCNT_ATTACH_STATIC(pmap_ev_activations); 448EVCNT_ATTACH_STATIC(pmap_ev_activations);
449 449
450#define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++)) 450#define PMAPCOUNT(x) ((void)(pmap_ev_##x.ev_count++))
451#else 451#else
452#define PMAPCOUNT(x) ((void)0) 452#define PMAPCOUNT(x) ((void)0)
453#endif 453#endif
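
    [Editor's note: usage of the PMAPCOUNT() convenience macro defined above.]

    	PMAPCOUNT(mappings);
    	/* With PMAPCOUNTERS defined this expands to
    	 *   ((void)(pmap_ev_mappings.ev_count++))
    	 * bumping the "pages mapped" event counter declared above; without
    	 * PMAPCOUNTERS it becomes ((void)0), so callers need no #ifdefs. */
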
454 454
455/* 455/*
456 * pmap copy/zero page, and mem(5) hook point 456 * pmap copy/zero page, and mem(5) hook point
457 */ 457 */
458static pt_entry_t *csrc_pte, *cdst_pte; 458static pt_entry_t *csrc_pte, *cdst_pte;
459static vaddr_t csrcp, cdstp; 459static vaddr_t csrcp, cdstp;
460vaddr_t memhook; /* used by mem.c */ 460vaddr_t memhook; /* used by mem.c */
461kmutex_t memlock; /* used by mem.c */ 461kmutex_t memlock; /* used by mem.c */
462void *zeropage; /* used by mem.c */ 462void *zeropage; /* used by mem.c */
463extern void *msgbufaddr; 463extern void *msgbufaddr;
464int pmap_kmpages; 464int pmap_kmpages;
465/* 465/*
466 * Flag to indicate if pmap_init() has done its thing 466 * Flag to indicate if pmap_init() has done its thing
467 */ 467 */
468bool pmap_initialized; 468bool pmap_initialized;
469 469
470/* 470/*
471 * Misc. locking data structures 471 * Misc. locking data structures
472 */ 472 */
473 473
474#define pmap_acquire_pmap_lock(pm) \ 474#define pmap_acquire_pmap_lock(pm) \
475 do { \ 475 do { \
476 if ((pm) != pmap_kernel()) \ 476 if ((pm) != pmap_kernel()) \
477 mutex_enter((pm)->pm_lock); \ 477 mutex_enter((pm)->pm_lock); \
478 } while (/*CONSTCOND*/0) 478 } while (/*CONSTCOND*/0)
479 479
480#define pmap_release_pmap_lock(pm) \ 480#define pmap_release_pmap_lock(pm) \
481 do { \ 481 do { \
482 if ((pm) != pmap_kernel()) \ 482 if ((pm) != pmap_kernel()) \
483 mutex_exit((pm)->pm_lock); \ 483 mutex_exit((pm)->pm_lock); \
484 } while (/*CONSTCOND*/0) 484 } while (/*CONSTCOND*/0)
485 485
486 486
487/* 487/*
488 * Metadata for L1 translation tables. 488 * Metadata for L1 translation tables.
489 */ 489 */
490struct l1_ttable { 490struct l1_ttable {
491 /* Entry on the L1 Table list */ 491 /* Entry on the L1 Table list */
492 SLIST_ENTRY(l1_ttable) l1_link; 492 SLIST_ENTRY(l1_ttable) l1_link;
493 493
494 /* Entry on the L1 Least Recently Used list */ 494 /* Entry on the L1 Least Recently Used list */
495 TAILQ_ENTRY(l1_ttable) l1_lru; 495 TAILQ_ENTRY(l1_ttable) l1_lru;
496 496
497 /* Track how many domains are allocated from this L1 */ 497 /* Track how many domains are allocated from this L1 */
498 volatile u_int l1_domain_use_count; 498 volatile u_int l1_domain_use_count;
499 499
500 /* 500 /*
501 * A free-list of domain numbers for this L1. 501 * A free-list of domain numbers for this L1.
502 * We avoid using ffs() and a bitmap to track domains since ffs() 502 * We avoid using ffs() and a bitmap to track domains since ffs()
503 * is slow on ARM. 503 * is slow on ARM.
504 */ 504 */
505 uint8_t l1_domain_first; 505 uint8_t l1_domain_first;
506 uint8_t l1_domain_free[PMAP_DOMAINS]; 506 uint8_t l1_domain_free[PMAP_DOMAINS];
507 507
508 /* Physical address of this L1 page table */ 508 /* Physical address of this L1 page table */
509 paddr_t l1_physaddr; 509 paddr_t l1_physaddr;
510 510
511 /* KVA of this L1 page table */ 511 /* KVA of this L1 page table */
512 pd_entry_t *l1_kva; 512 pd_entry_t *l1_kva;
513}; 513};
514 514
515/* 515/*
516 * Convert a virtual address into its L1 table index. That is, the 516 * Convert a virtual address into its L1 table index. That is, the
517 * index used to locate the L2 descriptor table pointer in an L1 table. 517 * index used to locate the L2 descriptor table pointer in an L1 table.
518 * This is basically used to index l1->l1_kva[]. 518 * This is basically used to index l1->l1_kva[].
519 * 519 *
520 * Each L2 descriptor table represents 1MB of VA space. 520 * Each L2 descriptor table represents 1MB of VA space.
521 */ 521 */
522#define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT) 522#define L1_IDX(va) (((vaddr_t)(va)) >> L1_S_SHIFT)
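
    [Editor's note: a worked example, assuming L1_S_SHIFT is 20 (1MB sections, as
    the comment above implies).]

    #include <stdio.h>

    #define L1_S_SHIFT	20			/* assumption for the example */
    #define L1_IDX(va)	(((unsigned long)(va)) >> L1_S_SHIFT)

    int
    main(void)
    {
    	/* 0xc0123456 lies in the 1MB section starting at 0xc0100000 */
    	printf("L1_IDX(0xc0123456) = 0x%lx\n", L1_IDX(0xc0123456ul));	/* 0xc01 */
    	return 0;
    }
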
523 523
524/* 524/*
525 * L1 Page Tables are tracked using a Least Recently Used list. 525 * L1 Page Tables are tracked using a Least Recently Used list.
526 * - New L1s are allocated from the HEAD. 526 * - New L1s are allocated from the HEAD.
527 * - Freed L1s are added to the TAIL. 527 * - Freed L1s are added to the TAIL.
528 * - Recently accessed L1s (where an 'access' is some change to one of 528 * - Recently accessed L1s (where an 'access' is some change to one of
529 * the userland pmaps which owns this L1) are moved to the TAIL. 529 * the userland pmaps which owns this L1) are moved to the TAIL.
530 */ 530 */
531static TAILQ_HEAD(, l1_ttable) l1_lru_list; 531static TAILQ_HEAD(, l1_ttable) l1_lru_list;
532static kmutex_t l1_lru_lock __cacheline_aligned; 532static kmutex_t l1_lru_lock __cacheline_aligned;
533 533
534/* 534/*
535 * A list of all L1 tables 535 * A list of all L1 tables
536 */ 536 */
537static SLIST_HEAD(, l1_ttable) l1_list; 537static SLIST_HEAD(, l1_ttable) l1_list;
538 538
539/* 539/*
540 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. 540 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
541 * 541 *
542 * This is normally 16MB worth of L2 page descriptors for any given pmap. 542 * This is normally 16MB worth of L2 page descriptors for any given pmap.
543 * Reference counts are maintained for L2 descriptors so they can be 543 * Reference counts are maintained for L2 descriptors so they can be
544 * freed when empty. 544 * freed when empty.
545 */ 545 */
546struct l2_dtable { 546struct l2_dtable {
547 /* The number of L2 page descriptors allocated to this l2_dtable */ 547 /* The number of L2 page descriptors allocated to this l2_dtable */
548 u_int l2_occupancy; 548 u_int l2_occupancy;
549 549
550 /* List of L2 page descriptors */ 550 /* List of L2 page descriptors */
551 struct l2_bucket { 551 struct l2_bucket {
552 pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ 552 pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */
553 paddr_t l2b_phys; /* Physical address of same */ 553 paddr_t l2b_phys; /* Physical address of same */
554 u_short l2b_l1idx; /* This L2 table's L1 index */ 554 u_short l2b_l1idx; /* This L2 table's L1 index */
555 u_short l2b_occupancy; /* How many active descriptors */ 555 u_short l2b_occupancy; /* How many active descriptors */
556 } l2_bucket[L2_BUCKET_SIZE]; 556 } l2_bucket[L2_BUCKET_SIZE];
557}; 557};
558 558
559/* 559/*
560 * Given an L1 table index, calculate the corresponding l2_dtable index 560 * Given an L1 table index, calculate the corresponding l2_dtable index
561 * and bucket index within the l2_dtable. 561 * and bucket index within the l2_dtable.
562 */ 562 */
563#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ 563#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \
564 (L2_SIZE - 1)) 564 (L2_SIZE - 1))
565#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) 565#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))
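
As a hedged worked example of the split (assuming L2_BUCKET_LOG2 is 4, so one l2_dtable covers 16 x 1MB = 16MB as noted above; the L2_SIZE value here is purely illustrative, the real one comes from pmap.h):

/* Sketch: split an L1 index into its l2_dtable slot and bucket. */
#include <stdio.h>

#define L2_BUCKET_LOG2	4
#define L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
#define L2_SIZE		128			/* illustrative value only */

#define L2_IDX(l1idx)	 (((l1idx) >> L2_BUCKET_LOG2) & (L2_SIZE - 1))
#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1))

int
main(void)
{
	unsigned l1idx = 0x401;		/* e.g. L1_IDX(0x40123456) */

	printf("l2_dtable slot %u, bucket %u\n",
	    L2_IDX(l1idx), L2_BUCKET(l1idx));	/* slot 64, bucket 1 */
	return 0;
}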
566 566
567/* 567/*
568 * Given a virtual address, this macro returns the 568 * Given a virtual address, this macro returns the
569 * virtual address required to drop into the next L2 bucket. 569 * virtual address required to drop into the next L2 bucket.
570 */ 570 */
571#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) 571#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE)
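
And the matching example for L2_NEXT_BUCKET(), again assuming 1MB sections (L1_S_SIZE 0x00100000, L1_S_FRAME its complement mask); a sketch, not the kernel headers:

/* Sketch: the bucket after 0x40123456 starts at 0x40200000. */
#include <stdio.h>

#define L1_S_SIZE	0x00100000UL
#define L1_S_FRAME	(~(L1_S_SIZE - 1))
#define L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)

int
main(void)
{
	unsigned long va = 0x40123456UL;

	printf("next bucket at 0x%08lx\n", L2_NEXT_BUCKET(va));
	return 0;
}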
572 572
573/* 573/*
574 * L2 allocation. 574 * L2 allocation.
575 */ 575 */
576#define pmap_alloc_l2_dtable() \ 576#define pmap_alloc_l2_dtable() \
577 pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT) 577 pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
578#define pmap_free_l2_dtable(l2) \ 578#define pmap_free_l2_dtable(l2) \
579 pool_cache_put(&pmap_l2dtable_cache, (l2)) 579 pool_cache_put(&pmap_l2dtable_cache, (l2))
580#define pmap_alloc_l2_ptp(pap) \ 580#define pmap_alloc_l2_ptp(pap) \
581 ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\ 581 ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
582 PR_NOWAIT, (pap))) 582 PR_NOWAIT, (pap)))
583 583
584/* 584/*
585 * We try to map the page tables write-through, if possible. However, not 585 * We try to map the page tables write-through, if possible. However, not
586 * all CPUs have a write-through cache mode, so on those we have to sync 586 * all CPUs have a write-through cache mode, so on those we have to sync
587 * the cache when we frob page tables. 587 * the cache when we frob page tables.
588 * 588 *
589 * We try to evaluate this at compile time, if possible. However, it's 589 * We try to evaluate this at compile time, if possible. However, it's
590 * not always possible to do that, hence this run-time var. 590 * not always possible to do that, hence this run-time var.
591 */ 591 */
592int pmap_needs_pte_sync; 592int pmap_needs_pte_sync;
593 593
594/* 594/*
595 * Real definition of pv_entry. 595 * Real definition of pv_entry.
596 */ 596 */
597struct pv_entry { 597struct pv_entry {
598 SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */ 598 SLIST_ENTRY(pv_entry) pv_link; /* next pv_entry */
599 pmap_t pv_pmap; /* pmap where mapping lies */ 599 pmap_t pv_pmap; /* pmap where mapping lies */
600 vaddr_t pv_va; /* virtual address for mapping */ 600 vaddr_t pv_va; /* virtual address for mapping */
601 u_int pv_flags; /* flags */ 601 u_int pv_flags; /* flags */
602}; 602};
603 603
604/* 604/*
605 * Macro to determine if a mapping might be resident in the 605 * Macro to determine if a mapping might be resident in the
606 * instruction cache and/or TLB 606 * instruction cache and/or TLB
607 */ 607 */
608#if ARM_MMU_V7 > 0 608#if ARM_MMU_V7 > 0
609/* 609/*
610 * Speculative loads by Cortex cores can cause TLB entries to be filled even if 610 * Speculative loads by Cortex cores can cause TLB entries to be filled even if
 611 * there are no explicit accesses, so there may always be TLB entries to 611 * there are no explicit accesses, so there may always be TLB entries to
612 * flush. If we used ASIDs then this would not be a problem. 612 * flush. If we used ASIDs then this would not be a problem.
613 */ 613 */
614#define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC) 614#define PV_BEEN_EXECD(f) (((f) & PVF_EXEC) == PVF_EXEC)
615#else 615#else
616#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) 616#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
617#endif 617#endif
618#define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0) 618#define PV_IS_EXEC_P(f) (((f) & PVF_EXEC) != 0)
619 619
620/* 620/*
621 * Macro to determine if a mapping might be resident in the 621 * Macro to determine if a mapping might be resident in the
622 * data cache and/or TLB 622 * data cache and/or TLB
623 */ 623 */
624#if ARM_MMU_V7 > 0 624#if ARM_MMU_V7 > 0
625/* 625/*
626 * Speculative loads by Cortex cores can cause TLB entries to be filled even if 626 * Speculative loads by Cortex cores can cause TLB entries to be filled even if
 627 * there are no explicit accesses, so there may always be TLB entries to 627 * there are no explicit accesses, so there may always be TLB entries to
628 * flush. If we used ASIDs then this would not be a problem. 628 * flush. If we used ASIDs then this would not be a problem.
629 */ 629 */
630#define PV_BEEN_REFD(f) (1) 630#define PV_BEEN_REFD(f) (1)
631#else 631#else
632#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) 632#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
633#endif 633#endif
634 634
635/* 635/*
636 * Local prototypes 636 * Local prototypes
637 */ 637 */
638static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t); 638static int pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
639static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *, 639static void pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
640 pt_entry_t **); 640 pt_entry_t **);
641static bool pmap_is_current(pmap_t); 641static bool pmap_is_current(pmap_t);
642static bool pmap_is_cached(pmap_t); 642static bool pmap_is_cached(pmap_t);
643static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *, 643static void pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
644 pmap_t, vaddr_t, u_int); 644 pmap_t, vaddr_t, u_int);
645static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t); 645static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
646static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 646static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
647static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t, 647static u_int pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
648 u_int, u_int); 648 u_int, u_int);
649 649
650static void pmap_pinit(pmap_t); 650static void pmap_pinit(pmap_t);
651static int pmap_pmap_ctor(void *, void *, int); 651static int pmap_pmap_ctor(void *, void *, int);
652 652
653static void pmap_alloc_l1(pmap_t); 653static void pmap_alloc_l1(pmap_t);
654static void pmap_free_l1(pmap_t); 654static void pmap_free_l1(pmap_t);
655static void pmap_use_l1(pmap_t); 655static void pmap_use_l1(pmap_t);
656 656
657static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t); 657static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
658static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t); 658static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
659static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); 659static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
660static int pmap_l2ptp_ctor(void *, void *, int); 660static int pmap_l2ptp_ctor(void *, void *, int);
661static int pmap_l2dtable_ctor(void *, void *, int); 661static int pmap_l2dtable_ctor(void *, void *, int);
662 662
663static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 663static void pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
664#ifdef PMAP_CACHE_VIVT 664#ifdef PMAP_CACHE_VIVT
665static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 665static void pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
666static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t); 666static void pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
667#endif 667#endif
668 668
669static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int); 669static void pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
670#ifdef PMAP_CACHE_VIVT 670#ifdef PMAP_CACHE_VIVT
671static int pmap_clean_page(struct pv_entry *, bool); 671static int pmap_clean_page(struct pv_entry *, bool);
672#endif 672#endif
673#ifdef PMAP_CACHE_VIPT 673#ifdef PMAP_CACHE_VIPT
674static void pmap_syncicache_page(struct vm_page_md *, paddr_t); 674static void pmap_syncicache_page(struct vm_page_md *, paddr_t);
675enum pmap_flush_op { 675enum pmap_flush_op {
676 PMAP_FLUSH_PRIMARY, 676 PMAP_FLUSH_PRIMARY,
677 PMAP_FLUSH_SECONDARY, 677 PMAP_FLUSH_SECONDARY,
678 PMAP_CLEAN_PRIMARY 678 PMAP_CLEAN_PRIMARY
679}; 679};
680static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op); 680static void pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
681#endif 681#endif
682static void pmap_page_remove(struct vm_page_md *, paddr_t); 682static void pmap_page_remove(struct vm_page_md *, paddr_t);
683 683
684static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); 684static void pmap_init_l1(struct l1_ttable *, pd_entry_t *);
685static vaddr_t kernel_pt_lookup(paddr_t); 685static vaddr_t kernel_pt_lookup(paddr_t);
686 686
687 687
688/* 688/*
689 * Misc variables 689 * Misc variables
690 */ 690 */
691vaddr_t virtual_avail; 691vaddr_t virtual_avail;
692vaddr_t virtual_end; 692vaddr_t virtual_end;
693vaddr_t pmap_curmaxkvaddr; 693vaddr_t pmap_curmaxkvaddr;
694 694
695paddr_t avail_start; 695paddr_t avail_start;
696paddr_t avail_end; 696paddr_t avail_end;
697 697
698pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq); 698pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
699pv_addr_t kernelpages; 699pv_addr_t kernelpages;
700pv_addr_t kernel_l1pt; 700pv_addr_t kernel_l1pt;
701pv_addr_t systempage; 701pv_addr_t systempage;
702 702
703/* Function to set the debug level of the pmap code */ 703/* Function to set the debug level of the pmap code */
704 704
705#ifdef PMAP_DEBUG 705#ifdef PMAP_DEBUG
706void 706void
707pmap_debug(int level) 707pmap_debug(int level)
708{ 708{
709 pmap_debug_level = level; 709 pmap_debug_level = level;
710 printf("pmap_debug: level=%d\n", pmap_debug_level); 710 printf("pmap_debug: level=%d\n", pmap_debug_level);
711} 711}
712#endif /* PMAP_DEBUG */ 712#endif /* PMAP_DEBUG */
713 713
714#ifdef PMAP_CACHE_VIPT 714#ifdef PMAP_CACHE_VIPT
715#define PMAP_VALIDATE_MD_PAGE(md) \ 715#define PMAP_VALIDATE_MD_PAGE(md) \
716 KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \ 716 KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
717 "(md) %p: attrs=%#x urw=%u krw=%u", (md), \ 717 "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
718 (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings); 718 (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
719#endif /* PMAP_CACHE_VIPT */ 719#endif /* PMAP_CACHE_VIPT */
720/* 720/*
721 * A bunch of routines to conditionally flush the caches/TLB depending 721 * A bunch of routines to conditionally flush the caches/TLB depending
722 * on whether the specified pmap actually needs to be flushed at any 722 * on whether the specified pmap actually needs to be flushed at any
723 * given time. 723 * given time.
724 */ 724 */
725static inline void 725static inline void
726pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va) 726pmap_tlb_flushID_SE(pmap_t pm, vaddr_t va)
727{ 727{
728 728
729 if (pm->pm_cstate.cs_tlb_id) 729 if (pm->pm_cstate.cs_tlb_id)
730 cpu_tlb_flushID_SE(va); 730 cpu_tlb_flushID_SE(va);
731} 731}
732 732
733static inline void 733static inline void
734pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va) 734pmap_tlb_flushD_SE(pmap_t pm, vaddr_t va)
735{ 735{
736 736
737 if (pm->pm_cstate.cs_tlb_d) 737 if (pm->pm_cstate.cs_tlb_d)
738 cpu_tlb_flushD_SE(va); 738 cpu_tlb_flushD_SE(va);
739} 739}
740 740
741static inline void 741static inline void
742pmap_tlb_flushID(pmap_t pm) 742pmap_tlb_flushID(pmap_t pm)
743{ 743{
744 744
745 if (pm->pm_cstate.cs_tlb_id) { 745 if (pm->pm_cstate.cs_tlb_id) {
746 cpu_tlb_flushID(); 746 cpu_tlb_flushID();
747#if ARM_MMU_V7 == 0 747#if ARM_MMU_V7 == 0
748 /* 748 /*
749 * Speculative loads by Cortex cores can cause TLB entries to 749 * Speculative loads by Cortex cores can cause TLB entries to
750 * be filled even if there are no explicit accesses, so there 750 * be filled even if there are no explicit accesses, so there
 751 * may always be TLB entries to flush. If we used ASIDs 751 * may always be TLB entries to flush. If we used ASIDs
752 * then it would not be a problem. 752 * then it would not be a problem.
753 * This is not true for other CPUs. 753 * This is not true for other CPUs.
754 */ 754 */
755 pm->pm_cstate.cs_tlb = 0; 755 pm->pm_cstate.cs_tlb = 0;
756#endif 756#endif
757 } 757 }
758} 758}
759 759
760static inline void 760static inline void
761pmap_tlb_flushD(pmap_t pm) 761pmap_tlb_flushD(pmap_t pm)
762{ 762{
763 763
764 if (pm->pm_cstate.cs_tlb_d) { 764 if (pm->pm_cstate.cs_tlb_d) {
765 cpu_tlb_flushD(); 765 cpu_tlb_flushD();
766#if ARM_MMU_V7 == 0 766#if ARM_MMU_V7 == 0
767 /* 767 /*
768 * Speculative loads by Cortex cores can cause TLB entries to 768 * Speculative loads by Cortex cores can cause TLB entries to
769 * be filled even if there are no explicit accesses, so there 769 * be filled even if there are no explicit accesses, so there
 770 * may always be TLB entries to flush. If we used ASIDs 770 * may always be TLB entries to flush. If we used ASIDs
771 * then it would not be a problem. 771 * then it would not be a problem.
772 * This is not true for other CPUs. 772 * This is not true for other CPUs.
773 */ 773 */
774 pm->pm_cstate.cs_tlb_d = 0; 774 pm->pm_cstate.cs_tlb_d = 0;
775#endif 775#endif
776 } 776 }
777} 777}
778 778
779#ifdef PMAP_CACHE_VIVT 779#ifdef PMAP_CACHE_VIVT
780static inline void 780static inline void
781pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len) 781pmap_idcache_wbinv_range(pmap_t pm, vaddr_t va, vsize_t len)
782{ 782{
783 if (pm->pm_cstate.cs_cache_id) { 783 if (pm->pm_cstate.cs_cache_id) {
784 cpu_idcache_wbinv_range(va, len); 784 cpu_idcache_wbinv_range(va, len);
785 } 785 }
786} 786}
787 787
788static inline void 788static inline void
789pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len, 789pmap_dcache_wb_range(pmap_t pm, vaddr_t va, vsize_t len,
790 bool do_inv, bool rd_only) 790 bool do_inv, bool rd_only)
791{ 791{
792 792
793 if (pm->pm_cstate.cs_cache_d) { 793 if (pm->pm_cstate.cs_cache_d) {
794 if (do_inv) { 794 if (do_inv) {
795 if (rd_only) 795 if (rd_only)
796 cpu_dcache_inv_range(va, len); 796 cpu_dcache_inv_range(va, len);
797 else 797 else
798 cpu_dcache_wbinv_range(va, len); 798 cpu_dcache_wbinv_range(va, len);
799 } else 799 } else
800 if (!rd_only) 800 if (!rd_only)
801 cpu_dcache_wb_range(va, len); 801 cpu_dcache_wb_range(va, len);
802 } 802 }
803} 803}
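
The (do_inv, rd_only) decision above boils down to: invalidate only for read-only data that may be discarded, write-back plus invalidate otherwise when invalidation is requested, plain write-back when it is not, and no cache operation at all for clean read-only data. A small stand-alone restatement (toy code, not the pmap):

/* Sketch: name the cache op chosen for each (do_inv, rd_only) pair. */
#include <stdbool.h>
#include <stdio.h>

static const char *
dcache_op(bool do_inv, bool rd_only)
{
	if (do_inv)
		return rd_only ? "dcache_inv_range" : "dcache_wbinv_range";
	return rd_only ? "no cache op" : "dcache_wb_range";
}

int
main(void)
{
	for (int i = 0; i < 4; i++)
		printf("do_inv=%d rd_only=%d -> %s\n",
		    (i >> 1) & 1, i & 1, dcache_op(i >> 1, i & 1));
	return 0;
}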
804 804
805static inline void 805static inline void
806pmap_idcache_wbinv_all(pmap_t pm) 806pmap_idcache_wbinv_all(pmap_t pm)
807{ 807{
808 if (pm->pm_cstate.cs_cache_id) { 808 if (pm->pm_cstate.cs_cache_id) {
809 cpu_idcache_wbinv_all(); 809 cpu_idcache_wbinv_all();
810 pm->pm_cstate.cs_cache = 0; 810 pm->pm_cstate.cs_cache = 0;
811 } 811 }
812} 812}
813 813
814static inline void 814static inline void
815pmap_dcache_wbinv_all(pmap_t pm) 815pmap_dcache_wbinv_all(pmap_t pm)
816{ 816{
817 if (pm->pm_cstate.cs_cache_d) { 817 if (pm->pm_cstate.cs_cache_d) {
818 cpu_dcache_wbinv_all(); 818 cpu_dcache_wbinv_all();
819 pm->pm_cstate.cs_cache_d = 0; 819 pm->pm_cstate.cs_cache_d = 0;
820 } 820 }
821} 821}
822#endif /* PMAP_CACHE_VIVT */ 822#endif /* PMAP_CACHE_VIVT */
823 823
824static inline bool 824static inline bool
825pmap_is_current(pmap_t pm) 825pmap_is_current(pmap_t pm)
826{ 826{
827 827
828 if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm) 828 if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
829 return true; 829 return true;
830 830
831 return false; 831 return false;
832} 832}
833 833
834static inline bool 834static inline bool
835pmap_is_cached(pmap_t pm) 835pmap_is_cached(pmap_t pm)
836{ 836{
837 837
838 if (pm == pmap_kernel() || pmap_recent_user == NULL || 838 if (pm == pmap_kernel() || pmap_recent_user == NULL ||
839 pmap_recent_user == pm) 839 pmap_recent_user == pm)
840 return (true); 840 return (true);
841 841
842 return false; 842 return false;
843} 843}
844 844
845/* 845/*
846 * PTE_SYNC_CURRENT: 846 * PTE_SYNC_CURRENT:
847 * 847 *
848 * Make sure the pte is written out to RAM. 848 * Make sure the pte is written out to RAM.
 849 * We need to do this in one of the following cases: 849 * We need to do this in one of the following cases:
850 * - We're dealing with the kernel pmap 850 * - We're dealing with the kernel pmap
851 * - There is no pmap active in the cache/tlb. 851 * - There is no pmap active in the cache/tlb.
852 * - The specified pmap is 'active' in the cache/tlb. 852 * - The specified pmap is 'active' in the cache/tlb.
853 */ 853 */
854#ifdef PMAP_INCLUDE_PTE_SYNC 854#ifdef PMAP_INCLUDE_PTE_SYNC
855#define PTE_SYNC_CURRENT(pm, ptep) \ 855#define PTE_SYNC_CURRENT(pm, ptep) \
856do { \ 856do { \
857 if (PMAP_NEEDS_PTE_SYNC && \ 857 if (PMAP_NEEDS_PTE_SYNC && \
858 pmap_is_cached(pm)) \ 858 pmap_is_cached(pm)) \
859 PTE_SYNC(ptep); \ 859 PTE_SYNC(ptep); \
860} while (/*CONSTCOND*/0) 860} while (/*CONSTCOND*/0)
861#else 861#else
862#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ 862#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */
863#endif 863#endif
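
A hedged sketch of the idea behind PTE_SYNC_CURRENT (all names below are stand-ins; the real PTE_SYNC cleans the cache line holding the PTE): the write-back is only issued when the pmap could actually be live in the cache/TLB, otherwise the stale line is harmless until the pmap is next activated.

/* Sketch: conditionally "sync" a PTE depending on whether the owning
 * pmap is believed to be cached.  Toy stand-ins for kernel machinery. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t pt_entry_t;

struct toy_pmap {
	bool is_kernel;
	bool is_recent_user;
};

static bool
toy_pmap_is_cached(const struct toy_pmap *pm)
{
	return pm->is_kernel || pm->is_recent_user;
}

static void
toy_pte_sync(pt_entry_t *ptep)
{
	/* stands in for cleaning the cache line that holds *ptep */
	printf("synced PTE at %p (0x%08x)\n", (void *)ptep, *ptep);
}

int
main(void)
{
	struct toy_pmap pm = { .is_kernel = false, .is_recent_user = true };
	pt_entry_t pte = 0x12345002;	/* pretend we just rewrote this PTE */

	if (toy_pmap_is_cached(&pm))	/* the PTE_SYNC_CURRENT-style guard */
		toy_pte_sync(&pte);
	return 0;
}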
864 864
865/* 865/*
866 * main pv_entry manipulation functions: 866 * main pv_entry manipulation functions:
867 * pmap_enter_pv: enter a mapping onto a vm_page list 867 * pmap_enter_pv: enter a mapping onto a vm_page list
868 * pmap_remove_pv: remove a mapping from a vm_page list 868 * pmap_remove_pv: remove a mapping from a vm_page list
869 * 869 *
870 * NOTE: pmap_enter_pv expects to lock the pvh itself 870 * NOTE: pmap_enter_pv expects to lock the pvh itself
871 * pmap_remove_pv expects the caller to lock the pvh before calling 871 * pmap_remove_pv expects the caller to lock the pvh before calling
872 */ 872 */
873 873
874/* 874/*
 875 * pmap_enter_pv: enter a mapping onto a vm_page list 875 * pmap_enter_pv: enter a mapping onto a vm_page list
876 * 876 *
877 * => caller should hold the proper lock on pmap_main_lock 877 * => caller should hold the proper lock on pmap_main_lock
878 * => caller should have pmap locked 878 * => caller should have pmap locked
879 * => we will gain the lock on the vm_page and allocate the new pv_entry 879 * => we will gain the lock on the vm_page and allocate the new pv_entry
880 * => caller should adjust ptp's wire_count before calling 880 * => caller should adjust ptp's wire_count before calling
881 * => caller should not adjust pmap's wire_count 881 * => caller should not adjust pmap's wire_count
882 */ 882 */
883static void 883static void
884pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm, 884pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
885 vaddr_t va, u_int flags) 885 vaddr_t va, u_int flags)
886{ 886{
887 struct pv_entry **pvp; 887 struct pv_entry **pvp;
888 888
889 NPDEBUG(PDB_PVDUMP, 889 NPDEBUG(PDB_PVDUMP,
890 printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags)); 890 printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));
891 891
892 pv->pv_pmap = pm; 892 pv->pv_pmap = pm;
893 pv->pv_va = va; 893 pv->pv_va = va;
894 pv->pv_flags = flags; 894 pv->pv_flags = flags;
895 895
896 pvp = &SLIST_FIRST(&md->pvh_list); 896 pvp = &SLIST_FIRST(&md->pvh_list);
897#ifdef PMAP_CACHE_VIPT 897#ifdef PMAP_CACHE_VIPT
898 /* 898 /*
899 * Insert unmanaged entries, writeable first, at the head of 899 * Insert unmanaged entries, writeable first, at the head of
900 * the pv list. 900 * the pv list.
901 */ 901 */
902 if (__predict_true((flags & PVF_KENTRY) == 0)) { 902 if (__predict_true((flags & PVF_KENTRY) == 0)) {
903 while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY) 903 while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
904 pvp = &SLIST_NEXT(*pvp, pv_link); 904 pvp = &SLIST_NEXT(*pvp, pv_link);
905 } else if ((flags & PVF_WRITE) == 0) { 905 } else if ((flags & PVF_WRITE) == 0) {
906 while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE) 906 while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
907 pvp = &SLIST_NEXT(*pvp, pv_link); 907 pvp = &SLIST_NEXT(*pvp, pv_link);
908 } 908 }
909#endif 909#endif
910 SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */ 910 SLIST_NEXT(pv, pv_link) = *pvp; /* add to ... */
911 *pvp = pv; /* ... locked list */ 911 *pvp = pv; /* ... locked list */
912 md->pvh_attrs |= flags & (PVF_REF | PVF_MOD); 912 md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
913#ifdef PMAP_CACHE_VIPT 913#ifdef PMAP_CACHE_VIPT
914 if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE) 914 if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
915 md->pvh_attrs |= PVF_KMOD; 915 md->pvh_attrs |= PVF_KMOD;
916 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 916 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
917 md->pvh_attrs |= PVF_DIRTY; 917 md->pvh_attrs |= PVF_DIRTY;
918 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 918 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
919#endif 919#endif
920 if (pm == pmap_kernel()) { 920 if (pm == pmap_kernel()) {
921 PMAPCOUNT(kernel_mappings); 921 PMAPCOUNT(kernel_mappings);
922 if (flags & PVF_WRITE) 922 if (flags & PVF_WRITE)
923 md->krw_mappings++; 923 md->krw_mappings++;
924 else 924 else
925 md->kro_mappings++; 925 md->kro_mappings++;
926 } else { 926 } else {
927 if (flags & PVF_WRITE) 927 if (flags & PVF_WRITE)
928 md->urw_mappings++; 928 md->urw_mappings++;
929 else 929 else
930 md->uro_mappings++; 930 md->uro_mappings++;
931 } 931 }
932 932
933#ifdef PMAP_CACHE_VIPT 933#ifdef PMAP_CACHE_VIPT
934 /* 934 /*
935 * Even though pmap_vac_me_harder will set PVF_WRITE for us, 935 * Even though pmap_vac_me_harder will set PVF_WRITE for us,
 936 * do it here as well to keep the mappings & PVF_WRITE consistent. 936 * do it here as well to keep the mappings & PVF_WRITE consistent.
937 */ 937 */
938 if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) { 938 if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
939 md->pvh_attrs |= PVF_WRITE; 939 md->pvh_attrs |= PVF_WRITE;
940 } 940 }
941 /* 941 /*
 942 * If this is an exec mapping and it's the first exec mapping 942 * If this is an exec mapping and it's the first exec mapping
943 * for this page, make sure to sync the I-cache. 943 * for this page, make sure to sync the I-cache.
944 */ 944 */
945 if (PV_IS_EXEC_P(flags)) { 945 if (PV_IS_EXEC_P(flags)) {
946 if (!PV_IS_EXEC_P(md->pvh_attrs)) { 946 if (!PV_IS_EXEC_P(md->pvh_attrs)) {
947 pmap_syncicache_page(md, pa); 947 pmap_syncicache_page(md, pa);
948 PMAPCOUNT(exec_synced_map); 948 PMAPCOUNT(exec_synced_map);
949 } 949 }
950 PMAPCOUNT(exec_mappings); 950 PMAPCOUNT(exec_mappings);
951 } 951 }
952#endif 952#endif
953 953
954 PMAPCOUNT(mappings); 954 PMAPCOUNT(mappings);
955 955
956 if (pv->pv_flags & PVF_WIRED) 956 if (pv->pv_flags & PVF_WIRED)
957 ++pm->pm_stats.wired_count; 957 ++pm->pm_stats.wired_count;
958} 958}
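
The list surgery in pmap_enter_pv() and pmap_remove_pv() relies on walking a pointer-to-pointer (&SLIST_FIRST / &SLIST_NEXT) until it designates the link to splice at. A generic, hedged illustration of that technique with toy types:

/* Sketch: the pointer-to-pointer insertion walk, with toy types.
 * keep_at_head plays the role of PVF_KENTRY in the code above. */
#include <stdio.h>

struct node {
	struct node *next;
	int keep_at_head;
	int value;
};

static void
insert_after_pinned(struct node **headp, struct node *n)
{
	struct node **npp = headp;

	/* skip past the entries that must stay at the head of the list */
	while (*npp != NULL && (*npp)->keep_at_head)
		npp = &(*npp)->next;

	n->next = *npp;		/* new node takes over the rest of the list */
	*npp = n;		/* and the chosen link now points at it */
}

int
main(void)
{
	struct node pinned = { NULL, 1, 1 };
	struct node plain = { NULL, 0, 2 };
	struct node *head = &pinned;

	insert_after_pinned(&head, &plain);
	for (struct node *n = head; n != NULL; n = n->next)
		printf("%d ", n->value);	/* prints "1 2" */
	printf("\n");
	return 0;
}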
959 959
960/* 960/*
961 * 961 *
962 * pmap_find_pv: Find a pv entry 962 * pmap_find_pv: Find a pv entry
963 * 963 *
964 * => caller should hold lock on vm_page 964 * => caller should hold lock on vm_page
965 */ 965 */
966static inline struct pv_entry * 966static inline struct pv_entry *
967pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va) 967pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
968{ 968{
969 struct pv_entry *pv; 969 struct pv_entry *pv;
970 970
971 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 971 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
972 if (pm == pv->pv_pmap && va == pv->pv_va) 972 if (pm == pv->pv_pmap && va == pv->pv_va)
973 break; 973 break;
974 } 974 }
975 975
976 return (pv); 976 return (pv);
977} 977}
978 978
979/* 979/*
980 * pmap_remove_pv: try to remove a mapping from a pv_list 980 * pmap_remove_pv: try to remove a mapping from a pv_list
981 * 981 *
982 * => caller should hold proper lock on pmap_main_lock 982 * => caller should hold proper lock on pmap_main_lock
983 * => pmap should be locked 983 * => pmap should be locked
984 * => caller should hold lock on vm_page [so that attrs can be adjusted] 984 * => caller should hold lock on vm_page [so that attrs can be adjusted]
985 * => caller should adjust ptp's wire_count and free PTP if needed 985 * => caller should adjust ptp's wire_count and free PTP if needed
986 * => caller should NOT adjust pmap's wire_count 986 * => caller should NOT adjust pmap's wire_count
987 * => we return the removed pv 987 * => we return the removed pv
988 */ 988 */
989static struct pv_entry * 989static struct pv_entry *
990pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 990pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
991{ 991{
992 struct pv_entry *pv, **prevptr; 992 struct pv_entry *pv, **prevptr;
993 993
994 NPDEBUG(PDB_PVDUMP, 994 NPDEBUG(PDB_PVDUMP,
995 printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va)); 995 printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
996 996
997 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */ 997 prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
998 pv = *prevptr; 998 pv = *prevptr;
999 999
1000 while (pv) { 1000 while (pv) {
1001 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */ 1001 if (pv->pv_pmap == pm && pv->pv_va == va) { /* match? */
1002 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md " 1002 NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
1003 "%p, flags 0x%x\n", pm, md, pv->pv_flags)); 1003 "%p, flags 0x%x\n", pm, md, pv->pv_flags));
1004 if (pv->pv_flags & PVF_WIRED) { 1004 if (pv->pv_flags & PVF_WIRED) {
1005 --pm->pm_stats.wired_count; 1005 --pm->pm_stats.wired_count;
1006 } 1006 }
1007 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */ 1007 *prevptr = SLIST_NEXT(pv, pv_link); /* remove it! */
1008 if (pm == pmap_kernel()) { 1008 if (pm == pmap_kernel()) {
1009 PMAPCOUNT(kernel_unmappings); 1009 PMAPCOUNT(kernel_unmappings);
1010 if (pv->pv_flags & PVF_WRITE) 1010 if (pv->pv_flags & PVF_WRITE)
1011 md->krw_mappings--; 1011 md->krw_mappings--;
1012 else 1012 else
1013 md->kro_mappings--; 1013 md->kro_mappings--;
1014 } else { 1014 } else {
1015 if (pv->pv_flags & PVF_WRITE) 1015 if (pv->pv_flags & PVF_WRITE)
1016 md->urw_mappings--; 1016 md->urw_mappings--;
1017 else 1017 else
1018 md->uro_mappings--; 1018 md->uro_mappings--;
1019 } 1019 }
1020 1020
1021 PMAPCOUNT(unmappings); 1021 PMAPCOUNT(unmappings);
1022#ifdef PMAP_CACHE_VIPT 1022#ifdef PMAP_CACHE_VIPT
1023 if (!(pv->pv_flags & PVF_WRITE)) 1023 if (!(pv->pv_flags & PVF_WRITE))
1024 break; 1024 break;
1025 /* 1025 /*
1026 * If this page has had an exec mapping, then if 1026 * If this page has had an exec mapping, then if
1027 * this was the last mapping, discard the contents, 1027 * this was the last mapping, discard the contents,
1028 * otherwise sync the i-cache for this page. 1028 * otherwise sync the i-cache for this page.
1029 */ 1029 */
1030 if (PV_IS_EXEC_P(md->pvh_attrs)) { 1030 if (PV_IS_EXEC_P(md->pvh_attrs)) {
1031 if (SLIST_EMPTY(&md->pvh_list)) { 1031 if (SLIST_EMPTY(&md->pvh_list)) {
1032 md->pvh_attrs &= ~PVF_EXEC; 1032 md->pvh_attrs &= ~PVF_EXEC;
1033 PMAPCOUNT(exec_discarded_unmap); 1033 PMAPCOUNT(exec_discarded_unmap);
1034 } else { 1034 } else {
1035 pmap_syncicache_page(md, pa); 1035 pmap_syncicache_page(md, pa);
1036 PMAPCOUNT(exec_synced_unmap); 1036 PMAPCOUNT(exec_synced_unmap);
1037 } 1037 }
1038 } 1038 }
1039#endif /* PMAP_CACHE_VIPT */ 1039#endif /* PMAP_CACHE_VIPT */
1040 break; 1040 break;
1041 } 1041 }
1042 prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */ 1042 prevptr = &SLIST_NEXT(pv, pv_link); /* previous pointer */
1043 pv = *prevptr; /* advance */ 1043 pv = *prevptr; /* advance */
1044 } 1044 }
1045 1045
1046#ifdef PMAP_CACHE_VIPT 1046#ifdef PMAP_CACHE_VIPT
1047 /* 1047 /*
1048 * If we no longer have a WRITEABLE KENTRY at the head of list, 1048 * If we no longer have a WRITEABLE KENTRY at the head of list,
1049 * clear the KMOD attribute from the page. 1049 * clear the KMOD attribute from the page.
1050 */ 1050 */
1051 if (SLIST_FIRST(&md->pvh_list) == NULL 1051 if (SLIST_FIRST(&md->pvh_list) == NULL
1052 || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE) 1052 || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
1053 md->pvh_attrs &= ~PVF_KMOD; 1053 md->pvh_attrs &= ~PVF_KMOD;
1054 1054
1055 /* 1055 /*
1056 * If this was a writeable page and there are no more writeable 1056 * If this was a writeable page and there are no more writeable
1057 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback 1057 * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
1058 * the contents to memory. 1058 * the contents to memory.
1059 */ 1059 */
1060 if (arm_cache_prefer_mask != 0) { 1060 if (arm_cache_prefer_mask != 0) {
1061 if (md->krw_mappings + md->urw_mappings == 0) 1061 if (md->krw_mappings + md->urw_mappings == 0)
1062 md->pvh_attrs &= ~PVF_WRITE; 1062 md->pvh_attrs &= ~PVF_WRITE;
1063 PMAP_VALIDATE_MD_PAGE(md); 1063 PMAP_VALIDATE_MD_PAGE(md);
1064 } 1064 }
1065 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1065 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1066#endif /* PMAP_CACHE_VIPT */ 1066#endif /* PMAP_CACHE_VIPT */
1067 1067
1068 return(pv); /* return removed pv */ 1068 return(pv); /* return removed pv */
1069} 1069}
1070 1070
1071/* 1071/*
1072 * 1072 *
1073 * pmap_modify_pv: Update pv flags 1073 * pmap_modify_pv: Update pv flags
1074 * 1074 *
1075 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1075 * => caller should hold lock on vm_page [so that attrs can be adjusted]
1076 * => caller should NOT adjust pmap's wire_count 1076 * => caller should NOT adjust pmap's wire_count
1077 * => caller must call pmap_vac_me_harder() if writable status of a page 1077 * => caller must call pmap_vac_me_harder() if writable status of a page
1078 * may have changed. 1078 * may have changed.
1079 * => we return the old flags 1079 * => we return the old flags
1080 *  1080 *
1081 * Modify a physical-virtual mapping in the pv table 1081 * Modify a physical-virtual mapping in the pv table
1082 */ 1082 */
1083static u_int 1083static u_int
1084pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va, 1084pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
1085 u_int clr_mask, u_int set_mask) 1085 u_int clr_mask, u_int set_mask)
1086{ 1086{
1087 struct pv_entry *npv; 1087 struct pv_entry *npv;
1088 u_int flags, oflags; 1088 u_int flags, oflags;
1089 1089
1090 KASSERT((clr_mask & PVF_KENTRY) == 0); 1090 KASSERT((clr_mask & PVF_KENTRY) == 0);
1091 KASSERT((set_mask & PVF_KENTRY) == 0); 1091 KASSERT((set_mask & PVF_KENTRY) == 0);
1092 1092
1093 if ((npv = pmap_find_pv(md, pm, va)) == NULL) 1093 if ((npv = pmap_find_pv(md, pm, va)) == NULL)
1094 return (0); 1094 return (0);
1095 1095
1096 NPDEBUG(PDB_PVDUMP, 1096 NPDEBUG(PDB_PVDUMP,
1097 printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags)); 1097 printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
1098 1098
1099 /* 1099 /*
1100 * There is at least one VA mapping this page. 1100 * There is at least one VA mapping this page.
1101 */ 1101 */
1102 1102
1103 if (clr_mask & (PVF_REF | PVF_MOD)) { 1103 if (clr_mask & (PVF_REF | PVF_MOD)) {
1104 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1104 md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1105#ifdef PMAP_CACHE_VIPT 1105#ifdef PMAP_CACHE_VIPT
1106 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC) 1106 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
1107 md->pvh_attrs |= PVF_DIRTY; 1107 md->pvh_attrs |= PVF_DIRTY;
1108 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1108 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1109#endif 1109#endif
1110 } 1110 }
1111 1111
1112 oflags = npv->pv_flags; 1112 oflags = npv->pv_flags;
1113 npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; 1113 npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
1114 1114
1115 if ((flags ^ oflags) & PVF_WIRED) { 1115 if ((flags ^ oflags) & PVF_WIRED) {
1116 if (flags & PVF_WIRED) 1116 if (flags & PVF_WIRED)
1117 ++pm->pm_stats.wired_count; 1117 ++pm->pm_stats.wired_count;
1118 else 1118 else
1119 --pm->pm_stats.wired_count; 1119 --pm->pm_stats.wired_count;
1120 } 1120 }
1121 1121
1122 if ((flags ^ oflags) & PVF_WRITE) { 1122 if ((flags ^ oflags) & PVF_WRITE) {
1123 if (pm == pmap_kernel()) { 1123 if (pm == pmap_kernel()) {
1124 if (flags & PVF_WRITE) { 1124 if (flags & PVF_WRITE) {
1125 md->krw_mappings++; 1125 md->krw_mappings++;
1126 md->kro_mappings--; 1126 md->kro_mappings--;
1127 } else { 1127 } else {
1128 md->kro_mappings++; 1128 md->kro_mappings++;
1129 md->krw_mappings--; 1129 md->krw_mappings--;
1130 } 1130 }
1131 } else { 1131 } else {
1132 if (flags & PVF_WRITE) { 1132 if (flags & PVF_WRITE) {
1133 md->urw_mappings++; 1133 md->urw_mappings++;
1134 md->uro_mappings--; 1134 md->uro_mappings--;
1135 } else { 1135 } else {
1136 md->uro_mappings++; 1136 md->uro_mappings++;
1137 md->urw_mappings--; 1137 md->urw_mappings--;
1138 } 1138 }
1139 } 1139 }
1140 } 1140 }
1141#ifdef PMAP_CACHE_VIPT 1141#ifdef PMAP_CACHE_VIPT
1142 if (arm_cache_prefer_mask != 0) { 1142 if (arm_cache_prefer_mask != 0) {
1143 if (md->urw_mappings + md->krw_mappings == 0) { 1143 if (md->urw_mappings + md->krw_mappings == 0) {
1144 md->pvh_attrs &= ~PVF_WRITE; 1144 md->pvh_attrs &= ~PVF_WRITE;
1145 } else { 1145 } else {
1146 md->pvh_attrs |= PVF_WRITE; 1146 md->pvh_attrs |= PVF_WRITE;
1147 } 1147 }
1148 } 1148 }
1149 /* 1149 /*
1150 * We have two cases here: the first is from enter_pv (new exec 1150 * We have two cases here: the first is from enter_pv (new exec
1151 * page), the second is a combined pmap_remove_pv/pmap_enter_pv. 1151 * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
 1152 * Since in the latter, pmap_enter_pv won't do anything, we just have 1152 * Since in the latter, pmap_enter_pv won't do anything, we just have
1153 * to do what pmap_remove_pv would do. 1153 * to do what pmap_remove_pv would do.
1154 */ 1154 */
1155 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs)) 1155 if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs))
1156 || (PV_IS_EXEC_P(md->pvh_attrs) 1156 || (PV_IS_EXEC_P(md->pvh_attrs)
1157 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) { 1157 || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1158 pmap_syncicache_page(md, pa); 1158 pmap_syncicache_page(md, pa);
1159 PMAPCOUNT(exec_synced_remap); 1159 PMAPCOUNT(exec_synced_remap);
1160 } 1160 }
1161 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1161 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1162#endif 1162#endif
1163 1163
1164 PMAPCOUNT(remappings); 1164 PMAPCOUNT(remappings);
1165 1165
1166 return (oflags); 1166 return (oflags);
1167} 1167}
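
The core of the flag update above is the expression (oflags & ~clr_mask) | set_mask. A quick stand-alone check with made-up flag bits (the real PVF_* values live in pmap.h):

/* Sketch: clear-then-set flag update, with illustrative bit values. */
#include <stdio.h>

#define TOY_WIRED	0x01
#define TOY_WRITE	0x02
#define TOY_REF		0x04

int
main(void)
{
	unsigned oflags = TOY_WRITE | TOY_REF;
	unsigned clr_mask = TOY_WRITE;		/* downgrade to read-only */
	unsigned set_mask = TOY_WIRED;		/* and wire the mapping */
	unsigned flags = (oflags & ~clr_mask) | set_mask;

	printf("old 0x%x -> new 0x%x\n", oflags, flags);	/* 0x6 -> 0x5 */
	return 0;
}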
1168 1168
1169/* 1169/*
1170 * Allocate an L1 translation table for the specified pmap. 1170 * Allocate an L1 translation table for the specified pmap.
1171 * This is called at pmap creation time. 1171 * This is called at pmap creation time.
1172 */ 1172 */
1173static void 1173static void
1174pmap_alloc_l1(pmap_t pm) 1174pmap_alloc_l1(pmap_t pm)
1175{ 1175{
1176 struct l1_ttable *l1; 1176 struct l1_ttable *l1;
1177 uint8_t domain; 1177 uint8_t domain;
1178 1178
1179 /* 1179 /*
1180 * Remove the L1 at the head of the LRU list 1180 * Remove the L1 at the head of the LRU list
1181 */ 1181 */
1182 mutex_spin_enter(&l1_lru_lock); 1182 mutex_spin_enter(&l1_lru_lock);
1183 l1 = TAILQ_FIRST(&l1_lru_list); 1183 l1 = TAILQ_FIRST(&l1_lru_list);
1184 KDASSERT(l1 != NULL); 1184 KDASSERT(l1 != NULL);
1185 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1185 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1186 1186
1187 /* 1187 /*
1188 * Pick the first available domain number, and update 1188 * Pick the first available domain number, and update
1189 * the link to the next number. 1189 * the link to the next number.
1190 */ 1190 */
1191 domain = l1->l1_domain_first; 1191 domain = l1->l1_domain_first;
1192 l1->l1_domain_first = l1->l1_domain_free[domain]; 1192 l1->l1_domain_first = l1->l1_domain_free[domain];
1193 1193
1194 /* 1194 /*
1195 * If there are still free domain numbers in this L1, 1195 * If there are still free domain numbers in this L1,
1196 * put it back on the TAIL of the LRU list. 1196 * put it back on the TAIL of the LRU list.
1197 */ 1197 */
1198 if (++l1->l1_domain_use_count < PMAP_DOMAINS) 1198 if (++l1->l1_domain_use_count < PMAP_DOMAINS)
1199 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1199 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1200 1200
1201 mutex_spin_exit(&l1_lru_lock); 1201 mutex_spin_exit(&l1_lru_lock);
1202 1202
1203 /* 1203 /*
1204 * Fix up the relevant bits in the pmap structure 1204 * Fix up the relevant bits in the pmap structure
1205 */ 1205 */
1206 pm->pm_l1 = l1; 1206 pm->pm_l1 = l1;
1207 pm->pm_domain = domain + 1; 1207 pm->pm_domain = domain + 1;
1208} 1208}
1209 1209
1210/* 1210/*
1211 * Free an L1 translation table. 1211 * Free an L1 translation table.
1212 * This is called at pmap destruction time. 1212 * This is called at pmap destruction time.
1213 */ 1213 */
1214static void 1214static void
1215pmap_free_l1(pmap_t pm) 1215pmap_free_l1(pmap_t pm)
1216{ 1216{
1217 struct l1_ttable *l1 = pm->pm_l1; 1217 struct l1_ttable *l1 = pm->pm_l1;
1218 1218
1219 mutex_spin_enter(&l1_lru_lock); 1219 mutex_spin_enter(&l1_lru_lock);
1220 1220
1221 /* 1221 /*
1222 * If this L1 is currently on the LRU list, remove it. 1222 * If this L1 is currently on the LRU list, remove it.
1223 */ 1223 */
1224 if (l1->l1_domain_use_count < PMAP_DOMAINS) 1224 if (l1->l1_domain_use_count < PMAP_DOMAINS)
1225 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1225 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1226 1226
1227 /* 1227 /*
1228 * Free up the domain number which was allocated to the pmap 1228 * Free up the domain number which was allocated to the pmap
1229 */ 1229 */
1230 l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first; 1230 l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
1231 l1->l1_domain_first = pm->pm_domain - 1; 1231 l1->l1_domain_first = pm->pm_domain - 1;
1232 l1->l1_domain_use_count--; 1232 l1->l1_domain_use_count--;
1233 1233
1234 /* 1234 /*
1235 * The L1 now must have at least 1 free domain, so add 1235 * The L1 now must have at least 1 free domain, so add
1236 * it back to the LRU list. If the use count is zero, 1236 * it back to the LRU list. If the use count is zero,
1237 * put it at the head of the list, otherwise it goes 1237 * put it at the head of the list, otherwise it goes
1238 * to the tail. 1238 * to the tail.
1239 */ 1239 */
1240 if (l1->l1_domain_use_count == 0) 1240 if (l1->l1_domain_use_count == 0)
1241 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); 1241 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
1242 else 1242 else
1243 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1243 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1244 1244
1245 mutex_spin_exit(&l1_lru_lock); 1245 mutex_spin_exit(&l1_lru_lock);
1246} 1246}
1247 1247
1248static inline void 1248static inline void
1249pmap_use_l1(pmap_t pm) 1249pmap_use_l1(pmap_t pm)
1250{ 1250{
1251 struct l1_ttable *l1; 1251 struct l1_ttable *l1;
1252 1252
1253 /* 1253 /*
1254 * Do nothing if we're in interrupt context. 1254 * Do nothing if we're in interrupt context.
1255 * Access to an L1 by the kernel pmap must not affect 1255 * Access to an L1 by the kernel pmap must not affect
1256 * the LRU list. 1256 * the LRU list.
1257 */ 1257 */
1258 if (cpu_intr_p() || pm == pmap_kernel()) 1258 if (cpu_intr_p() || pm == pmap_kernel())
1259 return; 1259 return;
1260 1260
1261 l1 = pm->pm_l1; 1261 l1 = pm->pm_l1;
1262 1262
1263 /* 1263 /*
1264 * If the L1 is not currently on the LRU list, just return 1264 * If the L1 is not currently on the LRU list, just return
1265 */ 1265 */
1266 if (l1->l1_domain_use_count == PMAP_DOMAINS) 1266 if (l1->l1_domain_use_count == PMAP_DOMAINS)
1267 return; 1267 return;
1268 1268
1269 mutex_spin_enter(&l1_lru_lock); 1269 mutex_spin_enter(&l1_lru_lock);
1270 1270
1271 /* 1271 /*
1272 * Check the use count again, now that we've acquired the lock 1272 * Check the use count again, now that we've acquired the lock
1273 */ 1273 */
1274 if (l1->l1_domain_use_count == PMAP_DOMAINS) { 1274 if (l1->l1_domain_use_count == PMAP_DOMAINS) {
1275 mutex_spin_exit(&l1_lru_lock); 1275 mutex_spin_exit(&l1_lru_lock);
1276 return; 1276 return;
1277 } 1277 }
1278 1278
1279 /* 1279 /*
1280 * Move the L1 to the back of the LRU list 1280 * Move the L1 to the back of the LRU list
1281 */ 1281 */
1282 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 1282 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1283 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 1283 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1284 1284
1285 mutex_spin_exit(&l1_lru_lock); 1285 mutex_spin_exit(&l1_lru_lock);
1286} 1286}
1287 1287
1288/* 1288/*
1289 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *) 1289 * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
1290 * 1290 *
1291 * Free an L2 descriptor table. 1291 * Free an L2 descriptor table.
1292 */ 1292 */
1293static inline void 1293static inline void
1294#ifndef PMAP_INCLUDE_PTE_SYNC 1294#ifndef PMAP_INCLUDE_PTE_SYNC
1295pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa) 1295pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
1296#else 1296#else
1297pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa) 1297pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
1298#endif 1298#endif
1299{ 1299{
1300#ifdef PMAP_INCLUDE_PTE_SYNC 1300#ifdef PMAP_INCLUDE_PTE_SYNC
1301#ifdef PMAP_CACHE_VIVT 1301#ifdef PMAP_CACHE_VIVT
1302 /* 1302 /*
1303 * Note: With a write-back cache, we may need to sync this 1303 * Note: With a write-back cache, we may need to sync this
1304 * L2 table before re-using it. 1304 * L2 table before re-using it.
1305 * This is because it may have belonged to a non-current 1305 * This is because it may have belonged to a non-current
1306 * pmap, in which case the cache syncs would have been 1306 * pmap, in which case the cache syncs would have been
1307 * skipped for the pages that were being unmapped. If the 1307 * skipped for the pages that were being unmapped. If the
1308 * L2 table were then to be immediately re-allocated to 1308 * L2 table were then to be immediately re-allocated to
1309 * the *current* pmap, it may well contain stale mappings 1309 * the *current* pmap, it may well contain stale mappings
1310 * which have not yet been cleared by a cache write-back 1310 * which have not yet been cleared by a cache write-back
1311 * and so would still be visible to the mmu. 1311 * and so would still be visible to the mmu.
1312 */ 1312 */
1313 if (need_sync) 1313 if (need_sync)
1314 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1314 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1315#endif /* PMAP_CACHE_VIVT */ 1315#endif /* PMAP_CACHE_VIVT */
1316#endif /* PMAP_INCLUDE_PTE_SYNC */ 1316#endif /* PMAP_INCLUDE_PTE_SYNC */
1317 pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa); 1317 pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
1318} 1318}
1319 1319
1320/* 1320/*
1321 * Returns a pointer to the L2 bucket associated with the specified pmap 1321 * Returns a pointer to the L2 bucket associated with the specified pmap
1322 * and VA, or NULL if no L2 bucket exists for the address. 1322 * and VA, or NULL if no L2 bucket exists for the address.
1323 */ 1323 */
1324static inline struct l2_bucket * 1324static inline struct l2_bucket *
1325pmap_get_l2_bucket(pmap_t pm, vaddr_t va) 1325pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
1326{ 1326{
1327 struct l2_dtable *l2; 1327 struct l2_dtable *l2;
1328 struct l2_bucket *l2b; 1328 struct l2_bucket *l2b;
1329 u_short l1idx; 1329 u_short l1idx;
1330 1330
1331 l1idx = L1_IDX(va); 1331 l1idx = L1_IDX(va);
1332 1332
1333 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || 1333 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
1334 (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) 1334 (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
1335 return (NULL); 1335 return (NULL);
1336 1336
1337 return (l2b); 1337 return (l2b);
1338} 1338}
1339 1339
1340/* 1340/*
1341 * Returns a pointer to the L2 bucket associated with the specified pmap 1341 * Returns a pointer to the L2 bucket associated with the specified pmap
1342 * and VA. 1342 * and VA.
1343 * 1343 *
1344 * If no L2 bucket exists, perform the necessary allocations to put an L2 1344 * If no L2 bucket exists, perform the necessary allocations to put an L2
1345 * bucket/page table in place. 1345 * bucket/page table in place.
1346 * 1346 *
1347 * Note that if a new L2 bucket/page was allocated, the caller *must* 1347 * Note that if a new L2 bucket/page was allocated, the caller *must*
1348 * increment the bucket occupancy counter appropriately *before*  1348 * increment the bucket occupancy counter appropriately *before*
1349 * releasing the pmap's lock to ensure no other thread or cpu deallocates 1349 * releasing the pmap's lock to ensure no other thread or cpu deallocates
1350 * the bucket/page in the meantime. 1350 * the bucket/page in the meantime.
1351 */ 1351 */
1352static struct l2_bucket * 1352static struct l2_bucket *
1353pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va) 1353pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
1354{ 1354{
1355 struct l2_dtable *l2; 1355 struct l2_dtable *l2;
1356 struct l2_bucket *l2b; 1356 struct l2_bucket *l2b;
1357 u_short l1idx; 1357 u_short l1idx;
1358 1358
1359 l1idx = L1_IDX(va); 1359 l1idx = L1_IDX(va);
1360 1360
1361 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 1361 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
1362 /* 1362 /*
1363 * No mapping at this address, as there is 1363 * No mapping at this address, as there is
1364 * no entry in the L1 table. 1364 * no entry in the L1 table.
1365 * Need to allocate a new l2_dtable. 1365 * Need to allocate a new l2_dtable.
1366 */ 1366 */
1367 if ((l2 = pmap_alloc_l2_dtable()) == NULL) 1367 if ((l2 = pmap_alloc_l2_dtable()) == NULL)
1368 return (NULL); 1368 return (NULL);
1369 1369
1370 /* 1370 /*
1371 * Link it into the parent pmap 1371 * Link it into the parent pmap
1372 */ 1372 */
1373 pm->pm_l2[L2_IDX(l1idx)] = l2; 1373 pm->pm_l2[L2_IDX(l1idx)] = l2;
1374 } 1374 }
1375 1375
1376 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 1376 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1377 1377
1378 /* 1378 /*
1379 * Fetch pointer to the L2 page table associated with the address. 1379 * Fetch pointer to the L2 page table associated with the address.
1380 */ 1380 */
1381 if (l2b->l2b_kva == NULL) { 1381 if (l2b->l2b_kva == NULL) {
1382 pt_entry_t *ptep; 1382 pt_entry_t *ptep;
1383 1383
1384 /* 1384 /*
1385 * No L2 page table has been allocated. Chances are, this 1385 * No L2 page table has been allocated. Chances are, this
1386 * is because we just allocated the l2_dtable, above. 1386 * is because we just allocated the l2_dtable, above.
1387 */ 1387 */
1388 if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) { 1388 if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) {
1389 /* 1389 /*
1390 * Oops, no more L2 page tables available at this 1390 * Oops, no more L2 page tables available at this
1391 * time. We may need to deallocate the l2_dtable 1391 * time. We may need to deallocate the l2_dtable
1392 * if we allocated a new one above. 1392 * if we allocated a new one above.
1393 */ 1393 */
1394 if (l2->l2_occupancy == 0) { 1394 if (l2->l2_occupancy == 0) {
1395 pm->pm_l2[L2_IDX(l1idx)] = NULL; 1395 pm->pm_l2[L2_IDX(l1idx)] = NULL;
1396 pmap_free_l2_dtable(l2); 1396 pmap_free_l2_dtable(l2);
1397 } 1397 }
1398 return (NULL); 1398 return (NULL);
1399 } 1399 }
1400 1400
1401 l2->l2_occupancy++; 1401 l2->l2_occupancy++;
1402 l2b->l2b_kva = ptep; 1402 l2b->l2b_kva = ptep;
1403 l2b->l2b_l1idx = l1idx; 1403 l2b->l2b_l1idx = l1idx;
1404 } 1404 }
1405 1405
1406 return (l2b); 1406 return (l2b);
1407} 1407}
1408 1408
1409/* 1409/*
1410 * One or more mappings in the specified L2 descriptor table have just been 1410 * One or more mappings in the specified L2 descriptor table have just been
1411 * invalidated. 1411 * invalidated.
1412 * 1412 *
1413 * Garbage collect the metadata and descriptor table itself if necessary. 1413 * Garbage collect the metadata and descriptor table itself if necessary.
1414 * 1414 *
1415 * The pmap lock must be acquired when this is called (not necessary 1415 * The pmap lock must be acquired when this is called (not necessary
1416 * for the kernel pmap). 1416 * for the kernel pmap).
1417 */ 1417 */
1418static void 1418static void
1419pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) 1419pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
1420{ 1420{
1421 struct l2_dtable *l2; 1421 struct l2_dtable *l2;
1422 pd_entry_t *pl1pd, l1pd; 1422 pd_entry_t *pl1pd, l1pd;
1423 pt_entry_t *ptep; 1423 pt_entry_t *ptep;
1424 u_short l1idx; 1424 u_short l1idx;
1425 1425
1426 KDASSERT(count <= l2b->l2b_occupancy); 1426 KDASSERT(count <= l2b->l2b_occupancy);
1427 1427
1428 /* 1428 /*
1429 * Update the bucket's reference count according to how many 1429 * Update the bucket's reference count according to how many
1430 * PTEs the caller has just invalidated. 1430 * PTEs the caller has just invalidated.
1431 */ 1431 */
1432 l2b->l2b_occupancy -= count; 1432 l2b->l2b_occupancy -= count;
1433 1433
1434 /* 1434 /*
1435 * Note: 1435 * Note:
1436 * 1436 *
1437 * Level 2 page tables allocated to the kernel pmap are never freed 1437 * Level 2 page tables allocated to the kernel pmap are never freed
1438 * as that would require checking all Level 1 page tables and 1438 * as that would require checking all Level 1 page tables and
1439 * removing any references to the Level 2 page table. See also the 1439 * removing any references to the Level 2 page table. See also the
1440 * comment elsewhere about never freeing bootstrap L2 descriptors. 1440 * comment elsewhere about never freeing bootstrap L2 descriptors.
1441 * 1441 *
1442 * We make do with just invalidating the mapping in the L2 table. 1442 * We make do with just invalidating the mapping in the L2 table.
1443 * 1443 *
1444 * This isn't really a big deal in practice and, in fact, leads 1444 * This isn't really a big deal in practice and, in fact, leads
1445 * to a performance win over time as we don't need to continually 1445 * to a performance win over time as we don't need to continually
1446 * alloc/free. 1446 * alloc/free.
1447 */ 1447 */
1448 if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) 1448 if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
1449 return; 1449 return;
1450 1450
1451 /* 1451 /*
1452 * There are no more valid mappings in this level 2 page table. 1452 * There are no more valid mappings in this level 2 page table.
1453 * Go ahead and NULL-out the pointer in the bucket, then 1453 * Go ahead and NULL-out the pointer in the bucket, then
1454 * free the page table. 1454 * free the page table.
1455 */ 1455 */
1456 l1idx = l2b->l2b_l1idx; 1456 l1idx = l2b->l2b_l1idx;
1457 ptep = l2b->l2b_kva; 1457 ptep = l2b->l2b_kva;
1458 l2b->l2b_kva = NULL; 1458 l2b->l2b_kva = NULL;
1459 1459
1460 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 1460 pl1pd = &pm->pm_l1->l1_kva[l1idx];
1461 1461
1462 /* 1462 /*
1463 * If the L1 slot matches the pmap's domain 1463 * If the L1 slot matches the pmap's domain
1464 * number, then invalidate it. 1464 * number, then invalidate it.
1465 */ 1465 */
1466 l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); 1466 l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
1467 if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { 1467 if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
1468 *pl1pd = 0; 1468 *pl1pd = 0;
1469 PTE_SYNC(pl1pd); 1469 PTE_SYNC(pl1pd);
1470 } 1470 }
1471 1471
1472 /* 1472 /*
1473 * Release the L2 descriptor table back to the pool cache. 1473 * Release the L2 descriptor table back to the pool cache.
1474 */ 1474 */
1475#ifndef PMAP_INCLUDE_PTE_SYNC 1475#ifndef PMAP_INCLUDE_PTE_SYNC
1476 pmap_free_l2_ptp(ptep, l2b->l2b_phys); 1476 pmap_free_l2_ptp(ptep, l2b->l2b_phys);
1477#else 1477#else
1478 pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys); 1478 pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
1479#endif 1479#endif
1480 1480
1481 /* 1481 /*
1482 * Update the reference count in the associated l2_dtable 1482 * Update the reference count in the associated l2_dtable
1483 */ 1483 */
1484 l2 = pm->pm_l2[L2_IDX(l1idx)]; 1484 l2 = pm->pm_l2[L2_IDX(l1idx)];
1485 if (--l2->l2_occupancy > 0) 1485 if (--l2->l2_occupancy > 0)
1486 return; 1486 return;
1487 1487
1488 /* 1488 /*
1489 * There are no more valid mappings in any of the Level 1 1489 * There are no more valid mappings in any of the Level 1
1490 * slots managed by this l2_dtable. Go ahead and NULL-out 1490 * slots managed by this l2_dtable. Go ahead and NULL-out
1491 * the pointer in the parent pmap and free the l2_dtable. 1491 * the pointer in the parent pmap and free the l2_dtable.
1492 */ 1492 */
1493 pm->pm_l2[L2_IDX(l1idx)] = NULL; 1493 pm->pm_l2[L2_IDX(l1idx)] = NULL;
1494 pmap_free_l2_dtable(l2); 1494 pmap_free_l2_dtable(l2);
1495} 1495}
1496 1496
1497/* 1497/*
1498 * Pool cache constructors for L2 descriptor tables, metadata and pmap 1498 * Pool cache constructors for L2 descriptor tables, metadata and pmap
1499 * structures. 1499 * structures.
1500 */ 1500 */
1501static int 1501static int
1502pmap_l2ptp_ctor(void *arg, void *v, int flags) 1502pmap_l2ptp_ctor(void *arg, void *v, int flags)
1503{ 1503{
1504#ifndef PMAP_INCLUDE_PTE_SYNC 1504#ifndef PMAP_INCLUDE_PTE_SYNC
1505 struct l2_bucket *l2b; 1505 struct l2_bucket *l2b;
1506 pt_entry_t *ptep, pte; 1506 pt_entry_t *ptep, pte;
1507 vaddr_t va = (vaddr_t)v & ~PGOFSET; 1507 vaddr_t va = (vaddr_t)v & ~PGOFSET;
1508 1508
1509 /* 1509 /*
1510 * The mappings for these page tables were initially made using 1510 * The mappings for these page tables were initially made using
1511 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache- 1511 * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
1512 * mode will not be right for page table mappings. To avoid 1512 * mode will not be right for page table mappings. To avoid
1513 * polluting the pmap_kenter_pa() code with a special case for 1513 * polluting the pmap_kenter_pa() code with a special case for
1514 * page tables, we simply fix up the cache-mode here if it's not 1514 * page tables, we simply fix up the cache-mode here if it's not
1515 * correct. 1515 * correct.
1516 */ 1516 */
1517 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 1517 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1518 KDASSERT(l2b != NULL); 1518 KDASSERT(l2b != NULL);
1519 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1519 ptep = &l2b->l2b_kva[l2pte_index(va)];
1520 pte = *ptep; 1520 pte = *ptep;
1521 1521
1522 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 1522 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1523 /* 1523 /*
1524 * Page tables must have the cache-mode set to Write-Thru. 1524 * Page tables must have the cache-mode set to Write-Thru.
1525 */ 1525 */
1526 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 1526 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
1527 PTE_SYNC(ptep); 1527 PTE_SYNC(ptep);
1528 cpu_tlb_flushD_SE(va); 1528 cpu_tlb_flushD_SE(va);
1529 cpu_cpwait(); 1529 cpu_cpwait();
1530 } 1530 }
1531#endif 1531#endif
1532 1532
1533 memset(v, 0, L2_TABLE_SIZE_REAL); 1533 memset(v, 0, L2_TABLE_SIZE_REAL);
1534 PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1534 PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1535 return (0); 1535 return (0);
1536} 1536}
1537 1537
1538static int 1538static int
1539pmap_l2dtable_ctor(void *arg, void *v, int flags) 1539pmap_l2dtable_ctor(void *arg, void *v, int flags)
1540{ 1540{
1541 1541
1542 memset(v, 0, sizeof(struct l2_dtable)); 1542 memset(v, 0, sizeof(struct l2_dtable));
1543 return (0); 1543 return (0);
1544} 1544}
1545 1545
1546static int 1546static int
1547pmap_pmap_ctor(void *arg, void *v, int flags) 1547pmap_pmap_ctor(void *arg, void *v, int flags)
1548{ 1548{
1549 1549
1550 memset(v, 0, sizeof(struct pmap)); 1550 memset(v, 0, sizeof(struct pmap));
1551 return (0); 1551 return (0);
1552} 1552}
1553 1553
1554static void 1554static void
1555pmap_pinit(pmap_t pm) 1555pmap_pinit(pmap_t pm)
1556{ 1556{
 1557#ifndef ARM_HAS_VBAR
1557 struct l2_bucket *l2b; 1558 struct l2_bucket *l2b;
1558 1559
1559 if (vector_page < KERNEL_BASE) { 1560 if (vector_page < KERNEL_BASE) {
1560 /* 1561 /*
1561 * Map the vector page. 1562 * Map the vector page.
1562 */ 1563 */
1563 pmap_enter(pm, vector_page, systempage.pv_pa, 1564 pmap_enter(pm, vector_page, systempage.pv_pa,
1564 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED); 1565 VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1565 pmap_update(pm); 1566 pmap_update(pm);
1566 1567
1567 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 1568 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
1568 l2b = pmap_get_l2_bucket(pm, vector_page); 1569 l2b = pmap_get_l2_bucket(pm, vector_page);
1569 KDASSERT(l2b != NULL); 1570 KDASSERT(l2b != NULL);
1570 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 1571 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
1571 L1_C_DOM(pm->pm_domain); 1572 L1_C_DOM(pm->pm_domain);
1572 } else 1573 } else
1573 pm->pm_pl1vec = NULL; 1574 pm->pm_pl1vec = NULL;
 1575#endif
1574} 1576}
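
With the new ARM_HAS_VBAR guard above, pmap_pinit() no longer has anything to do: the CPU takes its exception vector base from the VBAR register, so no per-pmap mapping of vector_page is required. The fragment below is a minimal sketch of what programming VBAR looks like; the symbol name vector_code_base is a hypothetical stand-in and the snippet is illustrative only, not part of this change.

    /*
     * Illustrative sketch, not from this diff: point the CPU's exception
     * vector base at the kernel's vector code through VBAR (CP15 c12,
     * c0, 0).  "vector_code_base" is a hypothetical symbol.
     */
    extern char vector_code_base[];

    static inline void
    set_vbar_sketch(void)
    {
    	__asm volatile("mcr p15, 0, %0, c12, c0, 0" ::
    	    "r"(vector_code_base));
    	__asm volatile("isb");		/* ARMv7: synchronize the change */
    }
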
1575 1577
1576#ifdef PMAP_CACHE_VIVT 1578#ifdef PMAP_CACHE_VIVT
1577/* 1579/*
1578 * Since we have a virtually indexed cache, we may need to inhibit caching if 1580 * Since we have a virtually indexed cache, we may need to inhibit caching if
1579 * there is more than one mapping and at least one of them is writable. 1581 * there is more than one mapping and at least one of them is writable.
1580 * Since we purge the cache on every context switch, we only need to check for 1582 * Since we purge the cache on every context switch, we only need to check for
1581 * other mappings within the same pmap, or kernel_pmap. 1583 * other mappings within the same pmap, or kernel_pmap.
1582 * This function is also called when a page is unmapped, to possibly reenable 1584 * This function is also called when a page is unmapped, to possibly reenable
1583 * caching on any remaining mappings. 1585 * caching on any remaining mappings.
1584 * 1586 *
1585 * The code implements the following logic, where: 1587 * The code implements the following logic, where:
1586 * 1588 *
1587 * KW = # of kernel read/write pages 1589 * KW = # of kernel read/write pages
1588 * KR = # of kernel read only pages 1590 * KR = # of kernel read only pages
1589 * UW = # of user read/write pages 1591 * UW = # of user read/write pages
1590 * UR = # of user read only pages 1592 * UR = # of user read only pages
1591 *  1593 *
1592 * KC = kernel mapping is cacheable 1594 * KC = kernel mapping is cacheable
1593 * UC = user mapping is cacheable 1595 * UC = user mapping is cacheable
1594 * 1596 *
1595 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 1597 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
1596 * +--------------------------------------------- 1598 * +---------------------------------------------
1597 * UW=0,UR=0 | --- KC=1 KC=1 KC=0 1599 * UW=0,UR=0 | --- KC=1 KC=1 KC=0
1598 * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 1600 * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
1599 * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1601 * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
1600 * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 1602 * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
1601 */ 1603 */
1602 1604
1603static const int pmap_vac_flags[4][4] = { 1605static const int pmap_vac_flags[4][4] = {
1604 {-1, 0, 0, PVF_KNC}, 1606 {-1, 0, 0, PVF_KNC},
1605 {0, 0, PVF_NC, PVF_NC}, 1607 {0, 0, PVF_NC, PVF_NC},
1606 {0, PVF_NC, PVF_NC, PVF_NC}, 1608 {0, PVF_NC, PVF_NC, PVF_NC},
1607 {PVF_UNC, PVF_NC, PVF_NC, PVF_NC} 1609 {PVF_UNC, PVF_NC, PVF_NC, PVF_NC}
1608}; 1610};
1609 1611
1610static inline int 1612static inline int
1611pmap_get_vac_flags(const struct vm_page_md *md) 1613pmap_get_vac_flags(const struct vm_page_md *md)
1612{ 1614{
1613 int kidx, uidx; 1615 int kidx, uidx;
1614 1616
1615 kidx = 0; 1617 kidx = 0;
1616 if (md->kro_mappings || md->krw_mappings > 1) 1618 if (md->kro_mappings || md->krw_mappings > 1)
1617 kidx |= 1; 1619 kidx |= 1;
1618 if (md->krw_mappings) 1620 if (md->krw_mappings)
1619 kidx |= 2; 1621 kidx |= 2;
1620 1622
1621 uidx = 0; 1623 uidx = 0;
1622 if (md->uro_mappings || md->urw_mappings > 1) 1624 if (md->uro_mappings || md->urw_mappings > 1)
1623 uidx |= 1; 1625 uidx |= 1;
1624 if (md->urw_mappings) 1626 if (md->urw_mappings)
1625 uidx |= 2; 1627 uidx |= 2;
1626 1628
1627 return (pmap_vac_flags[uidx][kidx]); 1629 return (pmap_vac_flags[uidx][kidx]);
1628} 1630}
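
To make the table above concrete, take a page with one kernel read/write mapping and two user read-only mappings, i.e. the KW=1,KR=0 column and the UW=0,UR>0 row. The snippet below is illustrative only; the initializer names just the counters that matter here.

    /*
     * Worked example, not part of the diff: kidx ends up as 2 (only
     * krw_mappings is set), uidx ends up as 1 (only uro_mappings is
     * set), so the lookup is pmap_vac_flags[1][2] == PVF_NC and all
     * mappings of the page must be made non-cacheable, matching the
     * KC=0,UC=0 cell of the table.
     */
    struct vm_page_md md_example = {
    	.krw_mappings = 1, .kro_mappings = 0,
    	.urw_mappings = 0, .uro_mappings = 2,
    };
    KASSERT(pmap_get_vac_flags(&md_example) == PVF_NC);
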
1629 1631
1630static inline void 1632static inline void
1631pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1633pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1632{ 1634{
1633 int nattr; 1635 int nattr;
1634 1636
1635 nattr = pmap_get_vac_flags(md); 1637 nattr = pmap_get_vac_flags(md);
1636 1638
1637 if (nattr < 0) { 1639 if (nattr < 0) {
1638 md->pvh_attrs &= ~PVF_NC; 1640 md->pvh_attrs &= ~PVF_NC;
1639 return; 1641 return;
1640 } 1642 }
1641 1643
1642 if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0) 1644 if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0)
1643 return; 1645 return;
1644 1646
1645 if (pm == pmap_kernel()) 1647 if (pm == pmap_kernel())
1646 pmap_vac_me_kpmap(md, pa, pm, va); 1648 pmap_vac_me_kpmap(md, pa, pm, va);
1647 else 1649 else
1648 pmap_vac_me_user(md, pa, pm, va); 1650 pmap_vac_me_user(md, pa, pm, va);
1649 1651
1650 md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr; 1652 md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr;
1651} 1653}
1652 1654
1653static void 1655static void
1654pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1656pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1655{ 1657{
1656 u_int u_cacheable, u_entries; 1658 u_int u_cacheable, u_entries;
1657 struct pv_entry *pv; 1659 struct pv_entry *pv;
1658 pmap_t last_pmap = pm; 1660 pmap_t last_pmap = pm;
1659 1661
1660 /*  1662 /*
1661 * Pass one, see if there are both kernel and user pmaps for 1663 * Pass one, see if there are both kernel and user pmaps for
1662 * this page. Calculate whether there are user-writable or 1664 * this page. Calculate whether there are user-writable or
1663 * kernel-writable pages. 1665 * kernel-writable pages.
1664 */ 1666 */
1665 u_cacheable = 0; 1667 u_cacheable = 0;
1666 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1668 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1667 if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0) 1669 if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
1668 u_cacheable++; 1670 u_cacheable++;
1669 } 1671 }
1670 1672
1671 u_entries = md->urw_mappings + md->uro_mappings; 1673 u_entries = md->urw_mappings + md->uro_mappings;
1672 1674
1673 /*  1675 /*
1674 * We know we have just been updating a kernel entry, so if 1676 * We know we have just been updating a kernel entry, so if
1675 * all user pages are already cacheable, then there is nothing 1677 * all user pages are already cacheable, then there is nothing
1676 * further to do. 1678 * further to do.
1677 */ 1679 */
1678 if (md->k_mappings == 0 && u_cacheable == u_entries) 1680 if (md->k_mappings == 0 && u_cacheable == u_entries)
1679 return; 1681 return;
1680 1682
1681 if (u_entries) { 1683 if (u_entries) {
1682 /*  1684 /*
1683 * Scan over the list again, for each entry, if it 1685 * Scan over the list again, for each entry, if it
1684 * might not be set correctly, call pmap_vac_me_user 1686 * might not be set correctly, call pmap_vac_me_user
1685 * to recalculate the settings. 1687 * to recalculate the settings.
1686 */ 1688 */
1687 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1689 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1688 /*  1690 /*
1689 * We know kernel mappings will get set 1691 * We know kernel mappings will get set
1690 * correctly in other calls. We also know 1692 * correctly in other calls. We also know
1691 * that if the pmap is the same as last_pmap 1693 * that if the pmap is the same as last_pmap
1692 * then we've just handled this entry. 1694 * then we've just handled this entry.
1693 */ 1695 */
1694 if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap) 1696 if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
1695 continue; 1697 continue;
1696 1698
1697 /*  1699 /*
1698 * If there are kernel entries and this page 1700 * If there are kernel entries and this page
1699 * is writable but non-cacheable, then we can 1701 * is writable but non-cacheable, then we can
1700 * skip this entry also.  1702 * skip this entry also.
1701 */ 1703 */
1702 if (md->k_mappings && 1704 if (md->k_mappings &&
1703 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 1705 (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
1704 (PVF_NC | PVF_WRITE)) 1706 (PVF_NC | PVF_WRITE))
1705 continue; 1707 continue;
1706 1708
1707 /*  1709 /*
1708 * Similarly if there are no kernel-writable  1710 * Similarly if there are no kernel-writable
1709 * entries and the page is already  1711 * entries and the page is already
1710 * read-only/cacheable. 1712 * read-only/cacheable.
1711 */ 1713 */
1712 if (md->krw_mappings == 0 && 1714 if (md->krw_mappings == 0 &&
1713 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 1715 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1714 continue; 1716 continue;
1715 1717
1716 /*  1718 /*
1717 * For some of the remaining cases, we know 1719 * For some of the remaining cases, we know
1718 * that we must recalculate, but for others we 1720 * that we must recalculate, but for others we
1719 * can't tell if they are correct or not, so 1721 * can't tell if they are correct or not, so
1720 * we recalculate anyway. 1722 * we recalculate anyway.
1721 */ 1723 */
1722 pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0); 1724 pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0);
1723 } 1725 }
1724 1726
1725 if (md->k_mappings == 0) 1727 if (md->k_mappings == 0)
1726 return; 1728 return;
1727 } 1729 }
1728 1730
1729 pmap_vac_me_user(md, pa, pm, va); 1731 pmap_vac_me_user(md, pa, pm, va);
1730} 1732}
1731 1733
1732static void 1734static void
1733pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1735pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1734{ 1736{
1735 pmap_t kpmap = pmap_kernel(); 1737 pmap_t kpmap = pmap_kernel();
1736 struct pv_entry *pv, *npv = NULL; 1738 struct pv_entry *pv, *npv = NULL;
1737 struct l2_bucket *l2b; 1739 struct l2_bucket *l2b;
1738 pt_entry_t *ptep, pte; 1740 pt_entry_t *ptep, pte;
1739 u_int entries = 0; 1741 u_int entries = 0;
1740 u_int writable = 0; 1742 u_int writable = 0;
1741 u_int cacheable_entries = 0; 1743 u_int cacheable_entries = 0;
1742 u_int kern_cacheable = 0; 1744 u_int kern_cacheable = 0;
1743 u_int other_writable = 0; 1745 u_int other_writable = 0;
1744 1746
1745 /* 1747 /*
1746 * Count mappings and writable mappings in this pmap. 1748 * Count mappings and writable mappings in this pmap.
1747 * Include kernel mappings as part of our own. 1749 * Include kernel mappings as part of our own.
1748 * Keep a pointer to the first one. 1750 * Keep a pointer to the first one.
1749 */ 1751 */
1750 npv = NULL; 1752 npv = NULL;
1751 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 1753 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1752 /* Count mappings in the same pmap */ 1754 /* Count mappings in the same pmap */
1753 if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { 1755 if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
1754 if (entries++ == 0) 1756 if (entries++ == 0)
1755 npv = pv; 1757 npv = pv;
1756 1758
1757 /* Cacheable mappings */ 1759 /* Cacheable mappings */
1758 if ((pv->pv_flags & PVF_NC) == 0) { 1760 if ((pv->pv_flags & PVF_NC) == 0) {
1759 cacheable_entries++; 1761 cacheable_entries++;
1760 if (kpmap == pv->pv_pmap) 1762 if (kpmap == pv->pv_pmap)
1761 kern_cacheable++; 1763 kern_cacheable++;
1762 } 1764 }
1763 1765
1764 /* Writable mappings */ 1766 /* Writable mappings */
1765 if (pv->pv_flags & PVF_WRITE) 1767 if (pv->pv_flags & PVF_WRITE)
1766 ++writable; 1768 ++writable;
1767 } else 1769 } else
1768 if (pv->pv_flags & PVF_WRITE) 1770 if (pv->pv_flags & PVF_WRITE)
1769 other_writable = 1; 1771 other_writable = 1;
1770 } 1772 }
1771 1773
1772 /* 1774 /*
1773 * Enable or disable caching as necessary. 1775 * Enable or disable caching as necessary.
1774 * Note: the first entry might be part of the kernel pmap, 1776 * Note: the first entry might be part of the kernel pmap,
1775 * so we can't assume this is indicative of the state of the 1777 * so we can't assume this is indicative of the state of the
1776 * other (maybe non-kpmap) entries. 1778 * other (maybe non-kpmap) entries.
1777 */ 1779 */
1778 if ((entries > 1 && writable) || 1780 if ((entries > 1 && writable) ||
1779 (entries > 0 && pm == kpmap && other_writable)) { 1781 (entries > 0 && pm == kpmap && other_writable)) {
1780 if (cacheable_entries == 0) 1782 if (cacheable_entries == 0)
1781 return; 1783 return;
1782 1784
1783 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1785 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1784 if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || 1786 if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
1785 (pv->pv_flags & PVF_NC)) 1787 (pv->pv_flags & PVF_NC))
1786 continue; 1788 continue;
1787 1789
1788 pv->pv_flags |= PVF_NC; 1790 pv->pv_flags |= PVF_NC;
1789 1791
1790 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1792 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1791 KDASSERT(l2b != NULL); 1793 KDASSERT(l2b != NULL);
1792 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1794 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1793 pte = *ptep & ~L2_S_CACHE_MASK; 1795 pte = *ptep & ~L2_S_CACHE_MASK;
1794 1796
1795 if ((va != pv->pv_va || pm != pv->pv_pmap) && 1797 if ((va != pv->pv_va || pm != pv->pv_pmap) &&
1796 l2pte_valid(pte)) { 1798 l2pte_valid(pte)) {
1797 if (PV_BEEN_EXECD(pv->pv_flags)) { 1799 if (PV_BEEN_EXECD(pv->pv_flags)) {
1798#ifdef PMAP_CACHE_VIVT 1800#ifdef PMAP_CACHE_VIVT
1799 pmap_idcache_wbinv_range(pv->pv_pmap, 1801 pmap_idcache_wbinv_range(pv->pv_pmap,
1800 pv->pv_va, PAGE_SIZE); 1802 pv->pv_va, PAGE_SIZE);
1801#endif 1803#endif
1802 pmap_tlb_flushID_SE(pv->pv_pmap, 1804 pmap_tlb_flushID_SE(pv->pv_pmap,
1803 pv->pv_va); 1805 pv->pv_va);
1804 } else 1806 } else
1805 if (PV_BEEN_REFD(pv->pv_flags)) { 1807 if (PV_BEEN_REFD(pv->pv_flags)) {
1806#ifdef PMAP_CACHE_VIVT 1808#ifdef PMAP_CACHE_VIVT
1807 pmap_dcache_wb_range(pv->pv_pmap, 1809 pmap_dcache_wb_range(pv->pv_pmap,
1808 pv->pv_va, PAGE_SIZE, true, 1810 pv->pv_va, PAGE_SIZE, true,
1809 (pv->pv_flags & PVF_WRITE) == 0); 1811 (pv->pv_flags & PVF_WRITE) == 0);
1810#endif 1812#endif
1811 pmap_tlb_flushD_SE(pv->pv_pmap, 1813 pmap_tlb_flushD_SE(pv->pv_pmap,
1812 pv->pv_va); 1814 pv->pv_va);
1813 } 1815 }
1814 } 1816 }
1815 1817
1816 *ptep = pte; 1818 *ptep = pte;
1817 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1819 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1818 } 1820 }
1819 cpu_cpwait(); 1821 cpu_cpwait();
1820 } else 1822 } else
1821 if (entries > cacheable_entries) { 1823 if (entries > cacheable_entries) {
1822 /* 1824 /*
1823 * Turn caching back on for some pages. If it is a kernel 1825 * Turn caching back on for some pages. If it is a kernel
1824 * page, only do so if there are no other writable pages. 1826 * page, only do so if there are no other writable pages.
1825 */ 1827 */
1826 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) { 1828 for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1827 if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && 1829 if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
1828 (kpmap != pv->pv_pmap || other_writable))) 1830 (kpmap != pv->pv_pmap || other_writable)))
1829 continue; 1831 continue;
1830 1832
1831 pv->pv_flags &= ~PVF_NC; 1833 pv->pv_flags &= ~PVF_NC;
1832 1834
1833 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1835 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1834 KDASSERT(l2b != NULL); 1836 KDASSERT(l2b != NULL);
1835 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1837 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
1836 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; 1838 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
1837 1839
1838 if (l2pte_valid(pte)) { 1840 if (l2pte_valid(pte)) {
1839 if (PV_BEEN_EXECD(pv->pv_flags)) { 1841 if (PV_BEEN_EXECD(pv->pv_flags)) {
1840 pmap_tlb_flushID_SE(pv->pv_pmap, 1842 pmap_tlb_flushID_SE(pv->pv_pmap,
1841 pv->pv_va); 1843 pv->pv_va);
1842 } else 1844 } else
1843 if (PV_BEEN_REFD(pv->pv_flags)) { 1845 if (PV_BEEN_REFD(pv->pv_flags)) {
1844 pmap_tlb_flushD_SE(pv->pv_pmap, 1846 pmap_tlb_flushD_SE(pv->pv_pmap,
1845 pv->pv_va); 1847 pv->pv_va);
1846 } 1848 }
1847 } 1849 }
1848 1850
1849 *ptep = pte; 1851 *ptep = pte;
1850 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1852 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
1851 } 1853 }
1852 } 1854 }
1853} 1855}
1854#endif 1856#endif
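
The cacheability decision that pmap_vac_me_user() makes above reduces to a single predicate; restating it on its own may make the policy easier to see. This helper is illustrative and does not exist in the source.

    /*
     * Sketch only: caching must be disabled when this pmap (kernel
     * mappings are counted as its own) has more than one mapping of the
     * page and at least one of them is writable, or when the kernel
     * pmap is being updated and another pmap holds a writable mapping.
     */
    static inline bool
    vivt_must_uncache(u_int entries, u_int writable,
        bool pm_is_kernel, bool other_writable)
    {
    	return (entries > 1 && writable > 0) ||
    	    (entries > 0 && pm_is_kernel && other_writable);
    }
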
1855 1857
1856#ifdef PMAP_CACHE_VIPT 1858#ifdef PMAP_CACHE_VIPT
1857static void 1859static void
1858pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va) 1860pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1859{ 1861{
1860 struct pv_entry *pv; 1862 struct pv_entry *pv;
1861 vaddr_t tst_mask; 1863 vaddr_t tst_mask;
1862 bool bad_alias; 1864 bool bad_alias;
1863 struct l2_bucket *l2b; 1865 struct l2_bucket *l2b;
1864 pt_entry_t *ptep, pte, opte; 1866 pt_entry_t *ptep, pte, opte;
1865 const u_int 1867 const u_int
1866 rw_mappings = md->urw_mappings + md->krw_mappings, 1868 rw_mappings = md->urw_mappings + md->krw_mappings,
1867 ro_mappings = md->uro_mappings + md->kro_mappings; 1869 ro_mappings = md->uro_mappings + md->kro_mappings;
1868 1870
1869 /* do we need to do anything? */ 1871 /* do we need to do anything? */
1870 if (arm_cache_prefer_mask == 0) 1872 if (arm_cache_prefer_mask == 0)
1871 return; 1873 return;
1872 1874
1873 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n", 1875 NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n",
1874 md, pm, va)); 1876 md, pm, va));
1875 1877
1876 KASSERT(!va || pm); 1878 KASSERT(!va || pm);
1877 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1879 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1878 1880
1879 /* Already a conflict? */ 1881 /* Already a conflict? */
1880 if (__predict_false(md->pvh_attrs & PVF_NC)) { 1882 if (__predict_false(md->pvh_attrs & PVF_NC)) {
1881 /* just an add, things are already non-cached */ 1883 /* just an add, things are already non-cached */
1882 KASSERT(!(md->pvh_attrs & PVF_DIRTY)); 1884 KASSERT(!(md->pvh_attrs & PVF_DIRTY));
1883 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 1885 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1884 bad_alias = false; 1886 bad_alias = false;
1885 if (va) { 1887 if (va) {
1886 PMAPCOUNT(vac_color_none); 1888 PMAPCOUNT(vac_color_none);
1887 bad_alias = true; 1889 bad_alias = true;
1888 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1890 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1889 goto fixup; 1891 goto fixup;
1890 } 1892 }
1891 pv = SLIST_FIRST(&md->pvh_list); 1893 pv = SLIST_FIRST(&md->pvh_list);
1892 /* the list can't be empty, otherwise the page would be cacheable */ 1894 /* the list can't be empty, otherwise the page would be cacheable */
1893 if (md->pvh_attrs & PVF_KMPAGE) { 1895 if (md->pvh_attrs & PVF_KMPAGE) {
1894 tst_mask = md->pvh_attrs; 1896 tst_mask = md->pvh_attrs;
1895 } else { 1897 } else {
1896 KASSERT(pv); 1898 KASSERT(pv);
1897 tst_mask = pv->pv_va; 1899 tst_mask = pv->pv_va;
1898 pv = SLIST_NEXT(pv, pv_link); 1900 pv = SLIST_NEXT(pv, pv_link);
1899 } 1901 }
1900 /* 1902 /*
1901 * Only check for a bad alias if we have writable mappings. 1903 * Only check for a bad alias if we have writable mappings.
1902 */ 1904 */
1903 tst_mask &= arm_cache_prefer_mask; 1905 tst_mask &= arm_cache_prefer_mask;
1904 if (rw_mappings > 0) { 1906 if (rw_mappings > 0) {
1905 for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) { 1907 for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
1906 /* if there's a bad alias, stop checking. */ 1908 /* if there's a bad alias, stop checking. */
1907 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) 1909 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
1908 bad_alias = true; 1910 bad_alias = true;
1909 } 1911 }
1910 md->pvh_attrs |= PVF_WRITE; 1912 md->pvh_attrs |= PVF_WRITE;
1911 if (!bad_alias) 1913 if (!bad_alias)
1912 md->pvh_attrs |= PVF_DIRTY; 1914 md->pvh_attrs |= PVF_DIRTY;
1913 } else { 1915 } else {
1914 /* 1916 /*
1915 * We have only read-only mappings. Let's see if there 1917 * We have only read-only mappings. Let's see if there
1916 * are multiple colors in use or if we mapped a KMPAGE. 1918 * are multiple colors in use or if we mapped a KMPAGE.
1917 * If the latter, we have a bad alias. If the former, 1919 * If the latter, we have a bad alias. If the former,
1918 * we need to remember that. 1920 * we need to remember that.
1919 */ 1921 */
1920 for (; pv; pv = SLIST_NEXT(pv, pv_link)) { 1922 for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
1921 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) { 1923 if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
1922 if (md->pvh_attrs & PVF_KMPAGE) 1924 if (md->pvh_attrs & PVF_KMPAGE)
1923 bad_alias = true; 1925 bad_alias = true;
1924 break; 1926 break;
1925 } 1927 }
1926 } 1928 }
1927 md->pvh_attrs &= ~PVF_WRITE; 1929 md->pvh_attrs &= ~PVF_WRITE;
1928 /* 1930 /*
1929 * No KMPAGE and we exited early, so we must have  1931 * No KMPAGE and we exited early, so we must have
1930 * multiple color mappings. 1932 * multiple color mappings.
1931 */ 1933 */
1932 if (!bad_alias && pv != NULL) 1934 if (!bad_alias && pv != NULL)
1933 md->pvh_attrs |= PVF_MULTCLR; 1935 md->pvh_attrs |= PVF_MULTCLR;
1934 } 1936 }
1935 1937
1936 /* If no conflicting colors, set everything back to cached */ 1938 /* If no conflicting colors, set everything back to cached */
1937 if (!bad_alias) { 1939 if (!bad_alias) {
1938#ifdef DEBUG 1940#ifdef DEBUG
1939 if ((md->pvh_attrs & PVF_WRITE) 1941 if ((md->pvh_attrs & PVF_WRITE)
1940 || ro_mappings < 2) { 1942 || ro_mappings < 2) {
1941 SLIST_FOREACH(pv, &md->pvh_list, pv_link) 1943 SLIST_FOREACH(pv, &md->pvh_list, pv_link)
1942 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 1944 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
1943 } 1945 }
1944#endif 1946#endif
1945 md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC; 1947 md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
1946 md->pvh_attrs |= tst_mask | PVF_COLORED; 1948 md->pvh_attrs |= tst_mask | PVF_COLORED;
1947 /* 1949 /*
1948 * Restore DIRTY bit if page is modified 1950 * Restore DIRTY bit if page is modified
1949 */ 1951 */
1950 if (md->pvh_attrs & PVF_DMOD) 1952 if (md->pvh_attrs & PVF_DMOD)
1951 md->pvh_attrs |= PVF_DIRTY; 1953 md->pvh_attrs |= PVF_DIRTY;
1952 PMAPCOUNT(vac_color_restore); 1954 PMAPCOUNT(vac_color_restore);
1953 } else { 1955 } else {
1954 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 1956 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
1955 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 1957 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
1956 } 1958 }
1957 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1959 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1958 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1960 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1959 } else if (!va) { 1961 } else if (!va) {
1960 KASSERT(pmap_is_page_colored_p(md)); 1962 KASSERT(pmap_is_page_colored_p(md));
1961 KASSERT(!(md->pvh_attrs & PVF_WRITE) 1963 KASSERT(!(md->pvh_attrs & PVF_WRITE)
1962 || (md->pvh_attrs & PVF_DIRTY)); 1964 || (md->pvh_attrs & PVF_DIRTY));
1963 if (rw_mappings == 0) { 1965 if (rw_mappings == 0) {
1964 md->pvh_attrs &= ~PVF_WRITE; 1966 md->pvh_attrs &= ~PVF_WRITE;
1965 if (ro_mappings == 1 1967 if (ro_mappings == 1
1966 && (md->pvh_attrs & PVF_MULTCLR)) { 1968 && (md->pvh_attrs & PVF_MULTCLR)) {
1967 /* 1969 /*
1968 * If this is the last readonly mapping 1970 * If this is the last readonly mapping
1969 * but it doesn't match the current color 1971 * but it doesn't match the current color
1970 * for the page, change the current color 1972 * for the page, change the current color
1971 * to match this last readonly mapping. 1973 * to match this last readonly mapping.
1972 */ 1974 */
1973 pv = SLIST_FIRST(&md->pvh_list); 1975 pv = SLIST_FIRST(&md->pvh_list);
1974 tst_mask = (md->pvh_attrs ^ pv->pv_va) 1976 tst_mask = (md->pvh_attrs ^ pv->pv_va)
1975 & arm_cache_prefer_mask; 1977 & arm_cache_prefer_mask;
1976 if (tst_mask) { 1978 if (tst_mask) {
1977 md->pvh_attrs ^= tst_mask; 1979 md->pvh_attrs ^= tst_mask;
1978 PMAPCOUNT(vac_color_change); 1980 PMAPCOUNT(vac_color_change);
1979 } 1981 }
1980 } 1982 }
1981 } 1983 }
1982 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1984 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1983 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1985 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1984 return; 1986 return;
1985 } else if (!pmap_is_page_colored_p(md)) { 1987 } else if (!pmap_is_page_colored_p(md)) {
1986 /* not colored so we just use its color */ 1988 /* not colored so we just use its color */
1987 KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY)); 1989 KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY));
1988 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 1990 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1989 PMAPCOUNT(vac_color_new); 1991 PMAPCOUNT(vac_color_new);
1990 md->pvh_attrs &= PAGE_SIZE - 1; 1992 md->pvh_attrs &= PAGE_SIZE - 1;
1991 md->pvh_attrs |= PVF_COLORED 1993 md->pvh_attrs |= PVF_COLORED
1992 | (va & arm_cache_prefer_mask) 1994 | (va & arm_cache_prefer_mask)
1993 | (rw_mappings > 0 ? PVF_WRITE : 0); 1995 | (rw_mappings > 0 ? PVF_WRITE : 0);
1994 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 1996 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1995 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 1997 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1996 return; 1998 return;
1997 } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) { 1999 } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
1998 bad_alias = false; 2000 bad_alias = false;
1999 if (rw_mappings > 0) { 2001 if (rw_mappings > 0) {
2000 /* 2002 /*
2001 * We now have writeable mappings and if we have 2003 * We now have writeable mappings and if we have
2002 * read-only mappings in more than one color, we have 2004 * read-only mappings in more than one color, we have
2003 * an aliasing problem. Regardless, mark the page as 2005 * an aliasing problem. Regardless, mark the page as
2004 * writeable. 2006 * writeable.
2005 */ 2007 */
2006 if (md->pvh_attrs & PVF_MULTCLR) { 2008 if (md->pvh_attrs & PVF_MULTCLR) {
2007 if (ro_mappings < 2) { 2009 if (ro_mappings < 2) {
2008 /* 2010 /*
2009 * If we have fewer than two 2011 * If we have fewer than two
2010 * read-only mappings, just flush the 2012 * read-only mappings, just flush the
2011 * non-primary colors from the cache. 2013 * non-primary colors from the cache.
2012 */ 2014 */
2013 pmap_flush_page(md, pa, 2015 pmap_flush_page(md, pa,
2014 PMAP_FLUSH_SECONDARY); 2016 PMAP_FLUSH_SECONDARY);
2015 } else { 2017 } else {
2016 bad_alias = true; 2018 bad_alias = true;
2017 } 2019 }
2018 } 2020 }
2019 md->pvh_attrs |= PVF_WRITE; 2021 md->pvh_attrs |= PVF_WRITE;
2020 } 2022 }
2021 /* If no conflicting colors, set everything back to cached */ 2023 /* If no conflicting colors, set everything back to cached */
2022 if (!bad_alias) { 2024 if (!bad_alias) {
2023#ifdef DEBUG 2025#ifdef DEBUG
2024 if (rw_mappings > 0 2026 if (rw_mappings > 0
2025 || (md->pvh_attrs & PMAP_KMPAGE)) { 2027 || (md->pvh_attrs & PMAP_KMPAGE)) {
2026 tst_mask = md->pvh_attrs & arm_cache_prefer_mask; 2028 tst_mask = md->pvh_attrs & arm_cache_prefer_mask;
2027 SLIST_FOREACH(pv, &md->pvh_list, pv_link) 2029 SLIST_FOREACH(pv, &md->pvh_list, pv_link)
2028 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0); 2030 KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
2029 } 2031 }
2030#endif 2032#endif
2031 if (SLIST_EMPTY(&md->pvh_list)) 2033 if (SLIST_EMPTY(&md->pvh_list))
2032 PMAPCOUNT(vac_color_reuse); 2034 PMAPCOUNT(vac_color_reuse);
2033 else 2035 else
2034 PMAPCOUNT(vac_color_ok); 2036 PMAPCOUNT(vac_color_ok);
2035 2037
2036 /* matching color, just return */ 2038 /* matching color, just return */
2037 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2039 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2038 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2040 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2039 return; 2041 return;
2040 } 2042 }
2041 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL); 2043 KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
2042 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL); 2044 KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
2043 2045
2044 /* color conflict. evict from cache. */ 2046 /* color conflict. evict from cache. */
2045 2047
2046 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 2048 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2047 md->pvh_attrs &= ~PVF_COLORED; 2049 md->pvh_attrs &= ~PVF_COLORED;
2048 md->pvh_attrs |= PVF_NC; 2050 md->pvh_attrs |= PVF_NC;
2049 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2051 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2050 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2052 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2051 PMAPCOUNT(vac_color_erase); 2053 PMAPCOUNT(vac_color_erase);
2052 } else if (rw_mappings == 0 2054 } else if (rw_mappings == 0
2053 && (md->pvh_attrs & PVF_KMPAGE) == 0) { 2055 && (md->pvh_attrs & PVF_KMPAGE) == 0) {
2054 KASSERT((md->pvh_attrs & PVF_WRITE) == 0); 2056 KASSERT((md->pvh_attrs & PVF_WRITE) == 0);
2055 2057
2056 /* 2058 /*
2057 * If the page has dirty cache lines, clean it. 2059 * If the page has dirty cache lines, clean it.
2058 */ 2060 */
2059 if (md->pvh_attrs & PVF_DIRTY) 2061 if (md->pvh_attrs & PVF_DIRTY)
2060 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); 2062 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
2061 2063
2062 /* 2064 /*
2063 * If this is the first remapping (we know that there are no 2065 * If this is the first remapping (we know that there are no
2064 * writeable mappings), then this is a simple color change. 2066 * writeable mappings), then this is a simple color change.
2065 * Otherwise this is a secondary r/o mapping, which means 2067 * Otherwise this is a secondary r/o mapping, which means
2066 * we don't have to do anything. 2068 * we don't have to do anything.
2067 */ 2069 */
2068 if (ro_mappings == 1) { 2070 if (ro_mappings == 1) {
2069 KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0); 2071 KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
2070 md->pvh_attrs &= PAGE_SIZE - 1; 2072 md->pvh_attrs &= PAGE_SIZE - 1;
2071 md->pvh_attrs |= (va & arm_cache_prefer_mask); 2073 md->pvh_attrs |= (va & arm_cache_prefer_mask);
2072 PMAPCOUNT(vac_color_change); 2074 PMAPCOUNT(vac_color_change);
2073 } else { 2075 } else {
2074 PMAPCOUNT(vac_color_blind); 2076 PMAPCOUNT(vac_color_blind);
2075 } 2077 }
2076 md->pvh_attrs |= PVF_MULTCLR; 2078 md->pvh_attrs |= PVF_MULTCLR;
2077 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2079 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2078 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2080 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2079 return; 2081 return;
2080 } else { 2082 } else {
2081 if (rw_mappings > 0) 2083 if (rw_mappings > 0)
2082 md->pvh_attrs |= PVF_WRITE; 2084 md->pvh_attrs |= PVF_WRITE;
2083 2085
2084 /* color conflict. evict from cache. */ 2086 /* color conflict. evict from cache. */
2085 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 2087 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
2086 2088
2087 /* the list can't be empty because this was an enter/modify */ 2089 /* the list can't be empty because this was an enter/modify */
2088 pv = SLIST_FIRST(&md->pvh_list); 2090 pv = SLIST_FIRST(&md->pvh_list);
2089 if ((md->pvh_attrs & PVF_KMPAGE) == 0) { 2091 if ((md->pvh_attrs & PVF_KMPAGE) == 0) {
2090 KASSERT(pv); 2092 KASSERT(pv);
2091 /* 2093 /*
2092 * If there's only one mapped page, change color to the 2094 * If there's only one mapped page, change color to the
2093 * page's new color and return. Restore the DIRTY bit 2095 * page's new color and return. Restore the DIRTY bit
2094 * that was erased by pmap_flush_page. 2096 * that was erased by pmap_flush_page.
2095 */ 2097 */
2096 if (SLIST_NEXT(pv, pv_link) == NULL) { 2098 if (SLIST_NEXT(pv, pv_link) == NULL) {
2097 md->pvh_attrs &= PAGE_SIZE - 1; 2099 md->pvh_attrs &= PAGE_SIZE - 1;
2098 md->pvh_attrs |= (va & arm_cache_prefer_mask); 2100 md->pvh_attrs |= (va & arm_cache_prefer_mask);
2099 if (md->pvh_attrs & PVF_DMOD) 2101 if (md->pvh_attrs & PVF_DMOD)
2100 md->pvh_attrs |= PVF_DIRTY; 2102 md->pvh_attrs |= PVF_DIRTY;
2101 PMAPCOUNT(vac_color_change); 2103 PMAPCOUNT(vac_color_change);
2102 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2104 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2103 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2105 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2104 KASSERT(!(md->pvh_attrs & PVF_MULTCLR)); 2106 KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
2105 return; 2107 return;
2106 } 2108 }
2107 } 2109 }
2108 bad_alias = true; 2110 bad_alias = true;
2109 md->pvh_attrs &= ~PVF_COLORED; 2111 md->pvh_attrs &= ~PVF_COLORED;
2110 md->pvh_attrs |= PVF_NC; 2112 md->pvh_attrs |= PVF_NC;
2111 PMAPCOUNT(vac_color_erase); 2113 PMAPCOUNT(vac_color_erase);
2112 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 2114 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
2113 } 2115 }
2114 2116
2115 fixup: 2117 fixup:
2116 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE)); 2118 KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
2117 2119
2118 /* 2120 /*
2119 * Turn caching on/off for all pages. 2121 * Turn caching on/off for all pages.
2120 */ 2122 */
2121 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 2123 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2122 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 2124 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
2123 KDASSERT(l2b != NULL); 2125 KDASSERT(l2b != NULL);
2124 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2126 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2125 opte = *ptep; 2127 opte = *ptep;
2126 pte = opte & ~L2_S_CACHE_MASK; 2128 pte = opte & ~L2_S_CACHE_MASK;
2127 if (bad_alias) { 2129 if (bad_alias) {
2128 pv->pv_flags |= PVF_NC; 2130 pv->pv_flags |= PVF_NC;
2129 } else { 2131 } else {
2130 pv->pv_flags &= ~PVF_NC; 2132 pv->pv_flags &= ~PVF_NC;
2131 pte |= pte_l2_s_cache_mode; 2133 pte |= pte_l2_s_cache_mode;
2132 } 2134 }
2133 2135
2134 if (opte == pte) /* only update if there's a change */ 2136 if (opte == pte) /* only update if there's a change */
2135 continue; 2137 continue;
2136 2138
2137 if (l2pte_valid(pte)) { 2139 if (l2pte_valid(pte)) {
2138 if (PV_BEEN_EXECD(pv->pv_flags)) { 2140 if (PV_BEEN_EXECD(pv->pv_flags)) {
2139 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); 2141 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
2140 } else if (PV_BEEN_REFD(pv->pv_flags)) { 2142 } else if (PV_BEEN_REFD(pv->pv_flags)) {
2141 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); 2143 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
2142 } 2144 }
2143 } 2145 }
2144 2146
2145 *ptep = pte; 2147 *ptep = pte;
2146 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 2148 PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
2147 } 2149 }
2148} 2150}
2149#endif /* PMAP_CACHE_VIPT */ 2151#endif /* PMAP_CACHE_VIPT */
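
Every colour check in the VIPT variant above comes down to comparing the virtual-address bits selected by arm_cache_prefer_mask. The helper below is a stand-alone restatement for clarity; it is not part of the source.

    /*
     * Two mappings of the same physical page are a "bad alias" in a
     * VIPT cache when their cache colours differ, i.e. when the index
     * bits above the page offset differ.  This is the same test written
     * above as (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) and as
     * ((md->pvh_attrs ^ va) & arm_cache_prefer_mask).
     */
    static inline bool
    vipt_bad_alias(vaddr_t va1, vaddr_t va2)
    {
    	/* arm_cache_prefer_mask is 0 when the cache cannot alias */
    	return ((va1 ^ va2) & arm_cache_prefer_mask) != 0;
    }
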
2150 2152
2151 2153
2152/* 2154/*
2153 * Modify pte bits for all ptes corresponding to the given physical address. 2155 * Modify pte bits for all ptes corresponding to the given physical address.
2154 * We use `maskbits' rather than `clearbits' because we're always passing 2156 * We use `maskbits' rather than `clearbits' because we're always passing
2155 * constants and the latter would require an extra inversion at run-time. 2157 * constants and the latter would require an extra inversion at run-time.
2156 */ 2158 */
2157static void 2159static void
2158pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) 2160pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
2159{ 2161{
2160 struct l2_bucket *l2b; 2162 struct l2_bucket *l2b;
2161 struct pv_entry *pv; 2163 struct pv_entry *pv;
2162 pt_entry_t *ptep, npte, opte; 2164 pt_entry_t *ptep, npte, opte;
2163 pmap_t pm; 2165 pmap_t pm;
2164 vaddr_t va; 2166 vaddr_t va;
2165 u_int oflags; 2167 u_int oflags;
2166#ifdef PMAP_CACHE_VIPT 2168#ifdef PMAP_CACHE_VIPT
2167 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); 2169 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2168 bool need_syncicache = false; 2170 bool need_syncicache = false;
2169 bool did_syncicache = false; 2171 bool did_syncicache = false;
2170 bool need_vac_me_harder = false; 2172 bool need_vac_me_harder = false;
2171#endif 2173#endif
2172 2174
2173 NPDEBUG(PDB_BITS, 2175 NPDEBUG(PDB_BITS,
2174 printf("pmap_clearbit: md %p mask 0x%x\n", 2176 printf("pmap_clearbit: md %p mask 0x%x\n",
2175 md, maskbits)); 2177 md, maskbits));
2176 2178
2177#ifdef PMAP_CACHE_VIPT 2179#ifdef PMAP_CACHE_VIPT
2178 /* 2180 /*
2179 * If we might want to sync the I-cache and we've modified it, 2181 * If we might want to sync the I-cache and we've modified it,
2180 * then we know we definitely need to sync or discard it. 2182 * then we know we definitely need to sync or discard it.
2181 */ 2183 */
2182 if (want_syncicache) 2184 if (want_syncicache)
2183 need_syncicache = md->pvh_attrs & PVF_MOD; 2185 need_syncicache = md->pvh_attrs & PVF_MOD;
2184#endif 2186#endif
2185 /* 2187 /*
2186 * Clear saved attributes (modify, reference) 2188 * Clear saved attributes (modify, reference)
2187 */ 2189 */
2188 md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); 2190 md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
2189 2191
2190 if (SLIST_EMPTY(&md->pvh_list)) { 2192 if (SLIST_EMPTY(&md->pvh_list)) {
2191#ifdef PMAP_CACHE_VIPT 2193#ifdef PMAP_CACHE_VIPT
2192 if (need_syncicache) { 2194 if (need_syncicache) {
2193 /* 2195 /*
2194 * No one has it mapped, so just discard it. The next 2196 * No one has it mapped, so just discard it. The next
2195 * exec remapping will cause it to be synced. 2197 * exec remapping will cause it to be synced.
2196 */ 2198 */
2197 md->pvh_attrs &= ~PVF_EXEC; 2199 md->pvh_attrs &= ~PVF_EXEC;
2198 PMAPCOUNT(exec_discarded_clearbit); 2200 PMAPCOUNT(exec_discarded_clearbit);
2199 } 2201 }
2200#endif 2202#endif
2201 return; 2203 return;
2202 } 2204 }
2203 2205
2204 /* 2206 /*
2205 * Loop over all current mappings, setting/clearing as appropriate 2207 * Loop over all current mappings, setting/clearing as appropriate
2206 */ 2208 */
2207 SLIST_FOREACH(pv, &md->pvh_list, pv_link) { 2209 SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
2208 va = pv->pv_va; 2210 va = pv->pv_va;
2209 pm = pv->pv_pmap; 2211 pm = pv->pv_pmap;
2210 oflags = pv->pv_flags; 2212 oflags = pv->pv_flags;
2211 /* 2213 /*
2212 * Kernel entries are unmanaged and as such not to be changed. 2214 * Kernel entries are unmanaged and as such not to be changed.
2213 */ 2215 */
2214 if (oflags & PVF_KENTRY) 2216 if (oflags & PVF_KENTRY)
2215 continue; 2217 continue;
2216 pv->pv_flags &= ~maskbits; 2218 pv->pv_flags &= ~maskbits;
2217 2219
2218 pmap_acquire_pmap_lock(pm); 2220 pmap_acquire_pmap_lock(pm);
2219 2221
2220 l2b = pmap_get_l2_bucket(pm, va); 2222 l2b = pmap_get_l2_bucket(pm, va);
2221 KDASSERT(l2b != NULL); 2223 KDASSERT(l2b != NULL);
2222 2224
2223 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2225 ptep = &l2b->l2b_kva[l2pte_index(va)];
2224 npte = opte = *ptep; 2226 npte = opte = *ptep;
2225 2227
2226 NPDEBUG(PDB_BITS, 2228 NPDEBUG(PDB_BITS,
2227 printf( 2229 printf(
2228 "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n", 2230 "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
2229 pv, pv->pv_pmap, pv->pv_va, oflags)); 2231 pv, pv->pv_pmap, pv->pv_va, oflags));
2230 2232
2231 if (maskbits & (PVF_WRITE|PVF_MOD)) { 2233 if (maskbits & (PVF_WRITE|PVF_MOD)) {
2232#ifdef PMAP_CACHE_VIVT 2234#ifdef PMAP_CACHE_VIVT
2233 if ((pv->pv_flags & PVF_NC)) { 2235 if ((pv->pv_flags & PVF_NC)) {
2234 /*  2236 /*
2235 * Entry is not cacheable: 2237 * Entry is not cacheable:
2236 * 2238 *
2237 * Don't turn caching on again if this is a  2239 * Don't turn caching on again if this is a
2238 * modified emulation. This would be 2240 * modified emulation. This would be
2239 * inconsistent with the settings created by 2241 * inconsistent with the settings created by
2240 * pmap_vac_me_harder(). Otherwise, it's safe 2242 * pmap_vac_me_harder(). Otherwise, it's safe
2241 * to re-enable caching. 2243 * to re-enable caching.
2242 * 2244 *
2243 * There's no need to call pmap_vac_me_harder() 2245 * There's no need to call pmap_vac_me_harder()
2244 * here: all pages are losing their write 2246 * here: all pages are losing their write
2245 * permission. 2247 * permission.
2246 */ 2248 */
2247 if (maskbits & PVF_WRITE) { 2249 if (maskbits & PVF_WRITE) {
2248 npte |= pte_l2_s_cache_mode; 2250 npte |= pte_l2_s_cache_mode;
2249 pv->pv_flags &= ~PVF_NC; 2251 pv->pv_flags &= ~PVF_NC;
2250 } 2252 }
2251 } else 2253 } else
2252 if (l2pte_writable_p(opte)) { 2254 if (l2pte_writable_p(opte)) {
2253 /*  2255 /*
2254 * Entry is writable/cacheable: check if the pmap 2256 * Entry is writable/cacheable: check if the pmap
2255 * is current and, if so, flush it; otherwise it 2257 * is current and, if so, flush it; otherwise it
2256 * won't be in the cache. 2258 * won't be in the cache.
2257 */ 2259 */
2258 if (PV_BEEN_EXECD(oflags)) 2260 if (PV_BEEN_EXECD(oflags))
2259 pmap_idcache_wbinv_range(pm, pv->pv_va, 2261 pmap_idcache_wbinv_range(pm, pv->pv_va,
2260 PAGE_SIZE); 2262 PAGE_SIZE);
2261 else 2263 else
2262 if (PV_BEEN_REFD(oflags)) 2264 if (PV_BEEN_REFD(oflags))
2263 pmap_dcache_wb_range(pm, pv->pv_va, 2265 pmap_dcache_wb_range(pm, pv->pv_va,
2264 PAGE_SIZE, 2266 PAGE_SIZE,
2265 (maskbits & PVF_REF) != 0, false); 2267 (maskbits & PVF_REF) != 0, false);
2266 } 2268 }
2267#endif 2269#endif
2268 2270
2269 /* make the pte read only */ 2271 /* make the pte read only */
2270 npte = l2pte_set_readonly(npte); 2272 npte = l2pte_set_readonly(npte);
2271 2273
2272 if (maskbits & oflags & PVF_WRITE) { 2274 if (maskbits & oflags & PVF_WRITE) {
2273 /* 2275 /*
2274 * Keep alias accounting up to date 2276 * Keep alias accounting up to date
2275 */ 2277 */
2276 if (pv->pv_pmap == pmap_kernel()) { 2278 if (pv->pv_pmap == pmap_kernel()) {
2277 md->krw_mappings--; 2279 md->krw_mappings--;
2278 md->kro_mappings++; 2280 md->kro_mappings++;
2279 } else { 2281 } else {
2280 md->urw_mappings--; 2282 md->urw_mappings--;
2281 md->uro_mappings++; 2283 md->uro_mappings++;
2282 } 2284 }
2283#ifdef PMAP_CACHE_VIPT 2285#ifdef PMAP_CACHE_VIPT
2284 if (arm_cache_prefer_mask != 0) { 2286 if (arm_cache_prefer_mask != 0) {
2285 if (md->urw_mappings + md->krw_mappings == 0) { 2287 if (md->urw_mappings + md->krw_mappings == 0) {
2286 md->pvh_attrs &= ~PVF_WRITE; 2288 md->pvh_attrs &= ~PVF_WRITE;
2287 } else { 2289 } else {
2288 PMAP_VALIDATE_MD_PAGE(md); 2290 PMAP_VALIDATE_MD_PAGE(md);
2289 } 2291 }
2290 } 2292 }
2291 if (want_syncicache) 2293 if (want_syncicache)
2292 need_syncicache = true; 2294 need_syncicache = true;
2293 need_vac_me_harder = true; 2295 need_vac_me_harder = true;
2294#endif 2296#endif
2295 } 2297 }
2296 } 2298 }
2297 2299
2298 if (maskbits & PVF_REF) { 2300 if (maskbits & PVF_REF) {
2299 if ((pv->pv_flags & PVF_NC) == 0 && 2301 if ((pv->pv_flags & PVF_NC) == 0 &&
2300 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 && 2302 (maskbits & (PVF_WRITE|PVF_MOD)) == 0 &&
2301 l2pte_valid(npte)) { 2303 l2pte_valid(npte)) {
2302#ifdef PMAP_CACHE_VIVT 2304#ifdef PMAP_CACHE_VIVT
2303 /* 2305 /*
2304 * Check npte here; we may have already 2306 * Check npte here; we may have already
2305 * done the wbinv above, and the validity 2307 * done the wbinv above, and the validity
2306 * of the PTE is the same for opte and 2308 * of the PTE is the same for opte and
2307 * npte. 2309 * npte.
2308 */ 2310 */
2309 /* XXXJRT need idcache_inv_range */ 2311 /* XXXJRT need idcache_inv_range */
2310 if (PV_BEEN_EXECD(oflags)) 2312 if (PV_BEEN_EXECD(oflags))
2311 pmap_idcache_wbinv_range(pm, 2313 pmap_idcache_wbinv_range(pm,
2312 pv->pv_va, PAGE_SIZE); 2314 pv->pv_va, PAGE_SIZE);
2313 else 2315 else
2314 if (PV_BEEN_REFD(oflags)) 2316 if (PV_BEEN_REFD(oflags))
2315 pmap_dcache_wb_range(pm, 2317 pmap_dcache_wb_range(pm,
2316 pv->pv_va, PAGE_SIZE, 2318 pv->pv_va, PAGE_SIZE,
2317 true, true); 2319 true, true);
2318#endif 2320#endif
2319 } 2321 }
2320 2322
2321 /* 2323 /*
2322 * Make the PTE invalid so that we will take a 2324 * Make the PTE invalid so that we will take a
2323 * page fault the next time the mapping is 2325 * page fault the next time the mapping is
2324 * referenced. 2326 * referenced.
2325 */ 2327 */
2326 npte &= ~L2_TYPE_MASK; 2328 npte &= ~L2_TYPE_MASK;
2327 npte |= L2_TYPE_INV; 2329 npte |= L2_TYPE_INV;
2328 } 2330 }
2329 2331
2330 if (npte != opte) { 2332 if (npte != opte) {
2331 *ptep = npte; 2333 *ptep = npte;
2332 PTE_SYNC(ptep); 2334 PTE_SYNC(ptep);
2333 /* Flush the TLB entry if a current pmap. */ 2335 /* Flush the TLB entry if a current pmap. */
2334 if (PV_BEEN_EXECD(oflags)) 2336 if (PV_BEEN_EXECD(oflags))
2335 pmap_tlb_flushID_SE(pm, pv->pv_va); 2337 pmap_tlb_flushID_SE(pm, pv->pv_va);
2336 else 2338 else
2337 if (PV_BEEN_REFD(oflags)) 2339 if (PV_BEEN_REFD(oflags))
2338 pmap_tlb_flushD_SE(pm, pv->pv_va); 2340 pmap_tlb_flushD_SE(pm, pv->pv_va);
2339 } 2341 }
2340 2342
2341 pmap_release_pmap_lock(pm); 2343 pmap_release_pmap_lock(pm);
2342 2344
2343 NPDEBUG(PDB_BITS, 2345 NPDEBUG(PDB_BITS,
2344 printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n", 2346 printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
2345 pm, va, opte, npte)); 2347 pm, va, opte, npte));
2346 } 2348 }
2347 2349
2348#ifdef PMAP_CACHE_VIPT 2350#ifdef PMAP_CACHE_VIPT
2349 /* 2351 /*
2350 * If we need to sync the I-cache and we haven't done it yet, do it. 2352 * If we need to sync the I-cache and we haven't done it yet, do it.
2351 */ 2353 */
2352 if (need_syncicache && !did_syncicache) { 2354 if (need_syncicache && !did_syncicache) {
2353 pmap_syncicache_page(md, pa); 2355 pmap_syncicache_page(md, pa);
2354 PMAPCOUNT(exec_synced_clearbit); 2356 PMAPCOUNT(exec_synced_clearbit);
2355 } 2357 }
2356 /* 2358 /*
2357 * If we are changing this to read-only, we need to call vac_me_harder 2359 * If we are changing this to read-only, we need to call vac_me_harder
2358 * so we can change all the read-only pages to cacheable. We treat 2360 * so we can change all the read-only pages to cacheable. We treat
2359 * this as a page deletion. 2361 * this as a page deletion.
2360 */ 2362 */
2361 if (need_vac_me_harder) { 2363 if (need_vac_me_harder) {
2362 if (md->pvh_attrs & PVF_NC) 2364 if (md->pvh_attrs & PVF_NC)
2363 pmap_vac_me_harder(md, pa, NULL, 0); 2365 pmap_vac_me_harder(md, pa, NULL, 0);
2364 } 2366 }
2365#endif 2367#endif
2366} 2368}
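
pmap_clearbit() is the common back end for the referenced/modified bookkeeping. A hedged sketch of how a caller typically uses it is shown below; the real callers live elsewhere in pmap.c, outside this hunk, and VM_PAGE_TO_MD() is assumed here as the usual page-to-metadata accessor.

    /*
     * Hedged sketch, not taken from this diff: clearing the modified
     * attribute of a managed page is pmap_clearbit() with PVF_MOD
     * (PVF_REF is used the same way for the referenced attribute).
     */
    static bool
    clear_modify_sketch(struct vm_page *pg)
    {
    	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
    	const bool rv = (md->pvh_attrs & PVF_MOD) != 0;

    	if (rv)
    		pmap_clearbit(md, VM_PAGE_TO_PHYS(pg), PVF_MOD);
    	return rv;
    }
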
2367 2369
2368/* 2370/*
2369 * pmap_clean_page() 2371 * pmap_clean_page()
2370 * 2372 *
2371 * This is a local function used to work out the best strategy to clean 2373 * This is a local function used to work out the best strategy to clean
2372 * a single page referenced by its entry in the PV table. It's used by 2374 * a single page referenced by its entry in the PV table. It's used by
2373 * pmap_copy_page, pmap_zero_page and maybe some others later on. 2375 * pmap_copy_page, pmap_zero_page and maybe some others later on.
2374 * 2376 *
2375 * Its policy is effectively: 2377 * Its policy is effectively:
2376 * o If there are no mappings, we don't bother doing anything with the cache. 2378 * o If there are no mappings, we don't bother doing anything with the cache.
2377 * o If there is one mapping, we clean just that page. 2379 * o If there is one mapping, we clean just that page.
2378 * o If there are multiple mappings, we clean the entire cache. 2380 * o If there are multiple mappings, we clean the entire cache.
2379 * 2381 *
2380 * So that some functions can be further optimised, it returns 0 if it didn't 2382 * So that some functions can be further optimised, it returns 0 if it didn't
2381 * clean the entire cache, or 1 if it did. 2383 * clean the entire cache, or 1 if it did.
2382 * 2384 *
2383 * XXX One bug in this routine is that if the pv_entry has a single page 2385 * XXX One bug in this routine is that if the pv_entry has a single page
2384 * mapped at 0x00000000 a whole cache clean will be performed rather than 2386 * mapped at 0x00000000 a whole cache clean will be performed rather than
2385 * just the one page. This should not occur in everyday use, and if it does 2387 * just the one page. This should not occur in everyday use, and if it does
2386 * it merely results in a less efficient clean for the page. 2388 * it merely results in a less efficient clean for the page.
2387 */ 2389 */
2388#ifdef PMAP_CACHE_VIVT 2390#ifdef PMAP_CACHE_VIVT
2389static int 2391static int
2390pmap_clean_page(struct pv_entry *pv, bool is_src) 2392pmap_clean_page(struct pv_entry *pv, bool is_src)
2391{ 2393{
2392 pmap_t pm_to_clean = NULL; 2394 pmap_t pm_to_clean = NULL;
2393 struct pv_entry *npv; 2395 struct pv_entry *npv;
2394 u_int cache_needs_cleaning = 0; 2396 u_int cache_needs_cleaning = 0;
2395 u_int flags = 0; 2397 u_int flags = 0;
2396 vaddr_t page_to_clean = 0; 2398 vaddr_t page_to_clean = 0;
2397 2399
2398 if (pv == NULL) { 2400 if (pv == NULL) {
2399 /* nothing mapped in so nothing to flush */ 2401 /* nothing mapped in so nothing to flush */
2400 return (0); 2402 return (0);
2401 } 2403 }
2402 2404
2403 /* 2405 /*
2404 * Since we flush the cache each time we change to a different 2406 * Since we flush the cache each time we change to a different
2405 * user vmspace, we only need to flush the page if it is in the 2407 * user vmspace, we only need to flush the page if it is in the
2406 * current pmap. 2408 * current pmap.
2407 */ 2409 */
2408 2410
2409 for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) { 2411 for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
2410 if (pmap_is_current(npv->pv_pmap)) { 2412 if (pmap_is_current(npv->pv_pmap)) {
2411 flags |= npv->pv_flags; 2413 flags |= npv->pv_flags;
2412 /* 2414 /*
2413 * The page is mapped non-cacheable in  2415 * The page is mapped non-cacheable in
2414 * this map. No need to flush the cache. 2416 * this map. No need to flush the cache.
2415 */ 2417 */
2416 if (npv->pv_flags & PVF_NC) { 2418 if (npv->pv_flags & PVF_NC) {
2417#ifdef DIAGNOSTIC 2419#ifdef DIAGNOSTIC
2418 if (cache_needs_cleaning) 2420 if (cache_needs_cleaning)
2419 panic("pmap_clean_page: " 2421 panic("pmap_clean_page: "
2420 "cache inconsistency"); 2422 "cache inconsistency");
2421#endif 2423#endif
2422 break; 2424 break;
2423 } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) 2425 } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
2424 continue; 2426 continue;
2425 if (cache_needs_cleaning) { 2427 if (cache_needs_cleaning) {
2426 page_to_clean = 0; 2428 page_to_clean = 0;
2427 break; 2429 break;
2428 } else { 2430 } else {
2429 page_to_clean = npv->pv_va; 2431 page_to_clean = npv->pv_va;
2430 pm_to_clean = npv->pv_pmap; 2432 pm_to_clean = npv->pv_pmap;
2431 } 2433 }
2432 cache_needs_cleaning = 1; 2434 cache_needs_cleaning = 1;
2433 } 2435 }
2434 } 2436 }
2435 2437
2436 if (page_to_clean) { 2438 if (page_to_clean) {
2437 if (PV_BEEN_EXECD(flags)) 2439 if (PV_BEEN_EXECD(flags))
2438 pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, 2440 pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
2439 PAGE_SIZE); 2441 PAGE_SIZE);
2440 else 2442 else
2441 pmap_dcache_wb_range(pm_to_clean, page_to_clean, 2443 pmap_dcache_wb_range(pm_to_clean, page_to_clean,
2442 PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); 2444 PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
2443 } else if (cache_needs_cleaning) { 2445 } else if (cache_needs_cleaning) {
2444 pmap_t const pm = curproc->p_vmspace->vm_map.pmap; 2446 pmap_t const pm = curproc->p_vmspace->vm_map.pmap;
2445 2447
2446 if (PV_BEEN_EXECD(flags)) 2448 if (PV_BEEN_EXECD(flags))
2447 pmap_idcache_wbinv_all(pm); 2449 pmap_idcache_wbinv_all(pm);
2448 else 2450 else
2449 pmap_dcache_wbinv_all(pm); 2451 pmap_dcache_wbinv_all(pm);
2450 return (1); 2452 return (1);
2451 } 2453 }
2452 return (0); 2454 return (0);
2453} 2455}
2454#endif 2456#endif
2455 2457
2456#ifdef PMAP_CACHE_VIPT 2458#ifdef PMAP_CACHE_VIPT
2457/* 2459/*
2458 * Sync a page with the I-cache. Since this is a VIPT cache, we must pick the 2460 * Sync a page with the I-cache. Since this is a VIPT cache, we must pick the
2459 * right cache alias to make sure we flush the right stuff. 2461 * right cache alias to make sure we flush the right stuff.
2460 */ 2462 */
2461void 2463void
2462pmap_syncicache_page(struct vm_page_md *md, paddr_t pa) 2464pmap_syncicache_page(struct vm_page_md *md, paddr_t pa)
2463{ 2465{
2464 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2466 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2465 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; 2467 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
2466 2468
2467 NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n", 2469 NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n",
2468 md, md->pvh_attrs)); 2470 md, md->pvh_attrs));
2469 /* 2471 /*
2470 * No need to clean the page if it's non-cached. 2472 * No need to clean the page if it's non-cached.
2471 */ 2473 */
2472 if (md->pvh_attrs & PVF_NC) 2474 if (md->pvh_attrs & PVF_NC)
2473 return; 2475 return;
2474 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED); 2476 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
2475 2477
2476 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2478 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2477 /* 2479 /*
2478 * Set up a PTE with the right coloring to flush existing cache lines. 2480 * Set up a PTE with the right coloring to flush existing cache lines.
2479 */ 2481 */
2480 *ptep = L2_S_PROTO | 2482 *ptep = L2_S_PROTO |
2481 pa 2483 pa
2482 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) 2484 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
2483 | pte_l2_s_cache_mode; 2485 | pte_l2_s_cache_mode;
2484 PTE_SYNC(ptep); 2486 PTE_SYNC(ptep);
2485 2487
2486 /* 2488 /*
2487 * Flush it. 2489 * Flush it.
2488 */ 2490 */
2489 cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE); 2491 cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE);
2490 /* 2492 /*
2491 * Unmap the page. 2493 * Unmap the page.
2492 */ 2494 */
2493 *ptep = 0; 2495 *ptep = 0;
2494 PTE_SYNC(ptep); 2496 PTE_SYNC(ptep);
2495 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2497 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2496 2498
2497 md->pvh_attrs |= PVF_EXEC; 2499 md->pvh_attrs |= PVF_EXEC;
2498 PMAPCOUNT(exec_synced); 2500 PMAPCOUNT(exec_synced);
2499} 2501}
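
The colour selection above can be made concrete with an assumed geometry: for a 32 KiB, 4-way set-associative VIPT cache the way size is 8 KiB, so with 4 KiB pages there are two page colours and arm_cache_prefer_mask would be 0x1000. The helper below only restates the alias choice and is illustrative, not part of the source.

    /*
     * Illustrative only: flush through the cdstp window page whose
     * colour matches the page's existing mappings, so the I-cache sync
     * touches the same cache lines those mappings use.  Under the
     * assumed geometry, a page of colour 0x1000 is reached through the
     * second page of the window.
     */
    static inline vaddr_t
    syncicache_alias_va(const struct vm_page_md *md)
    {
    	return cdstp + (md->pvh_attrs & arm_cache_prefer_mask);
    }
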
2500 2502
2501void 2503void
2502pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush) 2504pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
2503{ 2505{
2504 vsize_t va_offset, end_va; 2506 vsize_t va_offset, end_va;
2505 bool wbinv_p; 2507 bool wbinv_p;
2506 2508
2507 if (arm_cache_prefer_mask == 0) 2509 if (arm_cache_prefer_mask == 0)
2508 return; 2510 return;
2509 2511
2510 switch (flush) { 2512 switch (flush) {
2511 case PMAP_FLUSH_PRIMARY: 2513 case PMAP_FLUSH_PRIMARY:
2512 if (md->pvh_attrs & PVF_MULTCLR) { 2514 if (md->pvh_attrs & PVF_MULTCLR) {
2513 va_offset = 0; 2515 va_offset = 0;
2514 end_va = arm_cache_prefer_mask; 2516 end_va = arm_cache_prefer_mask;
2515 md->pvh_attrs &= ~PVF_MULTCLR; 2517 md->pvh_attrs &= ~PVF_MULTCLR;
2516 PMAPCOUNT(vac_flush_lots); 2518 PMAPCOUNT(vac_flush_lots);
2517 } else { 2519 } else {
2518 va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2520 va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2519 end_va = va_offset; 2521 end_va = va_offset;
2520 PMAPCOUNT(vac_flush_one); 2522 PMAPCOUNT(vac_flush_one);
2521 } 2523 }
2522 /* 2524 /*
2523 * Mark that the page is no longer dirty. 2525 * Mark that the page is no longer dirty.
2524 */ 2526 */
2525 md->pvh_attrs &= ~PVF_DIRTY; 2527 md->pvh_attrs &= ~PVF_DIRTY;
2526 wbinv_p = true; 2528 wbinv_p = true;
2527 break; 2529 break;
2528 case PMAP_FLUSH_SECONDARY: 2530 case PMAP_FLUSH_SECONDARY:
2529 va_offset = 0; 2531 va_offset = 0;
2530 end_va = arm_cache_prefer_mask; 2532 end_va = arm_cache_prefer_mask;
2531 wbinv_p = true; 2533 wbinv_p = true;
2532 md->pvh_attrs &= ~PVF_MULTCLR; 2534 md->pvh_attrs &= ~PVF_MULTCLR;
2533 PMAPCOUNT(vac_flush_lots); 2535 PMAPCOUNT(vac_flush_lots);
2534 break; 2536 break;
2535 case PMAP_CLEAN_PRIMARY: 2537 case PMAP_CLEAN_PRIMARY:
2536 va_offset = md->pvh_attrs & arm_cache_prefer_mask; 2538 va_offset = md->pvh_attrs & arm_cache_prefer_mask;
2537 end_va = va_offset; 2539 end_va = va_offset;
2538 wbinv_p = false; 2540 wbinv_p = false;
2539 /* 2541 /*
2540 * Mark that the page is no longer dirty. 2542 * Mark that the page is no longer dirty.
2541 */ 2543 */
2542 if ((md->pvh_attrs & PVF_DMOD) == 0) 2544 if ((md->pvh_attrs & PVF_DMOD) == 0)
2543 md->pvh_attrs &= ~PVF_DIRTY; 2545 md->pvh_attrs &= ~PVF_DIRTY;
2544 PMAPCOUNT(vac_clean_one); 2546 PMAPCOUNT(vac_clean_one);
2545 break; 2547 break;
2546 default: 2548 default:
2547 return; 2549 return;
2548 } 2550 }
2549 2551
2550 KASSERT(!(md->pvh_attrs & PVF_NC)); 2552 KASSERT(!(md->pvh_attrs & PVF_NC));
2551 2553
2552 NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n", 2554 NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n",
2553 md, md->pvh_attrs)); 2555 md, md->pvh_attrs));
2554 2556
2555 const size_t scache_line_size = arm_scache.dcache_line_size; 2557 const size_t scache_line_size = arm_scache.dcache_line_size;
2556 2558
2557 for (; va_offset <= end_va; va_offset += PAGE_SIZE) { 2559 for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
2558 const size_t pte_offset = va_offset >> PGSHIFT; 2560 const size_t pte_offset = va_offset >> PGSHIFT;
2559 pt_entry_t * const ptep = &cdst_pte[pte_offset]; 2561 pt_entry_t * const ptep = &cdst_pte[pte_offset];
2560 const pt_entry_t oldpte = *ptep; 2562 const pt_entry_t oldpte = *ptep;
2561 2563
2562 if (flush == PMAP_FLUSH_SECONDARY 2564 if (flush == PMAP_FLUSH_SECONDARY
2563 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask)) 2565 && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
2564 continue; 2566 continue;
2565 2567
2566 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2568 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2567 /* 2569 /*
2568 * Set up a PTE with the right coloring to flush 2570 * Set up a PTE with the right coloring to flush
2569 * existing cache entries. 2571 * existing cache entries.
2570 */ 2572 */
2571 *ptep = L2_S_PROTO 2573 *ptep = L2_S_PROTO
2572 | pa 2574 | pa
2573 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) 2575 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
2574 | pte_l2_s_cache_mode; 2576 | pte_l2_s_cache_mode;
2575 PTE_SYNC(ptep); 2577 PTE_SYNC(ptep);
2576 2578
2577 /* 2579 /*
2578 * Flush it. 2580 * Flush it.
2579 */ 2581 */
2580 vaddr_t va = cdstp + va_offset;  2582 vaddr_t va = cdstp + va_offset;
2581 if (scache_line_size != 0) { 2583 if (scache_line_size != 0) {
2582 cpu_dcache_wb_range(va, PAGE_SIZE);  2584 cpu_dcache_wb_range(va, PAGE_SIZE);
2583 if (wbinv_p) { 2585 if (wbinv_p) {
2584 cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);  2586 cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);
2585 cpu_dcache_inv_range(va, PAGE_SIZE); 2587 cpu_dcache_inv_range(va, PAGE_SIZE);
2586 } else { 2588 } else {
2587 cpu_sdcache_wb_range(va, pa, PAGE_SIZE); 2589 cpu_sdcache_wb_range(va, pa, PAGE_SIZE);
2588 } 2590 }
2589 } else { 2591 } else {
2590 if (wbinv_p) { 2592 if (wbinv_p) {
2591 cpu_dcache_wbinv_range(va, PAGE_SIZE); 2593 cpu_dcache_wbinv_range(va, PAGE_SIZE);
2592 } else { 2594 } else {
2593 cpu_dcache_wb_range(va, PAGE_SIZE); 2595 cpu_dcache_wb_range(va, PAGE_SIZE);
2594 } 2596 }
2595 } 2597 }
2596 2598
2597 /* 2599 /*
2598 * Restore the page table entry since we might have interrupted 2600 * Restore the page table entry since we might have interrupted
2599 * pmap_zero_page or pmap_copy_page which was already using 2601 * pmap_zero_page or pmap_copy_page which was already using
2600 * this pte. 2602 * this pte.
2601 */ 2603 */
2602 *ptep = oldpte; 2604 *ptep = oldpte;
2603 PTE_SYNC(ptep); 2605 PTE_SYNC(ptep);
2604 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset); 2606 pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
2605 } 2607 }
2606} 2608}
2607#endif /* PMAP_CACHE_VIPT */ 2609#endif /* PMAP_CACHE_VIPT */
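
Both pmap_syncicache_page() and pmap_flush_page() above lean on the same VIPT trick: map the physical page at a kernel scratch address whose low bits match the wanted cache colour, do the maintenance through that alias, then tear the mapping down again. A minimal sketch of that pattern as a hypothetical helper (not in the tree), reusing the pmap-internal names from the code above (cdst_pte, cdstp, pte_l2_s_cache_mode); the dcache call stands in for whichever cache operation the caller actually needs:

	static void
	flush_page_alias(paddr_t pa, vsize_t va_offset)	/* hypothetical */
	{
		pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
		const vaddr_t va = cdstp + va_offset;	/* alias with the wanted colour */

		pmap_tlb_flushID_SE(pmap_kernel(), va);
		*ptep = L2_S_PROTO | pa
		    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
		    | pte_l2_s_cache_mode;		/* cacheable, so the lines are hit */
		PTE_SYNC(ptep);

		cpu_dcache_wbinv_range(va, PAGE_SIZE);	/* placeholder cache op */

		*ptep = 0;				/* unmap the scratch alias again */
		PTE_SYNC(ptep);
		pmap_tlb_flushID_SE(pmap_kernel(), va);
	}
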
2608 2610
2609/* 2611/*
2610 * Routine: pmap_page_remove 2612 * Routine: pmap_page_remove
2611 * Function: 2613 * Function:
2612 * Removes this physical page from 2614 * Removes this physical page from
2613 * all physical maps in which it resides. 2615 * all physical maps in which it resides.
2614 * Reflects back modify bits to the pager. 2616 * Reflects back modify bits to the pager.
2615 */ 2617 */
2616static void 2618static void
2617pmap_page_remove(struct vm_page_md *md, paddr_t pa) 2619pmap_page_remove(struct vm_page_md *md, paddr_t pa)
2618{ 2620{
2619 struct l2_bucket *l2b; 2621 struct l2_bucket *l2b;
2620 struct pv_entry *pv, *npv, **pvp; 2622 struct pv_entry *pv, *npv, **pvp;
2621 pmap_t pm; 2623 pmap_t pm;
2622 pt_entry_t *ptep; 2624 pt_entry_t *ptep;
2623 bool flush; 2625 bool flush;
2624 u_int flags; 2626 u_int flags;
2625 2627
2626 NPDEBUG(PDB_FOLLOW, 2628 NPDEBUG(PDB_FOLLOW,
2627 printf("pmap_page_remove: md %p (0x%08lx)\n", md, 2629 printf("pmap_page_remove: md %p (0x%08lx)\n", md,
2628 pa)); 2630 pa));
2629 2631
2630 pv = SLIST_FIRST(&md->pvh_list); 2632 pv = SLIST_FIRST(&md->pvh_list);
2631 if (pv == NULL) { 2633 if (pv == NULL) {
2632#ifdef PMAP_CACHE_VIPT 2634#ifdef PMAP_CACHE_VIPT
2633 /* 2635 /*
2634 * We *know* the page contents are about to be replaced. 2636 * We *know* the page contents are about to be replaced.
2635 * Discard the exec contents 2637 * Discard the exec contents
2636 */ 2638 */
2637 if (PV_IS_EXEC_P(md->pvh_attrs)) 2639 if (PV_IS_EXEC_P(md->pvh_attrs))
2638 PMAPCOUNT(exec_discarded_page_protect); 2640 PMAPCOUNT(exec_discarded_page_protect);
2639 md->pvh_attrs &= ~PVF_EXEC; 2641 md->pvh_attrs &= ~PVF_EXEC;
2640 PMAP_VALIDATE_MD_PAGE(md); 2642 PMAP_VALIDATE_MD_PAGE(md);
2641#endif 2643#endif
2642 return; 2644 return;
2643 } 2645 }
2644#ifdef PMAP_CACHE_VIPT 2646#ifdef PMAP_CACHE_VIPT
2645 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md)); 2647 KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
2646#endif 2648#endif
2647 2649
2648 /* 2650 /*
2649 * Clear alias counts 2651 * Clear alias counts
2650 */ 2652 */
2651#ifdef PMAP_CACHE_VIVT 2653#ifdef PMAP_CACHE_VIVT
2652 md->k_mappings = 0; 2654 md->k_mappings = 0;
2653#endif 2655#endif
2654 md->urw_mappings = md->uro_mappings = 0; 2656 md->urw_mappings = md->uro_mappings = 0;
2655 2657
2656 flush = false; 2658 flush = false;
2657 flags = 0; 2659 flags = 0;
2658 2660
2659#ifdef PMAP_CACHE_VIVT 2661#ifdef PMAP_CACHE_VIVT
2660 pmap_clean_page(pv, false); 2662 pmap_clean_page(pv, false);
2661#endif 2663#endif
2662 2664
2663 pvp = &SLIST_FIRST(&md->pvh_list); 2665 pvp = &SLIST_FIRST(&md->pvh_list);
2664 while (pv) { 2666 while (pv) {
2665 pm = pv->pv_pmap; 2667 pm = pv->pv_pmap;
2666 npv = SLIST_NEXT(pv, pv_link); 2668 npv = SLIST_NEXT(pv, pv_link);
2667 if (flush == false && pmap_is_current(pm)) 2669 if (flush == false && pmap_is_current(pm))
2668 flush = true; 2670 flush = true;
2669 2671
2670 if (pm == pmap_kernel()) { 2672 if (pm == pmap_kernel()) {
2671#ifdef PMAP_CACHE_VIPT 2673#ifdef PMAP_CACHE_VIPT
2672 /* 2674 /*
 2673 * If this was an unmanaged mapping, it must be preserved. 2675 * If this was an unmanaged mapping, it must be preserved.
2674 * Move it back on the list and advance the end-of-list 2676 * Move it back on the list and advance the end-of-list
2675 * pointer. 2677 * pointer.
2676 */ 2678 */
2677 if (pv->pv_flags & PVF_KENTRY) { 2679 if (pv->pv_flags & PVF_KENTRY) {
2678 *pvp = pv; 2680 *pvp = pv;
2679 pvp = &SLIST_NEXT(pv, pv_link); 2681 pvp = &SLIST_NEXT(pv, pv_link);
2680 pv = npv; 2682 pv = npv;
2681 continue; 2683 continue;
2682 } 2684 }
2683 if (pv->pv_flags & PVF_WRITE) 2685 if (pv->pv_flags & PVF_WRITE)
2684 md->krw_mappings--; 2686 md->krw_mappings--;
2685 else 2687 else
2686 md->kro_mappings--; 2688 md->kro_mappings--;
2687#endif 2689#endif
2688 PMAPCOUNT(kernel_unmappings); 2690 PMAPCOUNT(kernel_unmappings);
2689 } 2691 }
2690 PMAPCOUNT(unmappings); 2692 PMAPCOUNT(unmappings);
2691 2693
2692 pmap_acquire_pmap_lock(pm); 2694 pmap_acquire_pmap_lock(pm);
2693 2695
2694 l2b = pmap_get_l2_bucket(pm, pv->pv_va); 2696 l2b = pmap_get_l2_bucket(pm, pv->pv_va);
2695 KDASSERT(l2b != NULL); 2697 KDASSERT(l2b != NULL);
2696 2698
2697 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2699 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
2698 2700
2699 /* 2701 /*
2700 * Update statistics 2702 * Update statistics
2701 */ 2703 */
2702 --pm->pm_stats.resident_count; 2704 --pm->pm_stats.resident_count;
2703 2705
2704 /* Wired bit */ 2706 /* Wired bit */
2705 if (pv->pv_flags & PVF_WIRED) 2707 if (pv->pv_flags & PVF_WIRED)
2706 --pm->pm_stats.wired_count; 2708 --pm->pm_stats.wired_count;
2707 2709
2708 flags |= pv->pv_flags; 2710 flags |= pv->pv_flags;
2709 2711
2710 /* 2712 /*
2711 * Invalidate the PTEs. 2713 * Invalidate the PTEs.
2712 */ 2714 */
2713 *ptep = 0; 2715 *ptep = 0;
2714 PTE_SYNC_CURRENT(pm, ptep); 2716 PTE_SYNC_CURRENT(pm, ptep);
2715 pmap_free_l2_bucket(pm, l2b, 1); 2717 pmap_free_l2_bucket(pm, l2b, 1);
2716 2718
2717 pool_put(&pmap_pv_pool, pv); 2719 pool_put(&pmap_pv_pool, pv);
2718 pv = npv; 2720 pv = npv;
2719 /* 2721 /*
2720 * if we reach the end of the list and there are still 2722 * if we reach the end of the list and there are still
2721 * mappings, they might be able to be cached now. 2723 * mappings, they might be able to be cached now.
2722 */ 2724 */
2723 if (pv == NULL) { 2725 if (pv == NULL) {
2724 *pvp = NULL; 2726 *pvp = NULL;
2725 if (!SLIST_EMPTY(&md->pvh_list)) 2727 if (!SLIST_EMPTY(&md->pvh_list))
2726 pmap_vac_me_harder(md, pa, pm, 0); 2728 pmap_vac_me_harder(md, pa, pm, 0);
2727 } 2729 }
2728 pmap_release_pmap_lock(pm); 2730 pmap_release_pmap_lock(pm);
2729 } 2731 }
2730#ifdef PMAP_CACHE_VIPT 2732#ifdef PMAP_CACHE_VIPT
2731 /* 2733 /*
2732 * Its EXEC cache is now gone. 2734 * Its EXEC cache is now gone.
2733 */ 2735 */
2734 if (PV_IS_EXEC_P(md->pvh_attrs)) 2736 if (PV_IS_EXEC_P(md->pvh_attrs))
2735 PMAPCOUNT(exec_discarded_page_protect); 2737 PMAPCOUNT(exec_discarded_page_protect);
2736 md->pvh_attrs &= ~PVF_EXEC; 2738 md->pvh_attrs &= ~PVF_EXEC;
2737 KASSERT(md->urw_mappings == 0); 2739 KASSERT(md->urw_mappings == 0);
2738 KASSERT(md->uro_mappings == 0); 2740 KASSERT(md->uro_mappings == 0);
2739 if (arm_cache_prefer_mask != 0) { 2741 if (arm_cache_prefer_mask != 0) {
2740 if (md->krw_mappings == 0) 2742 if (md->krw_mappings == 0)
2741 md->pvh_attrs &= ~PVF_WRITE; 2743 md->pvh_attrs &= ~PVF_WRITE;
2742 PMAP_VALIDATE_MD_PAGE(md); 2744 PMAP_VALIDATE_MD_PAGE(md);
2743 } 2745 }
2744#endif 2746#endif
2745 2747
2746 if (flush) { 2748 if (flush) {
2747 /* 2749 /*
2748 * Note: We can't use pmap_tlb_flush{I,D}() here since that 2750 * Note: We can't use pmap_tlb_flush{I,D}() here since that
2749 * would need a subsequent call to pmap_update() to ensure 2751 * would need a subsequent call to pmap_update() to ensure
2750 * curpm->pm_cstate.cs_all is reset. Our callers are not 2752 * curpm->pm_cstate.cs_all is reset. Our callers are not
2751 * required to do that (see pmap(9)), so we can't modify 2753 * required to do that (see pmap(9)), so we can't modify
2752 * the current pmap's state. 2754 * the current pmap's state.
2753 */ 2755 */
2754 if (PV_BEEN_EXECD(flags)) 2756 if (PV_BEEN_EXECD(flags))
2755 cpu_tlb_flushID(); 2757 cpu_tlb_flushID();
2756 else 2758 else
2757 cpu_tlb_flushD(); 2759 cpu_tlb_flushD();
2758 } 2760 }
2759 cpu_cpwait(); 2761 cpu_cpwait();
2760} 2762}
2761 2763
2762/* 2764/*
2763 * pmap_t pmap_create(void) 2765 * pmap_t pmap_create(void)
2764 *  2766 *
2765 * Create a new pmap structure from scratch. 2767 * Create a new pmap structure from scratch.
2766 */ 2768 */
2767pmap_t 2769pmap_t
2768pmap_create(void) 2770pmap_create(void)
2769{ 2771{
2770 pmap_t pm; 2772 pmap_t pm;
2771 2773
2772 pm = pool_cache_get(&pmap_cache, PR_WAITOK); 2774 pm = pool_cache_get(&pmap_cache, PR_WAITOK);
2773 2775
2774 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); 2776 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
2775 uvm_obj_init(&pm->pm_obj, NULL, false, 1); 2777 uvm_obj_init(&pm->pm_obj, NULL, false, 1);
2776 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); 2778 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
2777 2779
2778 pm->pm_stats.wired_count = 0; 2780 pm->pm_stats.wired_count = 0;
2779 pm->pm_stats.resident_count = 1; 2781 pm->pm_stats.resident_count = 1;
2780 pm->pm_cstate.cs_all = 0; 2782 pm->pm_cstate.cs_all = 0;
2781 pmap_alloc_l1(pm); 2783 pmap_alloc_l1(pm);
2782 2784
2783 /* 2785 /*
2784 * Note: The pool cache ensures that the pm_l2[] array is already 2786 * Note: The pool cache ensures that the pm_l2[] array is already
2785 * initialised to zero. 2787 * initialised to zero.
2786 */ 2788 */
2787 2789
2788 pmap_pinit(pm); 2790 pmap_pinit(pm);
2789 2791
2790 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); 2792 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
2791 2793
2792 return (pm); 2794 return (pm);
2793} 2795}
2794 2796
2795u_int 2797u_int
2796arm32_mmap_flags(paddr_t pa) 2798arm32_mmap_flags(paddr_t pa)
2797{ 2799{
2798 /* 2800 /*
2799 * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff 2801 * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff
2800 * and we're using the upper bits in page numbers to pass flags around 2802 * and we're using the upper bits in page numbers to pass flags around
2801 * so we might as well use the same bits 2803 * so we might as well use the same bits
2802 */ 2804 */
2803 return (u_int)pa & PMAP_MD_MASK; 2805 return (u_int)pa & PMAP_MD_MASK;
2804} 2806}
2805/* 2807/*
2806 * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, 2808 * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
2807 * u_int flags) 2809 * u_int flags)
2808 *  2810 *
2809 * Insert the given physical page (p) at 2811 * Insert the given physical page (p) at
2810 * the specified virtual address (v) in the 2812 * the specified virtual address (v) in the
2811 * target physical map with the protection requested. 2813 * target physical map with the protection requested.
2812 * 2814 *
2813 * NB: This is the only routine which MAY NOT lazy-evaluate 2815 * NB: This is the only routine which MAY NOT lazy-evaluate
2814 * or lose information. That is, this routine must actually 2816 * or lose information. That is, this routine must actually
2815 * insert this page into the given map NOW. 2817 * insert this page into the given map NOW.
2816 */ 2818 */
2817int 2819int
2818pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 2820pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
2819{ 2821{
2820 struct l2_bucket *l2b; 2822 struct l2_bucket *l2b;
2821 struct vm_page *pg, *opg; 2823 struct vm_page *pg, *opg;
2822 struct pv_entry *pv; 2824 struct pv_entry *pv;
2823 pt_entry_t *ptep, npte, opte; 2825 pt_entry_t *ptep, npte, opte;
2824 u_int nflags; 2826 u_int nflags;
2825 u_int oflags; 2827 u_int oflags;
 2828#ifdef ARM_HAS_VBAR
 2829 const bool vector_page_p = false;
 2830#else
 2831 const bool vector_page_p = (va == vector_page);
 2832#endif
2826 2833
2827 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags)); 2834 NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
2828 2835
2829 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0); 2836 KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
2830 KDASSERT(((va | pa) & PGOFSET) == 0); 2837 KDASSERT(((va | pa) & PGOFSET) == 0);
2831 2838
2832 /* 2839 /*
2833 * Get a pointer to the page. Later on in this function, we 2840 * Get a pointer to the page. Later on in this function, we
2834 * test for a managed page by checking pg != NULL. 2841 * test for a managed page by checking pg != NULL.
2835 */ 2842 */
2836 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; 2843 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2837 2844
2838 nflags = 0; 2845 nflags = 0;
2839 if (prot & VM_PROT_WRITE) 2846 if (prot & VM_PROT_WRITE)
2840 nflags |= PVF_WRITE; 2847 nflags |= PVF_WRITE;
2841 if (prot & VM_PROT_EXECUTE) 2848 if (prot & VM_PROT_EXECUTE)
2842 nflags |= PVF_EXEC; 2849 nflags |= PVF_EXEC;
2843 if (flags & PMAP_WIRED) 2850 if (flags & PMAP_WIRED)
2844 nflags |= PVF_WIRED; 2851 nflags |= PVF_WIRED;
2845 2852
2846 pmap_acquire_pmap_lock(pm); 2853 pmap_acquire_pmap_lock(pm);
2847 2854
2848 /* 2855 /*
2849 * Fetch the L2 bucket which maps this page, allocating one if 2856 * Fetch the L2 bucket which maps this page, allocating one if
2850 * necessary for user pmaps. 2857 * necessary for user pmaps.
2851 */ 2858 */
2852 if (pm == pmap_kernel()) 2859 if (pm == pmap_kernel())
2853 l2b = pmap_get_l2_bucket(pm, va); 2860 l2b = pmap_get_l2_bucket(pm, va);
2854 else 2861 else
2855 l2b = pmap_alloc_l2_bucket(pm, va); 2862 l2b = pmap_alloc_l2_bucket(pm, va);
2856 if (l2b == NULL) { 2863 if (l2b == NULL) {
2857 if (flags & PMAP_CANFAIL) { 2864 if (flags & PMAP_CANFAIL) {
2858 pmap_release_pmap_lock(pm); 2865 pmap_release_pmap_lock(pm);
2859 return (ENOMEM); 2866 return (ENOMEM);
2860 } 2867 }
2861 panic("pmap_enter: failed to allocate L2 bucket"); 2868 panic("pmap_enter: failed to allocate L2 bucket");
2862 } 2869 }
2863 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2870 ptep = &l2b->l2b_kva[l2pte_index(va)];
2864 opte = *ptep; 2871 opte = *ptep;
2865 npte = pa; 2872 npte = pa;
2866 oflags = 0; 2873 oflags = 0;
2867 2874
2868 if (opte) { 2875 if (opte) {
2869 /* 2876 /*
2870 * There is already a mapping at this address. 2877 * There is already a mapping at this address.
2871 * If the physical address is different, lookup the 2878 * If the physical address is different, lookup the
2872 * vm_page. 2879 * vm_page.
2873 */ 2880 */
2874 if (l2pte_pa(opte) != pa) 2881 if (l2pte_pa(opte) != pa)
2875 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 2882 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
2876 else 2883 else
2877 opg = pg; 2884 opg = pg;
2878 } else 2885 } else
2879 opg = NULL; 2886 opg = NULL;
2880 2887
2881 if (pg) { 2888 if (pg) {
2882 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 2889 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
2883 2890
2884 /* 2891 /*
2885 * This is to be a managed mapping. 2892 * This is to be a managed mapping.
2886 */ 2893 */
2887 if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) { 2894 if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) {
2888 /* 2895 /*
2889 * - The access type indicates that we don't need 2896 * - The access type indicates that we don't need
2890 * to do referenced emulation. 2897 * to do referenced emulation.
2891 * OR 2898 * OR
2892 * - The physical page has already been referenced 2899 * - The physical page has already been referenced
2893 * so no need to re-do referenced emulation here. 2900 * so no need to re-do referenced emulation here.
2894 */ 2901 */
2895 npte |= l2pte_set_readonly(L2_S_PROTO); 2902 npte |= l2pte_set_readonly(L2_S_PROTO);
2896 2903
2897 nflags |= PVF_REF; 2904 nflags |= PVF_REF;
2898 2905
2899 if ((prot & VM_PROT_WRITE) != 0 && 2906 if ((prot & VM_PROT_WRITE) != 0 &&
2900 ((flags & VM_PROT_WRITE) != 0 || 2907 ((flags & VM_PROT_WRITE) != 0 ||
2901 (md->pvh_attrs & PVF_MOD) != 0)) { 2908 (md->pvh_attrs & PVF_MOD) != 0)) {
2902 /* 2909 /*
2903 * This is a writable mapping, and the 2910 * This is a writable mapping, and the
2904 * page's mod state indicates it has 2911 * page's mod state indicates it has
2905 * already been modified. Make it 2912 * already been modified. Make it
2906 * writable from the outset. 2913 * writable from the outset.
2907 */ 2914 */
2908 npte = l2pte_set_writable(npte); 2915 npte = l2pte_set_writable(npte);
2909 nflags |= PVF_MOD; 2916 nflags |= PVF_MOD;
2910 } 2917 }
2911 } else { 2918 } else {
2912 /* 2919 /*
2913 * Need to do page referenced emulation. 2920 * Need to do page referenced emulation.
2914 */ 2921 */
2915 npte |= L2_TYPE_INV; 2922 npte |= L2_TYPE_INV;
2916 } 2923 }
2917 2924
2918 if (flags & ARM32_MMAP_WRITECOMBINE) { 2925 if (flags & ARM32_MMAP_WRITECOMBINE) {
2919 npte |= pte_l2_s_wc_mode; 2926 npte |= pte_l2_s_wc_mode;
2920 } else 2927 } else
2921 npte |= pte_l2_s_cache_mode; 2928 npte |= pte_l2_s_cache_mode;
2922 2929
2923 if (pg == opg) { 2930 if (pg == opg) {
2924 /* 2931 /*
2925 * We're changing the attrs of an existing mapping. 2932 * We're changing the attrs of an existing mapping.
2926 */ 2933 */
2927#ifdef MULTIPROCESSOR 2934#ifdef MULTIPROCESSOR
2928 KASSERT(uvm_page_locked_p(pg)); 2935 KASSERT(uvm_page_locked_p(pg));
2929#endif 2936#endif
2930 oflags = pmap_modify_pv(md, pa, pm, va, 2937 oflags = pmap_modify_pv(md, pa, pm, va,
2931 PVF_WRITE | PVF_EXEC | PVF_WIRED | 2938 PVF_WRITE | PVF_EXEC | PVF_WIRED |
2932 PVF_MOD | PVF_REF, nflags); 2939 PVF_MOD | PVF_REF, nflags);
2933 2940
2934#ifdef PMAP_CACHE_VIVT 2941#ifdef PMAP_CACHE_VIVT
2935 /* 2942 /*
2936 * We may need to flush the cache if we're 2943 * We may need to flush the cache if we're
2937 * doing rw-ro... 2944 * doing rw-ro...
2938 */ 2945 */
2939 if (pm->pm_cstate.cs_cache_d && 2946 if (pm->pm_cstate.cs_cache_d &&
2940 (oflags & PVF_NC) == 0 && 2947 (oflags & PVF_NC) == 0 &&
2941 l2pte_writable_p(opte) && 2948 l2pte_writable_p(opte) &&
2942 (prot & VM_PROT_WRITE) == 0) 2949 (prot & VM_PROT_WRITE) == 0)
2943 cpu_dcache_wb_range(va, PAGE_SIZE); 2950 cpu_dcache_wb_range(va, PAGE_SIZE);
2944#endif 2951#endif
2945 } else { 2952 } else {
2946 /* 2953 /*
2947 * New mapping, or changing the backing page 2954 * New mapping, or changing the backing page
2948 * of an existing mapping. 2955 * of an existing mapping.
2949 */ 2956 */
2950 if (opg) { 2957 if (opg) {
2951 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 2958 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
2952 paddr_t opa = VM_PAGE_TO_PHYS(opg); 2959 paddr_t opa = VM_PAGE_TO_PHYS(opg);
2953 2960
2954 /* 2961 /*
2955 * Replacing an existing mapping with a new one. 2962 * Replacing an existing mapping with a new one.
2956 * It is part of our managed memory so we 2963 * It is part of our managed memory so we
2957 * must remove it from the PV list 2964 * must remove it from the PV list
2958 */ 2965 */
2959#ifdef MULTIPROCESSOR 2966#ifdef MULTIPROCESSOR
2960 KASSERT(uvm_page_locked_p(opg)); 2967 KASSERT(uvm_page_locked_p(opg));
2961#endif 2968#endif
2962 pv = pmap_remove_pv(omd, opa, pm, va); 2969 pv = pmap_remove_pv(omd, opa, pm, va);
2963 pmap_vac_me_harder(omd, opa, pm, 0); 2970 pmap_vac_me_harder(omd, opa, pm, 0);
2964 oflags = pv->pv_flags; 2971 oflags = pv->pv_flags;
2965 2972
2966#ifdef PMAP_CACHE_VIVT 2973#ifdef PMAP_CACHE_VIVT
2967 /* 2974 /*
2968 * If the old mapping was valid (ref/mod 2975 * If the old mapping was valid (ref/mod
2969 * emulation creates 'invalid' mappings 2976 * emulation creates 'invalid' mappings
2970 * initially) then make sure to frob 2977 * initially) then make sure to frob
2971 * the cache. 2978 * the cache.
2972 */ 2979 */
2973 if ((oflags & PVF_NC) == 0 && 2980 if ((oflags & PVF_NC) == 0 &&
2974 l2pte_valid(opte)) { 2981 l2pte_valid(opte)) {
2975 if (PV_BEEN_EXECD(oflags)) { 2982 if (PV_BEEN_EXECD(oflags)) {
2976 pmap_idcache_wbinv_range(pm, va, 2983 pmap_idcache_wbinv_range(pm, va,
2977 PAGE_SIZE); 2984 PAGE_SIZE);
2978 } else 2985 } else
2979 if (PV_BEEN_REFD(oflags)) { 2986 if (PV_BEEN_REFD(oflags)) {
2980 pmap_dcache_wb_range(pm, va, 2987 pmap_dcache_wb_range(pm, va,
2981 PAGE_SIZE, true, 2988 PAGE_SIZE, true,
2982 (oflags & PVF_WRITE) == 0); 2989 (oflags & PVF_WRITE) == 0);
2983 } 2990 }
2984 } 2991 }
2985#endif 2992#endif
2986 } else 2993 } else
2987 if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){ 2994 if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
2988 if ((flags & PMAP_CANFAIL) == 0) 2995 if ((flags & PMAP_CANFAIL) == 0)
2989 panic("pmap_enter: no pv entries"); 2996 panic("pmap_enter: no pv entries");
2990 2997
2991 if (pm != pmap_kernel()) 2998 if (pm != pmap_kernel())
2992 pmap_free_l2_bucket(pm, l2b, 0); 2999 pmap_free_l2_bucket(pm, l2b, 0);
2993 pmap_release_pmap_lock(pm); 3000 pmap_release_pmap_lock(pm);
2994 NPDEBUG(PDB_ENTER, 3001 NPDEBUG(PDB_ENTER,
2995 printf("pmap_enter: ENOMEM\n")); 3002 printf("pmap_enter: ENOMEM\n"));
2996 return (ENOMEM); 3003 return (ENOMEM);
2997 } 3004 }
2998 3005
2999#ifdef MULTIPROCESSOR 3006#ifdef MULTIPROCESSOR
3000 KASSERT(uvm_page_locked_p(pg)); 3007 KASSERT(uvm_page_locked_p(pg));
3001#endif 3008#endif
3002 pmap_enter_pv(md, pa, pv, pm, va, nflags); 3009 pmap_enter_pv(md, pa, pv, pm, va, nflags);
3003 } 3010 }
3004 } else { 3011 } else {
3005 /* 3012 /*
3006 * We're mapping an unmanaged page. 3013 * We're mapping an unmanaged page.
3007 * These are always readable, and possibly writable, from 3014 * These are always readable, and possibly writable, from
3008 * the get go as we don't need to track ref/mod status. 3015 * the get go as we don't need to track ref/mod status.
3009 */ 3016 */
3010 npte |= l2pte_set_readonly(L2_S_PROTO); 3017 npte |= l2pte_set_readonly(L2_S_PROTO);
3011 if (prot & VM_PROT_WRITE) 3018 if (prot & VM_PROT_WRITE)
3012 npte = l2pte_set_writable(npte); 3019 npte = l2pte_set_writable(npte);
3013 3020
3014 /* 3021 /*
3015 * Make sure the vector table is mapped cacheable 3022 * Make sure the vector table is mapped cacheable
3016 */ 3023 */
3017 if ((pm != pmap_kernel() && va == vector_page) || 3024 if ((vector_page_p && pm != pmap_kernel())
3018 (flags & ARM32_MMAP_CACHEABLE)) { 3025 || (flags & ARM32_MMAP_CACHEABLE)) {
3019 npte |= pte_l2_s_cache_mode; 3026 npte |= pte_l2_s_cache_mode;
3020 } else if (flags & ARM32_MMAP_WRITECOMBINE) { 3027 } else if (flags & ARM32_MMAP_WRITECOMBINE) {
3021 npte |= pte_l2_s_wc_mode; 3028 npte |= pte_l2_s_wc_mode;
3022 } 3029 }
3023 if (opg) { 3030 if (opg) {
3024 /* 3031 /*
3025 * Looks like there's an existing 'managed' mapping 3032 * Looks like there's an existing 'managed' mapping
3026 * at this address. 3033 * at this address.
3027 */ 3034 */
3028 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3035 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3029 paddr_t opa = VM_PAGE_TO_PHYS(opg); 3036 paddr_t opa = VM_PAGE_TO_PHYS(opg);
3030 3037
3031#ifdef MULTIPROCESSOR 3038#ifdef MULTIPROCESSOR
3032 KASSERT(uvm_page_locked_p(opg)); 3039 KASSERT(uvm_page_locked_p(opg));
3033#endif 3040#endif
3034 pv = pmap_remove_pv(omd, opa, pm, va); 3041 pv = pmap_remove_pv(omd, opa, pm, va);
3035 pmap_vac_me_harder(omd, opa, pm, 0); 3042 pmap_vac_me_harder(omd, opa, pm, 0);
3036 oflags = pv->pv_flags; 3043 oflags = pv->pv_flags;
3037 3044
3038#ifdef PMAP_CACHE_VIVT 3045#ifdef PMAP_CACHE_VIVT
3039 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { 3046 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) {
3040 if (PV_BEEN_EXECD(oflags)) 3047 if (PV_BEEN_EXECD(oflags))
3041 pmap_idcache_wbinv_range(pm, va, 3048 pmap_idcache_wbinv_range(pm, va,
3042 PAGE_SIZE); 3049 PAGE_SIZE);
3043 else 3050 else
3044 if (PV_BEEN_REFD(oflags)) 3051 if (PV_BEEN_REFD(oflags))
3045 pmap_dcache_wb_range(pm, va, PAGE_SIZE, 3052 pmap_dcache_wb_range(pm, va, PAGE_SIZE,
3046 true, (oflags & PVF_WRITE) == 0); 3053 true, (oflags & PVF_WRITE) == 0);
3047 } 3054 }
3048#endif 3055#endif
3049 pool_put(&pmap_pv_pool, pv); 3056 pool_put(&pmap_pv_pool, pv);
3050 } 3057 }
3051 } 3058 }
3052 3059
3053 /* 3060 /*
3054 * Make sure userland mappings get the right permissions 3061 * Make sure userland mappings get the right permissions
3055 */ 3062 */
3056 if (pm != pmap_kernel() && va != vector_page) 3063 if (!vector_page_p && pm != pmap_kernel()) {
3057 npte |= L2_S_PROT_U; 3064 npte |= L2_S_PROT_U;
 3065 }
3058 3066
3059 /* 3067 /*
3060 * Keep the stats up to date 3068 * Keep the stats up to date
3061 */ 3069 */
3062 if (opte == 0) { 3070 if (opte == 0) {
3063 l2b->l2b_occupancy++; 3071 l2b->l2b_occupancy++;
3064 pm->pm_stats.resident_count++; 3072 pm->pm_stats.resident_count++;
3065 }  3073 }
3066 3074
3067 NPDEBUG(PDB_ENTER, 3075 NPDEBUG(PDB_ENTER,
3068 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte)); 3076 printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
3069 3077
3070 /* 3078 /*
3071 * If this is just a wiring change, the two PTEs will be 3079 * If this is just a wiring change, the two PTEs will be
3072 * identical, so there's no need to update the page table. 3080 * identical, so there's no need to update the page table.
3073 */ 3081 */
3074 if (npte != opte) { 3082 if (npte != opte) {
3075 bool is_cached = pmap_is_cached(pm); 3083 bool is_cached = pmap_is_cached(pm);
3076 3084
3077 *ptep = npte; 3085 *ptep = npte;
3078 PTE_SYNC(ptep); 3086 PTE_SYNC(ptep);
3079 if (is_cached) { 3087 if (is_cached) {
3080 /* 3088 /*
3081 * We only need to frob the cache/tlb if this pmap 3089 * We only need to frob the cache/tlb if this pmap
3082 * is current 3090 * is current
3083 */ 3091 */
3084 if (va != vector_page && l2pte_valid(npte)) { 3092 if (!vector_page_p && l2pte_valid(npte)) {
3085 /* 3093 /*
3086 * This mapping is likely to be accessed as 3094 * This mapping is likely to be accessed as
3087 * soon as we return to userland. Fix up the 3095 * soon as we return to userland. Fix up the
3088 * L1 entry to avoid taking another 3096 * L1 entry to avoid taking another
3089 * page/domain fault. 3097 * page/domain fault.
3090 */ 3098 */
3091 pd_entry_t *pl1pd, l1pd; 3099 pd_entry_t *pl1pd, l1pd;
3092 3100
3093 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)]; 3101 pl1pd = &pm->pm_l1->l1_kva[L1_IDX(va)];
3094 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | 3102 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) |
3095 L1_C_PROTO; 3103 L1_C_PROTO;
3096 if (*pl1pd != l1pd) { 3104 if (*pl1pd != l1pd) {
3097 *pl1pd = l1pd; 3105 *pl1pd = l1pd;
3098 PTE_SYNC(pl1pd); 3106 PTE_SYNC(pl1pd);
3099 } 3107 }
3100 } 3108 }
3101 } 3109 }
3102 3110
3103 if (PV_BEEN_EXECD(oflags)) 3111 if (PV_BEEN_EXECD(oflags))
3104 pmap_tlb_flushID_SE(pm, va); 3112 pmap_tlb_flushID_SE(pm, va);
3105 else 3113 else
3106 if (PV_BEEN_REFD(oflags)) 3114 if (PV_BEEN_REFD(oflags))
3107 pmap_tlb_flushD_SE(pm, va); 3115 pmap_tlb_flushD_SE(pm, va);
3108 3116
3109 NPDEBUG(PDB_ENTER, 3117 NPDEBUG(PDB_ENTER,
3110 printf("pmap_enter: is_cached %d cs 0x%08x\n", 3118 printf("pmap_enter: is_cached %d cs 0x%08x\n",
3111 is_cached, pm->pm_cstate.cs_all)); 3119 is_cached, pm->pm_cstate.cs_all));
3112 3120
3113 if (pg != NULL) { 3121 if (pg != NULL) {
3114 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3122 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3115 3123
3116#ifdef MULTIPROCESSOR 3124#ifdef MULTIPROCESSOR
3117 KASSERT(uvm_page_locked_p(pg)); 3125 KASSERT(uvm_page_locked_p(pg));
3118#endif 3126#endif
3119 pmap_vac_me_harder(md, pa, pm, va); 3127 pmap_vac_me_harder(md, pa, pm, va);
3120 } 3128 }
3121 } 3129 }
3122#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC) 3130#if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
3123 if (pg) { 3131 if (pg) {
3124 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3132 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3125 3133
3126#ifdef MULTIPROCESSOR 3134#ifdef MULTIPROCESSOR
3127 KASSERT(uvm_page_locked_p(pg)); 3135 KASSERT(uvm_page_locked_p(pg));
3128#endif 3136#endif
3129 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3137 KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3130 PMAP_VALIDATE_MD_PAGE(md); 3138 PMAP_VALIDATE_MD_PAGE(md);
3131 } 3139 }
3132#endif 3140#endif
3133 3141
3134 pmap_release_pmap_lock(pm); 3142 pmap_release_pmap_lock(pm);
3135 3143
3136 return (0); 3144 return (0);
3137} 3145}
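
The vector_page_p constant introduced at the top of pmap_enter() above is where ARM_HAS_VBAR shows up in this file: when the option is defined it is compile-time false, so the vector-page special cases later in the function (the cacheable-vector test and the L2_S_PROT_U check) collapse to dead code. A hedged sketch of the same fold as a stand-alone helper; the function name is hypothetical, vector_page is the existing global:

	static inline bool
	pmap_va_is_vector_page(vaddr_t va)	/* hypothetical helper */
	{
	#ifdef ARM_HAS_VBAR
		/* vectors are reached through VBAR, no special page to map */
		return false;
	#else
		/* classic ARM: the high/low vector page needs special care */
		return va == vector_page;
	#endif
	}
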
3138 3146
3139/* 3147/*
3140 * pmap_remove() 3148 * pmap_remove()
3141 * 3149 *
3142 * pmap_remove is responsible for nuking a number of mappings for a range 3150 * pmap_remove is responsible for nuking a number of mappings for a range
3143 * of virtual address space in the current pmap. To do this efficiently 3151 * of virtual address space in the current pmap. To do this efficiently
3144 * is interesting, because in a number of cases a wide virtual address 3152 * is interesting, because in a number of cases a wide virtual address
3145 * range may be supplied that contains few actual mappings. So, the 3153 * range may be supplied that contains few actual mappings. So, the
3146 * optimisations are: 3154 * optimisations are:
3147 * 1. Skip over hunks of address space for which no L1 or L2 entry exists. 3155 * 1. Skip over hunks of address space for which no L1 or L2 entry exists.
3148 * 2. Build up a list of pages we've hit, up to a maximum, so we can 3156 * 2. Build up a list of pages we've hit, up to a maximum, so we can
3149 * maybe do just a partial cache clean. This path of execution is 3157 * maybe do just a partial cache clean. This path of execution is
3150 * complicated by the fact that the cache must be flushed _before_ 3158 * complicated by the fact that the cache must be flushed _before_
3151 * the PTE is nuked, being a VAC :-) 3159 * the PTE is nuked, being a VAC :-)
3152 * 3. If we're called after UVM calls pmap_remove_all(), we can defer 3160 * 3. If we're called after UVM calls pmap_remove_all(), we can defer
3153 * all invalidations until pmap_update(), since pmap_remove_all() has 3161 * all invalidations until pmap_update(), since pmap_remove_all() has
3154 * already flushed the cache. 3162 * already flushed the cache.
3155 * 4. Maybe later fast-case a single page, but I don't think this is 3163 * 4. Maybe later fast-case a single page, but I don't think this is
3156 * going to make _that_ much difference overall. 3164 * going to make _that_ much difference overall.
3157 */ 3165 */
3158 3166
3159#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3167#define PMAP_REMOVE_CLEAN_LIST_SIZE 3
3160 3168
3161void 3169void
3162pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva) 3170pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
3163{ 3171{
3164 struct l2_bucket *l2b; 3172 struct l2_bucket *l2b;
3165 vaddr_t next_bucket; 3173 vaddr_t next_bucket;
3166 pt_entry_t *ptep; 3174 pt_entry_t *ptep;
3167 u_int cleanlist_idx, total, cnt; 3175 u_int cleanlist_idx, total, cnt;
3168 struct { 3176 struct {
3169 vaddr_t va; 3177 vaddr_t va;
3170 pt_entry_t *ptep; 3178 pt_entry_t *ptep;
3171 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3179 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
3172 u_int mappings, is_exec, is_refd; 3180 u_int mappings, is_exec, is_refd;
3173 3181
3174 NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx " 3182 NPDEBUG(PDB_REMOVE, printf("pmap_do_remove: pmap=%p sva=%08lx "
3175 "eva=%08lx\n", pm, sva, eva)); 3183 "eva=%08lx\n", pm, sva, eva));
3176 3184
3177 /* 3185 /*
3178 * we lock in the pmap => pv_head direction 3186 * we lock in the pmap => pv_head direction
3179 */ 3187 */
3180 pmap_acquire_pmap_lock(pm); 3188 pmap_acquire_pmap_lock(pm);
3181 3189
3182 if (pm->pm_remove_all || !pmap_is_cached(pm)) { 3190 if (pm->pm_remove_all || !pmap_is_cached(pm)) {
3183 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3191 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3184 if (pm->pm_cstate.cs_tlb == 0) 3192 if (pm->pm_cstate.cs_tlb == 0)
3185 pm->pm_remove_all = true; 3193 pm->pm_remove_all = true;
3186 } else 3194 } else
3187 cleanlist_idx = 0; 3195 cleanlist_idx = 0;
3188 3196
3189 total = 0; 3197 total = 0;
3190 3198
3191 while (sva < eva) { 3199 while (sva < eva) {
3192 /* 3200 /*
3193 * Do one L2 bucket's worth at a time. 3201 * Do one L2 bucket's worth at a time.
3194 */ 3202 */
3195 next_bucket = L2_NEXT_BUCKET(sva); 3203 next_bucket = L2_NEXT_BUCKET(sva);
3196 if (next_bucket > eva) 3204 if (next_bucket > eva)
3197 next_bucket = eva; 3205 next_bucket = eva;
3198 3206
3199 l2b = pmap_get_l2_bucket(pm, sva); 3207 l2b = pmap_get_l2_bucket(pm, sva);
3200 if (l2b == NULL) { 3208 if (l2b == NULL) {
3201 sva = next_bucket; 3209 sva = next_bucket;
3202 continue; 3210 continue;
3203 } 3211 }
3204 3212
3205 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3213 ptep = &l2b->l2b_kva[l2pte_index(sva)];
3206 3214
3207 for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){ 3215 for (mappings = 0; sva < next_bucket; sva += PAGE_SIZE, ptep++){
3208 struct vm_page *pg; 3216 struct vm_page *pg;
3209 pt_entry_t pte; 3217 pt_entry_t pte;
3210 paddr_t pa; 3218 paddr_t pa;
3211 3219
3212 pte = *ptep; 3220 pte = *ptep;
3213 3221
3214 if (pte == 0) { 3222 if (pte == 0) {
3215 /* Nothing here, move along */ 3223 /* Nothing here, move along */
3216 continue; 3224 continue;
3217 } 3225 }
3218 3226
3219 pa = l2pte_pa(pte); 3227 pa = l2pte_pa(pte);
3220 is_exec = 0; 3228 is_exec = 0;
3221 is_refd = 1; 3229 is_refd = 1;
3222 3230
3223 /* 3231 /*
3224 * Update flags. In a number of circumstances, 3232 * Update flags. In a number of circumstances,
3225 * we could cluster a lot of these and do a 3233 * we could cluster a lot of these and do a
3226 * number of sequential pages in one go. 3234 * number of sequential pages in one go.
3227 */ 3235 */
3228 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 3236 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3229 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3237 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3230 struct pv_entry *pv; 3238 struct pv_entry *pv;
3231 3239
3232#ifdef MULTIPROCESSOR 3240#ifdef MULTIPROCESSOR
3233 KASSERT(uvm_page_locked_p(pg)); 3241 KASSERT(uvm_page_locked_p(pg));
3234#endif 3242#endif
3235 pv = pmap_remove_pv(md, pa, pm, sva); 3243 pv = pmap_remove_pv(md, pa, pm, sva);
3236 pmap_vac_me_harder(md, pa, pm, 0); 3244 pmap_vac_me_harder(md, pa, pm, 0);
3237 if (pv != NULL) { 3245 if (pv != NULL) {
3238 if (pm->pm_remove_all == false) { 3246 if (pm->pm_remove_all == false) {
3239 is_exec = 3247 is_exec =
3240 PV_BEEN_EXECD(pv->pv_flags); 3248 PV_BEEN_EXECD(pv->pv_flags);
3241 is_refd = 3249 is_refd =
3242 PV_BEEN_REFD(pv->pv_flags); 3250 PV_BEEN_REFD(pv->pv_flags);
3243 } 3251 }
3244 pool_put(&pmap_pv_pool, pv); 3252 pool_put(&pmap_pv_pool, pv);
3245 } 3253 }
3246 } 3254 }
3247 mappings++; 3255 mappings++;
3248 3256
3249 if (!l2pte_valid(pte)) { 3257 if (!l2pte_valid(pte)) {
3250 /* 3258 /*
3251 * Ref/Mod emulation is still active for this 3259 * Ref/Mod emulation is still active for this
 3252 * mapping, therefore it has not yet been 3260 * mapping, therefore it has not yet been
3253 * accessed. No need to frob the cache/tlb. 3261 * accessed. No need to frob the cache/tlb.
3254 */ 3262 */
3255 *ptep = 0; 3263 *ptep = 0;
3256 PTE_SYNC_CURRENT(pm, ptep); 3264 PTE_SYNC_CURRENT(pm, ptep);
3257 continue; 3265 continue;
3258 } 3266 }
3259 3267
3260 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { 3268 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3261 /* Add to the clean list. */ 3269 /* Add to the clean list. */
3262 cleanlist[cleanlist_idx].ptep = ptep; 3270 cleanlist[cleanlist_idx].ptep = ptep;
3263 cleanlist[cleanlist_idx].va = 3271 cleanlist[cleanlist_idx].va =
3264 sva | (is_exec & 1); 3272 sva | (is_exec & 1);
3265 cleanlist_idx++; 3273 cleanlist_idx++;
3266 } else 3274 } else
3267 if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { 3275 if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3268 /* Nuke everything if needed. */ 3276 /* Nuke everything if needed. */
3269#ifdef PMAP_CACHE_VIVT 3277#ifdef PMAP_CACHE_VIVT
3270 pmap_idcache_wbinv_all(pm); 3278 pmap_idcache_wbinv_all(pm);
3271#endif 3279#endif
3272 pmap_tlb_flushID(pm); 3280 pmap_tlb_flushID(pm);
3273 3281
3274 /* 3282 /*
3275 * Roll back the previous PTE list, 3283 * Roll back the previous PTE list,
3276 * and zero out the current PTE. 3284 * and zero out the current PTE.
3277 */ 3285 */
3278 for (cnt = 0; 3286 for (cnt = 0;
3279 cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { 3287 cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
3280 *cleanlist[cnt].ptep = 0; 3288 *cleanlist[cnt].ptep = 0;
3281 PTE_SYNC(cleanlist[cnt].ptep); 3289 PTE_SYNC(cleanlist[cnt].ptep);
3282 } 3290 }
3283 *ptep = 0; 3291 *ptep = 0;
3284 PTE_SYNC(ptep); 3292 PTE_SYNC(ptep);
3285 cleanlist_idx++; 3293 cleanlist_idx++;
3286 pm->pm_remove_all = true; 3294 pm->pm_remove_all = true;
3287 } else { 3295 } else {
3288 *ptep = 0; 3296 *ptep = 0;
3289 PTE_SYNC(ptep); 3297 PTE_SYNC(ptep);
3290 if (pm->pm_remove_all == false) { 3298 if (pm->pm_remove_all == false) {
3291 if (is_exec) 3299 if (is_exec)
3292 pmap_tlb_flushID_SE(pm, sva); 3300 pmap_tlb_flushID_SE(pm, sva);
3293 else 3301 else
3294 if (is_refd) 3302 if (is_refd)
3295 pmap_tlb_flushD_SE(pm, sva); 3303 pmap_tlb_flushD_SE(pm, sva);
3296 } 3304 }
3297 } 3305 }
3298 } 3306 }
3299 3307
3300 /* 3308 /*
3301 * Deal with any left overs 3309 * Deal with any left overs
3302 */ 3310 */
3303 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { 3311 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
3304 total += cleanlist_idx; 3312 total += cleanlist_idx;
3305 for (cnt = 0; cnt < cleanlist_idx; cnt++) { 3313 for (cnt = 0; cnt < cleanlist_idx; cnt++) {
3306 if (pm->pm_cstate.cs_all != 0) { 3314 if (pm->pm_cstate.cs_all != 0) {
3307 vaddr_t clva = cleanlist[cnt].va & ~1; 3315 vaddr_t clva = cleanlist[cnt].va & ~1;
3308 if (cleanlist[cnt].va & 1) { 3316 if (cleanlist[cnt].va & 1) {
3309#ifdef PMAP_CACHE_VIVT 3317#ifdef PMAP_CACHE_VIVT
3310 pmap_idcache_wbinv_range(pm, 3318 pmap_idcache_wbinv_range(pm,
3311 clva, PAGE_SIZE); 3319 clva, PAGE_SIZE);
3312#endif 3320#endif
3313 pmap_tlb_flushID_SE(pm, clva); 3321 pmap_tlb_flushID_SE(pm, clva);
3314 } else { 3322 } else {
3315#ifdef PMAP_CACHE_VIVT 3323#ifdef PMAP_CACHE_VIVT
3316 pmap_dcache_wb_range(pm, 3324 pmap_dcache_wb_range(pm,
3317 clva, PAGE_SIZE, true, 3325 clva, PAGE_SIZE, true,
3318 false); 3326 false);
3319#endif 3327#endif
3320 pmap_tlb_flushD_SE(pm, clva); 3328 pmap_tlb_flushD_SE(pm, clva);
3321 } 3329 }
3322 } 3330 }
3323 *cleanlist[cnt].ptep = 0; 3331 *cleanlist[cnt].ptep = 0;
3324 PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep); 3332 PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
3325 } 3333 }
3326 3334
3327 /* 3335 /*
3328 * If it looks like we're removing a whole bunch 3336 * If it looks like we're removing a whole bunch
3329 * of mappings, it's faster to just write-back 3337 * of mappings, it's faster to just write-back
3330 * the whole cache now and defer TLB flushes until 3338 * the whole cache now and defer TLB flushes until
3331 * pmap_update() is called. 3339 * pmap_update() is called.
3332 */ 3340 */
3333 if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) 3341 if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
3334 cleanlist_idx = 0; 3342 cleanlist_idx = 0;
3335 else { 3343 else {
3336 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3344 cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
3337#ifdef PMAP_CACHE_VIVT 3345#ifdef PMAP_CACHE_VIVT
3338 pmap_idcache_wbinv_all(pm); 3346 pmap_idcache_wbinv_all(pm);
3339#endif 3347#endif
3340 pm->pm_remove_all = true; 3348 pm->pm_remove_all = true;
3341 } 3349 }
3342 } 3350 }
3343 3351
3344 pmap_free_l2_bucket(pm, l2b, mappings); 3352 pmap_free_l2_bucket(pm, l2b, mappings);
3345 pm->pm_stats.resident_count -= mappings; 3353 pm->pm_stats.resident_count -= mappings;
3346 } 3354 }
3347 3355
3348 pmap_release_pmap_lock(pm); 3356 pmap_release_pmap_lock(pm);
3349} 3357}
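
pmap_remove() above batches at most PMAP_REMOVE_CLEAN_LIST_SIZE live mappings per L2 bucket so each can get an individual cache write-back and TLB shootdown; once that budget is exceeded it gives up, flushes the whole pmap once and defers further TLB invalidation to pmap_update(). A condensed sketch of that decision using the same names as above (the roll-back of the already-queued PTEs is omitted here):

	if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* cheap case: queue the PTE, flush it per page afterwards */
		cleanlist[cleanlist_idx].ptep = ptep;
		cleanlist[cleanlist_idx].va = sva | (is_exec & 1);
		cleanlist_idx++;
	} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* budget blown: one bulk flush, defer TLB work to pmap_update() */
		pmap_tlb_flushID(pm);
		pm->pm_remove_all = true;
		cleanlist_idx++;
	} else {
		/* already in bulk mode: just clear the PTE */
		*ptep = 0;
		PTE_SYNC(ptep);
	}
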
3350 3358
3351#ifdef PMAP_CACHE_VIPT 3359#ifdef PMAP_CACHE_VIPT
3352static struct pv_entry * 3360static struct pv_entry *
3353pmap_kremove_pg(struct vm_page *pg, vaddr_t va) 3361pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
3354{ 3362{
3355 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3363 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3356 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3364 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3357 struct pv_entry *pv; 3365 struct pv_entry *pv;
3358 3366
3359 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC)); 3367 KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
3360 KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0); 3368 KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
3361 3369
3362 pv = pmap_remove_pv(md, pa, pmap_kernel(), va); 3370 pv = pmap_remove_pv(md, pa, pmap_kernel(), va);
3363 KASSERT(pv); 3371 KASSERT(pv);
3364 KASSERT(pv->pv_flags & PVF_KENTRY); 3372 KASSERT(pv->pv_flags & PVF_KENTRY);
3365 3373
3366 /* 3374 /*
3367 * If we are removing a writeable mapping to a cached exec page, 3375 * If we are removing a writeable mapping to a cached exec page,
 3368 * if it's the last mapping then clear its execness, otherwise sync 3376 * if it's the last mapping then clear its execness, otherwise sync
3369 * the page to the icache. 3377 * the page to the icache.
3370 */ 3378 */
3371 if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC 3379 if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
3372 && (pv->pv_flags & PVF_WRITE) != 0) { 3380 && (pv->pv_flags & PVF_WRITE) != 0) {
3373 if (SLIST_EMPTY(&md->pvh_list)) { 3381 if (SLIST_EMPTY(&md->pvh_list)) {
3374 md->pvh_attrs &= ~PVF_EXEC; 3382 md->pvh_attrs &= ~PVF_EXEC;
3375 PMAPCOUNT(exec_discarded_kremove); 3383 PMAPCOUNT(exec_discarded_kremove);
3376 } else { 3384 } else {
3377 pmap_syncicache_page(md, pa); 3385 pmap_syncicache_page(md, pa);
3378 PMAPCOUNT(exec_synced_kremove); 3386 PMAPCOUNT(exec_synced_kremove);
3379 } 3387 }
3380 } 3388 }
3381 pmap_vac_me_harder(md, pa, pmap_kernel(), 0); 3389 pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
3382 3390
3383 return pv; 3391 return pv;
3384} 3392}
3385#endif /* PMAP_CACHE_VIPT */ 3393#endif /* PMAP_CACHE_VIPT */
3386 3394
3387/* 3395/*
3388 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping 3396 * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
3389 * 3397 *
3390 * We assume there is already sufficient KVM space available 3398 * We assume there is already sufficient KVM space available
3391 * to do this, as we can't allocate L2 descriptor tables/metadata 3399 * to do this, as we can't allocate L2 descriptor tables/metadata
3392 * from here. 3400 * from here.
3393 */ 3401 */
3394void 3402void
3395pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) 3403pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
3396{ 3404{
3397 struct l2_bucket *l2b; 3405 struct l2_bucket *l2b;
3398 pt_entry_t *ptep, opte; 3406 pt_entry_t *ptep, opte;
3399#ifdef PMAP_CACHE_VIVT 3407#ifdef PMAP_CACHE_VIVT
3400 struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL; 3408 struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
3401#endif 3409#endif
3402#ifdef PMAP_CACHE_VIPT 3410#ifdef PMAP_CACHE_VIPT
3403 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 3411 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3404 struct vm_page *opg; 3412 struct vm_page *opg;
3405 struct pv_entry *pv = NULL; 3413 struct pv_entry *pv = NULL;
3406#endif 3414#endif
3407 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3415 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3408 3416
3409 NPDEBUG(PDB_KENTER, 3417 NPDEBUG(PDB_KENTER,
3410 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n", 3418 printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
3411 va, pa, prot)); 3419 va, pa, prot));
3412 3420
3413 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3421 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3414 KDASSERT(l2b != NULL); 3422 KDASSERT(l2b != NULL);
3415 3423
3416 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3424 ptep = &l2b->l2b_kva[l2pte_index(va)];
3417 opte = *ptep; 3425 opte = *ptep;
3418 3426
3419 if (opte == 0) { 3427 if (opte == 0) {
3420 PMAPCOUNT(kenter_mappings); 3428 PMAPCOUNT(kenter_mappings);
3421 l2b->l2b_occupancy++; 3429 l2b->l2b_occupancy++;
3422 } else { 3430 } else {
3423 PMAPCOUNT(kenter_remappings); 3431 PMAPCOUNT(kenter_remappings);
3424#ifdef PMAP_CACHE_VIPT 3432#ifdef PMAP_CACHE_VIPT
3425 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3433 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3426#ifdef DIAGNOSTIC 3434#ifdef DIAGNOSTIC
3427 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3435 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3428#endif 3436#endif
3429 if (opg) { 3437 if (opg) {
3430 KASSERT(opg != pg); 3438 KASSERT(opg != pg);
3431 KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0); 3439 KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
3432 KASSERT((flags & PMAP_KMPAGE) == 0); 3440 KASSERT((flags & PMAP_KMPAGE) == 0);
3433 pv = pmap_kremove_pg(opg, va); 3441 pv = pmap_kremove_pg(opg, va);
3434 } 3442 }
3435#endif 3443#endif
3436 if (l2pte_valid(opte)) { 3444 if (l2pte_valid(opte)) {
3437#ifdef PMAP_CACHE_VIVT 3445#ifdef PMAP_CACHE_VIVT
3438 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3446 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3439#endif 3447#endif
3440 cpu_tlb_flushD_SE(va); 3448 cpu_tlb_flushD_SE(va);
3441 cpu_cpwait(); 3449 cpu_cpwait();
3442 } 3450 }
3443 } 3451 }
3444 3452
3445 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) 3453 *ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
3446 | ((flags & PMAP_NOCACHE) ? 0 : pte_l2_s_cache_mode); 3454 | ((flags & PMAP_NOCACHE) ? 0 : pte_l2_s_cache_mode);
3447 PTE_SYNC(ptep); 3455 PTE_SYNC(ptep);
3448 3456
3449 if (pg) { 3457 if (pg) {
3450#ifdef MULTIPROCESSOR 3458#ifdef MULTIPROCESSOR
3451 KASSERT(uvm_page_locked_p(pg)); 3459 KASSERT(uvm_page_locked_p(pg));
3452#endif 3460#endif
3453 if (flags & PMAP_KMPAGE) { 3461 if (flags & PMAP_KMPAGE) {
3454 KASSERT(md->urw_mappings == 0); 3462 KASSERT(md->urw_mappings == 0);
3455 KASSERT(md->uro_mappings == 0); 3463 KASSERT(md->uro_mappings == 0);
3456 KASSERT(md->krw_mappings == 0); 3464 KASSERT(md->krw_mappings == 0);
3457 KASSERT(md->kro_mappings == 0); 3465 KASSERT(md->kro_mappings == 0);
3458#ifdef PMAP_CACHE_VIPT 3466#ifdef PMAP_CACHE_VIPT
3459 KASSERT(pv == NULL); 3467 KASSERT(pv == NULL);
3460 KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0); 3468 KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
3461 KASSERT((md->pvh_attrs & PVF_NC) == 0); 3469 KASSERT((md->pvh_attrs & PVF_NC) == 0);
3462 /* if there is a color conflict, evict from cache. */ 3470 /* if there is a color conflict, evict from cache. */
3463 if (pmap_is_page_colored_p(md) 3471 if (pmap_is_page_colored_p(md)
3464 && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) { 3472 && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) {
3465 PMAPCOUNT(vac_color_change); 3473 PMAPCOUNT(vac_color_change);
3466 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY); 3474 pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
3467 } else if (md->pvh_attrs & PVF_MULTCLR) { 3475 } else if (md->pvh_attrs & PVF_MULTCLR) {
3468 /* 3476 /*
3469 * If this page has multiple colors, expunge 3477 * If this page has multiple colors, expunge
3470 * them. 3478 * them.
3471 */ 3479 */
3472 PMAPCOUNT(vac_flush_lots2); 3480 PMAPCOUNT(vac_flush_lots2);
3473 pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY); 3481 pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY);
3474 } 3482 }
3475 md->pvh_attrs &= PAGE_SIZE - 1; 3483 md->pvh_attrs &= PAGE_SIZE - 1;
3476 md->pvh_attrs |= PVF_KMPAGE 3484 md->pvh_attrs |= PVF_KMPAGE
3477 | PVF_COLORED | PVF_DIRTY 3485 | PVF_COLORED | PVF_DIRTY
3478 | (va & arm_cache_prefer_mask); 3486 | (va & arm_cache_prefer_mask);
3479#endif 3487#endif
3480#ifdef PMAP_CACHE_VIVT 3488#ifdef PMAP_CACHE_VIVT
3481 md->pvh_attrs |= PVF_KMPAGE; 3489 md->pvh_attrs |= PVF_KMPAGE;
3482#endif 3490#endif
3483 pmap_kmpages++; 3491 pmap_kmpages++;
3484#ifdef PMAP_CACHE_VIPT 3492#ifdef PMAP_CACHE_VIPT
3485 } else { 3493 } else {
3486 if (pv == NULL) { 3494 if (pv == NULL) {
3487 pv = pool_get(&pmap_pv_pool, PR_NOWAIT); 3495 pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
3488 KASSERT(pv != NULL); 3496 KASSERT(pv != NULL);
3489 } 3497 }
3490 pmap_enter_pv(md, pa, pv, pmap_kernel(), va, 3498 pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
3491 PVF_WIRED | PVF_KENTRY 3499 PVF_WIRED | PVF_KENTRY
3492 | (prot & VM_PROT_WRITE ? PVF_WRITE : 0)); 3500 | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
3493 if ((prot & VM_PROT_WRITE) 3501 if ((prot & VM_PROT_WRITE)
3494 && !(md->pvh_attrs & PVF_NC)) 3502 && !(md->pvh_attrs & PVF_NC))
3495 md->pvh_attrs |= PVF_DIRTY; 3503 md->pvh_attrs |= PVF_DIRTY;
3496 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC))); 3504 KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
3497 pmap_vac_me_harder(md, pa, pmap_kernel(), va); 3505 pmap_vac_me_harder(md, pa, pmap_kernel(), va);
3498#endif 3506#endif
3499 } 3507 }
3500#ifdef PMAP_CACHE_VIPT 3508#ifdef PMAP_CACHE_VIPT
3501 } else { 3509 } else {
3502 if (pv != NULL) 3510 if (pv != NULL)
3503 pool_put(&pmap_pv_pool, pv); 3511 pool_put(&pmap_pv_pool, pv);
3504#endif 3512#endif
3505 } 3513 }
3506} 3514}
3507 3515
3508void 3516void
3509pmap_kremove(vaddr_t va, vsize_t len) 3517pmap_kremove(vaddr_t va, vsize_t len)
3510{ 3518{
3511 struct l2_bucket *l2b; 3519 struct l2_bucket *l2b;
3512 pt_entry_t *ptep, *sptep, opte; 3520 pt_entry_t *ptep, *sptep, opte;
3513 vaddr_t next_bucket, eva; 3521 vaddr_t next_bucket, eva;
3514 u_int mappings; 3522 u_int mappings;
3515 struct vm_page *opg; 3523 struct vm_page *opg;
3516 3524
3517 PMAPCOUNT(kenter_unmappings); 3525 PMAPCOUNT(kenter_unmappings);
3518 3526
3519 NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n", 3527 NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
3520 va, len)); 3528 va, len));
3521 3529
3522 eva = va + len; 3530 eva = va + len;
3523 3531
3524 while (va < eva) { 3532 while (va < eva) {
3525 next_bucket = L2_NEXT_BUCKET(va); 3533 next_bucket = L2_NEXT_BUCKET(va);
3526 if (next_bucket > eva) 3534 if (next_bucket > eva)
3527 next_bucket = eva; 3535 next_bucket = eva;
3528 3536
3529 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 3537 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
3530 KDASSERT(l2b != NULL); 3538 KDASSERT(l2b != NULL);
3531 3539
3532 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 3540 sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
3533 mappings = 0; 3541 mappings = 0;
3534 3542
3535 while (va < next_bucket) { 3543 while (va < next_bucket) {
3536 opte = *ptep; 3544 opte = *ptep;
3537 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3545 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
3538 if (opg) { 3546 if (opg) {
3539 struct vm_page_md *omd = VM_PAGE_TO_MD(opg); 3547 struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
3540 3548
3541 if (omd->pvh_attrs & PVF_KMPAGE) { 3549 if (omd->pvh_attrs & PVF_KMPAGE) {
3542 KASSERT(omd->urw_mappings == 0); 3550 KASSERT(omd->urw_mappings == 0);
3543 KASSERT(omd->uro_mappings == 0); 3551 KASSERT(omd->uro_mappings == 0);
3544 KASSERT(omd->krw_mappings == 0); 3552 KASSERT(omd->krw_mappings == 0);
3545 KASSERT(omd->kro_mappings == 0); 3553 KASSERT(omd->kro_mappings == 0);
3546 omd->pvh_attrs &= ~PVF_KMPAGE; 3554 omd->pvh_attrs &= ~PVF_KMPAGE;
3547#ifdef PMAP_CACHE_VIPT 3555#ifdef PMAP_CACHE_VIPT
3548 if (arm_cache_prefer_mask != 0) { 3556 if (arm_cache_prefer_mask != 0) {
3549 omd->pvh_attrs &= ~PVF_WRITE; 3557 omd->pvh_attrs &= ~PVF_WRITE;
3550 } 3558 }
3551#endif 3559#endif
3552 pmap_kmpages--; 3560 pmap_kmpages--;
3553#ifdef PMAP_CACHE_VIPT 3561#ifdef PMAP_CACHE_VIPT
3554 } else { 3562 } else {
3555 pool_put(&pmap_pv_pool, 3563 pool_put(&pmap_pv_pool,
3556 pmap_kremove_pg(opg, va)); 3564 pmap_kremove_pg(opg, va));
3557#endif 3565#endif
3558 } 3566 }
3559 } 3567 }
3560 if (l2pte_valid(opte)) { 3568 if (l2pte_valid(opte)) {
3561#ifdef PMAP_CACHE_VIVT 3569#ifdef PMAP_CACHE_VIVT
3562 cpu_dcache_wbinv_range(va, PAGE_SIZE); 3570 cpu_dcache_wbinv_range(va, PAGE_SIZE);
3563#endif 3571#endif
3564 cpu_tlb_flushD_SE(va); 3572 cpu_tlb_flushD_SE(va);
3565 } 3573 }
3566 if (opte) { 3574 if (opte) {
3567 *ptep = 0; 3575 *ptep = 0;
3568 mappings++; 3576 mappings++;
3569 } 3577 }
3570 va += PAGE_SIZE; 3578 va += PAGE_SIZE;
3571 ptep++; 3579 ptep++;
3572 } 3580 }
3573 KDASSERT(mappings <= l2b->l2b_occupancy); 3581 KDASSERT(mappings <= l2b->l2b_occupancy);
3574 l2b->l2b_occupancy -= mappings; 3582 l2b->l2b_occupancy -= mappings;
3575 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 3583 PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
3576 } 3584 }
3577 cpu_cpwait(); 3585 cpu_cpwait();
3578} 3586}
3579 3587
3580bool 3588bool
3581pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap) 3589pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
3582{ 3590{
3583 struct l2_dtable *l2; 3591 struct l2_dtable *l2;
3584 pd_entry_t *pl1pd, l1pd; 3592 pd_entry_t *pl1pd, l1pd;
3585 pt_entry_t *ptep, pte; 3593 pt_entry_t *ptep, pte;
3586 paddr_t pa; 3594 paddr_t pa;
3587 u_int l1idx; 3595 u_int l1idx;
3588 3596
3589 pmap_acquire_pmap_lock(pm); 3597 pmap_acquire_pmap_lock(pm);
3590 3598
3591 l1idx = L1_IDX(va); 3599 l1idx = L1_IDX(va);
3592 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3600 pl1pd = &pm->pm_l1->l1_kva[l1idx];
3593 l1pd = *pl1pd; 3601 l1pd = *pl1pd;
3594 3602
3595 if (l1pte_section_p(l1pd)) { 3603 if (l1pte_section_p(l1pd)) {
3596 /* 3604 /*
3597 * These should only happen for pmap_kernel() 3605 * These should only happen for pmap_kernel()
3598 */ 3606 */
3599 KDASSERT(pm == pmap_kernel()); 3607 KDASSERT(pm == pmap_kernel());
3600 pmap_release_pmap_lock(pm); 3608 pmap_release_pmap_lock(pm);
3601#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 3609#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
3602 if (l1pte_supersection_p(l1pd)) { 3610 if (l1pte_supersection_p(l1pd)) {
3603 pa = (l1pd & L1_SS_FRAME) | (va & L1_SS_OFFSET); 3611 pa = (l1pd & L1_SS_FRAME) | (va & L1_SS_OFFSET);
3604 } else 3612 } else
3605#endif 3613#endif
3606 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3614 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
3607 } else { 3615 } else {
3608 /* 3616 /*
3609 * Note that we can't rely on the validity of the L1 3617 * Note that we can't rely on the validity of the L1
3610 * descriptor as an indication that a mapping exists. 3618 * descriptor as an indication that a mapping exists.
3611 * We have to look it up in the L2 dtable. 3619 * We have to look it up in the L2 dtable.
3612 */ 3620 */
3613 l2 = pm->pm_l2[L2_IDX(l1idx)]; 3621 l2 = pm->pm_l2[L2_IDX(l1idx)];
3614 3622
3615 if (l2 == NULL || 3623 if (l2 == NULL ||
3616 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3624 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
3617 pmap_release_pmap_lock(pm); 3625 pmap_release_pmap_lock(pm);
3618 return false; 3626 return false;
3619 } 3627 }
3620 3628
3621 ptep = &ptep[l2pte_index(va)]; 3629 ptep = &ptep[l2pte_index(va)];
3622 pte = *ptep; 3630 pte = *ptep;
3623 pmap_release_pmap_lock(pm); 3631 pmap_release_pmap_lock(pm);
3624 3632
3625 if (pte == 0) 3633 if (pte == 0)
3626 return false; 3634 return false;
3627 3635
3628 switch (pte & L2_TYPE_MASK) { 3636 switch (pte & L2_TYPE_MASK) {
3629 case L2_TYPE_L: 3637 case L2_TYPE_L:
3630 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3638 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3631 break; 3639 break;
3632 3640
3633 default: 3641 default:
3634 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3642 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3635 break; 3643 break;
3636 } 3644 }
3637 } 3645 }
3638 3646
3639 if (pap != NULL) 3647 if (pap != NULL)
3640 *pap = pa; 3648 *pap = pa;
3641 3649
3642 return true; 3650 return true;
3643} 3651}
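
[Editor's note] pmap_extract() above recovers the physical address directly from the descriptor: a section mapping contributes its 1MB frame plus the low 20 bits of the VA, a small page its 4KB frame plus the low 12 bits. A self-contained sketch of just that arithmetic; the mask values are the conventional ARM frame/offset split and should be treated as illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>

#define SEC_FRAME   0xfff00000u		/* 1MB section: top 12 bits */
#define SEC_OFFSET  0x000fffffu
#define SPG_FRAME   0xfffff000u		/* 4KB small page: top 20 bits */
#define SPG_OFFSET  0x00000fffu

static uint32_t
section_pa(uint32_t l1pd, uint32_t va)
{
	return (l1pd & SEC_FRAME) | (va & SEC_OFFSET);
}

static uint32_t
small_page_pa(uint32_t pte, uint32_t va)
{
	return (pte & SPG_FRAME) | (va & SPG_OFFSET);
}

int
main(void)
{
	/* A section descriptor whose frame is 0x80100000 ... */
	printf("section: 0x%08x\n", section_pa(0x80100c12u, 0x00034567u));
	/* ... and a small-page PTE whose frame is 0x8765f000. */
	printf("small:   0x%08x\n", small_page_pa(0x8765f05eu, 0xc0001234u));
	return 0;
}
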
3644 3652
3645void 3653void
3646pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 3654pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
3647{ 3655{
3648 struct l2_bucket *l2b; 3656 struct l2_bucket *l2b;
3649 pt_entry_t *ptep, pte; 3657 pt_entry_t *ptep, pte;
3650 vaddr_t next_bucket; 3658 vaddr_t next_bucket;
3651 u_int flags; 3659 u_int flags;
3652 u_int clr_mask; 3660 u_int clr_mask;
3653 int flush; 3661 int flush;
3654 3662
3655 NPDEBUG(PDB_PROTECT, 3663 NPDEBUG(PDB_PROTECT,
3656 printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n", 3664 printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
3657 pm, sva, eva, prot)); 3665 pm, sva, eva, prot));
3658 3666
3659 if ((prot & VM_PROT_READ) == 0) { 3667 if ((prot & VM_PROT_READ) == 0) {
3660 pmap_remove(pm, sva, eva); 3668 pmap_remove(pm, sva, eva);
3661 return; 3669 return;
3662 } 3670 }
3663 3671
3664 if (prot & VM_PROT_WRITE) { 3672 if (prot & VM_PROT_WRITE) {
3665 /* 3673 /*
3666 * If this is a read->write transition, just ignore it and let 3674 * If this is a read->write transition, just ignore it and let
3667 * uvm_fault() take care of it later. 3675 * uvm_fault() take care of it later.
3668 */ 3676 */
3669 return; 3677 return;
3670 } 3678 }
3671 3679
3672 pmap_acquire_pmap_lock(pm); 3680 pmap_acquire_pmap_lock(pm);
3673 3681
3674 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; 3682 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
3675 flags = 0; 3683 flags = 0;
3676 clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC); 3684 clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
3677 3685
3678 while (sva < eva) { 3686 while (sva < eva) {
3679 next_bucket = L2_NEXT_BUCKET(sva); 3687 next_bucket = L2_NEXT_BUCKET(sva);
3680 if (next_bucket > eva) 3688 if (next_bucket > eva)
3681 next_bucket = eva; 3689 next_bucket = eva;
3682 3690
3683 l2b = pmap_get_l2_bucket(pm, sva); 3691 l2b = pmap_get_l2_bucket(pm, sva);
3684 if (l2b == NULL) { 3692 if (l2b == NULL) {
3685 sva = next_bucket; 3693 sva = next_bucket;
3686 continue; 3694 continue;
3687 } 3695 }
3688 3696
3689 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3697 ptep = &l2b->l2b_kva[l2pte_index(sva)];
3690 3698
3691 while (sva < next_bucket) { 3699 while (sva < next_bucket) {
3692 pte = *ptep; 3700 pte = *ptep;
3693 if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) { 3701 if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) {
3694 struct vm_page *pg; 3702 struct vm_page *pg;
3695 u_int f; 3703 u_int f;
3696 3704
3697#ifdef PMAP_CACHE_VIVT 3705#ifdef PMAP_CACHE_VIVT
3698 /* 3706 /*
3699 * OK, at this point, we know we're doing 3707 * OK, at this point, we know we're doing
3700 * a write-protect operation. If the pmap is 3708 * a write-protect operation. If the pmap is
3701 * active, write-back the page. 3709 * active, write-back the page.
3702 */ 3710 */
3703 pmap_dcache_wb_range(pm, sva, PAGE_SIZE, 3711 pmap_dcache_wb_range(pm, sva, PAGE_SIZE,
3704 false, false); 3712 false, false);
3705#endif 3713#endif
3706 3714
3707 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3715 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
3708 pte = l2pte_set_readonly(pte); 3716 pte = l2pte_set_readonly(pte);
3709 *ptep = pte; 3717 *ptep = pte;
3710 PTE_SYNC(ptep); 3718 PTE_SYNC(ptep);
3711 3719
3712 if (pg != NULL) { 3720 if (pg != NULL) {
3713 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3721 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3714 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3722 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3715 3723
3716#ifdef MULTIPROCESSOR 3724#ifdef MULTIPROCESSOR
3717 KASSERT(uvm_page_locked_p(pg)); 3725 KASSERT(uvm_page_locked_p(pg));
3718#endif 3726#endif
3719 f = pmap_modify_pv(md, pa, pm, sva, 3727 f = pmap_modify_pv(md, pa, pm, sva,
3720 clr_mask, 0); 3728 clr_mask, 0);
3721 pmap_vac_me_harder(md, pa, pm, sva); 3729 pmap_vac_me_harder(md, pa, pm, sva);
3722 } else { 3730 } else {
3723 f = PVF_REF | PVF_EXEC; 3731 f = PVF_REF | PVF_EXEC;
3724 } 3732 }
3725 3733
3726 if (flush >= 0) { 3734 if (flush >= 0) {
3727 flush++; 3735 flush++;
3728 flags |= f; 3736 flags |= f;
3729 } else 3737 } else
3730 if (PV_BEEN_EXECD(f)) 3738 if (PV_BEEN_EXECD(f))
3731 pmap_tlb_flushID_SE(pm, sva); 3739 pmap_tlb_flushID_SE(pm, sva);
3732 else 3740 else
3733 if (PV_BEEN_REFD(f)) 3741 if (PV_BEEN_REFD(f))
3734 pmap_tlb_flushD_SE(pm, sva); 3742 pmap_tlb_flushD_SE(pm, sva);
3735 } 3743 }
3736 3744
3737 sva += PAGE_SIZE; 3745 sva += PAGE_SIZE;
3738 ptep++; 3746 ptep++;
3739 } 3747 }
3740 } 3748 }
3741 3749
3742 pmap_release_pmap_lock(pm); 3750 pmap_release_pmap_lock(pm);
3743 3751
3744 if (flush) { 3752 if (flush) {
3745 if (PV_BEEN_EXECD(flags)) 3753 if (PV_BEEN_EXECD(flags))
3746 pmap_tlb_flushID(pm); 3754 pmap_tlb_flushID(pm);
3747 else 3755 else
3748 if (PV_BEEN_REFD(flags)) 3756 if (PV_BEEN_REFD(flags))
3749 pmap_tlb_flushD(pm); 3757 pmap_tlb_flushD(pm);
3750 } 3758 }
3751} 3759}
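
[Editor's note] The flush bookkeeping in pmap_protect() amounts to a simple heuristic: ranges shorter than four pages flush each downgraded mapping individually inside the loop, while larger ranges accumulate the work and pay for one whole-TLB flush at the end. A hedged sketch of just that decision, with tlb_flush_page()/tlb_flush_all() standing in for the cpu_tlb_flush*() primitives and the PTE downgrade elided:

#include <stdio.h>

#define PAGE_SIZE 0x1000u

static void tlb_flush_page(unsigned va) { printf("flush page 0x%08x\n", va); }
static void tlb_flush_all(void)         { printf("flush whole TLB\n"); }

static void
write_protect_range(unsigned sva, unsigned eva)
{
	/* 0 = batch (count pages, flush once at the end), -1 = flush page by page */
	int flush = ((eva - sva) >= PAGE_SIZE * 4) ? 0 : -1;

	for (unsigned va = sva; va < eva; va += PAGE_SIZE) {
		/* ... downgrade the PTE for va here ... */
		if (flush >= 0)
			flush++;		/* defer to one big flush */
		else
			tlb_flush_page(va);	/* small range: flush now */
	}
	if (flush > 0)
		tlb_flush_all();
}

int
main(void)
{
	write_protect_range(0x1000, 0x3000);	/* 2 pages: per-page flushes */
	write_protect_range(0x1000, 0x9000);	/* 8 pages: one full flush */
	return 0;
}
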
3752 3760
3753void 3761void
3754pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva) 3762pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
3755{ 3763{
3756 struct l2_bucket *l2b; 3764 struct l2_bucket *l2b;
3757 pt_entry_t *ptep; 3765 pt_entry_t *ptep;
3758 vaddr_t next_bucket; 3766 vaddr_t next_bucket;
3759 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva; 3767 vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
3760 3768
3761 NPDEBUG(PDB_EXEC, 3769 NPDEBUG(PDB_EXEC,
3762 printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n", 3770 printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n",
3763 pm, sva, eva)); 3771 pm, sva, eva));
3764 3772
3765 pmap_acquire_pmap_lock(pm); 3773 pmap_acquire_pmap_lock(pm);
3766 3774
3767 while (sva < eva) { 3775 while (sva < eva) {
3768 next_bucket = L2_NEXT_BUCKET(sva); 3776 next_bucket = L2_NEXT_BUCKET(sva);
3769 if (next_bucket > eva) 3777 if (next_bucket > eva)
3770 next_bucket = eva; 3778 next_bucket = eva;
3771 3779
3772 l2b = pmap_get_l2_bucket(pm, sva); 3780 l2b = pmap_get_l2_bucket(pm, sva);
3773 if (l2b == NULL) { 3781 if (l2b == NULL) {
3774 sva = next_bucket; 3782 sva = next_bucket;
3775 continue; 3783 continue;
3776 } 3784 }
3777 3785
3778 for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3786 for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
3779 sva < next_bucket; 3787 sva < next_bucket;
3780 sva += page_size, ptep++, page_size = PAGE_SIZE) { 3788 sva += page_size, ptep++, page_size = PAGE_SIZE) {
3781 if (l2pte_valid(*ptep)) { 3789 if (l2pte_valid(*ptep)) {
3782 cpu_icache_sync_range(sva, 3790 cpu_icache_sync_range(sva,
3783 min(page_size, eva - sva)); 3791 min(page_size, eva - sva));
3784 } 3792 }
3785 } 3793 }
3786 } 3794 }
3787 3795
3788 pmap_release_pmap_lock(pm); 3796 pmap_release_pmap_lock(pm);
3789} 3797}
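
[Editor's note] The page_size handling in pmap_icache_sync_range() copes with an unaligned start: the first chunk only runs to the next page boundary, later chunks are full pages, and min(page_size, eva - sva) trims the tail. A small sketch of that chunking, with sync_chunk() as a placeholder for cpu_icache_sync_range():

#include <stdio.h>

#define PAGE_SIZE	0x1000u
#define trunc_page(va)	((va) & ~(PAGE_SIZE - 1))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static void
sync_chunk(unsigned va, unsigned len)	/* stand-in for cpu_icache_sync_range() */
{
	printf("sync 0x%05x+0x%04x\n", va, len);
}

static void
sync_range(unsigned sva, unsigned eva)
{
	/* size of the (possibly partial) first page */
	unsigned chunk = trunc_page(sva) + PAGE_SIZE - sva;

	for (; sva < eva; sva += chunk, chunk = PAGE_SIZE)
		sync_chunk(sva, MIN(chunk, eva - sva));
}

int
main(void)
{
	sync_range(0x10800, 0x12400);	/* 0x800, 0x1000 and 0x400 byte chunks */
	return 0;
}
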
3790 3798
3791void 3799void
3792pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 3800pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
3793{ 3801{
3794 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3802 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3795 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3803 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3796 3804
3797 NPDEBUG(PDB_PROTECT, 3805 NPDEBUG(PDB_PROTECT,
3798 printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n", 3806 printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n",
3799 md, pa, prot)); 3807 md, pa, prot));
3800 3808
3801#ifdef MULTIPROCESSOR 3809#ifdef MULTIPROCESSOR
3802 KASSERT(uvm_page_locked_p(pg)); 3810 KASSERT(uvm_page_locked_p(pg));
3803#endif 3811#endif
3804 3812
3805 switch(prot) { 3813 switch(prot) {
3806 case VM_PROT_READ|VM_PROT_WRITE: 3814 case VM_PROT_READ|VM_PROT_WRITE:
3807#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) 3815#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
3808 pmap_clearbit(md, pa, PVF_EXEC); 3816 pmap_clearbit(md, pa, PVF_EXEC);
3809 break; 3817 break;
3810#endif 3818#endif
3811 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 3819 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
3812 break; 3820 break;
3813 3821
3814 case VM_PROT_READ: 3822 case VM_PROT_READ:
3815#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX) 3823#if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
3816 pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC); 3824 pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC);
3817 break; 3825 break;
3818#endif 3826#endif
3819 case VM_PROT_READ|VM_PROT_EXECUTE: 3827 case VM_PROT_READ|VM_PROT_EXECUTE:
3820 pmap_clearbit(md, pa, PVF_WRITE); 3828 pmap_clearbit(md, pa, PVF_WRITE);
3821 break; 3829 break;
3822 3830
3823 default: 3831 default:
3824 pmap_page_remove(md, pa); 3832 pmap_page_remove(md, pa);
3825 break; 3833 break;
3826 } 3834 }
3827} 3835}
3828 3836
3829/* 3837/*
3830 * pmap_clear_modify: 3838 * pmap_clear_modify:
3831 * 3839 *
3832 * Clear the "modified" attribute for a page. 3840 * Clear the "modified" attribute for a page.
3833 */ 3841 */
3834bool 3842bool
3835pmap_clear_modify(struct vm_page *pg) 3843pmap_clear_modify(struct vm_page *pg)
3836{ 3844{
3837 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3845 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3838 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3846 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3839 bool rv; 3847 bool rv;
3840 3848
3841#ifdef MULTIPROCESSOR 3849#ifdef MULTIPROCESSOR
3842 KASSERT(uvm_page_locked_p(pg)); 3850 KASSERT(uvm_page_locked_p(pg));
3843#endif 3851#endif
3844 3852
3845 if (md->pvh_attrs & PVF_MOD) { 3853 if (md->pvh_attrs & PVF_MOD) {
3846 rv = true; 3854 rv = true;
3847#ifdef PMAP_CACHE_VIPT 3855#ifdef PMAP_CACHE_VIPT
3848 /* 3856 /*
3849 * If we are going to clear the modified bit and there are 3857 * If we are going to clear the modified bit and there are
3850 * no other modified bits set, flush the page to memory and 3858 * no other modified bits set, flush the page to memory and
3851 * mark it clean. 3859 * mark it clean.
3852 */ 3860 */
3853 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD) 3861 if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
3854 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY); 3862 pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
3855#endif 3863#endif
3856 pmap_clearbit(md, pa, PVF_MOD); 3864 pmap_clearbit(md, pa, PVF_MOD);
3857 } else 3865 } else
3858 rv = false; 3866 rv = false;
3859 3867
3860 return (rv); 3868 return (rv);
3861} 3869}
3862 3870
3863/* 3871/*
3864 * pmap_clear_reference: 3872 * pmap_clear_reference:
3865 * 3873 *
3866 * Clear the "referenced" attribute for a page. 3874 * Clear the "referenced" attribute for a page.
3867 */ 3875 */
3868bool 3876bool
3869pmap_clear_reference(struct vm_page *pg) 3877pmap_clear_reference(struct vm_page *pg)
3870{ 3878{
3871 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3879 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3872 paddr_t pa = VM_PAGE_TO_PHYS(pg); 3880 paddr_t pa = VM_PAGE_TO_PHYS(pg);
3873 bool rv; 3881 bool rv;
3874 3882
3875#ifdef MULTIPROCESSOR 3883#ifdef MULTIPROCESSOR
3876 KASSERT(uvm_page_locked_p(pg)); 3884 KASSERT(uvm_page_locked_p(pg));
3877#endif 3885#endif
3878 3886
3879 if (md->pvh_attrs & PVF_REF) { 3887 if (md->pvh_attrs & PVF_REF) {
3880 rv = true; 3888 rv = true;
3881 pmap_clearbit(md, pa, PVF_REF); 3889 pmap_clearbit(md, pa, PVF_REF);
3882 } else 3890 } else
3883 rv = false; 3891 rv = false;
3884 3892
3885 return (rv); 3893 return (rv);
3886} 3894}
3887 3895
3888/* 3896/*
3889 * pmap_is_modified: 3897 * pmap_is_modified:
3890 * 3898 *
3891 * Test if a page has the "modified" attribute. 3899 * Test if a page has the "modified" attribute.
3892 */ 3900 */
3893/* See <arm/arm32/pmap.h> */ 3901/* See <arm/arm32/pmap.h> */
3894 3902
3895/* 3903/*
3896 * pmap_is_referenced: 3904 * pmap_is_referenced:
3897 * 3905 *
3898 * Test if a page has the "referenced" attribute. 3906 * Test if a page has the "referenced" attribute.
3899 */ 3907 */
3900/* See <arm/arm32/pmap.h> */ 3908/* See <arm/arm32/pmap.h> */
3901 3909
3902int 3910int
3903pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user) 3911pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
3904{ 3912{
3905 struct l2_dtable *l2; 3913 struct l2_dtable *l2;
3906 struct l2_bucket *l2b; 3914 struct l2_bucket *l2b;
3907 pd_entry_t *pl1pd, l1pd; 3915 pd_entry_t *pl1pd, l1pd;
3908 pt_entry_t *ptep, pte; 3916 pt_entry_t *ptep, pte;
3909 paddr_t pa; 3917 paddr_t pa;
3910 u_int l1idx; 3918 u_int l1idx;
3911 int rv = 0; 3919 int rv = 0;
3912 3920
3913 pmap_acquire_pmap_lock(pm); 3921 pmap_acquire_pmap_lock(pm);
3914 3922
3915 l1idx = L1_IDX(va); 3923 l1idx = L1_IDX(va);
3916 3924
3917 /* 3925 /*
3918 * If there is no l2_dtable for this address, then the process 3926 * If there is no l2_dtable for this address, then the process
3919 * has no business accessing it. 3927 * has no business accessing it.
3920 * 3928 *
3921 * Note: This will catch userland processes trying to access 3929 * Note: This will catch userland processes trying to access
3922 * kernel addresses. 3930 * kernel addresses.
3923 */ 3931 */
3924 l2 = pm->pm_l2[L2_IDX(l1idx)]; 3932 l2 = pm->pm_l2[L2_IDX(l1idx)];
3925 if (l2 == NULL) 3933 if (l2 == NULL)
3926 goto out; 3934 goto out;
3927 3935
3928 /* 3936 /*
3929 * Likewise if there is no L2 descriptor table 3937 * Likewise if there is no L2 descriptor table
3930 */ 3938 */
3931 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 3939 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
3932 if (l2b->l2b_kva == NULL) 3940 if (l2b->l2b_kva == NULL)
3933 goto out; 3941 goto out;
3934 3942
3935 /* 3943 /*
3936 * Check the PTE itself. 3944 * Check the PTE itself.
3937 */ 3945 */
3938 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3946 ptep = &l2b->l2b_kva[l2pte_index(va)];
3939 pte = *ptep; 3947 pte = *ptep;
3940 if (pte == 0) 3948 if (pte == 0)
3941 goto out; 3949 goto out;
3942 3950
3943 /* 3951 /*
3944 * Catch a userland access to the vector page mapped at 0x0 3952 * Catch a userland access to the vector page mapped at 0x0
3945 */ 3953 */
3946 if (user && (pte & L2_S_PROT_U) == 0) 3954 if (user && (pte & L2_S_PROT_U) == 0)
3947 goto out; 3955 goto out;
3948 3956
3949 pa = l2pte_pa(pte); 3957 pa = l2pte_pa(pte);
3950 3958
3951 if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) { 3959 if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) {
3952 /* 3960 /*
3953 * This looks like a good candidate for "page modified" 3961 * This looks like a good candidate for "page modified"
3954 * emulation... 3962 * emulation...
3955 */ 3963 */
3956 struct pv_entry *pv; 3964 struct pv_entry *pv;
3957 struct vm_page *pg; 3965 struct vm_page *pg;
3958 3966
3959 /* Extract the physical address of the page */ 3967 /* Extract the physical address of the page */
3960 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 3968 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3961 goto out; 3969 goto out;
3962 3970
3963 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 3971 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
3964 3972
3965 /* Get the current flags for this page. */ 3973 /* Get the current flags for this page. */
3966#ifdef MULTIPROCESSOR 3974#ifdef MULTIPROCESSOR
3967 KASSERT(uvm_page_locked_p(pg)); 3975 KASSERT(uvm_page_locked_p(pg));
3968#endif 3976#endif
3969 3977
3970 pv = pmap_find_pv(md, pm, va); 3978 pv = pmap_find_pv(md, pm, va);
3971 if (pv == NULL) { 3979 if (pv == NULL) {
3972 goto out; 3980 goto out;
3973 } 3981 }
3974 3982
3975 /* 3983 /*
3976 * Do the flags say this page is writable? If not then it 3984 * Do the flags say this page is writable? If not then it
3977 * is a genuine write fault. If yes then the write fault is 3985 * is a genuine write fault. If yes then the write fault is
3978 * our fault as we did not reflect the write access in the 3986 * our fault as we did not reflect the write access in the
3979 * PTE. Now that we know a write has occurred we can correct this 3987 * PTE. Now that we know a write has occurred we can correct this
3980 * and also set the modified bit 3988 * and also set the modified bit
3981 */ 3989 */
3982 if ((pv->pv_flags & PVF_WRITE) == 0) { 3990 if ((pv->pv_flags & PVF_WRITE) == 0) {
3983 goto out; 3991 goto out;
3984 } 3992 }
3985 3993
3986 NPDEBUG(PDB_FOLLOW, 3994 NPDEBUG(PDB_FOLLOW,
3987 printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n", 3995 printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
3988 pm, va, pa)); 3996 pm, va, pa));
3989 3997
3990 md->pvh_attrs |= PVF_REF | PVF_MOD; 3998 md->pvh_attrs |= PVF_REF | PVF_MOD;
3991 pv->pv_flags |= PVF_REF | PVF_MOD; 3999 pv->pv_flags |= PVF_REF | PVF_MOD;
3992#ifdef PMAP_CACHE_VIPT 4000#ifdef PMAP_CACHE_VIPT
3993 /* 4001 /*
3994 * If there are cacheable mappings for this page, mark it dirty. 4002 * If there are cacheable mappings for this page, mark it dirty.
3995 */ 4003 */
3996 if ((md->pvh_attrs & PVF_NC) == 0) 4004 if ((md->pvh_attrs & PVF_NC) == 0)
3997 md->pvh_attrs |= PVF_DIRTY; 4005 md->pvh_attrs |= PVF_DIRTY;
3998#endif 4006#endif
3999 4007
4000 /*  4008 /*
4001 * Re-enable write permissions for the page. No need to call 4009 * Re-enable write permissions for the page. No need to call
4002 * pmap_vac_me_harder(), since this is just a 4010 * pmap_vac_me_harder(), since this is just a
4003 * modified-emulation fault, and the PVF_WRITE bit isn't 4011 * modified-emulation fault, and the PVF_WRITE bit isn't
4004 * changing. We've already set the cacheable bits based on 4012 * changing. We've already set the cacheable bits based on
4005 * the assumption that we can write to this page. 4013 * the assumption that we can write to this page.
4006 */ 4014 */
4007 *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO); 4015 *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
4008 PTE_SYNC(ptep); 4016 PTE_SYNC(ptep);
4009 rv = 1; 4017 rv = 1;
4010 } else 4018 } else
4011 if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { 4019 if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
4012 /* 4020 /*
4013 * This looks like a good candidate for "page referenced" 4021 * This looks like a good candidate for "page referenced"
4014 * emulation. 4022 * emulation.
4015 */ 4023 */
4016 struct pv_entry *pv; 4024 struct pv_entry *pv;
4017 struct vm_page *pg; 4025 struct vm_page *pg;
4018 4026
4019 /* Extract the physical address of the page */ 4027 /* Extract the physical address of the page */
4020 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 4028 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
4021 goto out; 4029 goto out;
4022 4030
4023 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4031 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4024 4032
4025 /* Get the current flags for this page. */ 4033 /* Get the current flags for this page. */
4026#ifdef MULTIPROCESSOR 4034#ifdef MULTIPROCESSOR
4027 KASSERT(uvm_page_locked_p(pg)); 4035 KASSERT(uvm_page_locked_p(pg));
4028#endif 4036#endif
4029 4037
4030 pv = pmap_find_pv(md, pm, va); 4038 pv = pmap_find_pv(md, pm, va);
4031 if (pv == NULL) { 4039 if (pv == NULL) {
4032 goto out; 4040 goto out;
4033 } 4041 }
4034 4042
4035 md->pvh_attrs |= PVF_REF; 4043 md->pvh_attrs |= PVF_REF;
4036 pv->pv_flags |= PVF_REF; 4044 pv->pv_flags |= PVF_REF;
4037 4045
4038 NPDEBUG(PDB_FOLLOW, 4046 NPDEBUG(PDB_FOLLOW,
4039 printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n", 4047 printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
4040 pm, va, pa)); 4048 pm, va, pa));
4041 4049
4042 *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO); 4050 *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
4043 PTE_SYNC(ptep); 4051 PTE_SYNC(ptep);
4044 rv = 1; 4052 rv = 1;
4045 } 4053 }
4046 4054
4047 /* 4055 /*
4048 * We know there is a valid mapping here, so simply 4056 * We know there is a valid mapping here, so simply
4049 * fix up the L1 if necessary. 4057 * fix up the L1 if necessary.
4050 */ 4058 */
4051 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 4059 pl1pd = &pm->pm_l1->l1_kva[l1idx];
4052 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; 4060 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
4053 if (*pl1pd != l1pd) { 4061 if (*pl1pd != l1pd) {
4054 *pl1pd = l1pd; 4062 *pl1pd = l1pd;
4055 PTE_SYNC(pl1pd); 4063 PTE_SYNC(pl1pd);
4056 rv = 1; 4064 rv = 1;
4057 } 4065 }
4058 4066
4059#ifdef CPU_SA110 4067#ifdef CPU_SA110
4060 /* 4068 /*
4061 * There are bugs in the rev K SA110. This is a check for one 4069 * There are bugs in the rev K SA110. This is a check for one
4062 * of them. 4070 * of them.
4063 */ 4071 */
4064 if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && 4072 if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
4065 curcpu()->ci_arm_cpurev < 3) { 4073 curcpu()->ci_arm_cpurev < 3) {
4066 /* Always current pmap */ 4074 /* Always current pmap */
4067 if (l2pte_valid(pte)) { 4075 if (l2pte_valid(pte)) {
4068 extern int kernel_debug; 4076 extern int kernel_debug;
4069 if (kernel_debug & 1) { 4077 if (kernel_debug & 1) {
4070 struct proc *p = curlwp->l_proc; 4078 struct proc *p = curlwp->l_proc;
4071 printf("prefetch_abort: page is already " 4079 printf("prefetch_abort: page is already "
4072 "mapped - pte=%p *pte=%08x\n", ptep, pte); 4080 "mapped - pte=%p *pte=%08x\n", ptep, pte);
4073 printf("prefetch_abort: pc=%08lx proc=%p " 4081 printf("prefetch_abort: pc=%08lx proc=%p "
4074 "process=%s\n", va, p, p->p_comm); 4082 "process=%s\n", va, p, p->p_comm);
4075 printf("prefetch_abort: far=%08x fs=%x\n", 4083 printf("prefetch_abort: far=%08x fs=%x\n",
4076 cpu_faultaddress(), cpu_faultstatus()); 4084 cpu_faultaddress(), cpu_faultstatus());
4077 } 4085 }
4078#ifdef DDB 4086#ifdef DDB
4079 if (kernel_debug & 2) 4087 if (kernel_debug & 2)
4080 Debugger(); 4088 Debugger();
4081#endif 4089#endif
4082 rv = 1; 4090 rv = 1;
4083 } 4091 }
4084 } 4092 }
4085#endif /* CPU_SA110 */ 4093#endif /* CPU_SA110 */
4086 4094
4087 /* 4095 /*
4088 * If 'rv == 0' at this point, it generally indicates that there is a 4096 * If 'rv == 0' at this point, it generally indicates that there is a
4089 * stale TLB entry for the faulting address. That might be due to a 4097 * stale TLB entry for the faulting address. That might be due to a
4090 * wrong setting of pmap_needs_pte_sync. So set it and retry. 4098 * wrong setting of pmap_needs_pte_sync. So set it and retry.
4091 */ 4099 */
4092 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1 4100 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1
4093 && pmap_needs_pte_sync == 0) { 4101 && pmap_needs_pte_sync == 0) {
4094 pmap_needs_pte_sync = 1; 4102 pmap_needs_pte_sync = 1;
4095 PTE_SYNC(ptep); 4103 PTE_SYNC(ptep);
4096 rv = 1; 4104 rv = 1;
4097 } 4105 }
4098 4106
4099#ifdef DEBUG 4107#ifdef DEBUG
4100 /* 4108 /*
4101 * If 'rv == 0' at this point, it generally indicates that there is a 4109 * If 'rv == 0' at this point, it generally indicates that there is a
4102 * stale TLB entry for the faulting address. This happens when two or 4110 * stale TLB entry for the faulting address. This happens when two or
4103 * more processes are sharing an L1. Since we don't flush the TLB on 4111 * more processes are sharing an L1. Since we don't flush the TLB on
4104 * a context switch between such processes, we can take domain faults 4112 * a context switch between such processes, we can take domain faults
4105 * for mappings which exist at the same VA in both processes. EVEN IF 4113 * for mappings which exist at the same VA in both processes. EVEN IF
4106 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for 4114 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
4107 * example. 4115 * example.
4108 * 4116 *
4109 * This is extremely likely to happen if pmap_enter() updated the L1 4117 * This is extremely likely to happen if pmap_enter() updated the L1
4110 * entry for a recently entered mapping. In this case, the TLB is 4118 * entry for a recently entered mapping. In this case, the TLB is
4111 * flushed for the new mapping, but there may still be TLB entries for 4119 * flushed for the new mapping, but there may still be TLB entries for
4112 * other mappings belonging to other processes in the 1MB range 4120 * other mappings belonging to other processes in the 1MB range
4113 * covered by the L1 entry. 4121 * covered by the L1 entry.
4114 * 4122 *
4115 * Since 'rv == 0', we know that the L1 already contains the correct 4123 * Since 'rv == 0', we know that the L1 already contains the correct
4116 * value, so the fault must be due to a stale TLB entry. 4124 * value, so the fault must be due to a stale TLB entry.
4117 * 4125 *
4118 * Since we always need to flush the TLB anyway in the case where we 4126 * Since we always need to flush the TLB anyway in the case where we
4119 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 4127 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
4120 * stale TLB entries dynamically. 4128 * stale TLB entries dynamically.
4121 * 4129 *
4122 * However, the above condition can ONLY happen if the current L1 is 4130 * However, the above condition can ONLY happen if the current L1 is
4123 * being shared. If it happens when the L1 is unshared, it indicates 4131 * being shared. If it happens when the L1 is unshared, it indicates
4124 * that other parts of the pmap are not doing their job WRT managing 4132 * that other parts of the pmap are not doing their job WRT managing
4125 * the TLB. 4133 * the TLB.
4126 */ 4134 */
4127 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { 4135 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
4128 extern int last_fault_code; 4136 extern int last_fault_code;
4129 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 4137 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
4130 pm, va, ftype); 4138 pm, va, ftype);
4131 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", 4139 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
4132 l2, l2b, ptep, pl1pd); 4140 l2, l2b, ptep, pl1pd);
4133 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", 4141 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
4134 pte, l1pd, last_fault_code); 4142 pte, l1pd, last_fault_code);
4135#ifdef DDB 4143#ifdef DDB
4136 extern int kernel_debug; 4144 extern int kernel_debug;
4137 4145
4138 if (kernel_debug & 2) 4146 if (kernel_debug & 2)
4139 Debugger(); 4147 Debugger();
4140#endif 4148#endif
4141 } 4149 }
4142#endif 4150#endif
4143 4151
4144 cpu_tlb_flushID_SE(va); 4152 cpu_tlb_flushID_SE(va);
4145 cpu_cpwait(); 4153 cpu_cpwait();
4146 4154
4147 rv = 1; 4155 rv = 1;
4148 4156
4149out: 4157out:
4150 pmap_release_pmap_lock(pm); 4158 pmap_release_pmap_lock(pm);
4151 4159
4152 return (rv); 4160 return (rv);
4153} 4161}
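
[Editor's note] The modified/referenced emulation in pmap_fault_fixup() boils down to keeping the hardware PTE more restrictive than the logical permissions and upgrading it lazily in the fault handler while recording the REF/MOD attributes. A much-simplified model of that state machine, for a single page with boolean permission bits and no real MMU involved; it is a sketch of the idea, not the kernel's data structures:

#include <stdbool.h>
#include <stdio.h>

struct page {
	bool logically_writable;	/* what the pv entry allows */
	bool pte_valid, pte_writable;	/* what the hardware PTE says */
	bool referenced, modified;	/* the emulated attribute bits */
};

/* Returns true if the fault was just ref/mod emulation, false if genuine. */
static bool
fault_fixup(struct page *pg, bool is_write)
{
	if (is_write && !pg->pte_writable) {
		if (!pg->logically_writable)
			return false;		/* genuine protection fault */
		pg->referenced = pg->modified = true;
		pg->pte_valid = pg->pte_writable = true;
		return true;			/* modified emulation */
	}
	if (!pg->pte_valid) {
		pg->referenced = true;
		pg->pte_valid = true;		/* map it read-only for now */
		return true;			/* referenced emulation */
	}
	return false;
}

int
main(void)
{
	struct page pg = { .logically_writable = true };
	printf("read  -> %s\n", fault_fixup(&pg, false) ? "ref emul" : "real fault");
	printf("write -> %s\n", fault_fixup(&pg, true)  ? "mod emul" : "real fault");
	return 0;
}
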
4154 4162
4155/* 4163/*
4156 * Routine: pmap_procwr 4164 * Routine: pmap_procwr
4157 * 4165 *
4158 * Function: 4166 * Function:
4159 * Synchronize caches corresponding to [addr, addr+len) in p. 4167 * Synchronize caches corresponding to [addr, addr+len) in p.
4160 * 4168 *
4161 */ 4169 */
4162void 4170void
4163pmap_procwr(struct proc *p, vaddr_t va, int len) 4171pmap_procwr(struct proc *p, vaddr_t va, int len)
4164{ 4172{
4165 /* We only need to do anything if it is the current process. */ 4173 /* We only need to do anything if it is the current process. */
4166 if (p == curproc) 4174 if (p == curproc)
4167 cpu_icache_sync_range(va, len); 4175 cpu_icache_sync_range(va, len);
4168} 4176}
4169 4177
4170/* 4178/*
4171 * Routine: pmap_unwire 4179 * Routine: pmap_unwire
4172 * Function: Clear the wired attribute for a map/virtual-address pair. 4180 * Function: Clear the wired attribute for a map/virtual-address pair.
4173 * 4181 *
4174 * In/out conditions: 4182 * In/out conditions:
4175 * The mapping must already exist in the pmap. 4183 * The mapping must already exist in the pmap.
4176 */ 4184 */
4177void 4185void
4178pmap_unwire(pmap_t pm, vaddr_t va) 4186pmap_unwire(pmap_t pm, vaddr_t va)
4179{ 4187{
4180 struct l2_bucket *l2b; 4188 struct l2_bucket *l2b;
4181 pt_entry_t *ptep, pte; 4189 pt_entry_t *ptep, pte;
4182 struct vm_page *pg; 4190 struct vm_page *pg;
4183 paddr_t pa; 4191 paddr_t pa;
4184 4192
4185 NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va)); 4193 NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
4186 4194
4187 pmap_acquire_pmap_lock(pm); 4195 pmap_acquire_pmap_lock(pm);
4188 4196
4189 l2b = pmap_get_l2_bucket(pm, va); 4197 l2b = pmap_get_l2_bucket(pm, va);
4190 KDASSERT(l2b != NULL); 4198 KDASSERT(l2b != NULL);
4191 4199
4192 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4200 ptep = &l2b->l2b_kva[l2pte_index(va)];
4193 pte = *ptep; 4201 pte = *ptep;
4194 4202
4195 /* Extract the physical address of the page */ 4203 /* Extract the physical address of the page */
4196 pa = l2pte_pa(pte); 4204 pa = l2pte_pa(pte);
4197 4205
4198 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 4206 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
4199 /* Update the wired bit in the pv entry for this page. */ 4207 /* Update the wired bit in the pv entry for this page. */
4200 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4208 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4201 4209
4202#ifdef MULTIPROCESSOR 4210#ifdef MULTIPROCESSOR
4203 KASSERT(uvm_page_locked_p(pg)); 4211 KASSERT(uvm_page_locked_p(pg));
4204#endif 4212#endif
4205 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0); 4213 (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
4206 } 4214 }
4207 4215
4208 pmap_release_pmap_lock(pm); 4216 pmap_release_pmap_lock(pm);
4209} 4217}
4210 4218
4211void 4219void
4212pmap_activate(struct lwp *l) 4220pmap_activate(struct lwp *l)
4213{ 4221{
4214 extern int block_userspace_access; 4222 extern int block_userspace_access;
4215 pmap_t opm, npm, rpm; 4223 pmap_t opm, npm, rpm;
4216 uint32_t odacr, ndacr; 4224 uint32_t odacr, ndacr;
4217 int oldirqstate; 4225 int oldirqstate;
4218 4226
4219 /* 4227 /*
4220 * If activating a non-current lwp or the current lwp is 4228 * If activating a non-current lwp or the current lwp is
4221 * already active, just return. 4229 * already active, just return.
4222 */ 4230 */
4223 if (l != curlwp || 4231 if (l != curlwp ||
4224 l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true) 4232 l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true)
4225 return; 4233 return;
4226 4234
4227 npm = l->l_proc->p_vmspace->vm_map.pmap; 4235 npm = l->l_proc->p_vmspace->vm_map.pmap;
4228 ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 4236 ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4229 (DOMAIN_CLIENT << (npm->pm_domain * 2)); 4237 (DOMAIN_CLIENT << (npm->pm_domain * 2));
4230 4238
4231 /* 4239 /*
4232 * If TTB and DACR are unchanged, short-circuit all the 4240 * If TTB and DACR are unchanged, short-circuit all the
4233 * TLB/cache management stuff. 4241 * TLB/cache management stuff.
4234 */ 4242 */
4235 if (pmap_previous_active_lwp != NULL) { 4243 if (pmap_previous_active_lwp != NULL) {
4236 opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap; 4244 opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap;
4237 odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 4245 odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
4238 (DOMAIN_CLIENT << (opm->pm_domain * 2)); 4246 (DOMAIN_CLIENT << (opm->pm_domain * 2));
4239 4247
4240 if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr) 4248 if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
4241 goto all_done; 4249 goto all_done;
4242 } else 4250 } else
4243 opm = NULL; 4251 opm = NULL;
4244 4252
4245 PMAPCOUNT(activations); 4253 PMAPCOUNT(activations);
4246 block_userspace_access = 1; 4254 block_userspace_access = 1;
4247 4255
4248 /* 4256 /*
4249 * If switching to a user vmspace which is different to the 4257 * If switching to a user vmspace which is different to the
4250 * most recent one, and the most recent one is potentially 4258 * most recent one, and the most recent one is potentially
4251 * live in the cache, we must write-back and invalidate the 4259 * live in the cache, we must write-back and invalidate the
4252 * entire cache. 4260 * entire cache.
4253 */ 4261 */
4254 rpm = pmap_recent_user; 4262 rpm = pmap_recent_user;
4255 4263
4256/* 4264/*
4257 * XXXSCW: There's a corner case here which can leave turds in the cache as 4265 * XXXSCW: There's a corner case here which can leave turds in the cache as
4258 * reported in kern/41058. They're probably left over during tear-down and 4266 * reported in kern/41058. They're probably left over during tear-down and
4259 * switching away from an exiting process. Until the root cause is identified 4267 * switching away from an exiting process. Until the root cause is identified
4260 * and fixed, zap the cache when switching pmaps. This will result in a few 4268 * and fixed, zap the cache when switching pmaps. This will result in a few
4261 * unnecessary cache flushes, but that's better than silently corrupting data. 4269 * unnecessary cache flushes, but that's better than silently corrupting data.
4262 */ 4270 */
4263#if 0 4271#if 0
4264 if (npm != pmap_kernel() && rpm && npm != rpm && 4272 if (npm != pmap_kernel() && rpm && npm != rpm &&
4265 rpm->pm_cstate.cs_cache) { 4273 rpm->pm_cstate.cs_cache) {
4266 rpm->pm_cstate.cs_cache = 0; 4274 rpm->pm_cstate.cs_cache = 0;
4267#ifdef PMAP_CACHE_VIVT 4275#ifdef PMAP_CACHE_VIVT
4268 cpu_idcache_wbinv_all(); 4276 cpu_idcache_wbinv_all();
4269#endif 4277#endif
4270 } 4278 }
4271#else 4279#else
4272 if (rpm) { 4280 if (rpm) {
4273 rpm->pm_cstate.cs_cache = 0; 4281 rpm->pm_cstate.cs_cache = 0;
4274 if (npm == pmap_kernel()) 4282 if (npm == pmap_kernel())
4275 pmap_recent_user = NULL; 4283 pmap_recent_user = NULL;
4276#ifdef PMAP_CACHE_VIVT 4284#ifdef PMAP_CACHE_VIVT
4277 cpu_idcache_wbinv_all(); 4285 cpu_idcache_wbinv_all();
4278#endif 4286#endif
4279 } 4287 }
4280#endif 4288#endif
4281 4289
4282 /* No interrupts while we frob the TTB/DACR */ 4290 /* No interrupts while we frob the TTB/DACR */
4283 oldirqstate = disable_interrupts(IF32_bits); 4291 oldirqstate = disable_interrupts(IF32_bits);
4284 4292
 4293#ifndef ARM_HAS_VBAR
4285 /* 4294 /*
4286 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1 4295 * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
4287 * entry corresponding to 'vector_page' in the incoming L1 table 4296 * entry corresponding to 'vector_page' in the incoming L1 table
4288 * before switching to it otherwise subsequent interrupts/exceptions 4297 * before switching to it otherwise subsequent interrupts/exceptions
4289 * (including domain faults!) will jump into hyperspace. 4298 * (including domain faults!) will jump into hyperspace.
4290 */ 4299 */
4291 if (npm->pm_pl1vec != NULL) { 4300 if (npm->pm_pl1vec != NULL) {
4292 cpu_tlb_flushID_SE((u_int)vector_page); 4301 cpu_tlb_flushID_SE((u_int)vector_page);
4293 cpu_cpwait(); 4302 cpu_cpwait();
4294 *npm->pm_pl1vec = npm->pm_l1vec; 4303 *npm->pm_pl1vec = npm->pm_l1vec;
4295 PTE_SYNC(npm->pm_pl1vec); 4304 PTE_SYNC(npm->pm_pl1vec);
4296 } 4305 }
 4306#endif
4297 4307
4298 cpu_domains(ndacr); 4308 cpu_domains(ndacr);
4299 4309
4300 if (npm == pmap_kernel() || npm == rpm) { 4310 if (npm == pmap_kernel() || npm == rpm) {
4301 /* 4311 /*
4302 * Switching to a kernel thread, or back to the 4312 * Switching to a kernel thread, or back to the
4303 * same user vmspace as before... Simply update 4313 * same user vmspace as before... Simply update
4304 * the TTB (no TLB flush required) 4314 * the TTB (no TLB flush required)
4305 */ 4315 */
4306 cpu_setttb(npm->pm_l1->l1_physaddr, false); 4316 cpu_setttb(npm->pm_l1->l1_physaddr, false);
4307 cpu_cpwait(); 4317 cpu_cpwait();
4308 } else { 4318 } else {
4309 /* 4319 /*
4310 * Otherwise, update TTB and flush TLB 4320 * Otherwise, update TTB and flush TLB
4311 */ 4321 */
4312 cpu_context_switch(npm->pm_l1->l1_physaddr); 4322 cpu_context_switch(npm->pm_l1->l1_physaddr);
4313 if (rpm != NULL) 4323 if (rpm != NULL)
4314 rpm->pm_cstate.cs_tlb = 0; 4324 rpm->pm_cstate.cs_tlb = 0;
4315 } 4325 }
4316 4326
4317 restore_interrupts(oldirqstate); 4327 restore_interrupts(oldirqstate);
4318 4328
4319 block_userspace_access = 0; 4329 block_userspace_access = 0;
4320 4330
4321 all_done: 4331 all_done:
4322 /* 4332 /*
4323 * The new pmap is resident. Make sure it's marked 4333 * The new pmap is resident. Make sure it's marked
4324 * as resident in the cache/TLB. 4334 * as resident in the cache/TLB.
4325 */ 4335 */
4326 npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4336 npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4327 if (npm != pmap_kernel()) 4337 if (npm != pmap_kernel())
4328 pmap_recent_user = npm; 4338 pmap_recent_user = npm;
4329 4339
4330 /* The old pmap is no longer active */ 4340 /* The old pmap is no longer active */
4331 if (opm != NULL) 4341 if (opm != NULL)
4332 opm->pm_activated = false; 4342 opm->pm_activated = false;
4333 4343
4334 /* But the new one is */ 4344 /* But the new one is */
4335 npm->pm_activated = true; 4345 npm->pm_activated = true;
4336} 4346}
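
[Editor's note] The ndacr/odacr values built in pmap_activate() follow the ARM Domain Access Control Register layout: two bits per domain, sixteen domains per word, with "client" access encoded as 0b01. A sketch of that packing, assuming domain 0 is the kernel domain and DOMAIN_CLIENT is 1 (the usual ARM encoding); both are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CLIENT	1u	/* assumed ARM encoding: 0b01 = client access */
#define KERNEL_DOMAIN	0u	/* assumed: kernel uses domain 0 */

/* Grant client access to the kernel domain and to one user domain. */
static uint32_t
build_dacr(unsigned user_domain)
{
	return (DOMAIN_CLIENT << (KERNEL_DOMAIN * 2)) |
	       (DOMAIN_CLIENT << (user_domain * 2));
}

int
main(void)
{
	/* e.g. a pmap that was handed domain 5 */
	printf("DACR = 0x%08x\n", build_dacr(5));	/* prints 0x00000401 */
	return 0;
}
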
4337 4347
4338void 4348void
4339pmap_deactivate(struct lwp *l) 4349pmap_deactivate(struct lwp *l)
4340{ 4350{
4341 4351
4342 /* 4352 /*
4343 * If the process is exiting, make sure pmap_activate() does 4353 * If the process is exiting, make sure pmap_activate() does
4344 * a full MMU context-switch and cache flush, which we might 4354 * a full MMU context-switch and cache flush, which we might
4345 * otherwise skip. See PR port-arm/38950. 4355 * otherwise skip. See PR port-arm/38950.
4346 */ 4356 */
4347 if (l->l_proc->p_sflag & PS_WEXIT) 4357 if (l->l_proc->p_sflag & PS_WEXIT)
4348 pmap_previous_active_lwp = NULL; 4358 pmap_previous_active_lwp = NULL;
4349 4359
4350 l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false; 4360 l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false;
4351} 4361}
4352 4362
4353void 4363void
4354pmap_update(pmap_t pm) 4364pmap_update(pmap_t pm)
4355{ 4365{
4356 4366
4357 if (pm->pm_remove_all) { 4367 if (pm->pm_remove_all) {
4358 /* 4368 /*
4359 * Finish up the pmap_remove_all() optimisation by flushing 4369 * Finish up the pmap_remove_all() optimisation by flushing
4360 * the TLB. 4370 * the TLB.
4361 */ 4371 */
4362 pmap_tlb_flushID(pm); 4372 pmap_tlb_flushID(pm);
4363 pm->pm_remove_all = false; 4373 pm->pm_remove_all = false;
4364 } 4374 }
4365 4375
4366 if (pmap_is_current(pm)) { 4376 if (pmap_is_current(pm)) {
4367 /* 4377 /*
4368 * If we're dealing with a current userland pmap, move its L1 4378 * If we're dealing with a current userland pmap, move its L1
4369 * to the end of the LRU. 4379 * to the end of the LRU.
4370 */ 4380 */
4371 if (pm != pmap_kernel()) 4381 if (pm != pmap_kernel())
4372 pmap_use_l1(pm); 4382 pmap_use_l1(pm);
4373 4383
4374 /* 4384 /*
4375 * We can assume we're done with frobbing the cache/tlb for 4385 * We can assume we're done with frobbing the cache/tlb for
4376 * now. Make sure any future pmap ops don't skip cache/tlb 4386 * now. Make sure any future pmap ops don't skip cache/tlb
4377 * flushes. 4387 * flushes.
4378 */ 4388 */
4379 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4389 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4380 } 4390 }
4381 4391
4382 PMAPCOUNT(updates); 4392 PMAPCOUNT(updates);
4383 4393
4384 /* 4394 /*
4385 * make sure TLB/cache operations have completed. 4395 * make sure TLB/cache operations have completed.
4386 */ 4396 */
4387 cpu_cpwait(); 4397 cpu_cpwait();
4388} 4398}
4389 4399
4390void 4400void
4391pmap_remove_all(pmap_t pm) 4401pmap_remove_all(pmap_t pm)
4392{ 4402{
4393 4403
4394 /* 4404 /*
4395 * The vmspace described by this pmap is about to be torn down. 4405 * The vmspace described by this pmap is about to be torn down.
4396 * Until pmap_update() is called, UVM will only make calls 4406 * Until pmap_update() is called, UVM will only make calls
4397 * to pmap_remove(). We can make life much simpler by flushing 4407 * to pmap_remove(). We can make life much simpler by flushing
4398 * the cache now, and deferring TLB invalidation to pmap_update(). 4408 * the cache now, and deferring TLB invalidation to pmap_update().
4399 */ 4409 */
4400#ifdef PMAP_CACHE_VIVT 4410#ifdef PMAP_CACHE_VIVT
4401 pmap_idcache_wbinv_all(pm); 4411 pmap_idcache_wbinv_all(pm);
4402#endif 4412#endif
4403 pm->pm_remove_all = true; 4413 pm->pm_remove_all = true;
4404} 4414}
4405 4415
4406/* 4416/*
4407 * Retire the given physical map from service. 4417 * Retire the given physical map from service.
4408 * Should only be called if the map contains no valid mappings. 4418 * Should only be called if the map contains no valid mappings.
4409 */ 4419 */
4410void 4420void
4411pmap_destroy(pmap_t pm) 4421pmap_destroy(pmap_t pm)
4412{ 4422{
4413 u_int count; 4423 u_int count;
4414 4424
4415 if (pm == NULL) 4425 if (pm == NULL)
4416 return; 4426 return;
4417 4427
4418 if (pm->pm_remove_all) { 4428 if (pm->pm_remove_all) {
4419 pmap_tlb_flushID(pm); 4429 pmap_tlb_flushID(pm);
4420 pm->pm_remove_all = false; 4430 pm->pm_remove_all = false;
4421 } 4431 }
4422 4432
4423 /* 4433 /*
4424 * Drop reference count 4434 * Drop reference count
4425 */ 4435 */
4426 mutex_enter(pm->pm_lock); 4436 mutex_enter(pm->pm_lock);
4427 count = --pm->pm_obj.uo_refs; 4437 count = --pm->pm_obj.uo_refs;
4428 mutex_exit(pm->pm_lock); 4438 mutex_exit(pm->pm_lock);
4429 if (count > 0) { 4439 if (count > 0) {
4430 if (pmap_is_current(pm)) { 4440 if (pmap_is_current(pm)) {
4431 if (pm != pmap_kernel()) 4441 if (pm != pmap_kernel())
4432 pmap_use_l1(pm); 4442 pmap_use_l1(pm);
4433 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 4443 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
4434 } 4444 }
4435 return; 4445 return;
4436 } 4446 }
4437 4447
4438 /* 4448 /*
4439 * reference count is zero, free pmap resources and then free pmap. 4449 * reference count is zero, free pmap resources and then free pmap.
4440 */ 4450 */
4441 4451
 4452#ifndef ARM_HAS_VBAR
4442 if (vector_page < KERNEL_BASE) { 4453 if (vector_page < KERNEL_BASE) {
4443 KDASSERT(!pmap_is_current(pm)); 4454 KDASSERT(!pmap_is_current(pm));
4444 4455
4445 /* Remove the vector page mapping */ 4456 /* Remove the vector page mapping */
4446 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE); 4457 pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
4447 pmap_update(pm); 4458 pmap_update(pm);
4448 } 4459 }
 4460#endif
4449 4461
4450 LIST_REMOVE(pm, pm_list); 4462 LIST_REMOVE(pm, pm_list);
4451 4463
4452 pmap_free_l1(pm); 4464 pmap_free_l1(pm);
4453 4465
4454 if (pmap_recent_user == pm) 4466 if (pmap_recent_user == pm)
4455 pmap_recent_user = NULL; 4467 pmap_recent_user = NULL;
4456 4468
4457 uvm_obj_destroy(&pm->pm_obj, false); 4469 uvm_obj_destroy(&pm->pm_obj, false);
4458 mutex_destroy(&pm->pm_obj_lock); 4470 mutex_destroy(&pm->pm_obj_lock);
4459 pool_cache_put(&pmap_cache, pm); 4471 pool_cache_put(&pmap_cache, pm);
4460} 4472}
4461 4473
4462 4474
4463/* 4475/*
4464 * void pmap_reference(pmap_t pm) 4476 * void pmap_reference(pmap_t pm)
4465 * 4477 *
4466 * Add a reference to the specified pmap. 4478 * Add a reference to the specified pmap.
4467 */ 4479 */
4468void 4480void
4469pmap_reference(pmap_t pm) 4481pmap_reference(pmap_t pm)
4470{ 4482{
4471 4483
4472 if (pm == NULL) 4484 if (pm == NULL)
4473 return; 4485 return;
4474 4486
4475 pmap_use_l1(pm); 4487 pmap_use_l1(pm);
4476 4488
4477 mutex_enter(pm->pm_lock); 4489 mutex_enter(pm->pm_lock);
4478 pm->pm_obj.uo_refs++; 4490 pm->pm_obj.uo_refs++;
4479 mutex_exit(pm->pm_lock); 4491 mutex_exit(pm->pm_lock);
4480} 4492}
4481 4493
4482#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 4494#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
4483 4495
4484static struct evcnt pmap_prefer_nochange_ev = 4496static struct evcnt pmap_prefer_nochange_ev =
4485 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange"); 4497 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange");
4486static struct evcnt pmap_prefer_change_ev = 4498static struct evcnt pmap_prefer_change_ev =
4487 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change"); 4499 EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change");
4488 4500
4489EVCNT_ATTACH_STATIC(pmap_prefer_change_ev); 4501EVCNT_ATTACH_STATIC(pmap_prefer_change_ev);
4490EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev); 4502EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev);
4491 4503
4492void 4504void
4493pmap_prefer(vaddr_t hint, vaddr_t *vap, int td) 4505pmap_prefer(vaddr_t hint, vaddr_t *vap, int td)
4494{ 4506{
4495 vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1); 4507 vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1);
4496 vaddr_t va = *vap; 4508 vaddr_t va = *vap;
4497 vaddr_t diff = (hint - va) & mask; 4509 vaddr_t diff = (hint - va) & mask;
4498 if (diff == 0) { 4510 if (diff == 0) {
4499 pmap_prefer_nochange_ev.ev_count++; 4511 pmap_prefer_nochange_ev.ev_count++;
4500 } else { 4512 } else {
4501 pmap_prefer_change_ev.ev_count++; 4513 pmap_prefer_change_ev.ev_count++;
4502 if (__predict_false(td)) 4514 if (__predict_false(td))
4503 va -= mask + 1; 4515 va -= mask + 1;
4504 *vap = va + diff; 4516 *vap = va + diff;
4505 } 4517 }
4506} 4518}
4507#endif /* ARM_MMU_V6 | ARM_MMU_V7 */ 4519#endif /* ARM_MMU_V6 | ARM_MMU_V7 */
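
[Editor's note] pmap_prefer() nudges a proposed VA so that it shares a cache color with the hint, which is what keeps VIPT aliases consistent for the caller. A standalone rendition of the arithmetic, using an illustrative arm_cache_prefer_mask of 0x7000 rather than the real, CPU-probed value:

#include <stdio.h>

#define PAGE_SIZE	0x1000u
#define PREFER_MASK	0x7000u		/* illustrative; normally probed from the cache */

/* Adjust *vap so (*vap & mask) == (hint & mask); topdown searches move down. */
static void
prefer(unsigned hint, unsigned *vap, int topdown)
{
	unsigned mask = PREFER_MASK | (PAGE_SIZE - 1);
	unsigned va = *vap;
	unsigned diff = (hint - va) & mask;

	if (diff != 0) {
		if (topdown)
			va -= mask + 1;	/* stay at or below the original va */
		*vap = va + diff;
	}
}

int
main(void)
{
	unsigned va = 0x00010000;
	prefer(0x00003000, &va, 0);
	printf("adjusted va = 0x%08x\n", va);	/* 0x00013000: same color as the hint */
	return 0;
}
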
4508 4520
4509/* 4521/*
4510 * pmap_zero_page() 4522 * pmap_zero_page()
4511 *  4523 *
4512 * Zero a given physical page by mapping it at a page hook point. 4524 * Zero a given physical page by mapping it at a page hook point.
4513 * In doing the zero page op, the page we zero is mapped cacheable, since 4525 * In doing the zero page op, the page we zero is mapped cacheable, since
4514 * on StrongARM accesses to non-cached pages are non-burst, making writing 4526 * on StrongARM accesses to non-cached pages are non-burst, making writing
4515 * _any_ bulk data very slow. 4527 * _any_ bulk data very slow.
4516 */ 4528 */
4517#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 4529#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
4518void 4530void
4519pmap_zero_page_generic(paddr_t phys) 4531pmap_zero_page_generic(paddr_t phys)
4520{ 4532{
4521#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4533#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4522 struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 4534 struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
4523 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4535 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4524#endif 4536#endif
4525#if defined(PMAP_CACHE_VIPT) 4537#if defined(PMAP_CACHE_VIPT)
4526 /* Choose the last page color it had, if any */ 4538 /* Choose the last page color it had, if any */
4527 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 4539 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
4528#else 4540#else
4529 const vsize_t va_offset = 0; 4541 const vsize_t va_offset = 0;
4530#endif 4542#endif
4531#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 4543#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
4532 /* 4544 /*
4533 * Is this page mapped at its natural color? 4545 * Is this page mapped at its natural color?
4534 * If we have all of memory mapped, then just convert PA to VA. 4546 * If we have all of memory mapped, then just convert PA to VA.
4535 */ 4547 */
4536 const bool okcolor = va_offset == (phys & arm_cache_prefer_mask); 4548 const bool okcolor = va_offset == (phys & arm_cache_prefer_mask);
4537 const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start); 4549 const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);
4538#else 4550#else
4539 const bool okcolor = false; 4551 const bool okcolor = false;
4540 const vaddr_t vdstp = cdstp + va_offset; 4552 const vaddr_t vdstp = cdstp + va_offset;
4541#endif 4553#endif
4542 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT]; 4554 pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
4543 4555
4544 4556
4545#ifdef DEBUG 4557#ifdef DEBUG
4546 if (!SLIST_EMPTY(&md->pvh_list)) 4558 if (!SLIST_EMPTY(&md->pvh_list))
4547 panic("pmap_zero_page: page has mappings"); 4559 panic("pmap_zero_page: page has mappings");
4548#endif 4560#endif
4549 4561
4550 KDASSERT((phys & PGOFSET) == 0); 4562 KDASSERT((phys & PGOFSET) == 0);
4551 4563
4552 if (!okcolor) { 4564 if (!okcolor) {
4553 /* 4565 /*
4554 * Hook in the page, zero it, and purge the cache for that 4566 * Hook in the page, zero it, and purge the cache for that
4555 * zeroed page. Invalidate the TLB as needed. 4567 * zeroed page. Invalidate the TLB as needed.
4556 */ 4568 */
4557 *ptep = L2_S_PROTO | phys | 4569 *ptep = L2_S_PROTO | phys |
4558 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4570 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4559 PTE_SYNC(ptep); 4571 PTE_SYNC(ptep);
4560 cpu_tlb_flushD_SE(cdstp + va_offset); 4572 cpu_tlb_flushD_SE(cdstp + va_offset);
4561 cpu_cpwait(); 4573 cpu_cpwait();
4562#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) 4574#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
4563 /* 4575 /*
4564 * If we are direct-mapped and our color isn't ok, then before 4576 * If we are direct-mapped and our color isn't ok, then before
4565 * we bzero the page invalidate its contents from the cache and 4577 * we bzero the page invalidate its contents from the cache and
4566 * reset the color to its natural color. 4578 * reset the color to its natural color.
4567 */ 4579 */
4568 cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE); 4580 cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE);
4569 md->pvh_attrs &= ~arm_cache_prefer_mask; 4581 md->pvh_attrs &= ~arm_cache_prefer_mask;
4570 md->pvh_attrs |= (phys & arm_cache_prefer_mask); 4582 md->pvh_attrs |= (phys & arm_cache_prefer_mask);
4571#endif 4583#endif
4572 } 4584 }
4573 bzero_page(vdstp); 4585 bzero_page(vdstp);
4574 if (!okcolor) { 4586 if (!okcolor) {
4575 /* 4587 /*
4576 * Unmap the page. 4588 * Unmap the page.
4577 */ 4589 */
4578 *ptep = 0; 4590 *ptep = 0;
4579 PTE_SYNC(ptep); 4591 PTE_SYNC(ptep);
4580 cpu_tlb_flushD_SE(cdstp + va_offset); 4592 cpu_tlb_flushD_SE(cdstp + va_offset);
4581#ifdef PMAP_CACHE_VIVT 4593#ifdef PMAP_CACHE_VIVT
4582 cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE); 4594 cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
4583#endif 4595#endif
4584 } 4596 }
4585#ifdef PMAP_CACHE_VIPT 4597#ifdef PMAP_CACHE_VIPT
4586 /* 4598 /*
4587 * This page is now cache resident so it now has a page color. 4599 * This page is now cache resident so it now has a page color.
4588 * Any contents have been obliterated so clear the EXEC flag. 4600 * Any contents have been obliterated so clear the EXEC flag.
4589 */ 4601 */
4590 if (!pmap_is_page_colored_p(md)) { 4602 if (!pmap_is_page_colored_p(md)) {
4591 PMAPCOUNT(vac_color_new); 4603 PMAPCOUNT(vac_color_new);
4592 md->pvh_attrs |= PVF_COLORED; 4604 md->pvh_attrs |= PVF_COLORED;
4593 } 4605 }
4594 if (PV_IS_EXEC_P(md->pvh_attrs)) { 4606 if (PV_IS_EXEC_P(md->pvh_attrs)) {
4595 md->pvh_attrs &= ~PVF_EXEC; 4607 md->pvh_attrs &= ~PVF_EXEC;
4596 PMAPCOUNT(exec_discarded_zero); 4608 PMAPCOUNT(exec_discarded_zero);
4597 } 4609 }
4598 md->pvh_attrs |= PVF_DIRTY; 4610 md->pvh_attrs |= PVF_DIRTY;
4599#endif 4611#endif
4600} 4612}
4601#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ 4613#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
4602 4614
4603#if ARM_MMU_XSCALE == 1 4615#if ARM_MMU_XSCALE == 1
4604void 4616void
4605pmap_zero_page_xscale(paddr_t phys) 4617pmap_zero_page_xscale(paddr_t phys)
4606{ 4618{
4607#ifdef DEBUG 4619#ifdef DEBUG
4608 struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 4620 struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
4609 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4621 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4610 4622
4611 if (!SLIST_EMPTY(&md->pvh_list)) 4623 if (!SLIST_EMPTY(&md->pvh_list))
4612 panic("pmap_zero_page: page has mappings"); 4624 panic("pmap_zero_page: page has mappings");
4613#endif 4625#endif
4614 4626
4615 KDASSERT((phys & PGOFSET) == 0); 4627 KDASSERT((phys & PGOFSET) == 0);
4616 4628
4617 /* 4629 /*
4618 * Hook in the page, zero it, and purge the cache for that 4630 * Hook in the page, zero it, and purge the cache for that
4619 * zeroed page. Invalidate the TLB as needed. 4631 * zeroed page. Invalidate the TLB as needed.
4620 */ 4632 */
4621 *cdst_pte = L2_S_PROTO | phys | 4633 *cdst_pte = L2_S_PROTO | phys |
4622 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4634 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4623 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4635 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4624 PTE_SYNC(cdst_pte); 4636 PTE_SYNC(cdst_pte);
4625 cpu_tlb_flushD_SE(cdstp); 4637 cpu_tlb_flushD_SE(cdstp);
4626 cpu_cpwait(); 4638 cpu_cpwait();
4627 bzero_page(cdstp); 4639 bzero_page(cdstp);
4628 xscale_cache_clean_minidata(); 4640 xscale_cache_clean_minidata();
4629} 4641}
4630#endif /* ARM_MMU_XSCALE == 1 */ 4642#endif /* ARM_MMU_XSCALE == 1 */
4631 4643
4632/* pmap_pageidlezero() 4644/* pmap_pageidlezero()
4633 * 4645 *
4634 * The same as above, except that we assume that the page is not 4646 * The same as above, except that we assume that the page is not
4635 * mapped. This means we never have to flush the cache first. Called 4647 * mapped. This means we never have to flush the cache first. Called
4636 * from the idle loop. 4648 * from the idle loop.
4637 */ 4649 */
4638bool 4650bool
4639pmap_pageidlezero(paddr_t phys) 4651pmap_pageidlezero(paddr_t phys)
4640{ 4652{
4641 unsigned int i; 4653 unsigned int i;
4642 int *ptr; 4654 int *ptr;
4643 bool rv = true; 4655 bool rv = true;
4644#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4656#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4645 struct vm_page * const pg = PHYS_TO_VM_PAGE(phys); 4657 struct vm_page * const pg = PHYS_TO_VM_PAGE(phys);
4646 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4658 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4647#endif 4659#endif
4648#ifdef PMAP_CACHE_VIPT 4660#ifdef PMAP_CACHE_VIPT
4649 /* Choose the last page color it had, if any */ 4661 /* Choose the last page color it had, if any */
4650 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask; 4662 const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
4651#else 4663#else
4652 const vsize_t va_offset = 0; 4664 const vsize_t va_offset = 0;
4653#endif 4665#endif
4654 pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT]; 4666 pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT];
4655 4667
4656 4668
4657#ifdef DEBUG 4669#ifdef DEBUG
4658 if (!SLIST_EMPTY(&md->pvh_list)) 4670 if (!SLIST_EMPTY(&md->pvh_list))
4659 panic("pmap_pageidlezero: page has mappings"); 4671 panic("pmap_pageidlezero: page has mappings");
4660#endif 4672#endif
4661 4673
4662 KDASSERT((phys & PGOFSET) == 0); 4674 KDASSERT((phys & PGOFSET) == 0);
4663 4675
4664 /* 4676 /*
4665 * Hook in the page, zero it, and purge the cache for that 4677 * Hook in the page, zero it, and purge the cache for that
4666 * zeroed page. Invalidate the TLB as needed. 4678 * zeroed page. Invalidate the TLB as needed.
4667 */ 4679 */
4668 *ptep = L2_S_PROTO | phys | 4680 *ptep = L2_S_PROTO | phys |
4669 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4681 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4670 PTE_SYNC(ptep); 4682 PTE_SYNC(ptep);
4671 cpu_tlb_flushD_SE(cdstp + va_offset); 4683 cpu_tlb_flushD_SE(cdstp + va_offset);
4672 cpu_cpwait(); 4684 cpu_cpwait();
4673 4685
4674 for (i = 0, ptr = (int *)(cdstp + va_offset); 4686 for (i = 0, ptr = (int *)(cdstp + va_offset);
4675 i < (PAGE_SIZE / sizeof(int)); i++) { 4687 i < (PAGE_SIZE / sizeof(int)); i++) {
4676 if (sched_curcpu_runnable_p() != 0) { 4688 if (sched_curcpu_runnable_p() != 0) {
4677 /* 4689 /*
4678 * A process has become ready. Abort now, 4690 * A process has become ready. Abort now,
4679 * so we don't keep it waiting while we 4691 * so we don't keep it waiting while we
4680 * do slow memory access to finish this 4692 * do slow memory access to finish this
4681 * page. 4693 * page.
4682 */ 4694 */
4683 rv = false; 4695 rv = false;
4684 break; 4696 break;
4685 } 4697 }
4686 *ptr++ = 0; 4698 *ptr++ = 0;
4687 } 4699 }
4688 4700
4689#ifdef PMAP_CACHE_VIVT 4701#ifdef PMAP_CACHE_VIVT
4690 if (rv) 4702 if (rv)
4691 /*  4703 /*
4692 * if we aborted we'll rezero this page again later so don't 4704 * if we aborted we'll rezero this page again later so don't
4693 * purge it unless we finished it 4705 * purge it unless we finished it
4694 */ 4706 */
4695 cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4707 cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
4696#elif defined(PMAP_CACHE_VIPT) 4708#elif defined(PMAP_CACHE_VIPT)
4697 /* 4709 /*
4698 * This page is now cache resident so it now has a page color. 4710 * This page is now cache resident so it now has a page color.
4699 * Any contents have been obliterated so clear the EXEC flag. 4711 * Any contents have been obliterated so clear the EXEC flag.
4700 */ 4712 */
4701 if (!pmap_is_page_colored_p(md)) { 4713 if (!pmap_is_page_colored_p(md)) {
4702 PMAPCOUNT(vac_color_new); 4714 PMAPCOUNT(vac_color_new);
4703 md->pvh_attrs |= PVF_COLORED; 4715 md->pvh_attrs |= PVF_COLORED;
4704 } 4716 }
4705 if (PV_IS_EXEC_P(md->pvh_attrs)) { 4717 if (PV_IS_EXEC_P(md->pvh_attrs)) {
4706 md->pvh_attrs &= ~PVF_EXEC; 4718 md->pvh_attrs &= ~PVF_EXEC;
4707 PMAPCOUNT(exec_discarded_zero); 4719 PMAPCOUNT(exec_discarded_zero);
4708 } 4720 }
4709#endif 4721#endif
4710 /* 4722 /*
4711 * Unmap the page. 4723 * Unmap the page.
4712 */ 4724 */
4713 *ptep = 0; 4725 *ptep = 0;
4714 PTE_SYNC(ptep); 4726 PTE_SYNC(ptep);
4715 cpu_tlb_flushD_SE(cdstp + va_offset); 4727 cpu_tlb_flushD_SE(cdstp + va_offset);
4716 4728
4717 return (rv); 4729 return (rv);
4718} 4730}
4719  4731
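
The word-at-a-time loop in pmap_pageidlezero() above exists so the idle loop can give up the page as soon as something becomes runnable. A minimal user-space sketch of the same pattern follows; PAGE_SIZE and the work_pending() stub (standing in for sched_curcpu_runnable_p()) are assumptions for illustration, not taken from the kernel sources.

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096                  /* assumed 4KB pages */

    /* Stub for sched_curcpu_runnable_p(): true when the idle loop should
     * stop zeroing and go run real work instead. */
    static bool
    work_pending(void)
    {
            return false;
    }

    /*
     * Zero 'page' one word at a time, bailing out early if work shows up.
     * Returns true only when the whole page was zeroed, mirroring the
     * contract of pmap_pageidlezero(): an aborted page must be re-zeroed
     * later by the caller.
     */
    static bool
    idlezero(int *page)
    {
            for (size_t i = 0; i < PAGE_SIZE / sizeof(int); i++) {
                    if (work_pending())
                            return false;
                    page[i] = 0;
            }
            return true;
    }

    int
    main(void)
    {
            static int page[PAGE_SIZE / sizeof(int)];
            return idlezero(page) ? 0 : 1;
    }
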
4720/* 4732/*
4721 * pmap_copy_page() 4733 * pmap_copy_page()
4722 * 4734 *
4723 * Copy one physical page into another, by mapping the pages into 4735 * Copy one physical page into another, by mapping the pages into
4724 * hook points. The same comment regarding cachability as in 4736 * hook points. The same comment regarding cachability as in
4725 * pmap_zero_page also applies here. 4737 * pmap_zero_page also applies here.
4726 */ 4738 */
4727#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 4739#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
4728void 4740void
4729pmap_copy_page_generic(paddr_t src, paddr_t dst) 4741pmap_copy_page_generic(paddr_t src, paddr_t dst)
4730{ 4742{
4731 struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src); 4743 struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src);
4732 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 4744 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
4733#if defined(PMAP_CACHE_VIPT) || defined(DEBUG) 4745#if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
4734 struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst); 4746 struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst);
4735 struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg); 4747 struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg);
4736#endif 4748#endif
4737#ifdef PMAP_CACHE_VIPT 4749#ifdef PMAP_CACHE_VIPT
4738 const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask; 4750 const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask;
4739 const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask; 4751 const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask;
4740#else 4752#else
4741 const vsize_t src_va_offset = 0; 4753 const vsize_t src_va_offset = 0;
4742 const vsize_t dst_va_offset = 0; 4754 const vsize_t dst_va_offset = 0;
4743#endif 4755#endif
4744#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 4756#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
4745 /* 4757 /*
4746 * Is this page mapped at its natural color? 4758 * Is this page mapped at its natural color?
4747 * If we have all of memory mapped, then just convert PA to VA. 4759 * If we have all of memory mapped, then just convert PA to VA.
4748 */ 4760 */
4749 const bool src_okcolor = src_va_offset == (src & arm_cache_prefer_mask); 4761 const bool src_okcolor = src_va_offset == (src & arm_cache_prefer_mask);
4750 const bool dst_okcolor = dst_va_offset == (dst & arm_cache_prefer_mask); 4762 const bool dst_okcolor = dst_va_offset == (dst & arm_cache_prefer_mask);
4751 const vaddr_t vsrcp = src_okcolor 4763 const vaddr_t vsrcp = src_okcolor
4752 ? KERNEL_BASE + (src - physical_start) 4764 ? KERNEL_BASE + (src - physical_start)
4753 : csrcp + src_va_offset; 4765 : csrcp + src_va_offset;
4754 const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start); 4766 const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
4755#else 4767#else
4756 const bool src_okcolor = false; 4768 const bool src_okcolor = false;
4757 const bool dst_okcolor = false; 4769 const bool dst_okcolor = false;
4758 const vaddr_t vsrcp = csrcp + src_va_offset; 4770 const vaddr_t vsrcp = csrcp + src_va_offset;
4759 const vaddr_t vdstp = cdstp + dst_va_offset; 4771 const vaddr_t vdstp = cdstp + dst_va_offset;
4760#endif 4772#endif
4761 pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT]; 4773 pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
4762 pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT]; 4774 pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
4763 4775
4764#ifdef DEBUG 4776#ifdef DEBUG
4765 if (!SLIST_EMPTY(&dst_md->pvh_list)) 4777 if (!SLIST_EMPTY(&dst_md->pvh_list))
4766 panic("pmap_copy_page: dst page has mappings"); 4778 panic("pmap_copy_page: dst page has mappings");
4767#endif 4779#endif
4768 4780
4769#ifdef PMAP_CACHE_VIPT 4781#ifdef PMAP_CACHE_VIPT
4770 KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC)); 4782 KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC));
4771#endif 4783#endif
4772 KDASSERT((src & PGOFSET) == 0); 4784 KDASSERT((src & PGOFSET) == 0);
4773 KDASSERT((dst & PGOFSET) == 0); 4785 KDASSERT((dst & PGOFSET) == 0);
4774 4786
4775 /* 4787 /*
4776 * Clean the source page. Hold the source page's lock for 4788 * Clean the source page. Hold the source page's lock for
4777 * the duration of the copy so that no other mappings can 4789 * the duration of the copy so that no other mappings can
4778 * be created while we have a potentially aliased mapping. 4790 * be created while we have a potentially aliased mapping.
4779 */ 4791 */
4780#ifdef MULTIPROCESSOR 4792#ifdef MULTIPROCESSOR
4781 KASSERT(uvm_page_locked_p(src_pg)); 4793 KASSERT(uvm_page_locked_p(src_pg));
4782#endif 4794#endif
4783#ifdef PMAP_CACHE_VIVT 4795#ifdef PMAP_CACHE_VIVT
4784 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); 4796 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
4785#endif 4797#endif
4786 4798
4787 /* 4799 /*
4788 * Map the pages into the page hook points, copy them, and purge 4800 * Map the pages into the page hook points, copy them, and purge
4789 * the cache for the appropriate page. Invalidate the TLB 4801 * the cache for the appropriate page. Invalidate the TLB
4790 * as required. 4802 * as required.
4791 */ 4803 */
4792 if (!src_okcolor) { 4804 if (!src_okcolor) {
4793 *src_ptep = L2_S_PROTO 4805 *src_ptep = L2_S_PROTO
4794 | src 4806 | src
4795#ifdef PMAP_CACHE_VIPT 4807#ifdef PMAP_CACHE_VIPT
4796 | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode) 4808 | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
4797#endif 4809#endif
4798#ifdef PMAP_CACHE_VIVT 4810#ifdef PMAP_CACHE_VIVT
4799 | pte_l2_s_cache_mode 4811 | pte_l2_s_cache_mode
4800#endif 4812#endif
4801 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ); 4813 | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
4802 PTE_SYNC(src_ptep); 4814 PTE_SYNC(src_ptep);
4803 cpu_tlb_flushD_SE(csrcp + src_va_offset); 4815 cpu_tlb_flushD_SE(csrcp + src_va_offset);
4804 cpu_cpwait(); 4816 cpu_cpwait();
4805 } 4817 }
4806 if (!dst_okcolor) { 4818 if (!dst_okcolor) {
4807 *dst_ptep = L2_S_PROTO | dst | 4819 *dst_ptep = L2_S_PROTO | dst |
4808 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4820 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
4809 PTE_SYNC(dst_ptep); 4821 PTE_SYNC(dst_ptep);
4810 cpu_tlb_flushD_SE(cdstp + dst_va_offset); 4822 cpu_tlb_flushD_SE(cdstp + dst_va_offset);
4811 cpu_cpwait(); 4823 cpu_cpwait();
4812#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT) 4824#if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
4813 /* 4825 /*
4814 * If we are direct-mapped and our color isn't ok, then before 4826 * If we are direct-mapped and our color isn't ok, then before
4815 * we bcopy to the new page invalidate its contents from the 4827 * we bcopy to the new page invalidate its contents from the
4816 * cache and reset its color to its natural color. 4828 * cache and reset its color to its natural color.
4817 */ 4829 */
4818 cpu_dcache_inv_range(cdstp + dst_va_offset, PAGE_SIZE); 4830 cpu_dcache_inv_range(cdstp + dst_va_offset, PAGE_SIZE);
4819 dst_md->pvh_attrs &= ~arm_cache_prefer_mask; 4831 dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
4820 dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask); 4832 dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
4821#endif 4833#endif
4822 } 4834 }
4823 bcopy_page(vsrcp, vdstp); 4835 bcopy_page(vsrcp, vdstp);
4824#ifdef PMAP_CACHE_VIVT 4836#ifdef PMAP_CACHE_VIVT
4825 cpu_dcache_inv_range(vsrcp, PAGE_SIZE); 4837 cpu_dcache_inv_range(vsrcp, PAGE_SIZE);
4826 cpu_dcache_wbinv_range(vdstp, PAGE_SIZE); 4838 cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
4827#endif 4839#endif
4828 /* 4840 /*
4829 * Unmap the pages. 4841 * Unmap the pages.
4830 */ 4842 */
4831 if (!src_okcolor) { 4843 if (!src_okcolor) {
4832 *src_ptep = 0; 4844 *src_ptep = 0;
4833 PTE_SYNC(src_ptep); 4845 PTE_SYNC(src_ptep);
4834 cpu_tlb_flushD_SE(csrcp + src_va_offset); 4846 cpu_tlb_flushD_SE(csrcp + src_va_offset);
4835 cpu_cpwait(); 4847 cpu_cpwait();
4836 } 4848 }
4837 if (!dst_okcolor) { 4849 if (!dst_okcolor) {
4838 *dst_ptep = 0; 4850 *dst_ptep = 0;
4839 PTE_SYNC(dst_ptep); 4851 PTE_SYNC(dst_ptep);
4840 cpu_tlb_flushD_SE(cdstp + dst_va_offset); 4852 cpu_tlb_flushD_SE(cdstp + dst_va_offset);
4841 cpu_cpwait(); 4853 cpu_cpwait();
4842 } 4854 }
4843#ifdef PMAP_CACHE_VIPT 4855#ifdef PMAP_CACHE_VIPT
4844 /* 4856 /*
4845 * Now that the destination page is in the cache, mark it as colored. 4857 * Now that the destination page is in the cache, mark it as colored.
4846 * If this was an exec page, discard it. 4858 * If this was an exec page, discard it.
4847 */ 4859 */
4848 if (!pmap_is_page_colored_p(dst_md)) { 4860 if (!pmap_is_page_colored_p(dst_md)) {
4849 PMAPCOUNT(vac_color_new); 4861 PMAPCOUNT(vac_color_new);
4850 dst_md->pvh_attrs |= PVF_COLORED; 4862 dst_md->pvh_attrs |= PVF_COLORED;
4851 } 4863 }
4852 if (PV_IS_EXEC_P(dst_md->pvh_attrs)) { 4864 if (PV_IS_EXEC_P(dst_md->pvh_attrs)) {
4853 dst_md->pvh_attrs &= ~PVF_EXEC; 4865 dst_md->pvh_attrs &= ~PVF_EXEC;
4854 PMAPCOUNT(exec_discarded_copy); 4866 PMAPCOUNT(exec_discarded_copy);
4855 } 4867 }
4856 dst_md->pvh_attrs |= PVF_DIRTY; 4868 dst_md->pvh_attrs |= PVF_DIRTY;
4857#endif 4869#endif
4858} 4870}
4859#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ 4871#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
4860 4872
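
pmap_copy_page_generic() above only uses the direct map for a page whose recorded cache color matches the color its PA-based direct-map address would have; otherwise it falls back to a hook mapping at the matching color. A standalone sketch of that test follows; the 0x3000 prefer mask (a 16KB/4-way VIPT cache) and the values in main() are illustrative assumptions, not values from the diff.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define ARM_CACHE_PREFER_MASK   0x3000u /* assumed: 16KB/4-way VIPT */

    /*
     * Mirrors the src_okcolor/dst_okcolor test above: the direct map
     * (KERNEL_BASE + (pa - physical_start)) is alias-safe only when the
     * color recorded for the page equals the natural color of its PA.
     */
    static bool
    okcolor(uint32_t recorded_color, uint32_t pa)
    {
            return (recorded_color & ARM_CACHE_PREFER_MASK) ==
                (pa & ARM_CACHE_PREFER_MASK);
    }

    int
    main(void)
    {
            assert(okcolor(0x2000, 0x80402000));    /* same color: direct map ok */
            assert(!okcolor(0x1000, 0x80402000));   /* mismatch: use hook mapping */
            return 0;
    }
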
4861#if ARM_MMU_XSCALE == 1 4873#if ARM_MMU_XSCALE == 1
4862void 4874void
4863pmap_copy_page_xscale(paddr_t src, paddr_t dst) 4875pmap_copy_page_xscale(paddr_t src, paddr_t dst)
4864{ 4876{
4865 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4877 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
4866 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg); 4878 struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
4867#ifdef DEBUG 4879#ifdef DEBUG
4868 struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst)); 4880 struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst));
4869 4881
4870 if (!SLIST_EMPTY(&dst_md->pvh_list)) 4882 if (!SLIST_EMPTY(&dst_md->pvh_list))
4871 panic("pmap_copy_page: dst page has mappings"); 4883 panic("pmap_copy_page: dst page has mappings");
4872#endif 4884#endif
4873 4885
4874 KDASSERT((src & PGOFSET) == 0); 4886 KDASSERT((src & PGOFSET) == 0);
4875 KDASSERT((dst & PGOFSET) == 0); 4887 KDASSERT((dst & PGOFSET) == 0);
4876 4888
4877 /* 4889 /*
4878 * Clean the source page. Hold the source page's lock for 4890 * Clean the source page. Hold the source page's lock for
4879 * the duration of the copy so that no other mappings can 4891 * the duration of the copy so that no other mappings can
4880 * be created while we have a potentially aliased mapping. 4892 * be created while we have a potentially aliased mapping.
4881 */ 4893 */
4882#ifdef MULTIPROCESSOR 4894#ifdef MULTIPROCESSOR
4883 KASSERT(uvm_page_locked_p(src_pg)); 4895 KASSERT(uvm_page_locked_p(src_pg));
4884#endif 4896#endif
4885#ifdef PMAP_CACHE_VIVT 4897#ifdef PMAP_CACHE_VIVT
4886 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true); 4898 (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
4887#endif 4899#endif
4888 4900
4889 /* 4901 /*
4890 * Map the pages into the page hook points, copy them, and purge 4902 * Map the pages into the page hook points, copy them, and purge
4891 * the cache for the appropriate page. Invalidate the TLB 4903 * the cache for the appropriate page. Invalidate the TLB
4892 * as required. 4904 * as required.
4893 */ 4905 */
4894 *csrc_pte = L2_S_PROTO | src | 4906 *csrc_pte = L2_S_PROTO | src |
4895 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 4907 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4896 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4908 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4897 PTE_SYNC(csrc_pte); 4909 PTE_SYNC(csrc_pte);
4898 *cdst_pte = L2_S_PROTO | dst | 4910 *cdst_pte = L2_S_PROTO | dst |
4899 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4911 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4900 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */ 4912 L2_C | L2_XS_T_TEX(TEX_XSCALE_X); /* mini-data */
4901 PTE_SYNC(cdst_pte); 4913 PTE_SYNC(cdst_pte);
4902 cpu_tlb_flushD_SE(csrcp); 4914 cpu_tlb_flushD_SE(csrcp);
4903 cpu_tlb_flushD_SE(cdstp); 4915 cpu_tlb_flushD_SE(cdstp);
4904 cpu_cpwait(); 4916 cpu_cpwait();
4905 bcopy_page(csrcp, cdstp); 4917 bcopy_page(csrcp, cdstp);
4906 xscale_cache_clean_minidata(); 4918 xscale_cache_clean_minidata();
4907} 4919}
4908#endif /* ARM_MMU_XSCALE == 1 */ 4920#endif /* ARM_MMU_XSCALE == 1 */
4909 4921
4910/* 4922/*
4911 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end) 4923 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
4912 * 4924 *
4913 * Return the start and end addresses of the kernel's virtual space. 4925 * Return the start and end addresses of the kernel's virtual space.
4914 * These values are setup in pmap_bootstrap and are updated as pages 4926 * These values are setup in pmap_bootstrap and are updated as pages
4915 * are allocated. 4927 * are allocated.
4916 */ 4928 */
4917void 4929void
4918pmap_virtual_space(vaddr_t *start, vaddr_t *end) 4930pmap_virtual_space(vaddr_t *start, vaddr_t *end)
4919{ 4931{
4920 *start = virtual_avail; 4932 *start = virtual_avail;
4921 *end = virtual_end; 4933 *end = virtual_end;
4922} 4934}
4923 4935
4924/* 4936/*
4925 * Helper function for pmap_grow_l2_bucket() 4937 * Helper function for pmap_grow_l2_bucket()
4926 */ 4938 */
4927static inline int 4939static inline int
4928pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap) 4940pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
4929{ 4941{
4930 struct l2_bucket *l2b; 4942 struct l2_bucket *l2b;
4931 pt_entry_t *ptep; 4943 pt_entry_t *ptep;
4932 paddr_t pa; 4944 paddr_t pa;
4933 4945
4934 if (uvm.page_init_done == false) { 4946 if (uvm.page_init_done == false) {
4935#ifdef PMAP_STEAL_MEMORY 4947#ifdef PMAP_STEAL_MEMORY
4936 pv_addr_t pv; 4948 pv_addr_t pv;
4937 pmap_boot_pagealloc(PAGE_SIZE, 4949 pmap_boot_pagealloc(PAGE_SIZE,
4938#ifdef PMAP_CACHE_VIPT 4950#ifdef PMAP_CACHE_VIPT
4939 arm_cache_prefer_mask, 4951 arm_cache_prefer_mask,
4940 va & arm_cache_prefer_mask, 4952 va & arm_cache_prefer_mask,
4941#else 4953#else
4942 0, 0, 4954 0, 0,
4943#endif 4955#endif
4944 &pv); 4956 &pv);
4945 pa = pv.pv_pa; 4957 pa = pv.pv_pa;
4946#else 4958#else
4947 if (uvm_page_physget(&pa) == false) 4959 if (uvm_page_physget(&pa) == false)
4948 return (1); 4960 return (1);
4949#endif /* PMAP_STEAL_MEMORY */ 4961#endif /* PMAP_STEAL_MEMORY */
4950 } else { 4962 } else {
4951 struct vm_page *pg; 4963 struct vm_page *pg;
4952 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE); 4964 pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
4953 if (pg == NULL) 4965 if (pg == NULL)
4954 return (1); 4966 return (1);
4955 pa = VM_PAGE_TO_PHYS(pg); 4967 pa = VM_PAGE_TO_PHYS(pg);
4956#ifdef PMAP_CACHE_VIPT 4968#ifdef PMAP_CACHE_VIPT
4957#ifdef DIAGNOSTIC 4969#ifdef DIAGNOSTIC
4958 struct vm_page_md *md = VM_PAGE_TO_MD(pg); 4970 struct vm_page_md *md = VM_PAGE_TO_MD(pg);
4959#endif 4971#endif
4960 /* 4972 /*
4961 * This new page must not have any mappings. Enter it via 4973 * This new page must not have any mappings. Enter it via
4962 * pmap_kenter_pa and let that routine do the hard work. 4974 * pmap_kenter_pa and let that routine do the hard work.
4963 */ 4975 */
4964 KASSERT(SLIST_EMPTY(&md->pvh_list)); 4976 KASSERT(SLIST_EMPTY(&md->pvh_list));
4965 pmap_kenter_pa(va, pa, 4977 pmap_kenter_pa(va, pa,
4966 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 4978 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
4967#endif 4979#endif
4968 } 4980 }
4969 4981
4970 if (pap) 4982 if (pap)
4971 *pap = pa; 4983 *pap = pa;
4972 4984
4973 PMAPCOUNT(pt_mappings); 4985 PMAPCOUNT(pt_mappings);
4974 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4986 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
4975 KDASSERT(l2b != NULL); 4987 KDASSERT(l2b != NULL);
4976 4988
4977 ptep = &l2b->l2b_kva[l2pte_index(va)]; 4989 ptep = &l2b->l2b_kva[l2pte_index(va)];
4978 *ptep = L2_S_PROTO | pa | cache_mode | 4990 *ptep = L2_S_PROTO | pa | cache_mode |
4979 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 4991 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
4980 PTE_SYNC(ptep); 4992 PTE_SYNC(ptep);
4981 memset((void *)va, 0, PAGE_SIZE); 4993 memset((void *)va, 0, PAGE_SIZE);
4982 return (0); 4994 return (0);
4983} 4995}
4984 4996
4985/* 4997/*
4986 * This is the same as pmap_alloc_l2_bucket(), except that it is only 4998 * This is the same as pmap_alloc_l2_bucket(), except that it is only
4987 * used by pmap_growkernel(). 4999 * used by pmap_growkernel().
4988 */ 5000 */
4989static inline struct l2_bucket * 5001static inline struct l2_bucket *
4990pmap_grow_l2_bucket(pmap_t pm, vaddr_t va) 5002pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
4991{ 5003{
4992 struct l2_dtable *l2; 5004 struct l2_dtable *l2;
4993 struct l2_bucket *l2b; 5005 struct l2_bucket *l2b;
4994 u_short l1idx; 5006 u_short l1idx;
4995 vaddr_t nva; 5007 vaddr_t nva;
4996 5008
4997 l1idx = L1_IDX(va); 5009 l1idx = L1_IDX(va);
4998 5010
4999 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 5011 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
5000 /* 5012 /*
5001 * No mapping at this address, as there is 5013 * No mapping at this address, as there is
5002 * no entry in the L1 table. 5014 * no entry in the L1 table.
5003 * Need to allocate a new l2_dtable. 5015 * Need to allocate a new l2_dtable.
5004 */ 5016 */
5005 nva = pmap_kernel_l2dtable_kva; 5017 nva = pmap_kernel_l2dtable_kva;
5006 if ((nva & PGOFSET) == 0) { 5018 if ((nva & PGOFSET) == 0) {
5007 /* 5019 /*
5008 * Need to allocate a backing page 5020 * Need to allocate a backing page
5009 */ 5021 */
5010 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 5022 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
5011 return (NULL); 5023 return (NULL);
5012 } 5024 }
5013 5025
5014 l2 = (struct l2_dtable *)nva; 5026 l2 = (struct l2_dtable *)nva;
5015 nva += sizeof(struct l2_dtable); 5027 nva += sizeof(struct l2_dtable);
5016 5028
5017 if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) { 5029 if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
5018 /* 5030 /*
5019 * The new l2_dtable straddles a page boundary. 5031 * The new l2_dtable straddles a page boundary.
5020 * Map in another page to cover it. 5032 * Map in another page to cover it.
5021 */ 5033 */
5022 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 5034 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
5023 return (NULL); 5035 return (NULL);
5024 } 5036 }
5025 5037
5026 pmap_kernel_l2dtable_kva = nva; 5038 pmap_kernel_l2dtable_kva = nva;
5027 5039
5028 /* 5040 /*
5029 * Link it into the parent pmap 5041 * Link it into the parent pmap
5030 */ 5042 */
5031 pm->pm_l2[L2_IDX(l1idx)] = l2; 5043 pm->pm_l2[L2_IDX(l1idx)] = l2;
5032 } 5044 }
5033 5045
5034 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 5046 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
5035 5047
5036 /* 5048 /*
5037 * Fetch pointer to the L2 page table associated with the address. 5049 * Fetch pointer to the L2 page table associated with the address.
5038 */ 5050 */
5039 if (l2b->l2b_kva == NULL) { 5051 if (l2b->l2b_kva == NULL) {
5040 pt_entry_t *ptep; 5052 pt_entry_t *ptep;
5041 5053
5042 /* 5054 /*
5043 * No L2 page table has been allocated. Chances are, this 5055 * No L2 page table has been allocated. Chances are, this
5044 * is because we just allocated the l2_dtable, above. 5056 * is because we just allocated the l2_dtable, above.
5045 */ 5057 */
5046 nva = pmap_kernel_l2ptp_kva; 5058 nva = pmap_kernel_l2ptp_kva;
5047 ptep = (pt_entry_t *)nva; 5059 ptep = (pt_entry_t *)nva;
5048 if ((nva & PGOFSET) == 0) { 5060 if ((nva & PGOFSET) == 0) {
5049 /* 5061 /*
5050 * Need to allocate a backing page 5062 * Need to allocate a backing page
5051 */ 5063 */
5052 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, 5064 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
5053 &pmap_kernel_l2ptp_phys)) 5065 &pmap_kernel_l2ptp_phys))
5054 return (NULL); 5066 return (NULL);
5055 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 5067 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
5056 } 5068 }
5057 5069
5058 l2->l2_occupancy++; 5070 l2->l2_occupancy++;
5059 l2b->l2b_kva = ptep; 5071 l2b->l2b_kva = ptep;
5060 l2b->l2b_l1idx = l1idx; 5072 l2b->l2b_l1idx = l1idx;
5061 l2b->l2b_phys = pmap_kernel_l2ptp_phys; 5073 l2b->l2b_phys = pmap_kernel_l2ptp_phys;
5062 5074
5063 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 5075 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
5064 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 5076 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
5065 } 5077 }
5066 5078
5067 return (l2b); 5079 return (l2b);
5068} 5080}
5069 5081
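
pmap_grow_l2_bucket() above carves l2_dtable structures out of a growing KVA region and must map a fresh backing page whenever an allocation reaches into one. The wrap test (nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET) works because the end offset within a page can only be smaller than the start offset if a page boundary was reached. A standalone check of that arithmetic, assuming 4KB pages and objects smaller than a page:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PGOFSET 0xfffu          /* assumed 4KB pages */

    /*
     * True when [start, start + size) reaches or crosses the next page
     * boundary; mirrors the offset-wrap test in pmap_grow_l2_bucket().
     * Only valid for size < page size, which holds for an l2_dtable.
     */
    static bool
    straddles_page(uintptr_t start, size_t size)
    {
            uintptr_t end = start + size;
            return (end & PGOFSET) < (start & PGOFSET);
    }

    int
    main(void)
    {
            assert(!straddles_page(0x1000, 0x100)); /* fits in one page */
            assert(straddles_page(0x1f80, 0x100));  /* spills into the next */
            return 0;
    }
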
5070vaddr_t 5082vaddr_t
5071pmap_growkernel(vaddr_t maxkvaddr) 5083pmap_growkernel(vaddr_t maxkvaddr)
5072{ 5084{
5073 pmap_t kpm = pmap_kernel(); 5085 pmap_t kpm = pmap_kernel();
5074 struct l1_ttable *l1; 5086 struct l1_ttable *l1;
5075 struct l2_bucket *l2b; 5087 struct l2_bucket *l2b;
5076 pd_entry_t *pl1pd; 5088 pd_entry_t *pl1pd;
5077 int s; 5089 int s;
5078 5090
5079 if (maxkvaddr <= pmap_curmaxkvaddr) 5091 if (maxkvaddr <= pmap_curmaxkvaddr)
5080 goto out; /* we are OK */ 5092 goto out; /* we are OK */
5081 5093
5082 NPDEBUG(PDB_GROWKERN, 5094 NPDEBUG(PDB_GROWKERN,
5083 printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n", 5095 printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
5084 pmap_curmaxkvaddr, maxkvaddr)); 5096 pmap_curmaxkvaddr, maxkvaddr));
5085 5097
5086 KDASSERT(maxkvaddr <= virtual_end); 5098 KDASSERT(maxkvaddr <= virtual_end);
5087 5099
5088 /* 5100 /*
5089 * whoops! we need to add kernel PTPs 5101 * whoops! we need to add kernel PTPs
5090 */ 5102 */
5091 5103
5092 s = splhigh(); /* to be safe */ 5104 s = splhigh(); /* to be safe */
5093 mutex_enter(kpm->pm_lock); 5105 mutex_enter(kpm->pm_lock);
5094 5106
5095 /* Map 1MB at a time */ 5107 /* Map 1MB at a time */
5096 for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) { 5108 for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
5097 5109
5098 l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 5110 l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
5099 KDASSERT(l2b != NULL); 5111 KDASSERT(l2b != NULL);
5100 5112
5101 /* Distribute new L1 entry to all other L1s */ 5113 /* Distribute new L1 entry to all other L1s */
5102 SLIST_FOREACH(l1, &l1_list, l1_link) { 5114 SLIST_FOREACH(l1, &l1_list, l1_link) {
5103 pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)]; 5115 pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
5104 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | 5116 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
5105 L1_C_PROTO; 5117 L1_C_PROTO;
5106 PTE_SYNC(pl1pd); 5118 PTE_SYNC(pl1pd);
5107 } 5119 }
5108 } 5120 }
5109 5121
5110 /* 5122 /*
5111 * flush out the cache, expensive but growkernel will happen so 5123 * flush out the cache, expensive but growkernel will happen so
5112 * rarely 5124 * rarely
5113 */ 5125 */
5114 cpu_dcache_wbinv_all(); 5126 cpu_dcache_wbinv_all();
5115 cpu_tlb_flushD(); 5127 cpu_tlb_flushD();
5116 cpu_cpwait(); 5128 cpu_cpwait();
5117 5129
5118 mutex_exit(kpm->pm_lock); 5130 mutex_exit(kpm->pm_lock);
5119 splx(s); 5131 splx(s);
5120 5132
5121out: 5133out:
5122 return (pmap_curmaxkvaddr); 5134 return (pmap_curmaxkvaddr);
5123} 5135}
5124 5136
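
pmap_growkernel() above raises the kernel VA ceiling in whole L1 sections: each pass wires one L2 bucket and copies the new L1 entry into every L1 table on l1_list. The step count is just a 1MB ceiling division; the sketch below shows that arithmetic with made-up addresses (an L1_S_SIZE of 1MB is assumed, as on arm32).

    #include <stdio.h>

    #define L1_S_SIZE   0x00100000UL        /* 1MB covered by one L1 entry */

    /* Number of 1MB steps the growkernel loop takes to raise 'cur' to at
     * least 'max'; each step allocates/wires one kernel L2 bucket. */
    static unsigned long
    sections_needed(unsigned long cur, unsigned long max)
    {
            if (max <= cur)
                    return 0;               /* already covered */
            return (max - cur + L1_S_SIZE - 1) / L1_S_SIZE;
    }

    int
    main(void)
    {
            /* illustrative addresses only */
            printf("%lu\n", sections_needed(0xc0800000UL, 0xc0a34000UL)); /* 3 */
            return 0;
    }
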
5125/************************ Utility routines ****************************/ 5137/************************ Utility routines ****************************/
5126 5138
 5139#ifndef ARM_HAS_VBAR
5127/* 5140/*
5128 * vector_page_setprot: 5141 * vector_page_setprot:
5129 * 5142 *
5130 * Manipulate the protection of the vector page. 5143 * Manipulate the protection of the vector page.
5131 */ 5144 */
5132void 5145void
5133vector_page_setprot(int prot) 5146vector_page_setprot(int prot)
5134{ 5147{
5135 struct l2_bucket *l2b; 5148 struct l2_bucket *l2b;
5136 pt_entry_t *ptep; 5149 pt_entry_t *ptep;
5137 5150
5138#if defined(CPU_ARMV7) || defined(CPU_ARM11) 5151#if defined(CPU_ARMV7) || defined(CPU_ARM11)
5139 /* 5152 /*
5140 * If we are using VBAR to use the vectors in the kernel, then it's 5153 * If we are using VBAR to use the vectors in the kernel, then it's
5141  * already mapped in the kernel text so no need to do anything here. 5154  * already mapped in the kernel text so no need to do anything here.
5142 */ 5155 */
5143 if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) { 5156 if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) {
5144 KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0); 5157 KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);
5145 return; 5158 return;
5146 } 5159 }
5147#endif 5160#endif
5148 5161
5149 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); 5162 l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
5150 KDASSERT(l2b != NULL); 5163 KDASSERT(l2b != NULL);
5151 5164
5152 ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 5165 ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
5153 5166
5154 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 5167 *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
5155 PTE_SYNC(ptep); 5168 PTE_SYNC(ptep);
5156 cpu_tlb_flushD_SE(vector_page); 5169 cpu_tlb_flushD_SE(vector_page);
5157 cpu_cpwait(); 5170 cpu_cpwait();
5158} 5171}
 5172#endif
5159 5173
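
The #ifndef ARM_HAS_VBAR wrapper above is the point of this change for pmap.c: when the CPU's VBAR register is used, the exception vectors stay in the kernel text and no separate vector page has to be mapped or reprotected. The sketch below only illustrates the underlying idea; 'vectors' is a stand-in symbol and the kernel's own register accessors, not this inline asm, are what the real code uses. VBAR is CP15 c12, c0, 0 and the address must be 32-byte aligned.

    /*
     * Point the CPU at exception vectors located in the kernel image.
     * Once this is done, ARM_VECTORS_LOW/HIGH mappings and
     * vector_page_setprot() become unnecessary, which is what the
     * ARM_HAS_VBAR option relies on.
     */
    extern char vectors[];                  /* hypothetical vector table symbol */

    static inline void
    set_vbar(void)
    {
            __asm volatile("mcr p15, 0, %0, c12, c0, 0" :: "r"(&vectors[0]));
            __asm volatile("isb" ::: "memory");     /* ensure the new base is seen */
    }
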
5160/* 5174/*
5161 * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 5175 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
5162 * Returns true if the mapping exists, else false. 5176 * Returns true if the mapping exists, else false.
5163 * 5177 *
5164 * NOTE: This function is only used by a couple of arm-specific modules. 5178 * NOTE: This function is only used by a couple of arm-specific modules.
5165 * It is not safe to take any pmap locks here, since we could be right 5179 * It is not safe to take any pmap locks here, since we could be right
5166 * in the middle of debugging the pmap anyway... 5180 * in the middle of debugging the pmap anyway...
5167 * 5181 *
5168 * It is possible for this routine to return false even though a valid 5182 * It is possible for this routine to return false even though a valid
5169 * mapping does exist. This is because we don't lock, so the metadata 5183 * mapping does exist. This is because we don't lock, so the metadata
5170 * state may be inconsistent. 5184 * state may be inconsistent.
5171 * 5185 *
5172 * NOTE: We can return a NULL *ptp in the case where the L1 pde is 5186 * NOTE: We can return a NULL *ptp in the case where the L1 pde is
5173 * a "section" mapping. 5187 * a "section" mapping.
5174 */ 5188 */
5175bool 5189bool
5176pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp) 5190pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
5177{ 5191{
5178 struct l2_dtable *l2; 5192 struct l2_dtable *l2;
5179 pd_entry_t *pl1pd, l1pd; 5193 pd_entry_t *pl1pd, l1pd;
5180 pt_entry_t *ptep; 5194 pt_entry_t *ptep;
5181 u_short l1idx; 5195 u_short l1idx;
5182 5196
5183 if (pm->pm_l1 == NULL) 5197 if (pm->pm_l1 == NULL)
5184 return false; 5198 return false;
5185 5199
5186 l1idx = L1_IDX(va); 5200 l1idx = L1_IDX(va);
5187 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 5201 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
5188 l1pd = *pl1pd; 5202 l1pd = *pl1pd;
5189 5203
5190 if (l1pte_section_p(l1pd)) { 5204 if (l1pte_section_p(l1pd)) {
5191 *ptp = NULL; 5205 *ptp = NULL;
5192 return true; 5206 return true;
5193 } 5207 }
5194 5208
5195 if (pm->pm_l2 == NULL) 5209 if (pm->pm_l2 == NULL)
5196 return false; 5210 return false;
5197 5211
5198 l2 = pm->pm_l2[L2_IDX(l1idx)]; 5212 l2 = pm->pm_l2[L2_IDX(l1idx)];
5199 5213
5200 if (l2 == NULL || 5214 if (l2 == NULL ||
5201 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 5215 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
5202 return false; 5216 return false;
5203 } 5217 }
5204 5218
5205 *ptp = &ptep[l2pte_index(va)]; 5219 *ptp = &ptep[l2pte_index(va)];
5206 return true; 5220 return true;
5207} 5221}
5208 5222
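
pmap_get_pde_pte() above walks the two-level short-descriptor layout: the top 12 bits of the VA select one of 4096 L1 entries, and for a coarse entry the next 8 bits select one of 256 PTEs, each mapping 4KB. A standalone model of that index arithmetic follows; the constants are the usual arm32 ones, restated here as assumptions rather than quoted from pmap.h.

    #include <stdint.h>
    #include <stdio.h>

    #define L1_S_SHIFT  20          /* one L1 entry per 1MB */
    #define PGSHIFT     12          /* 4KB pages */

    /* Models L1_IDX(va): index into the 4096-entry L1 table. */
    static unsigned
    l1_idx(uint32_t va)
    {
            return va >> L1_S_SHIFT;
    }

    /* Models l2pte_index(va): index into the 256-entry coarse L2 table. */
    static unsigned
    l2pte_idx(uint32_t va)
    {
            return (va >> PGSHIFT) & 0xff;
    }

    int
    main(void)
    {
            uint32_t va = 0xc0123000;       /* illustrative kernel VA */
            printf("L1 index %u, L2 index %u\n", l1_idx(va), l2pte_idx(va));
            return 0;
    }
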
5209bool 5223bool
5210pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp) 5224pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
5211{ 5225{
5212 u_short l1idx; 5226 u_short l1idx;
5213 5227
5214 if (pm->pm_l1 == NULL) 5228 if (pm->pm_l1 == NULL)
5215 return false; 5229 return false;
5216 5230
5217 l1idx = L1_IDX(va); 5231 l1idx = L1_IDX(va);
5218 *pdp = &pm->pm_l1->l1_kva[l1idx]; 5232 *pdp = &pm->pm_l1->l1_kva[l1idx];
5219 5233
5220 return true; 5234 return true;
5221} 5235}
5222 5236
5223/************************ Bootstrapping routines ****************************/ 5237/************************ Bootstrapping routines ****************************/
5224 5238
5225static void 5239static void
5226pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) 5240pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
5227{ 5241{
5228 int i; 5242 int i;
5229 5243
5230 l1->l1_kva = l1pt; 5244 l1->l1_kva = l1pt;
5231 l1->l1_domain_use_count = 0; 5245 l1->l1_domain_use_count = 0;
5232 l1->l1_domain_first = 0; 5246 l1->l1_domain_first = 0;
5233 5247
5234 for (i = 0; i < PMAP_DOMAINS; i++) 5248 for (i = 0; i < PMAP_DOMAINS; i++)
5235 l1->l1_domain_free[i] = i + 1; 5249 l1->l1_domain_free[i] = i + 1;
5236 5250
5237 /* 5251 /*
5238 * Copy the kernel's L1 entries to each new L1. 5252 * Copy the kernel's L1 entries to each new L1.
5239 */ 5253 */
5240 if (pmap_initialized) 5254 if (pmap_initialized)
5241 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); 5255 memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
5242 5256
5243 if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt, 5257 if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
5244 &l1->l1_physaddr) == false) 5258 &l1->l1_physaddr) == false)
5245 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); 5259 panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
5246 5260
5247 SLIST_INSERT_HEAD(&l1_list, l1, l1_link); 5261 SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
5248 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 5262 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
5249} 5263}
5250 5264
5251/* 5265/*
5252 * pmap_bootstrap() is called from the board-specific initarm() routine 5266 * pmap_bootstrap() is called from the board-specific initarm() routine
5253 * once the kernel L1/L2 descriptors tables have been set up. 5267 * once the kernel L1/L2 descriptors tables have been set up.
5254 * 5268 *
5255 * This is a somewhat convoluted process since pmap bootstrap is, effectively, 5269 * This is a somewhat convoluted process since pmap bootstrap is, effectively,
5256 * spread over a number of disparate files/functions. 5270 * spread over a number of disparate files/functions.
5257 * 5271 *
5258 * We are passed the following parameters 5272 * We are passed the following parameters
5259 * - kernel_l1pt 5273 * - kernel_l1pt
5260 * This is a pointer to the base of the kernel's L1 translation table. 5274 * This is a pointer to the base of the kernel's L1 translation table.
5261 * - vstart 5275 * - vstart
5262 * 1MB-aligned start of managed kernel virtual memory. 5276 * 1MB-aligned start of managed kernel virtual memory.
5263 * - vend 5277 * - vend
5264 * 1MB-aligned end of managed kernel virtual memory. 5278 * 1MB-aligned end of managed kernel virtual memory.
5265 * 5279 *
5266 * We use the first parameter to build the metadata (struct l1_ttable and 5280 * We use the first parameter to build the metadata (struct l1_ttable and
5267 * struct l2_dtable) necessary to track kernel mappings. 5281 * struct l2_dtable) necessary to track kernel mappings.
5268 */ 5282 */
5269#define PMAP_STATIC_L2_SIZE 16 5283#define PMAP_STATIC_L2_SIZE 16
5270void 5284void
5271pmap_bootstrap(vaddr_t vstart, vaddr_t vend) 5285pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
5272{ 5286{
5273 static struct l1_ttable static_l1; 5287 static struct l1_ttable static_l1;
5274 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 5288 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
5275 struct l1_ttable *l1 = &static_l1; 5289 struct l1_ttable *l1 = &static_l1;
5276 struct l2_dtable *l2; 5290 struct l2_dtable *l2;
5277 struct l2_bucket *l2b; 5291 struct l2_bucket *l2b;
5278 pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va; 5292 pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va;
5279 pmap_t pm = pmap_kernel(); 5293 pmap_t pm = pmap_kernel();
5280 pd_entry_t pde; 5294 pd_entry_t pde;
5281 pt_entry_t *ptep; 5295 pt_entry_t *ptep;
5282 paddr_t pa; 5296 paddr_t pa;
5283 vaddr_t va; 5297 vaddr_t va;
5284 vsize_t size; 5298 vsize_t size;
5285 int nptes, l1idx, l2idx, l2next = 0; 5299 int nptes, l1idx, l2idx, l2next = 0;
5286 5300
5287 /* 5301 /*
5288 * Initialise the kernel pmap object 5302 * Initialise the kernel pmap object
5289 */ 5303 */
5290 pm->pm_l1 = l1; 5304 pm->pm_l1 = l1;
5291 pm->pm_domain = PMAP_DOMAIN_KERNEL; 5305 pm->pm_domain = PMAP_DOMAIN_KERNEL;
5292 pm->pm_activated = true; 5306 pm->pm_activated = true;
5293 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL; 5307 pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
5294 5308
5295 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE); 5309 mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
5296 uvm_obj_init(&pm->pm_obj, NULL, false, 1); 5310 uvm_obj_init(&pm->pm_obj, NULL, false, 1);
5297 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock); 5311 uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
5298 5312
5299 /* 5313 /*
5300 * Scan the L1 translation table created by initarm() and create 5314 * Scan the L1 translation table created by initarm() and create
5301 * the required metadata for all valid mappings found in it. 5315 * the required metadata for all valid mappings found in it.
5302 */ 5316 */
5303 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { 5317 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
5304 pde = l1pt[l1idx]; 5318 pde = l1pt[l1idx];
5305 5319
5306 /* 5320 /*
5307 * We're only interested in Coarse mappings. 5321 * We're only interested in Coarse mappings.
5308 * pmap_extract() can deal with section mappings without 5322 * pmap_extract() can deal with section mappings without
5309 * recourse to checking L2 metadata. 5323 * recourse to checking L2 metadata.
5310 */ 5324 */
5311 if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 5325 if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
5312 continue; 5326 continue;
5313 5327
5314 /* 5328 /*
5315 * Lookup the KVA of this L2 descriptor table 5329 * Lookup the KVA of this L2 descriptor table
5316 */ 5330 */
5317 pa = (paddr_t)(pde & L1_C_ADDR_MASK); 5331 pa = (paddr_t)(pde & L1_C_ADDR_MASK);
5318 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 5332 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
5319 if (ptep == NULL) { 5333 if (ptep == NULL) {
5320 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 5334 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
5321 (u_int)l1idx << L1_S_SHIFT, pa); 5335 (u_int)l1idx << L1_S_SHIFT, pa);
5322 } 5336 }
5323 5337
5324 /* 5338 /*
5325 * Fetch the associated L2 metadata structure. 5339 * Fetch the associated L2 metadata structure.
5326 * Allocate a new one if necessary. 5340 * Allocate a new one if necessary.
5327 */ 5341 */
5328 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 5342 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
5329 if (l2next == PMAP_STATIC_L2_SIZE) 5343 if (l2next == PMAP_STATIC_L2_SIZE)
5330 panic("pmap_bootstrap: out of static L2s"); 5344 panic("pmap_bootstrap: out of static L2s");
5331 pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++]; 5345 pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
5332 } 5346 }
5333 5347
5334 /* 5348 /*
5335 * One more L1 slot tracked... 5349 * One more L1 slot tracked...
5336 */ 5350 */
5337 l2->l2_occupancy++; 5351 l2->l2_occupancy++;
5338 5352
5339 /* 5353 /*
5340 * Fill in the details of the L2 descriptor in the 5354 * Fill in the details of the L2 descriptor in the
5341 * appropriate bucket. 5355 * appropriate bucket.
5342 */ 5356 */
5343 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 5357 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
5344 l2b->l2b_kva = ptep; 5358 l2b->l2b_kva = ptep;
5345 l2b->l2b_phys = pa; 5359 l2b->l2b_phys = pa;
5346 l2b->l2b_l1idx = l1idx; 5360 l2b->l2b_l1idx = l1idx;
5347 5361
5348 /* 5362 /*
5349 * Establish an initial occupancy count for this descriptor 5363 * Establish an initial occupancy count for this descriptor
5350 */ 5364 */
5351 for (l2idx = 0; 5365 for (l2idx = 0;
5352 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 5366 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
5353 l2idx++) { 5367 l2idx++) {
5354 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 5368 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
5355 l2b->l2b_occupancy++; 5369 l2b->l2b_occupancy++;
5356 } 5370 }
5357 } 5371 }
5358 5372
5359 /* 5373 /*
5360 * Make sure the descriptor itself has the correct cache mode. 5374 * Make sure the descriptor itself has the correct cache mode.
5361 * If not, fix it, but whine about the problem. Port-meisters 5375 * If not, fix it, but whine about the problem. Port-meisters
5362 * should consider this a clue to fix up their initarm() 5376 * should consider this a clue to fix up their initarm()
5363 * function. :) 5377 * function. :)
5364 */ 5378 */
5365 if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) { 5379 if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) {
5366 printf("pmap_bootstrap: WARNING! wrong cache mode for " 5380 printf("pmap_bootstrap: WARNING! wrong cache mode for "
5367 "L2 pte @ %p\n", ptep); 5381 "L2 pte @ %p\n", ptep);
5368 } 5382 }
5369 } 5383 }
5370 5384
5371 /* 5385 /*
5372 * Ensure the primary (kernel) L1 has the correct cache mode for 5386 * Ensure the primary (kernel) L1 has the correct cache mode for
5373 * a page table. Bitch if it is not correctly set. 5387 * a page table. Bitch if it is not correctly set.
5374 */ 5388 */
5375 for (va = (vaddr_t)l1pt; 5389 for (va = (vaddr_t)l1pt;
5376 va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { 5390 va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
5377 if (pmap_set_pt_cache_mode(l1pt, va)) 5391 if (pmap_set_pt_cache_mode(l1pt, va))
5378 printf("pmap_bootstrap: WARNING! wrong cache mode for " 5392 printf("pmap_bootstrap: WARNING! wrong cache mode for "
5379 "primary L1 @ 0x%lx\n", va); 5393 "primary L1 @ 0x%lx\n", va);
5380 } 5394 }
5381 5395
5382 cpu_dcache_wbinv_all(); 5396 cpu_dcache_wbinv_all();
5383 cpu_tlb_flushID(); 5397 cpu_tlb_flushID();
5384 cpu_cpwait(); 5398 cpu_cpwait();
5385 5399
5386 /* 5400 /*
5387 * now we allocate the "special" VAs which are used for tmp mappings 5401 * now we allocate the "special" VAs which are used for tmp mappings
5388 * by the pmap (and other modules). we allocate the VAs by advancing 5402 * by the pmap (and other modules). we allocate the VAs by advancing
5389 * virtual_avail (note that there are no pages mapped at these VAs). 5403 * virtual_avail (note that there are no pages mapped at these VAs).
5390 * 5404 *
5391 * Managed KVM space start from wherever initarm() tells us. 5405 * Managed KVM space start from wherever initarm() tells us.
5392  * Managed KVM space starts from wherever initarm() tells us. 5405  * Managed KVM space starts from wherever initarm() tells us.
5393 virtual_avail = vstart; 5407 virtual_avail = vstart;
5394 virtual_end = vend; 5408 virtual_end = vend;
5395 5409
5396#ifdef PMAP_CACHE_VIPT 5410#ifdef PMAP_CACHE_VIPT
5397 /* 5411 /*
5398 * If we have a VIPT cache, we need one page/pte per possible alias 5412 * If we have a VIPT cache, we need one page/pte per possible alias
5399 * page so we won't violate cache aliasing rules. 5413 * page so we won't violate cache aliasing rules.
5400 */ 5414 */
5401 virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;  5415 virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;
5402 nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1; 5416 nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1;
5403#else 5417#else
5404 nptes = 1; 5418 nptes = 1;
5405#endif 5419#endif
5406 pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte); 5420 pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte);
5407 pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte); 5421 pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
5408 pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte); 5422 pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
5409 pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte); 5423 pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
5410 pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL); 5424 pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
5411 pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE, 5425 pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
5412 (void *)&msgbufaddr, NULL); 5426 (void *)&msgbufaddr, NULL);
5413 5427
5414 /* 5428 /*
5415 * Allocate a range of kernel virtual address space to be used 5429 * Allocate a range of kernel virtual address space to be used
5416 * for L2 descriptor tables and metadata allocation in 5430 * for L2 descriptor tables and metadata allocation in
5417 * pmap_growkernel(). 5431 * pmap_growkernel().
5418 */ 5432 */
5419 size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE; 5433 size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
5420 pmap_alloc_specials(&virtual_avail, 5434 pmap_alloc_specials(&virtual_avail,
5421 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 5435 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
5422 &pmap_kernel_l2ptp_kva, NULL); 5436 &pmap_kernel_l2ptp_kva, NULL);
5423 5437
5424 size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; 5438 size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
5425 pmap_alloc_specials(&virtual_avail, 5439 pmap_alloc_specials(&virtual_avail,
5426 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 5440 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
5427 &pmap_kernel_l2dtable_kva, NULL); 5441 &pmap_kernel_l2dtable_kva, NULL);
5428 5442
5429 /* 5443 /*
5430 * init the static-global locks and global pmap list. 5444 * init the static-global locks and global pmap list.
5431 */ 5445 */
5432 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM); 5446 mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
5433 5447
5434 /* 5448 /*
5435 * We can now initialise the first L1's metadata. 5449 * We can now initialise the first L1's metadata.
5436 */ 5450 */
5437 SLIST_INIT(&l1_list); 5451 SLIST_INIT(&l1_list);
5438 TAILQ_INIT(&l1_lru_list); 5452 TAILQ_INIT(&l1_lru_list);
5439 pmap_init_l1(l1, l1pt); 5453 pmap_init_l1(l1, l1pt);
5440 5454
 5455#ifndef ARM_HAS_VBAR
5441 /* Set up vector page L1 details, if necessary */ 5456 /* Set up vector page L1 details, if necessary */
5442 if (vector_page < KERNEL_BASE) { 5457 if (vector_page < KERNEL_BASE) {
5443 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 5458 pm->pm_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
5444 l2b = pmap_get_l2_bucket(pm, vector_page); 5459 l2b = pmap_get_l2_bucket(pm, vector_page);
5445 KDASSERT(l2b != NULL); 5460 KDASSERT(l2b != NULL);
5446 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO | 5461 pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
5447 L1_C_DOM(pm->pm_domain); 5462 L1_C_DOM(pm->pm_domain);
5448 } else 5463 } else
5449 pm->pm_pl1vec = NULL; 5464 pm->pm_pl1vec = NULL;
 5465#endif
5450 5466
5451 /* 5467 /*
5452 * Initialize the pmap cache 5468 * Initialize the pmap cache
5453 */ 5469 */
5454 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0, 5470 pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
5455 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL); 5471 "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
5456 LIST_INIT(&pmap_pmaps); 5472 LIST_INIT(&pmap_pmaps);
5457 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list); 5473 LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
5458 5474
5459 /* 5475 /*
5460 * Initialize the pv pool. 5476 * Initialize the pv pool.
5461 */ 5477 */
5462 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl", 5478 pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
5463 &pmap_bootstrap_pv_allocator, IPL_NONE); 5479 &pmap_bootstrap_pv_allocator, IPL_NONE);
5464 5480
5465 /* 5481 /*
5466 * Initialize the L2 dtable pool and cache. 5482 * Initialize the L2 dtable pool and cache.
5467 */ 5483 */
5468 pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0, 5484 pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0,
5469 0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL); 5485 0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL);
5470 5486
5471 /* 5487 /*
5472 * Initialise the L2 descriptor table pool and cache 5488 * Initialise the L2 descriptor table pool and cache
5473 */ 5489 */
5474 pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0, 5490 pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0,
5475 L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE, 5491 L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE,
5476 pmap_l2ptp_ctor, NULL, NULL); 5492 pmap_l2ptp_ctor, NULL, NULL);
5477 5493
5478 cpu_dcache_wbinv_all(); 5494 cpu_dcache_wbinv_all();
5479} 5495}
5480 5496
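
One detail of pmap_bootstrap() above worth calling out: with a VIPT cache the csrcp/cdstp hook points get one PTE per possible page color (nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1), so a page can always be hooked in at its own color without creating an alias. A tiny standalone check of that count; the 0x3000 mask is an illustrative 16KB/4-way cache, not a value from this diff.

    #include <stdio.h>

    #define PGSHIFT 12              /* assumed 4KB pages */

    /* PTEs reserved per hook point: one per page color.  A zero mask
     * (no aliasing to worry about) degenerates to a single PTE, matching
     * the non-VIPT branch where nptes is simply 1. */
    static int
    hook_ptes(unsigned prefer_mask)
    {
            return (int)(prefer_mask >> PGSHIFT) + 1;
    }

    int
    main(void)
    {
            printf("%d\n", hook_ptes(0x3000));      /* 4 colors */
            printf("%d\n", hook_ptes(0));           /* 1 */
            return 0;
    }
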
5481static int 5497static int
5482pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va) 5498pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va)
5483{ 5499{
5484 pd_entry_t *pdep, pde; 5500 pd_entry_t *pdep, pde;
5485 pt_entry_t *ptep, pte; 5501 pt_entry_t *ptep, pte;
5486 vaddr_t pa; 5502 vaddr_t pa;
5487 int rv = 0; 5503 int rv = 0;
5488 5504
5489 /* 5505 /*
5490 * Make sure the descriptor itself has the correct cache mode 5506 * Make sure the descriptor itself has the correct cache mode
5491 */ 5507 */
5492 pdep = &kl1[L1_IDX(va)]; 5508 pdep = &kl1[L1_IDX(va)];
5493 pde = *pdep; 5509 pde = *pdep;
5494 5510
5495 if (l1pte_section_p(pde)) { 5511 if (l1pte_section_p(pde)) {
5496 __CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0); 5512 __CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
5497 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 5513 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
5498 *pdep = (pde & ~L1_S_CACHE_MASK) | 5514 *pdep = (pde & ~L1_S_CACHE_MASK) |
5499 pte_l1_s_cache_mode_pt; 5515 pte_l1_s_cache_mode_pt;
5500 PTE_SYNC(pdep); 5516 PTE_SYNC(pdep);
5501 cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep)); 5517 cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep));
5502 rv = 1; 5518 rv = 1;
5503 } 5519 }
5504 } else { 5520 } else {
5505 pa = (paddr_t)(pde & L1_C_ADDR_MASK); 5521 pa = (paddr_t)(pde & L1_C_ADDR_MASK);
5506 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 5522 ptep = (pt_entry_t *)kernel_pt_lookup(pa);
5507 if (ptep == NULL) 5523 if (ptep == NULL)
5508 panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); 5524 panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);
5509 5525
5510 ptep = &ptep[l2pte_index(va)]; 5526 ptep = &ptep[l2pte_index(va)];
5511 pte = *ptep; 5527 pte = *ptep;
5512 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 5528 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
5513 *ptep = (pte & ~L2_S_CACHE_MASK) | 5529 *ptep = (pte & ~L2_S_CACHE_MASK) |
5514 pte_l2_s_cache_mode_pt; 5530 pte_l2_s_cache_mode_pt;
5515 PTE_SYNC(ptep); 5531 PTE_SYNC(ptep);
5516 cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep)); 5532 cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
5517 rv = 1; 5533 rv = 1;
5518 } 5534 }
5519 } 5535 }
5520 5536
5521 return (rv); 5537 return (rv);
5522} 5538}
5523 5539
5524static void 5540static void
5525pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep) 5541pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
5526{ 5542{
5527 vaddr_t va = *availp; 5543 vaddr_t va = *availp;
5528 struct l2_bucket *l2b; 5544 struct l2_bucket *l2b;
5529 5545
5530 if (ptep) { 5546 if (ptep) {
5531 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 5547 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5532 if (l2b == NULL) 5548 if (l2b == NULL)
5533 panic("pmap_alloc_specials: no l2b for 0x%lx", va); 5549 panic("pmap_alloc_specials: no l2b for 0x%lx", va);
5534 5550
5535 if (ptep) 5551 if (ptep)
5536 *ptep = &l2b->l2b_kva[l2pte_index(va)]; 5552 *ptep = &l2b->l2b_kva[l2pte_index(va)];
5537 } 5553 }
5538 5554
5539 *vap = va; 5555 *vap = va;
5540 *availp = va + (PAGE_SIZE * pages); 5556 *availp = va + (PAGE_SIZE * pages);
5541} 5557}
5542 5558
5543void 5559void
5544pmap_init(void) 5560pmap_init(void)
5545{ 5561{
5546 5562
5547 /* 5563 /*
5548 * Set the available memory vars - These do not map to real memory 5564 * Set the available memory vars - These do not map to real memory
5549 * addresses and cannot as the physical memory is fragmented. 5565 * addresses and cannot as the physical memory is fragmented.
5550 * They are used by ps for %mem calculations. 5566 * They are used by ps for %mem calculations.
5551 * One could argue whether this should be the entire memory or just 5567 * One could argue whether this should be the entire memory or just
5552 * the memory that is useable in a user process. 5568 * the memory that is useable in a user process.
5553 */ 5569 */
5554 avail_start = ptoa(VM_PHYSMEM_PTR(0)->start); 5570 avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
5555 avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end); 5571 avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
5556 5572
5557 /* 5573 /*
5558 * Now we need to free enough pv_entry structures to allow us to get 5574 * Now we need to free enough pv_entry structures to allow us to get
5559 * the kmem_map/kmem_object allocated and inited (done after this 5575 * the kmem_map/kmem_object allocated and inited (done after this
5560 * function is finished). to do this we allocate one bootstrap page out 5576 * function is finished). to do this we allocate one bootstrap page out
5561 * of kernel_map and use it to provide an initial pool of pv_entry 5577 * of kernel_map and use it to provide an initial pool of pv_entry
5562 * structures. we never free this page. 5578 * structures. we never free this page.
5563 */ 5579 */
5564 pool_setlowat(&pmap_pv_pool, 5580 pool_setlowat(&pmap_pv_pool,
5565 (PAGE_SIZE / sizeof(struct pv_entry)) * 2); 5581 (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
5566 5582
5567 mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE); 5583 mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
5568 zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 5584 zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
5569 UVM_KMF_WIRED|UVM_KMF_ZERO); 5585 UVM_KMF_WIRED|UVM_KMF_ZERO);
5570 5586
5571 pmap_initialized = true; 5587 pmap_initialized = true;
5572} 5588}
5573 5589
5574static vaddr_t last_bootstrap_page = 0; 5590static vaddr_t last_bootstrap_page = 0;
5575static void *free_bootstrap_pages = NULL; 5591static void *free_bootstrap_pages = NULL;
5576 5592
5577static void * 5593static void *
5578pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags) 5594pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
5579{ 5595{
5580 extern void *pool_page_alloc(struct pool *, int); 5596 extern void *pool_page_alloc(struct pool *, int);
5581 vaddr_t new_page; 5597 vaddr_t new_page;
5582 void *rv; 5598 void *rv;
5583 5599
5584 if (pmap_initialized) 5600 if (pmap_initialized)
5585 return (pool_page_alloc(pp, flags)); 5601 return (pool_page_alloc(pp, flags));
5586 5602
5587 if (free_bootstrap_pages) { 5603 if (free_bootstrap_pages) {
5588 rv = free_bootstrap_pages; 5604 rv = free_bootstrap_pages;
5589 free_bootstrap_pages = *((void **)rv); 5605 free_bootstrap_pages = *((void **)rv);
5590 return (rv); 5606 return (rv);
5591 } 5607 }
5592 5608
5593 new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 5609 new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
5594 UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT)); 5610 UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT));
5595 5611
5596 KASSERT(new_page > last_bootstrap_page); 5612 KASSERT(new_page > last_bootstrap_page);
5597 last_bootstrap_page = new_page; 5613 last_bootstrap_page = new_page;
5598 return ((void *)new_page); 5614 return ((void *)new_page);
5599} 5615}
5600 5616
5601static void 5617static void
5602pmap_bootstrap_pv_page_free(struct pool *pp, void *v) 5618pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
5603{ 5619{
5604 extern void pool_page_free(struct pool *, void *); 5620 extern void pool_page_free(struct pool *, void *);
5605 5621
5606 if ((vaddr_t)v <= last_bootstrap_page) { 5622 if ((vaddr_t)v <= last_bootstrap_page) {
5607 *((void **)v) = free_bootstrap_pages; 5623 *((void **)v) = free_bootstrap_pages;
5608 free_bootstrap_pages = v; 5624 free_bootstrap_pages = v;
5609 return; 5625 return;
5610 } 5626 }
5611 5627
5612 if (pmap_initialized) { 5628 if (pmap_initialized) {
5613 pool_page_free(pp, v); 5629 pool_page_free(pp, v);
5614 return; 5630 return;
5615 } 5631 }
5616} 5632}
5617 5633
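
pmap_bootstrap_pv_page_free() above keeps pages handed out before pmap_initialized on a simple intrusive LIFO: the next pointer is stored in the first word of the free page itself, so no extra metadata is needed. A standalone sketch of that free-list pattern; the buffers in main() merely stand in for bootstrap pages.

    #include <assert.h>
    #include <stddef.h>

    static void *free_pages;                /* head of the intrusive LIFO */

    /* Push: the page's first word becomes the link to the old head. */
    static void
    push_page(void *v)
    {
            *(void **)v = free_pages;
            free_pages = v;
    }

    /* Pop: unlink the head, or return NULL when the list is empty. */
    static void *
    pop_page(void)
    {
            void *v = free_pages;
            if (v != NULL)
                    free_pages = *(void **)v;
            return v;
    }

    int
    main(void)
    {
            static void *a[512], *b[512];   /* stand-ins for free bootstrap pages */
            push_page(a);
            push_page(b);
            assert(pop_page() == b);
            assert(pop_page() == a);
            assert(pop_page() == NULL);
            return 0;
    }
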
5618/* 5634/*
5619 * pmap_postinit() 5635 * pmap_postinit()
5620 * 5636 *
5621 * This routine is called after the vm and kmem subsystems have been 5637 * This routine is called after the vm and kmem subsystems have been
5622 * initialised. This allows the pmap code to perform any initialisation 5638 * initialised. This allows the pmap code to perform any initialisation
5623  * that can only be done once the memory allocation is in place. 5639  * that can only be done once the memory allocation is in place.
5624 */ 5640 */
5625void 5641void
5626pmap_postinit(void) 5642pmap_postinit(void)
5627{ 5643{
5628 extern paddr_t physical_start, physical_end; 5644 extern paddr_t physical_start, physical_end;
5629 struct l2_bucket *l2b; 5645 struct l2_bucket *l2b;
5630 struct l1_ttable *l1; 5646 struct l1_ttable *l1;
5631 struct pglist plist; 5647 struct pglist plist;
5632 struct vm_page *m; 5648 struct vm_page *m;
5633 pd_entry_t *pl1pt; 5649 pd_entry_t *pl1pt;
5634 pt_entry_t *ptep, pte; 5650 pt_entry_t *ptep, pte;
5635 vaddr_t va, eva; 5651 vaddr_t va, eva;
5636 u_int loop, needed; 5652 u_int loop, needed;
5637 int error; 5653 int error;
5638 5654
5639 pool_cache_setlowat(&pmap_l2ptp_cache, 5655 pool_cache_setlowat(&pmap_l2ptp_cache,
5640 (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4); 5656 (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
5641 pool_cache_setlowat(&pmap_l2dtable_cache, 5657 pool_cache_setlowat(&pmap_l2dtable_cache,
5642 (PAGE_SIZE / sizeof(struct l2_dtable)) * 2); 5658 (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
5643 5659
5644 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); 5660 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
5645 needed -= 1; 5661 needed -= 1;
5646 5662
5647 l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP); 5663 l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP);
5648 5664
5649 for (loop = 0; loop < needed; loop++, l1++) { 5665 for (loop = 0; loop < needed; loop++, l1++) {
5650 /* Allocate a L1 page table */ 5666 /* Allocate a L1 page table */
5651 va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY); 5667 va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY);
5652 if (va == 0) 5668 if (va == 0)
5653 panic("Cannot allocate L1 KVM"); 5669 panic("Cannot allocate L1 KVM");
5654 5670
5655 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, 5671 error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
5656 physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1); 5672 physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1);
5657 if (error) 5673 if (error)
5658 panic("Cannot allocate L1 physical pages"); 5674 panic("Cannot allocate L1 physical pages");
5659 5675
5660 m = TAILQ_FIRST(&plist); 5676 m = TAILQ_FIRST(&plist);
5661 eva = va + L1_TABLE_SIZE; 5677 eva = va + L1_TABLE_SIZE;
5662 pl1pt = (pd_entry_t *)va; 5678 pl1pt = (pd_entry_t *)va;
5663 5679
5664 while (m && va < eva) { 5680 while (m && va < eva) {
5665 paddr_t pa = VM_PAGE_TO_PHYS(m); 5681 paddr_t pa = VM_PAGE_TO_PHYS(m);
5666 5682
5667 pmap_kenter_pa(va, pa, 5683 pmap_kenter_pa(va, pa,
5668 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 5684 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
5669 5685
5670 /* 5686 /*
5671 * Make sure the L1 descriptor table is mapped 5687 * Make sure the L1 descriptor table is mapped
5672 * with the cache-mode set to write-through. 5688 * with the cache-mode set to write-through.
5673 */ 5689 */
5674 l2b = pmap_get_l2_bucket(pmap_kernel(), va); 5690 l2b = pmap_get_l2_bucket(pmap_kernel(), va);
5675 KDASSERT(l2b != NULL); 5691 KDASSERT(l2b != NULL);
5676 ptep = &l2b->l2b_kva[l2pte_index(va)]; 5692 ptep = &l2b->l2b_kva[l2pte_index(va)];
5677 pte = *ptep; 5693 pte = *ptep;
5678 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 5694 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
5679 *ptep = pte; 5695 *ptep = pte;
5680 PTE_SYNC(ptep); 5696 PTE_SYNC(ptep);
5681 cpu_tlb_flushD_SE(va); 5697 cpu_tlb_flushD_SE(va);
5682 5698
5683 va += PAGE_SIZE; 5699 va += PAGE_SIZE;
5684 m = TAILQ_NEXT(m, pageq.queue); 5700 m = TAILQ_NEXT(m, pageq.queue);
5685 } 5701 }
5686 5702
5687#ifdef DIAGNOSTIC 5703#ifdef DIAGNOSTIC
5688 if (m) 5704 if (m)
5689 panic("pmap_alloc_l1pt: pglist not empty"); 5705 panic("pmap_alloc_l1pt: pglist not empty");
5690#endif /* DIAGNOSTIC */ 5706#endif /* DIAGNOSTIC */
5691 5707
5692 pmap_init_l1(l1, pl1pt); 5708 pmap_init_l1(l1, pl1pt);
5693 } 5709 }
5694 5710
5695#ifdef DEBUG 5711#ifdef DEBUG
5696 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 5712 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
5697 needed); 5713 needed);
5698#endif 5714#endif
5699} 5715}
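For clarity, the `needed' computation above is just a ceiling division: one extra static L1 table is pre-allocated for every PMAP_DOMAINS user pmaps, minus the L1 that pmap_bootstrap() already provided. A worked example follows; the maxproc value is invented and PMAP_DOMAINS is assumed here to be 15, with domain 0 reserved for the kernel.

    /*
     * Illustration only: assuming PMAP_DOMAINS == 15 and a
     * hypothetical maxproc of 532,
     *
     *     needed = (532 / 15) + (532 % 15 ? 1 : 0) - 1
     *            = 35 + 1 - 1
     *            = 35
     *
     * i.e. 35 additional static L1 tables, each of which can then be
     * shared by up to PMAP_DOMAINS user pmaps via ARM domains.
     */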
5700 5716
5701/* 5717/*
5702 * Note that the following routines are used by board-specific initialisation 5718 * Note that the following routines are used by board-specific initialisation
5703 * code to configure the initial kernel page tables. 5719 * code to configure the initial kernel page tables.
5704 * 5720 *
5705 * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that 5721 * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
5706 * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the 5722 * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
5707 * behaviour of the old pmap, and provides an easy migration path for 5723 * behaviour of the old pmap, and provides an easy migration path for
5708 * initial bring-up of the new pmap on existing ports. Fortunately, 5724 * initial bring-up of the new pmap on existing ports. Fortunately,
5709 * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and 5725 * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
5710 * will be deprecated. 5726 * will be deprecated.
5711 * 5727 *
5712 * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page 5728 * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
5713 * tables. 5729 * tables.
5714 */ 5730 */
5715 5731
5716/* 5732/*
5717 * This list exists for the benefit of pmap_map_chunk(). It keeps track 5733 * This list exists for the benefit of pmap_map_chunk(). It keeps track
5718 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can 5734 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
5719 * find them as necessary. 5735 * find them as necessary.
5720 * 5736 *
5721 * Note that the data on this list MUST remain valid after initarm() returns, 5737 * Note that the data on this list MUST remain valid after initarm() returns,
 5722 * as pmap_bootstrap() uses it to construct L2 table metadata. 5738 * as pmap_bootstrap() uses it to construct L2 table metadata.
5723 */ 5739 */
5724SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); 5740SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
5725 5741
5726static vaddr_t 5742static vaddr_t
5727kernel_pt_lookup(paddr_t pa) 5743kernel_pt_lookup(paddr_t pa)
5728{ 5744{
5729 pv_addr_t *pv; 5745 pv_addr_t *pv;
5730 5746
5731 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { 5747 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
5732#ifndef ARM32_NEW_VM_LAYOUT 5748#ifndef ARM32_NEW_VM_LAYOUT
5733 if (pv->pv_pa == (pa & ~PGOFSET)) 5749 if (pv->pv_pa == (pa & ~PGOFSET))
5734 return (pv->pv_va | (pa & PGOFSET)); 5750 return (pv->pv_va | (pa & PGOFSET));
5735#else 5751#else
5736 if (pv->pv_pa == pa) 5752 if (pv->pv_pa == pa)
5737 return (pv->pv_va); 5753 return (pv->pv_va);
5738#endif 5754#endif
5739 } 5755 }
5740 return (0); 5756 return (0);
5741} 5757}
5742 5758
5743/* 5759/*
5744 * pmap_map_section: 5760 * pmap_map_section:
5745 * 5761 *
5746 * Create a single section mapping. 5762 * Create a single section mapping.
5747 */ 5763 */
5748void 5764void
5749pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 5765pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
5750{ 5766{
5751 pd_entry_t *pde = (pd_entry_t *) l1pt; 5767 pd_entry_t *pde = (pd_entry_t *) l1pt;
5752 pd_entry_t fl; 5768 pd_entry_t fl;
5753 5769
5754 KASSERT(((va | pa) & L1_S_OFFSET) == 0); 5770 KASSERT(((va | pa) & L1_S_OFFSET) == 0);
5755 5771
5756 switch (cache) { 5772 switch (cache) {
5757 case PTE_NOCACHE: 5773 case PTE_NOCACHE:
5758 default: 5774 default:
5759 fl = 0; 5775 fl = 0;
5760 break; 5776 break;
5761 5777
5762 case PTE_CACHE: 5778 case PTE_CACHE:
5763 fl = pte_l1_s_cache_mode; 5779 fl = pte_l1_s_cache_mode;
5764 break; 5780 break;
5765 5781
5766 case PTE_PAGETABLE: 5782 case PTE_PAGETABLE:
5767 fl = pte_l1_s_cache_mode_pt; 5783 fl = pte_l1_s_cache_mode_pt;
5768 break; 5784 break;
5769 } 5785 }
5770 5786
5771 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 5787 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
5772 L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); 5788 L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
5773 PTE_SYNC(&pde[va >> L1_S_SHIFT]); 5789 PTE_SYNC(&pde[va >> L1_S_SHIFT]);
5774} 5790}
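By way of illustration, a board's early bootstrap code might use this to map a 1MB device window with a single L1 entry. The addresses and the `l1pagetable' variable below are hypothetical, not taken from this commit:

    /*
     * Hypothetical example: map one 1MB (L1_S_SIZE) section of
     * device registers, uncached and read/write, into the initial
     * L1 table built during initarm().  Both VA and PA must be
     * 1MB-aligned, as the KASSERT above enforces.
     */
    pmap_map_section(l1pagetable,       /* VA of the bootstrap L1 table */
        0xfd000000,                     /* invented device VA */
        0x48000000,                     /* invented device PA */
        VM_PROT_READ | VM_PROT_WRITE,
        PTE_NOCACHE);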
5775 5791
5776/* 5792/*
5777 * pmap_map_entry: 5793 * pmap_map_entry:
5778 * 5794 *
5779 * Create a single page mapping. 5795 * Create a single page mapping.
5780 */ 5796 */
5781void 5797void
5782pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 5798pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
5783{ 5799{
5784 pd_entry_t *pde = (pd_entry_t *) l1pt; 5800 pd_entry_t *pde = (pd_entry_t *) l1pt;
5785 pt_entry_t fl; 5801 pt_entry_t fl;
5786 pt_entry_t *pte; 5802 pt_entry_t *pte;
5787 5803
5788 KASSERT(((va | pa) & PGOFSET) == 0); 5804 KASSERT(((va | pa) & PGOFSET) == 0);
5789 5805
5790 switch (cache) { 5806 switch (cache) {
5791 case PTE_NOCACHE: 5807 case PTE_NOCACHE:
5792 default: 5808 default:
5793 fl = 0; 5809 fl = 0;
5794 break; 5810 break;
5795 5811
5796 case PTE_CACHE: 5812 case PTE_CACHE:
5797 fl = pte_l2_s_cache_mode; 5813 fl = pte_l2_s_cache_mode;
5798 break; 5814 break;
5799 5815
5800 case PTE_PAGETABLE: 5816 case PTE_PAGETABLE:
5801 fl = pte_l2_s_cache_mode_pt; 5817 fl = pte_l2_s_cache_mode_pt;
5802 break; 5818 break;
5803 } 5819 }
5804 5820
5805 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 5821 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5806 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va); 5822 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
5807 5823
5808#ifndef ARM32_NEW_VM_LAYOUT 5824#ifndef ARM32_NEW_VM_LAYOUT
5809 pte = (pt_entry_t *) 5825 pte = (pt_entry_t *)
5810 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 5826 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
5811#else 5827#else
5812 pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); 5828 pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
5813#endif 5829#endif
5814 if (pte == NULL) 5830 if (pte == NULL)
5815 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va); 5831 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
5816 5832
5817 fl |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot); 5833 fl |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
5818#ifndef ARM32_NEW_VM_LAYOUT 5834#ifndef ARM32_NEW_VM_LAYOUT
5819 pte += (va >> PGSHIFT) & 0x3ff; 5835 pte += (va >> PGSHIFT) & 0x3ff;
5820#else 5836#else
5821 pte += l2pte_index(va); 5837 pte += l2pte_index(va);
5822 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; 5838 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
5823#endif 5839#endif
5824 *pte = fl; 5840 *pte = fl;
5825 PTE_SYNC(pte); 5841 PTE_SYNC(pte);
5826} 5842}
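pmap_map_entry() is the single-page counterpart. A common use in board initarm() code has been wiring the exception vector page; `systempage' and `l1pagetable' are customary names in that code and are assumed here, not defined by this diff:

    /*
     * Sketch: map the single vector page at the high-vectors
     * address.  On CPUs where the VBAR register relocates the
     * vector base instead, no such fixed mapping is needed.
     */
    pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
        VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);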
5827 5843
5828/* 5844/*
5829 * pmap_link_l2pt: 5845 * pmap_link_l2pt:
5830 * 5846 *
5831 * Link the L2 page table specified by "l2pv" into the L1 5847 * Link the L2 page table specified by "l2pv" into the L1
5832 * page table at the slot for "va". 5848 * page table at the slot for "va".
5833 */ 5849 */
5834void 5850void
5835pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv) 5851pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
5836{ 5852{
5837 pd_entry_t *pde = (pd_entry_t *) l1pt, proto; 5853 pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
5838 u_int slot = va >> L1_S_SHIFT; 5854 u_int slot = va >> L1_S_SHIFT;
5839 5855
5840#ifndef ARM32_NEW_VM_LAYOUT 5856#ifndef ARM32_NEW_VM_LAYOUT
5841 KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0); 5857 KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0);
5842 KASSERT((l2pv->pv_pa & PGOFSET) == 0); 5858 KASSERT((l2pv->pv_pa & PGOFSET) == 0);
5843#endif 5859#endif
5844 5860
5845 proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; 5861 proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
5846 5862
5847 pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); 5863 pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
5848#ifdef ARM32_NEW_VM_LAYOUT 5864#ifdef ARM32_NEW_VM_LAYOUT
5849 PTE_SYNC(&pde[slot]); 5865 PTE_SYNC(&pde[slot]);
5850#else 5866#else
5851 pde[slot + 1] = proto | (l2pv->pv_pa + 0x400); 5867 pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
5852 pde[slot + 2] = proto | (l2pv->pv_pa + 0x800); 5868 pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
5853 pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00); 5869 pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
5854 PTE_SYNC_RANGE(&pde[slot + 0], 4); 5870 PTE_SYNC_RANGE(&pde[slot + 0], 4);
5855#endif 5871#endif
5856 5872
5857 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); 5873 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
5858} 5874}
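A caller normally allocates the L2 page first and then links it at the 4MB boundary it is to serve; without ARM32_NEW_VM_LAYOUT a single 4KB L2 page populates four consecutive L1 slots, i.e. 4MB of VA, as the code above shows. A hedged sketch, where `kernel_pt_table' and `l1pagetable' are assumed board-code names:

    /*
     * Sketch: link a previously allocated 4KB page-table page so it
     * serves the 4MB of VA starting at KERNEL_BASE.  This fills the
     * L1 slots and also puts the pv_addr_t on kernel_pt_list, which
     * is what lets pmap_map_chunk()/pmap_map_entry() find it later.
     */
    pmap_link_l2pt(l1pagetable, KERNEL_BASE, &kernel_pt_table[0]);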
5859 5875
5860/* 5876/*
5861 * pmap_map_chunk: 5877 * pmap_map_chunk:
5862 * 5878 *
5863 * Map a chunk of memory using the most efficient mappings 5879 * Map a chunk of memory using the most efficient mappings
5864 * possible (section, large page, small page) into the 5880 * possible (section, large page, small page) into the
5865 * provided L1 and L2 tables at the specified virtual address. 5881 * provided L1 and L2 tables at the specified virtual address.
5866 */ 5882 */
5867vsize_t 5883vsize_t
5868pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size, 5884pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
5869 int prot, int cache) 5885 int prot, int cache)
5870{ 5886{
5871 pd_entry_t *pdep = (pd_entry_t *) l1pt; 5887 pd_entry_t *pdep = (pd_entry_t *) l1pt;
5872 pt_entry_t *pte, f1, f2s, f2l; 5888 pt_entry_t *pte, f1, f2s, f2l;
5873 vsize_t resid;  5889 vsize_t resid;
5874 int i; 5890 int i;
5875 5891
5876 resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5892 resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5877 5893
5878 if (l1pt == 0) 5894 if (l1pt == 0)
5879 panic("pmap_map_chunk: no L1 table provided"); 5895 panic("pmap_map_chunk: no L1 table provided");
5880 5896
5881#ifdef VERBOSE_INIT_ARM  5897#ifdef VERBOSE_INIT_ARM
5882 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx " 5898 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
5883 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); 5899 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
5884#endif 5900#endif
5885 5901
5886 switch (cache) { 5902 switch (cache) {
5887 case PTE_NOCACHE: 5903 case PTE_NOCACHE:
5888 default: 5904 default:
5889 f1 = 0; 5905 f1 = 0;
5890 f2l = 0; 5906 f2l = 0;
5891 f2s = 0; 5907 f2s = 0;
5892 break; 5908 break;
5893 5909
5894 case PTE_CACHE: 5910 case PTE_CACHE:
5895 f1 = pte_l1_s_cache_mode; 5911 f1 = pte_l1_s_cache_mode;
5896 f2l = pte_l2_l_cache_mode; 5912 f2l = pte_l2_l_cache_mode;
5897 f2s = pte_l2_s_cache_mode; 5913 f2s = pte_l2_s_cache_mode;
5898 break; 5914 break;
5899 5915
5900 case PTE_PAGETABLE: 5916 case PTE_PAGETABLE:
5901 f1 = pte_l1_s_cache_mode_pt; 5917 f1 = pte_l1_s_cache_mode_pt;
5902 f2l = pte_l2_l_cache_mode_pt; 5918 f2l = pte_l2_l_cache_mode_pt;
5903 f2s = pte_l2_s_cache_mode_pt; 5919 f2s = pte_l2_s_cache_mode_pt;
5904 break; 5920 break;
5905 } 5921 }
5906 5922
5907 size = resid; 5923 size = resid;
5908 5924
5909 while (resid > 0) { 5925 while (resid > 0) {
5910#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 5926#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
5911 /* See if we can use a supersection mapping. */ 5927 /* See if we can use a supersection mapping. */
5912 if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) { 5928 if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
 5913 /* Supersections are always domain 0 */ 5929 /* Supersections are always domain 0 */
5914 pd_entry_t pde = L1_SS_PROTO | pa | 5930 pd_entry_t pde = L1_SS_PROTO | pa |
5915 L1_S_PROT(PTE_KERNEL, prot) | f1; 5931 L1_S_PROT(PTE_KERNEL, prot) | f1;
5916#ifdef VERBOSE_INIT_ARM 5932#ifdef VERBOSE_INIT_ARM
5917 printf("sS"); 5933 printf("sS");
5918#endif 5934#endif
5919 for (size_t s = va >> L1_S_SHIFT, 5935 for (size_t s = va >> L1_S_SHIFT,
5920 e = s + L1_SS_SIZE / L1_S_SIZE; 5936 e = s + L1_SS_SIZE / L1_S_SIZE;
5921 s < e; 5937 s < e;
5922 s++) { 5938 s++) {
5923 pdep[s] = pde; 5939 pdep[s] = pde;
5924 PTE_SYNC(&pdep[s]); 5940 PTE_SYNC(&pdep[s]);
5925 } 5941 }
5926 va += L1_SS_SIZE; 5942 va += L1_SS_SIZE;
5927 pa += L1_SS_SIZE; 5943 pa += L1_SS_SIZE;
5928 resid -= L1_SS_SIZE; 5944 resid -= L1_SS_SIZE;
5929 continue; 5945 continue;
5930 } 5946 }
5931#endif 5947#endif
5932 /* See if we can use a section mapping. */ 5948 /* See if we can use a section mapping. */
5933 if (L1_S_MAPPABLE_P(va, pa, resid)) { 5949 if (L1_S_MAPPABLE_P(va, pa, resid)) {
5934#ifdef VERBOSE_INIT_ARM 5950#ifdef VERBOSE_INIT_ARM
5935 printf("S"); 5951 printf("S");
5936#endif 5952#endif
5937 pdep[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 5953 pdep[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
5938 L1_S_PROT(PTE_KERNEL, prot) | f1 | 5954 L1_S_PROT(PTE_KERNEL, prot) | f1 |
5939 L1_S_DOM(PMAP_DOMAIN_KERNEL); 5955 L1_S_DOM(PMAP_DOMAIN_KERNEL);
5940 PTE_SYNC(&pdep[va >> L1_S_SHIFT]); 5956 PTE_SYNC(&pdep[va >> L1_S_SHIFT]);
5941 va += L1_S_SIZE; 5957 va += L1_S_SIZE;
5942 pa += L1_S_SIZE; 5958 pa += L1_S_SIZE;
5943 resid -= L1_S_SIZE; 5959 resid -= L1_S_SIZE;
5944 continue; 5960 continue;
5945 } 5961 }
5946 5962
5947 /* 5963 /*
5948 * Ok, we're going to use an L2 table. Make sure 5964 * Ok, we're going to use an L2 table. Make sure
5949 * one is actually in the corresponding L1 slot 5965 * one is actually in the corresponding L1 slot
5950 * for the current VA. 5966 * for the current VA.
5951 */ 5967 */
5952 if ((pdep[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 5968 if ((pdep[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
5953 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va); 5969 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
5954 5970
5955#ifndef ARM32_NEW_VM_LAYOUT 5971#ifndef ARM32_NEW_VM_LAYOUT
5956 pte = (pt_entry_t *) 5972 pte = (pt_entry_t *)
5957 kernel_pt_lookup(pdep[va >> L1_S_SHIFT] & L2_S_FRAME); 5973 kernel_pt_lookup(pdep[va >> L1_S_SHIFT] & L2_S_FRAME);
5958#else 5974#else
5959 pte = (pt_entry_t *) kernel_pt_lookup( 5975 pte = (pt_entry_t *) kernel_pt_lookup(
5960 pdep[L1_IDX(va)] & L1_C_ADDR_MASK); 5976 pdep[L1_IDX(va)] & L1_C_ADDR_MASK);
5961#endif 5977#endif
5962 if (pte == NULL) 5978 if (pte == NULL)
5963 panic("pmap_map_chunk: can't find L2 table for VA" 5979 panic("pmap_map_chunk: can't find L2 table for VA"
5964 "0x%08lx", va); 5980 "0x%08lx", va);
5965 5981
5966 /* See if we can use a L2 large page mapping. */ 5982 /* See if we can use a L2 large page mapping. */
5967 if (L2_L_MAPPABLE_P(va, pa, resid)) { 5983 if (L2_L_MAPPABLE_P(va, pa, resid)) {
5968#ifdef VERBOSE_INIT_ARM 5984#ifdef VERBOSE_INIT_ARM
5969 printf("L"); 5985 printf("L");
5970#endif 5986#endif
5971 for (i = 0; i < 16; i++) { 5987 for (i = 0; i < 16; i++) {
5972#ifndef ARM32_NEW_VM_LAYOUT 5988#ifndef ARM32_NEW_VM_LAYOUT
5973 pte[((va >> PGSHIFT) & 0x3f0) + i] = 5989 pte[((va >> PGSHIFT) & 0x3f0) + i] =
5974 L2_L_PROTO | pa | 5990 L2_L_PROTO | pa |
5975 L2_L_PROT(PTE_KERNEL, prot) | f2l; 5991 L2_L_PROT(PTE_KERNEL, prot) | f2l;
5976 PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]); 5992 PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
5977#else 5993#else
5978 pte[l2pte_index(va) + i] = 5994 pte[l2pte_index(va) + i] =
5979 L2_L_PROTO | pa | 5995 L2_L_PROTO | pa |
5980 L2_L_PROT(PTE_KERNEL, prot) | f2l; 5996 L2_L_PROT(PTE_KERNEL, prot) | f2l;
5981 PTE_SYNC(&pte[l2pte_index(va) + i]); 5997 PTE_SYNC(&pte[l2pte_index(va) + i]);
5982#endif 5998#endif
5983 } 5999 }
5984 va += L2_L_SIZE; 6000 va += L2_L_SIZE;
5985 pa += L2_L_SIZE; 6001 pa += L2_L_SIZE;
5986 resid -= L2_L_SIZE; 6002 resid -= L2_L_SIZE;
5987 continue; 6003 continue;
5988 } 6004 }
5989 6005
5990 /* Use a small page mapping. */ 6006 /* Use a small page mapping. */
5991#ifdef VERBOSE_INIT_ARM 6007#ifdef VERBOSE_INIT_ARM
5992 printf("P"); 6008 printf("P");
5993#endif 6009#endif
5994#ifndef ARM32_NEW_VM_LAYOUT 6010#ifndef ARM32_NEW_VM_LAYOUT
5995 pte[(va >> PGSHIFT) & 0x3ff] = 6011 pte[(va >> PGSHIFT) & 0x3ff] =
5996 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; 6012 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
5997 PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]); 6013 PTE_SYNC(&pte[(va >> PGSHIFT) & 0x3ff]);
5998#else 6014#else
5999 pte[l2pte_index(va)] = 6015 pte[l2pte_index(va)] =
6000 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; 6016 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
6001 PTE_SYNC(&pte[l2pte_index(va)]); 6017 PTE_SYNC(&pte[l2pte_index(va)]);
6002#endif 6018#endif
6003 va += PAGE_SIZE; 6019 va += PAGE_SIZE;
6004 pa += PAGE_SIZE; 6020 pa += PAGE_SIZE;
6005 resid -= PAGE_SIZE; 6021 resid -= PAGE_SIZE;
6006 } 6022 }
6007#ifdef VERBOSE_INIT_ARM 6023#ifdef VERBOSE_INIT_ARM
6008 printf("\n"); 6024 printf("\n");
6009#endif 6025#endif
6010 return (size); 6026 return (size);
6011} 6027}
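The loop above always picks the largest mapping that the current alignment and remaining size allow (supersection, then section, then 64KB large page, then 4KB small page). A worked example with invented, 1MB-aligned addresses:

    /*
     * Hypothetical example: 0x00510000 bytes (5MB + 64KB) at a
     * 1MB-aligned va/pa pair, too small for a 16MB supersection,
     * decomposes as
     *
     *     5 x 1MB sections      -> 5 L1 entries
     *     1 x 64KB large page   -> 16 L2 entries
     *
     * The final 64KB requires that an L2 table was linked at that
     * L1 slot with pmap_link_l2pt() beforehand, or the function
     * panics.
     */
    vsize_t mapped = pmap_map_chunk(l1pagetable,
        0xc0000000,                     /* invented VA, 1MB-aligned */
        0x80000000,                     /* invented PA, 1MB-aligned */
        0x00510000,                     /* 5MB + 64KB */
        VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
    /* mapped == 0x00510000 on return (size rounded up to pages). */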
6012 6028
6013/********************** Static device map routines ***************************/ 6029/********************** Static device map routines ***************************/
6014 6030
6015static const struct pmap_devmap *pmap_devmap_table; 6031static const struct pmap_devmap *pmap_devmap_table;
6016 6032
6017/* 6033/*
6018 * Register the devmap table. This is provided in case early console 6034 * Register the devmap table. This is provided in case early console
6019 * initialization needs to register mappings created by bootstrap code 6035 * initialization needs to register mappings created by bootstrap code
6020 * before pmap_devmap_bootstrap() is called. 6036 * before pmap_devmap_bootstrap() is called.
6021 */ 6037 */
6022void 6038void
6023pmap_devmap_register(const struct pmap_devmap *table) 6039pmap_devmap_register(const struct pmap_devmap *table)
6024{ 6040{
6025 6041
6026 pmap_devmap_table = table; 6042 pmap_devmap_table = table;
6027} 6043}
6028 6044
6029/* 6045/*
6030 * Map all of the static regions in the devmap table, and remember 6046 * Map all of the static regions in the devmap table, and remember
6031 * the devmap table so other parts of the kernel can look up entries 6047 * the devmap table so other parts of the kernel can look up entries
6032 * later. 6048 * later.
6033 */ 6049 */
6034void 6050void
6035pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table) 6051pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
6036{ 6052{
6037 int i; 6053 int i;
6038 6054
6039 pmap_devmap_table = table; 6055 pmap_devmap_table = table;
6040 6056
6041 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 6057 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6042#ifdef VERBOSE_INIT_ARM 6058#ifdef VERBOSE_INIT_ARM
6043 printf("devmap: %08lx -> %08lx @ %08lx\n", 6059 printf("devmap: %08lx -> %08lx @ %08lx\n",
6044 pmap_devmap_table[i].pd_pa, 6060 pmap_devmap_table[i].pd_pa,
6045 pmap_devmap_table[i].pd_pa + 6061 pmap_devmap_table[i].pd_pa +
6046 pmap_devmap_table[i].pd_size - 1, 6062 pmap_devmap_table[i].pd_size - 1,
6047 pmap_devmap_table[i].pd_va); 6063 pmap_devmap_table[i].pd_va);
6048#endif 6064#endif
6049 pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va, 6065 pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
6050 pmap_devmap_table[i].pd_pa, 6066 pmap_devmap_table[i].pd_pa,
6051 pmap_devmap_table[i].pd_size, 6067 pmap_devmap_table[i].pd_size,
6052 pmap_devmap_table[i].pd_prot, 6068 pmap_devmap_table[i].pd_prot,
6053 pmap_devmap_table[i].pd_cache); 6069 pmap_devmap_table[i].pd_cache);
6054 } 6070 }
6055} 6071}
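A devmap table is simply a zero-terminated array of struct pmap_devmap. A hedged example of what a board might pass in; the addresses are invented, and the field names are the ones used above:

    /*
     * Hypothetical board devmap: the pd_size == 0 entry terminates
     * the loop in pmap_devmap_bootstrap().
     */
    static const struct pmap_devmap hypothetical_devmap[] = {
        {
            .pd_va    = 0xfd000000,     /* invented virtual base */
            .pd_pa    = 0x48000000,     /* invented physical base */
            .pd_size  = 0x00100000,     /* 1MB of device registers */
            .pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
            .pd_cache = PTE_NOCACHE,
        },
        { .pd_size = 0 },               /* terminator */
    };

    /* In initarm(), once the bootstrap L1 table exists: */
    pmap_devmap_bootstrap(l1pagetable, hypothetical_devmap);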
6056 6072
6057const struct pmap_devmap * 6073const struct pmap_devmap *
6058pmap_devmap_find_pa(paddr_t pa, psize_t size) 6074pmap_devmap_find_pa(paddr_t pa, psize_t size)
6059{ 6075{
6060 uint64_t endpa; 6076 uint64_t endpa;
6061 int i; 6077 int i;
6062 6078
6063 if (pmap_devmap_table == NULL) 6079 if (pmap_devmap_table == NULL)
6064 return (NULL); 6080 return (NULL);
6065 6081
6066 endpa = (uint64_t)pa + (uint64_t)(size - 1); 6082 endpa = (uint64_t)pa + (uint64_t)(size - 1);
6067 6083
6068 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 6084 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6069 if (pa >= pmap_devmap_table[i].pd_pa && 6085 if (pa >= pmap_devmap_table[i].pd_pa &&
6070 endpa <= (uint64_t)pmap_devmap_table[i].pd_pa + 6086 endpa <= (uint64_t)pmap_devmap_table[i].pd_pa +
6071 (uint64_t)(pmap_devmap_table[i].pd_size - 1)) 6087 (uint64_t)(pmap_devmap_table[i].pd_size - 1))
6072 return (&pmap_devmap_table[i]); 6088 return (&pmap_devmap_table[i]);
6073 } 6089 }
6074 6090
6075 return (NULL); 6091 return (NULL);
6076} 6092}
6077 6093
6078const struct pmap_devmap * 6094const struct pmap_devmap *
6079pmap_devmap_find_va(vaddr_t va, vsize_t size) 6095pmap_devmap_find_va(vaddr_t va, vsize_t size)
6080{ 6096{
6081 int i; 6097 int i;
6082 6098
6083 if (pmap_devmap_table == NULL) 6099 if (pmap_devmap_table == NULL)
6084 return (NULL); 6100 return (NULL);
6085 6101
6086 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 6102 for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
6087 if (va >= pmap_devmap_table[i].pd_va && 6103 if (va >= pmap_devmap_table[i].pd_va &&
6088 va + size - 1 <= pmap_devmap_table[i].pd_va + 6104 va + size - 1 <= pmap_devmap_table[i].pd_va +
6089 pmap_devmap_table[i].pd_size - 1) 6105 pmap_devmap_table[i].pd_size - 1)
6090 return (&pmap_devmap_table[i]); 6106 return (&pmap_devmap_table[i]);
6091 } 6107 }
6092 6108
6093 return (NULL); 6109 return (NULL);
6094} 6110}
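These lookup routines exist so later code can reuse the static mappings instead of creating new ones; a bus_space map implementation, for instance, could consult the table roughly as sketched below. The bpa/size/bshp names are illustrative parameters, and the fallback path is omitted:

    /*
     * Sketch: if a static devmap entry already covers the requested
     * physical range, hand back a VA inside it instead of mapping
     * anything new.
     */
    const struct pmap_devmap *pd;

    pd = pmap_devmap_find_pa(bpa, size);
    if (pd != NULL) {
        *bshp = pd->pd_va + (bpa - pd->pd_pa);
        return 0;
    }
    /* ...otherwise fall back to allocating KVA and mapping it. */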
6095 6111
6096/********************** PTE initialization routines **************************/ 6112/********************** PTE initialization routines **************************/
6097 6113
6098/* 6114/*
6099 * These routines are called when the CPU type is identified to set up 6115 * These routines are called when the CPU type is identified to set up
6100 * the PTE prototypes, cache modes, etc. 6116 * the PTE prototypes, cache modes, etc.
6101 * 6117 *
6102 * The variables are always here, just in case modules need to reference 6118 * The variables are always here, just in case modules need to reference
6103 * them (though, they shouldn't). 6119 * them (though, they shouldn't).
6104 */ 6120 */
6105 6121
6106pt_entry_t pte_l1_s_cache_mode; 6122pt_entry_t pte_l1_s_cache_mode;
6107pt_entry_t pte_l1_s_wc_mode; 6123pt_entry_t pte_l1_s_wc_mode;
6108pt_entry_t pte_l1_s_cache_mode_pt; 6124pt_entry_t pte_l1_s_cache_mode_pt;
6109pt_entry_t pte_l1_s_cache_mask; 6125pt_entry_t pte_l1_s_cache_mask;
6110 6126
6111pt_entry_t pte_l2_l_cache_mode; 6127pt_entry_t pte_l2_l_cache_mode;
6112pt_entry_t pte_l2_l_wc_mode; 6128pt_entry_t pte_l2_l_wc_mode;
6113pt_entry_t pte_l2_l_cache_mode_pt; 6129pt_entry_t pte_l2_l_cache_mode_pt;
6114pt_entry_t pte_l2_l_cache_mask; 6130pt_entry_t pte_l2_l_cache_mask;
6115 6131
6116pt_entry_t pte_l2_s_cache_mode; 6132pt_entry_t pte_l2_s_cache_mode;
6117pt_entry_t pte_l2_s_wc_mode; 6133pt_entry_t pte_l2_s_wc_mode;
6118pt_entry_t pte_l2_s_cache_mode_pt; 6134pt_entry_t pte_l2_s_cache_mode_pt;
6119pt_entry_t pte_l2_s_cache_mask; 6135pt_entry_t pte_l2_s_cache_mask;
6120 6136
6121pt_entry_t pte_l1_s_prot_u; 6137pt_entry_t pte_l1_s_prot_u;
6122pt_entry_t pte_l1_s_prot_w; 6138pt_entry_t pte_l1_s_prot_w;
6123pt_entry_t pte_l1_s_prot_ro; 6139pt_entry_t pte_l1_s_prot_ro;
6124pt_entry_t pte_l1_s_prot_mask; 6140pt_entry_t pte_l1_s_prot_mask;
6125 6141
6126pt_entry_t pte_l2_s_prot_u; 6142pt_entry_t pte_l2_s_prot_u;
6127pt_entry_t pte_l2_s_prot_w; 6143pt_entry_t pte_l2_s_prot_w;
6128pt_entry_t pte_l2_s_prot_ro; 6144pt_entry_t pte_l2_s_prot_ro;
6129pt_entry_t pte_l2_s_prot_mask; 6145pt_entry_t pte_l2_s_prot_mask;
6130 6146
6131pt_entry_t pte_l2_l_prot_u; 6147pt_entry_t pte_l2_l_prot_u;
6132pt_entry_t pte_l2_l_prot_w; 6148pt_entry_t pte_l2_l_prot_w;
6133pt_entry_t pte_l2_l_prot_ro; 6149pt_entry_t pte_l2_l_prot_ro;
6134pt_entry_t pte_l2_l_prot_mask; 6150pt_entry_t pte_l2_l_prot_mask;
6135 6151
6136pt_entry_t pte_l1_ss_proto; 6152pt_entry_t pte_l1_ss_proto;
6137pt_entry_t pte_l1_s_proto; 6153pt_entry_t pte_l1_s_proto;
6138pt_entry_t pte_l1_c_proto; 6154pt_entry_t pte_l1_c_proto;
6139pt_entry_t pte_l2_s_proto; 6155pt_entry_t pte_l2_s_proto;
6140 6156
6141void (*pmap_copy_page_func)(paddr_t, paddr_t); 6157void (*pmap_copy_page_func)(paddr_t, paddr_t);
6142void (*pmap_zero_page_func)(paddr_t); 6158void (*pmap_zero_page_func)(paddr_t);
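Once one of the pmap_pte_init_*() routines below has filled these in, the rest of the pmap composes PTEs from them, following the same pattern seen in pmap_map_chunk() above. A minimal sketch, with `pa' standing for an arbitrary page-aligned physical address:

    /*
     * Sketch: build a kernel read/write, cacheable L2 small-page
     * PTE from the prototype and cache-mode variables declared
     * above.
     */
    pt_entry_t npte = L2_S_PROTO | pa |
        L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
        pte_l2_s_cache_mode;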
6143 6159
6144#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 6160#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
6145void 6161void
6146pmap_pte_init_generic(void) 6162pmap_pte_init_generic(void)
6147{ 6163{
6148 6164
6149 pte_l1_s_cache_mode = L1_S_B|L1_S_C; 6165 pte_l1_s_cache_mode = L1_S_B|L1_S_C;
6150 pte_l1_s_wc_mode = L1_S_B; 6166 pte_l1_s_wc_mode = L1_S_B;
6151 pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; 6167 pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
6152 6168
6153 pte_l2_l_cache_mode = L2_B|L2_C; 6169 pte_l2_l_cache_mode = L2_B|L2_C;
6154 pte_l2_l_wc_mode = L2_B; 6170 pte_l2_l_wc_mode = L2_B;
6155 pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; 6171 pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
6156 6172
6157 pte_l2_s_cache_mode = L2_B|L2_C; 6173 pte_l2_s_cache_mode = L2_B|L2_C;
6158 pte_l2_s_wc_mode = L2_B; 6174 pte_l2_s_wc_mode = L2_B;
6159 pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; 6175 pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
6160 6176
6161 /* 6177 /*
6162 * If we have a write-through cache, set B and C. If 6178 * If we have a write-through cache, set B and C. If
6163 * we have a write-back cache, then we assume setting 6179 * we have a write-back cache, then we assume setting
6164 * only C will make those pages write-through (except for those 6180 * only C will make those pages write-through (except for those
6165 * Cortex CPUs which can read the L1 caches). 6181 * Cortex CPUs which can read the L1 caches).
6166 */ 6182 */
6167 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop 6183 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
6168#if ARM_MMU_V7 > 0 6184#if ARM_MMU_V7 > 0
6169 || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid) 6185 || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
6170#endif 6186#endif
6171#if ARM_MMU_V6 > 0 6187#if ARM_MMU_V6 > 0
6172 || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* arm116 errata 399234 */ 6188 || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* arm116 errata 399234 */
6173#endif 6189#endif
6174 || false) { 6190 || false) {
6175 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 6191 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
6176 pte_l2_l_cache_mode_pt = L2_B|L2_C; 6192 pte_l2_l_cache_mode_pt = L2_B|L2_C;
6177 pte_l2_s_cache_mode_pt = L2_B|L2_C; 6193 pte_l2_s_cache_mode_pt = L2_B|L2_C;
6178 } else { 6194 } else {
6179 pte_l1_s_cache_mode_pt = L1_S_C; /* write through */ 6195 pte_l1_s_cache_mode_pt = L1_S_C; /* write through */
6180 pte_l2_l_cache_mode_pt = L2_C; /* write through */ 6196 pte_l2_l_cache_mode_pt = L2_C; /* write through */
6181 pte_l2_s_cache_mode_pt = L2_C; /* write through */ 6197 pte_l2_s_cache_mode_pt = L2_C; /* write through */
6182 } 6198 }
6183 6199
6184 pte_l1_s_prot_u = L1_S_PROT_U_generic; 6200 pte_l1_s_prot_u = L1_S_PROT_U_generic;
6185 pte_l1_s_prot_w = L1_S_PROT_W_generic; 6201 pte_l1_s_prot_w = L1_S_PROT_W_generic;
6186 pte_l1_s_prot_ro = L1_S_PROT_RO_generic; 6202 pte_l1_s_prot_ro = L1_S_PROT_RO_generic;
6187 pte_l1_s_prot_mask = L1_S_PROT_MASK_generic; 6203 pte_l1_s_prot_mask = L1_S_PROT_MASK_generic;
6188 6204
6189 pte_l2_s_prot_u = L2_S_PROT_U_generic; 6205 pte_l2_s_prot_u = L2_S_PROT_U_generic;
6190 pte_l2_s_prot_w = L2_S_PROT_W_generic; 6206 pte_l2_s_prot_w = L2_S_PROT_W_generic;
6191 pte_l2_s_prot_ro = L2_S_PROT_RO_generic; 6207 pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
6192 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 6208 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
6193 6209
6194 pte_l2_l_prot_u = L2_L_PROT_U_generic; 6210 pte_l2_l_prot_u = L2_L_PROT_U_generic;
6195 pte_l2_l_prot_w = L2_L_PROT_W_generic; 6211 pte_l2_l_prot_w = L2_L_PROT_W_generic;
6196 pte_l2_l_prot_ro = L2_L_PROT_RO_generic; 6212 pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
6197 pte_l2_l_prot_mask = L2_L_PROT_MASK_generic; 6213 pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
6198 6214
6199 pte_l1_ss_proto = L1_SS_PROTO_generic; 6215 pte_l1_ss_proto = L1_SS_PROTO_generic;
6200 pte_l1_s_proto = L1_S_PROTO_generic; 6216 pte_l1_s_proto = L1_S_PROTO_generic;
6201 pte_l1_c_proto = L1_C_PROTO_generic; 6217 pte_l1_c_proto = L1_C_PROTO_generic;
6202 pte_l2_s_proto = L2_S_PROTO_generic; 6218 pte_l2_s_proto = L2_S_PROTO_generic;
6203 6219
6204 pmap_copy_page_func = pmap_copy_page_generic; 6220 pmap_copy_page_func = pmap_copy_page_generic;
6205 pmap_zero_page_func = pmap_zero_page_generic; 6221 pmap_zero_page_func = pmap_zero_page_generic;
6206} 6222}
6207 6223
6208#if defined(CPU_ARM8) 6224#if defined(CPU_ARM8)
6209void 6225void
6210pmap_pte_init_arm8(void) 6226pmap_pte_init_arm8(void)
6211{ 6227{
6212 6228
6213 /* 6229 /*
6214 * ARM8 is compatible with generic, but we need to use 6230 * ARM8 is compatible with generic, but we need to use
6215 * the page tables uncached. 6231 * the page tables uncached.
6216 */ 6232 */
6217 pmap_pte_init_generic(); 6233 pmap_pte_init_generic();
6218 6234
6219 pte_l1_s_cache_mode_pt = 0; 6235 pte_l1_s_cache_mode_pt = 0;
6220 pte_l2_l_cache_mode_pt = 0; 6236 pte_l2_l_cache_mode_pt = 0;
6221 pte_l2_s_cache_mode_pt = 0; 6237 pte_l2_s_cache_mode_pt = 0;
6222} 6238}
6223#endif /* CPU_ARM8 */ 6239#endif /* CPU_ARM8 */
6224 6240
6225#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH) 6241#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
6226void 6242void
6227pmap_pte_init_arm9(void) 6243pmap_pte_init_arm9(void)
6228{ 6244{
6229 6245
6230 /* 6246 /*
6231 * ARM9 is compatible with generic, but we want to use 6247 * ARM9 is compatible with generic, but we want to use
6232 * write-through caching for now. 6248 * write-through caching for now.
6233 */ 6249 */
6234 pmap_pte_init_generic(); 6250 pmap_pte_init_generic();
6235 6251
6236 pte_l1_s_cache_mode = L1_S_C; 6252 pte_l1_s_cache_mode = L1_S_C;
6237 pte_l2_l_cache_mode = L2_C; 6253 pte_l2_l_cache_mode = L2_C;
6238 pte_l2_s_cache_mode = L2_C; 6254 pte_l2_s_cache_mode = L2_C;
6239 6255
6240 pte_l1_s_wc_mode = L1_S_B; 6256 pte_l1_s_wc_mode = L1_S_B;
6241 pte_l2_l_wc_mode = L2_B; 6257 pte_l2_l_wc_mode = L2_B;
6242 pte_l2_s_wc_mode = L2_B; 6258 pte_l2_s_wc_mode = L2_B;
6243 6259
6244 pte_l1_s_cache_mode_pt = L1_S_C; 6260 pte_l1_s_cache_mode_pt = L1_S_C;
6245 pte_l2_l_cache_mode_pt = L2_C; 6261 pte_l2_l_cache_mode_pt = L2_C;
6246 pte_l2_s_cache_mode_pt = L2_C; 6262 pte_l2_s_cache_mode_pt = L2_C;
6247} 6263}
6248#endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */ 6264#endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
6249#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */ 6265#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
6250 6266
6251#if defined(CPU_ARM10) 6267#if defined(CPU_ARM10)
6252void 6268void
6253pmap_pte_init_arm10(void) 6269pmap_pte_init_arm10(void)
6254{ 6270{
6255 6271
6256 /* 6272 /*
6257 * ARM10 is compatible with generic, but we want to use 6273 * ARM10 is compatible with generic, but we want to use
6258 * write-through caching for now. 6274 * write-through caching for now.
6259 */ 6275 */
6260 pmap_pte_init_generic(); 6276 pmap_pte_init_generic();
6261 6277
6262 pte_l1_s_cache_mode = L1_S_B | L1_S_C; 6278 pte_l1_s_cache_mode = L1_S_B | L1_S_C;
6263 pte_l2_l_cache_mode = L2_B | L2_C; 6279 pte_l2_l_cache_mode = L2_B | L2_C;
6264 pte_l2_s_cache_mode = L2_B | L2_C; 6280 pte_l2_s_cache_mode = L2_B | L2_C;
6265 6281
6266 pte_l1_s_cache_mode = L1_S_B; 6282 pte_l1_s_cache_mode = L1_S_B;
6267 pte_l2_l_cache_mode = L2_B; 6283 pte_l2_l_cache_mode = L2_B;
6268 pte_l2_s_cache_mode = L2_B; 6284 pte_l2_s_cache_mode = L2_B;
6269 6285
6270 pte_l1_s_cache_mode_pt = L1_S_C; 6286 pte_l1_s_cache_mode_pt = L1_S_C;
6271 pte_l2_l_cache_mode_pt = L2_C; 6287 pte_l2_l_cache_mode_pt = L2_C;
6272 pte_l2_s_cache_mode_pt = L2_C; 6288 pte_l2_s_cache_mode_pt = L2_C;
6273 6289
6274} 6290}
6275#endif /* CPU_ARM10 */ 6291#endif /* CPU_ARM10 */
6276 6292
6277#if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH) 6293#if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
6278void 6294void
6279pmap_pte_init_arm11(void) 6295pmap_pte_init_arm11(void)
6280{ 6296{
6281 6297
6282 /* 6298 /*
6283 * ARM11 is compatible with generic, but we want to use 6299 * ARM11 is compatible with generic, but we want to use
6284 * write-through caching for now. 6300 * write-through caching for now.
6285 */ 6301 */
6286 pmap_pte_init_generic(); 6302 pmap_pte_init_generic();
6287 6303
6288 pte_l1_s_cache_mode = L1_S_C; 6304 pte_l1_s_cache_mode = L1_S_C;
6289 pte_l2_l_cache_mode = L2_C; 6305 pte_l2_l_cache_mode = L2_C;
6290 pte_l2_s_cache_mode = L2_C; 6306 pte_l2_s_cache_mode = L2_C;
6291 6307
6292 pte_l1_s_wc_mode = L1_S_B; 6308 pte_l1_s_wc_mode = L1_S_B;
6293 pte_l2_l_wc_mode = L2_B; 6309 pte_l2_l_wc_mode = L2_B;
6294 pte_l2_s_wc_mode = L2_B; 6310 pte_l2_s_wc_mode = L2_B;
6295 6311
6296 pte_l1_s_cache_mode_pt = L1_S_C; 6312 pte_l1_s_cache_mode_pt = L1_S_C;
6297 pte_l2_l_cache_mode_pt = L2_C; 6313 pte_l2_l_cache_mode_pt = L2_C;
6298 pte_l2_s_cache_mode_pt = L2_C; 6314 pte_l2_s_cache_mode_pt = L2_C;
6299} 6315}
6300#endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */ 6316#endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */
6301 6317
6302#if ARM_MMU_SA1 == 1 6318#if ARM_MMU_SA1 == 1
6303void 6319void
6304pmap_pte_init_sa1(void) 6320pmap_pte_init_sa1(void)
6305{ 6321{
6306 6322
6307 /* 6323 /*
6308 * The StrongARM SA-1 cache does not have a write-through 6324 * The StrongARM SA-1 cache does not have a write-through
6309 * mode. So, do the generic initialization, then reset 6325 * mode. So, do the generic initialization, then reset
6310 * the page table cache mode to B=1,C=1, and note that 6326 * the page table cache mode to B=1,C=1, and note that
6311 * the PTEs need to be sync'd. 6327 * the PTEs need to be sync'd.
6312 */ 6328 */
6313 pmap_pte_init_generic(); 6329 pmap_pte_init_generic();
6314 6330
6315 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 6331 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
6316 pte_l2_l_cache_mode_pt = L2_B|L2_C; 6332 pte_l2_l_cache_mode_pt = L2_B|L2_C;
6317 pte_l2_s_cache_mode_pt = L2_B|L2_C; 6333 pte_l2_s_cache_mode_pt = L2_B|L2_C;
6318 6334
6319 pmap_needs_pte_sync = 1; 6335 pmap_needs_pte_sync = 1;
6320} 6336}
6321#endif /* ARM_MMU_SA1 == 1*/ 6337#endif /* ARM_MMU_SA1 == 1*/
6322 6338
6323#if ARM_MMU_XSCALE == 1 6339#if ARM_MMU_XSCALE == 1
6324#if (ARM_NMMUS > 1) 6340#if (ARM_NMMUS > 1)
6325static u_int xscale_use_minidata; 6341static u_int xscale_use_minidata;
6326#endif 6342#endif
6327 6343
6328void 6344void
6329pmap_pte_init_xscale(void) 6345pmap_pte_init_xscale(void)
6330{ 6346{
6331 uint32_t auxctl; 6347 uint32_t auxctl;
6332 int write_through = 0; 6348 int write_through = 0;
6333 6349
6334 pte_l1_s_cache_mode = L1_S_B|L1_S_C; 6350 pte_l1_s_cache_mode = L1_S_B|L1_S_C;
6335 pte_l1_s_wc_mode = L1_S_B; 6351 pte_l1_s_wc_mode = L1_S_B;
6336 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; 6352 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
6337 6353
6338 pte_l2_l_cache_mode = L2_B|L2_C; 6354 pte_l2_l_cache_mode = L2_B|L2_C;
6339 pte_l2_l_wc_mode = L2_B; 6355 pte_l2_l_wc_mode = L2_B;
6340 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; 6356 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
6341 6357
6342 pte_l2_s_cache_mode = L2_B|L2_C; 6358 pte_l2_s_cache_mode = L2_B|L2_C;
6343 pte_l2_s_wc_mode = L2_B; 6359 pte_l2_s_wc_mode = L2_B;
6344 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; 6360 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
6345 6361
6346 pte_l1_s_cache_mode_pt = L1_S_C; 6362 pte_l1_s_cache_mode_pt = L1_S_C;
6347 pte_l2_l_cache_mode_pt = L2_C; 6363 pte_l2_l_cache_mode_pt = L2_C;
6348 pte_l2_s_cache_mode_pt = L2_C; 6364 pte_l2_s_cache_mode_pt = L2_C;
6349 6365
6350#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE 6366#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
6351 /* 6367 /*
6352 * The XScale core has an enhanced mode where writes that 6368 * The XScale core has an enhanced mode where writes that
6353 * miss the cache cause a cache line to be allocated. This 6369 * miss the cache cause a cache line to be allocated. This
6354 * is significantly faster than the traditional, write-through 6370 * is significantly faster than the traditional, write-through
6355 * behavior of this case. 6371 * behavior of this case.
6356 */ 6372 */
6357 pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X); 6373 pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
6358 pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X); 6374 pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
6359 pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X); 6375 pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
6360#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ 6376#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
6361 6377
6362#ifdef XSCALE_CACHE_WRITE_THROUGH 6378#ifdef XSCALE_CACHE_WRITE_THROUGH
6363 /* 6379 /*
6364 * Some versions of the XScale core have various bugs in 6380 * Some versions of the XScale core have various bugs in
6365 * their cache units, the work-around for which is to run 6381 * their cache units, the work-around for which is to run
6366 * the cache in write-through mode. Unfortunately, this 6382 * the cache in write-through mode. Unfortunately, this
6367 * has a major (negative) impact on performance. So, we 6383 * has a major (negative) impact on performance. So, we
6368 * go ahead and run fast-and-loose, in the hopes that we 6384 * go ahead and run fast-and-loose, in the hopes that we
6369 * don't line up the planets in a way that will trip the 6385 * don't line up the planets in a way that will trip the
6370 * bugs. 6386 * bugs.
6371 * 6387 *
6372 * However, we give you the option to be slow-but-correct. 6388 * However, we give you the option to be slow-but-correct.
6373 */ 6389 */
6374 write_through = 1; 6390 write_through = 1;
6375#elif defined(XSCALE_CACHE_WRITE_BACK) 6391#elif defined(XSCALE_CACHE_WRITE_BACK)
6376 /* force write back cache mode */ 6392 /* force write back cache mode */
6377 write_through = 0; 6393 write_through = 0;
6378#elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270) 6394#elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
6379 /* 6395 /*
6380 * Intel PXA2[15]0 processors are known to have a bug in 6396 * Intel PXA2[15]0 processors are known to have a bug in
6381 * write-back cache on revision 4 and earlier (stepping 6397 * write-back cache on revision 4 and earlier (stepping
6382 * A[01] and B[012]). Fixed for C0 and later. 6398 * A[01] and B[012]). Fixed for C0 and later.
6383 */ 6399 */
6384 { 6400 {
6385 uint32_t id, type; 6401 uint32_t id, type;
6386 6402
6387 id = cpufunc_id(); 6403 id = cpufunc_id();
6388 type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); 6404 type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
6389 6405
6390 if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { 6406 if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
6391 if ((id & CPU_ID_REVISION_MASK) < 5) { 6407 if ((id & CPU_ID_REVISION_MASK) < 5) {
6392 /* write through for stepping A0-1 and B0-2 */ 6408 /* write through for stepping A0-1 and B0-2 */
6393 write_through = 1; 6409 write_through = 1;
6394 } 6410 }
6395 } 6411 }
6396 } 6412 }
6397#endif /* XSCALE_CACHE_WRITE_THROUGH */ 6413#endif /* XSCALE_CACHE_WRITE_THROUGH */
6398 6414
6399 if (write_through) { 6415 if (write_through) {
6400 pte_l1_s_cache_mode = L1_S_C; 6416 pte_l1_s_cache_mode = L1_S_C;
6401 pte_l2_l_cache_mode = L2_C; 6417 pte_l2_l_cache_mode = L2_C;
6402 pte_l2_s_cache_mode = L2_C; 6418 pte_l2_s_cache_mode = L2_C;
6403 } 6419 }
6404 6420
6405#if (ARM_NMMUS > 1) 6421#if (ARM_NMMUS > 1)
6406 xscale_use_minidata = 1; 6422 xscale_use_minidata = 1;
6407#endif 6423#endif
6408 6424
6409 pte_l1_s_prot_u = L1_S_PROT_U_xscale; 6425 pte_l1_s_prot_u = L1_S_PROT_U_xscale;
6410 pte_l1_s_prot_w = L1_S_PROT_W_xscale; 6426 pte_l1_s_prot_w = L1_S_PROT_W_xscale;
6411 pte_l1_s_prot_ro = L1_S_PROT_RO_xscale; 6427 pte_l1_s_prot_ro = L1_S_PROT_RO_xscale;
6412 pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale; 6428 pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale;
6413 6429
6414 pte_l2_s_prot_u = L2_S_PROT_U_xscale; 6430 pte_l2_s_prot_u = L2_S_PROT_U_xscale;
6415 pte_l2_s_prot_w = L2_S_PROT_W_xscale; 6431 pte_l2_s_prot_w = L2_S_PROT_W_xscale;
6416 pte_l2_s_prot_ro = L2_S_PROT_RO_xscale; 6432 pte_l2_s_prot_ro = L2_S_PROT_RO_xscale;
6417 pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; 6433 pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
6418 6434
6419 pte_l2_l_prot_u = L2_L_PROT_U_xscale; 6435 pte_l2_l_prot_u = L2_L_PROT_U_xscale;
6420 pte_l2_l_prot_w = L2_L_PROT_W_xscale; 6436 pte_l2_l_prot_w = L2_L_PROT_W_xscale;
6421 pte_l2_l_prot_ro = L2_L_PROT_RO_xscale; 6437 pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
6422 pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale; 6438 pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;
6423 6439
6424 pte_l1_ss_proto = L1_SS_PROTO_xscale; 6440 pte_l1_ss_proto = L1_SS_PROTO_xscale;
6425 pte_l1_s_proto = L1_S_PROTO_xscale; 6441 pte_l1_s_proto = L1_S_PROTO_xscale;
6426 pte_l1_c_proto = L1_C_PROTO_xscale; 6442 pte_l1_c_proto = L1_C_PROTO_xscale;
6427 pte_l2_s_proto = L2_S_PROTO_xscale; 6443 pte_l2_s_proto = L2_S_PROTO_xscale;
6428 6444
6429 pmap_copy_page_func = pmap_copy_page_xscale; 6445 pmap_copy_page_func = pmap_copy_page_xscale;
6430 pmap_zero_page_func = pmap_zero_page_xscale; 6446 pmap_zero_page_func = pmap_zero_page_xscale;
6431 6447
6432 /* 6448 /*
6433 * Disable ECC protection of page table access, for now. 6449 * Disable ECC protection of page table access, for now.
6434 */ 6450 */
6435 __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); 6451 __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
6436 auxctl &= ~XSCALE_AUXCTL_P; 6452 auxctl &= ~XSCALE_AUXCTL_P;
6437 __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); 6453 __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
6438} 6454}
6439 6455
6440/* 6456/*
6441 * xscale_setup_minidata: 6457 * xscale_setup_minidata:
6442 * 6458 *
6443 * Set up the mini-data cache clean area. We require the 6459 * Set up the mini-data cache clean area. We require the
6444 * caller to allocate the right amount of physically and 6460 * caller to allocate the right amount of physically and
6445 * virtually contiguous space. 6461 * virtually contiguous space.
6446 */ 6462 */
6447void 6463void
6448xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa) 6464xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)

cvs diff -r1.118 -r1.119 src/sys/arch/arm/conf/files.arm (switch to unified diff)

--- src/sys/arch/arm/conf/files.arm 2013/06/12 07:12:10 1.118
+++ src/sys/arch/arm/conf/files.arm 2013/06/12 21:34:12 1.119
@@ -1,203 +1,204 @@ @@ -1,203 +1,204 @@
1# $NetBSD: files.arm,v 1.118 2013/06/12 07:12:10 matt Exp $ 1# $NetBSD: files.arm,v 1.119 2013/06/12 21:34:12 matt Exp $
2 2
3# temporary define to allow easy moving to ../arch/arm/arm32 3# temporary define to allow easy moving to ../arch/arm/arm32
4defflag ARM32 4defflag ARM32
5 5
6# CPU types. Make sure to update <arm/cpuconf.h> if you change this list. 6# CPU types. Make sure to update <arm/cpuconf.h> if you change this list.
7defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3 7defflag opt_cputypes.h CPU_ARM2 CPU_ARM250 CPU_ARM3
8defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8 8defflag opt_cputypes.h CPU_ARM6 CPU_ARM7 CPU_ARM7TDMI CPU_ARM8
9 CPU_ARM9 CPU_ARM9E CPU_ARM10 CPU_ARM11 CPU_ARMV7 9 CPU_ARM9 CPU_ARM9E CPU_ARM10 CPU_ARM11 CPU_ARMV7
10 CPU_SA110 CPU_SA1100 CPU_SA1110 CPU_IXP12X0 10 CPU_SA110 CPU_SA1100 CPU_SA1110 CPU_IXP12X0
11 CPU_FA526 CPU_XSCALE_80200 CPU_XSCALE_80321 11 CPU_FA526 CPU_XSCALE_80200 CPU_XSCALE_80321
12 CPU_XSCALE_PXA250 CPU_XSCALE_PXA270 12 CPU_XSCALE_PXA250 CPU_XSCALE_PXA270
13 CPU_XSCALE_IXP425  13 CPU_XSCALE_IXP425
14 CPU_SHEEVA 14 CPU_SHEEVA
15defflag opt_cputypes.h CPU_ARM1136: CPU_ARM11 15defflag opt_cputypes.h CPU_ARM1136: CPU_ARM11
16defflag opt_cputypes.h CPU_ARM1176: CPU_ARM11 16defflag opt_cputypes.h CPU_ARM1176: CPU_ARM11
17defflag opt_cputypes.h CPU_ARM11MPCORE: CPU_ARM11 17defflag opt_cputypes.h CPU_ARM11MPCORE: CPU_ARM11
18defflag opt_cputypes.h CPU_PJ4B: CPU_ARMV7 18defflag opt_cputypes.h CPU_PJ4B: CPU_ARMV7
19defflag opt_cputypes.h CPU_CORTEX: CPU_ARMV7 19defflag opt_cputypes.h CPU_CORTEX: CPU_ARMV7
20defflag opt_cputypes.h CPU_CORTEXA5: CPU_CORTEX 20defflag opt_cputypes.h CPU_CORTEXA5: CPU_CORTEX
21defflag opt_cputypes.h CPU_CORTEXA7: CPU_CORTEX 21defflag opt_cputypes.h CPU_CORTEXA7: CPU_CORTEX
22defflag opt_cputypes.h CPU_CORTEXA8: CPU_CORTEX 22defflag opt_cputypes.h CPU_CORTEXA8: CPU_CORTEX
23defflag opt_cputypes.h CPU_CORTEXA9: CPU_CORTEX 23defflag opt_cputypes.h CPU_CORTEXA9: CPU_CORTEX
24defflag opt_cputypes.h CPU_CORTEXA15: CPU_CORTEX 24defflag opt_cputypes.h CPU_CORTEXA15: CPU_CORTEX
25defflag opt_cputypes.h FPU_VFP 25defflag opt_cputypes.h FPU_VFP
26 26
27defparam opt_cpuoptions.h XSCALE_CCLKCFG 27defparam opt_cpuoptions.h XSCALE_CCLKCFG
28defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH 28defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_THROUGH
29defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_BACK 29defflag opt_cpuoptions.h XSCALE_CACHE_WRITE_BACK
30defflag opt_cpuoptions.h XSCALE_NO_COALESCE_WRITES 30defflag opt_cpuoptions.h XSCALE_NO_COALESCE_WRITES
31defflag opt_cpuoptions.h XSCALE_CACHE_READ_WRITE_ALLOCATE 31defflag opt_cpuoptions.h XSCALE_CACHE_READ_WRITE_ALLOCATE
32defflag opt_cpuoptions.h ARM32_DISABLE_ALIGNMENT_FAULTS 32defflag opt_cpuoptions.h ARM32_DISABLE_ALIGNMENT_FAULTS
33defflag opt_cpuoptions.h ARM9_CACHE_WRITE_THROUGH 33defflag opt_cpuoptions.h ARM9_CACHE_WRITE_THROUGH
34defflag opt_cpuoptions.h TPIDRPRW_IS_CURLWP 34defflag opt_cpuoptions.h TPIDRPRW_IS_CURLWP
35defflag opt_cpuoptions.h TPIDRPRW_IS_CURCPU 35defflag opt_cpuoptions.h TPIDRPRW_IS_CURCPU
36defflag opt_cpuoptions.h ARM11_PMC CORTEX_PMC 36defflag opt_cpuoptions.h ARM11_PMC CORTEX_PMC
37defflag opt_cpuoptions.h ARM11_CACHE_WRITE_THROUGH 37defflag opt_cpuoptions.h ARM11_CACHE_WRITE_THROUGH
38defflag opt_cpuoptions.h ARM11MPCORE_COMPAT_MMU 38defflag opt_cpuoptions.h ARM11MPCORE_COMPAT_MMU
  39defflag opt_cpuoptions.h ARM_HAS_VBAR
39# use extended small page in compatible MMU mode for ARMv6 40# use extended small page in compatible MMU mode for ARMv6
40defflag opt_cpuoptions.h ARMV6_EXTENDED_SMALL_PAGE 41defflag opt_cpuoptions.h ARMV6_EXTENDED_SMALL_PAGE
41 42
42# Interrupt implementation header definition. 43# Interrupt implementation header definition.
43defparam opt_arm_intr_impl.h ARM_INTR_IMPL 44defparam opt_arm_intr_impl.h ARM_INTR_IMPL
44 45
45# ARM-specific debug options 46# ARM-specific debug options
46defflag opt_arm_debug.h ARM_LOCK_CAS_DEBUG 47defflag opt_arm_debug.h ARM_LOCK_CAS_DEBUG
47 48
48# Board-specific bus_space(9)/bus_dma(9) definitions 49# Board-specific bus_space(9)/bus_dma(9) definitions
49defflag opt_arm_bus_space.h __BUS_SPACE_HAS_STREAM_METHODS 50defflag opt_arm_bus_space.h __BUS_SPACE_HAS_STREAM_METHODS
50 _ARM32_NEED_BUS_DMA_BOUNCE 51 _ARM32_NEED_BUS_DMA_BOUNCE
51 BUSDMA_COUNTERS 52 BUSDMA_COUNTERS
52 53
53# Floating point emulator 54# Floating point emulator
54obsolete defflag ARMFPE 55obsolete defflag ARMFPE
55 56
56# VFP support 57# VFP support
57file arch/arm/vfp/vfp_init.c arm32 58file arch/arm/vfp/vfp_init.c arm32
58#file arch/arm/vfp/pmap_vfp.S arm32 & fpu_vfp 59#file arch/arm/vfp/pmap_vfp.S arm32 & fpu_vfp
59 60
60# PMAP_DEBUG (heavily abused option) 61# PMAP_DEBUG (heavily abused option)
61defflag PMAP_DEBUG 62defflag PMAP_DEBUG
62 63
63# New PMAP options 64# New PMAP options
64defflag opt_arm32_pmap.h ARM32_NEW_VM_LAYOUT PMAPCOUNTERS 65defflag opt_arm32_pmap.h ARM32_NEW_VM_LAYOUT PMAPCOUNTERS
65 PMAP_STEAL_MEMORY PMAP_NEED_ALLOC_POOLPAGE 66 PMAP_STEAL_MEMORY PMAP_NEED_ALLOC_POOLPAGE
66 67
67# MI console support 68# MI console support
68file dev/cons.c 69file dev/cons.c
69 70
70# DDB 71# DDB
71file arch/arm/arm/db_disasm.c ddb 72file arch/arm/arm/db_disasm.c ddb
72file arch/arm/arm32/db_interface.c (ddb|kgdb) & arm32 73file arch/arm/arm32/db_interface.c (ddb|kgdb) & arm32
73file arch/arm/arm/db_trace.c ddb 74file arch/arm/arm/db_trace.c ddb
74file arch/arm/arm32/db_machdep.c ddb & arm32 75file arch/arm/arm32/db_machdep.c ddb & arm32
75file arch/arm/arm32/kgdb_machdep.c kgdb & arm32 76file arch/arm/arm32/kgdb_machdep.c kgdb & arm32
76 77
77# FIQ support 78# FIQ support
78file arch/arm/arm/fiq.c 79file arch/arm/arm/fiq.c
79file arch/arm/arm/fiq_subr.S 80file arch/arm/arm/fiq_subr.S
80 81
81# mainbus files 82# mainbus files
82device mainbus { [base = -1], [size = 0], [dack = -1], [irq = -1], [intrbase = -1], [core = -1] } 83device mainbus { [base = -1], [size = 0], [dack = -1], [irq = -1], [intrbase = -1], [core = -1] }
83attach mainbus at root 84attach mainbus at root
84file arch/arm/mainbus/mainbus.c mainbus & arm32 85file arch/arm/mainbus/mainbus.c mainbus & arm32
85file arch/arm/mainbus/mainbus_io.c mainbus & arm32 86file arch/arm/mainbus/mainbus_io.c mainbus & arm32
86file arch/arm/mainbus/mainbus_io_asm.S mainbus & arm32 87file arch/arm/mainbus/mainbus_io_asm.S mainbus & arm32
87 88
88device cpu { } 89device cpu { }
89attach cpu at mainbus with cpu_mainbus 90attach cpu at mainbus with cpu_mainbus
90file arch/arm/mainbus/cpu_mainbus.c cpu_mainbus & arm32 91file arch/arm/mainbus/cpu_mainbus.c cpu_mainbus & arm32
91 92
92# files related to debugging 93# files related to debugging
93file arch/arm/arm/disassem.c 94file arch/arm/arm/disassem.c
94 95
95# bus_space(9) 96# bus_space(9)
96define bus_space_generic 97define bus_space_generic
97file arch/arm/arm/bus_space_asm_generic.S bus_space_generic 98file arch/arm/arm/bus_space_asm_generic.S bus_space_generic
98file arch/arm/arm/bus_space_notimpl.S arm32 99file arch/arm/arm/bus_space_notimpl.S arm32
99 100
100file arch/arm/arm/arm_machdep.c 101file arch/arm/arm/arm_machdep.c
101file arch/arm/arm/ast.c 102file arch/arm/arm/ast.c
102file arch/arm/arm/bcopyinout.S 103file arch/arm/arm/bcopyinout.S
103file arch/arm/arm/blockio.S 104file arch/arm/arm/blockio.S
104file arch/arm/arm/bootconfig.c 105file arch/arm/arm/bootconfig.c
105file arch/arm/arm/compat_13_machdep.c compat_13 106file arch/arm/arm/compat_13_machdep.c compat_13
106file arch/arm/arm/compat_16_machdep.c compat_16 107file arch/arm/arm/compat_16_machdep.c compat_16
107file arch/arm/arm/copystr.S 108file arch/arm/arm/copystr.S
108file arch/arm/arm/core_machdep.c 109file arch/arm/arm/core_machdep.c
109file arch/arm/arm/cpu_in_cksum.S (inet | inet6) & cpu_in_cksum 110file arch/arm/arm/cpu_in_cksum.S (inet | inet6) & cpu_in_cksum
110file arch/arm/arm/cpufunc.c 111file arch/arm/arm/cpufunc.c
111file arch/arm/arm/cpufunc_asm.S 112file arch/arm/arm/cpufunc_asm.S
112file arch/arm/arm/cpufunc_asm_arm3.S cpu_arm2 | cpu_arm250 | cpu_arm3 113file arch/arm/arm/cpufunc_asm_arm3.S cpu_arm2 | cpu_arm250 | cpu_arm3
113file arch/arm/arm/cpufunc_asm_arm67.S cpu_arm6 | cpu_arm7 114file arch/arm/arm/cpufunc_asm_arm67.S cpu_arm6 | cpu_arm7
114file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi 115file arch/arm/arm/cpufunc_asm_arm7tdmi.S cpu_arm7tdmi
115file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8 116file arch/arm/arm/cpufunc_asm_arm8.S cpu_arm8
116file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9 117file arch/arm/arm/cpufunc_asm_arm9.S cpu_arm9
117file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm9e | cpu_arm10 | 118file arch/arm/arm/cpufunc_asm_arm10.S cpu_arm9e | cpu_arm10 |
118 cpu_sheeva 119 cpu_sheeva
119file arch/arm/arm/cpufunc_asm_arm11.S cpu_arm11 | cpu_cortex 120file arch/arm/arm/cpufunc_asm_arm11.S cpu_arm11 | cpu_cortex
120file arch/arm/arm/cpufunc_asm_arm1136.S cpu_arm1136 121file arch/arm/arm/cpufunc_asm_arm1136.S cpu_arm1136
121file arch/arm/arm/cpufunc_asm_arm11x6.S cpu_arm1136 | cpu_arm1176 122file arch/arm/arm/cpufunc_asm_arm11x6.S cpu_arm1136 | cpu_arm1176
122file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm9e | 123file arch/arm/arm/cpufunc_asm_armv4.S cpu_arm9 | cpu_arm9e |
123 cpu_arm10 | 124 cpu_arm10 |
124 cpu_fa526 | 125 cpu_fa526 |
125 cpu_sa110 | 126 cpu_sa110 |
126 cpu_sa1100 | 127 cpu_sa1100 |
127 cpu_sa1110 | 128 cpu_sa1110 |
128 cpu_ixp12x0 | 129 cpu_ixp12x0 |
129 cpu_xscale_80200 | 130 cpu_xscale_80200 |
130 cpu_xscale_80321 | 131 cpu_xscale_80321 |
131 cpu_xscale_ixp425 | 132 cpu_xscale_ixp425 |
132 cpu_xscale_pxa250 | 133 cpu_xscale_pxa250 |
133 cpu_xscale_pxa270 | 134 cpu_xscale_pxa270 |
134 cpu_cortex | 135 cpu_cortex |
135 cpu_sheeva 136 cpu_sheeva
136file arch/arm/arm/cpufunc_asm_armv5.S cpu_arm10 | cpu_arm11mpcore 137file arch/arm/arm/cpufunc_asm_armv5.S cpu_arm10 | cpu_arm11mpcore
137file arch/arm/arm/cpufunc_asm_armv5_ec.S cpu_arm9e | cpu_arm10 | 138file arch/arm/arm/cpufunc_asm_armv5_ec.S cpu_arm9e | cpu_arm10 |
138 cpu_sheeva 139 cpu_sheeva
139file arch/arm/arm/cpufunc_asm_armv6.S cpu_arm11 | cpu_cortex 140file arch/arm/arm/cpufunc_asm_armv6.S cpu_arm11 | cpu_cortex
140file arch/arm/arm/cpufunc_asm_armv7.S cpu_cortex | cpu_pj4b 141file arch/arm/arm/cpufunc_asm_armv7.S cpu_cortex | cpu_pj4b
141file arch/arm/arm/cpufunc_asm_pj4b.S cpu_pj4b 142file arch/arm/arm/cpufunc_asm_pj4b.S cpu_pj4b
142file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 | cpu_sa1100 | 143file arch/arm/arm/cpufunc_asm_sa1.S cpu_sa110 | cpu_sa1100 |
143 cpu_sa1110 | 144 cpu_sa1110 |
144 cpu_ixp12x0 145 cpu_ixp12x0
145file arch/arm/arm/cpufunc_asm_sa11x0.S cpu_sa1100 | cpu_sa1110 146file arch/arm/arm/cpufunc_asm_sa11x0.S cpu_sa1100 | cpu_sa1110
146file arch/arm/arm/cpufunc_asm_fa526.S cpu_fa526 147file arch/arm/arm/cpufunc_asm_fa526.S cpu_fa526
147file arch/arm/arm/cpufunc_asm_xscale.S cpu_xscale_80200 | 148file arch/arm/arm/cpufunc_asm_xscale.S cpu_xscale_80200 |
148 cpu_xscale_80321 | 149 cpu_xscale_80321 |
149 cpu_xscale_ixp425 | 150 cpu_xscale_ixp425 |
150 cpu_xscale_pxa250 | 151 cpu_xscale_pxa250 |
151 cpu_xscale_pxa270 | 152 cpu_xscale_pxa270 |
152 cpu_cortex 153 cpu_cortex
153file arch/arm/arm/cpufunc_asm_ixp12x0.S cpu_ixp12x0 154file arch/arm/arm/cpufunc_asm_ixp12x0.S cpu_ixp12x0
154file arch/arm/arm/cpufunc_asm_sheeva.S cpu_sheeva 155file arch/arm/arm/cpufunc_asm_sheeva.S cpu_sheeva
155file arch/arm/arm/cpu_exec.c 156file arch/arm/arm/cpu_exec.c
156file arch/arm/arm/fusu.S 157file arch/arm/arm/fusu.S
157file arch/arm/arm/idle_machdep.c 158file arch/arm/arm/idle_machdep.c
158file arch/arm/arm/lock_cas.S 159file arch/arm/arm/lock_cas.S
159file arch/arm/arm/process_machdep.c 160file arch/arm/arm/process_machdep.c
160file arch/arm/arm/procfs_machdep.c procfs 161file arch/arm/arm/procfs_machdep.c procfs
161file arch/arm/arm/sig_machdep.c 162file arch/arm/arm/sig_machdep.c
162file arch/arm/arm/sigcode.S 163file arch/arm/arm/sigcode.S
163file arch/arm/arm/syscall.c 164file arch/arm/arm/syscall.c
164file arch/arm/arm/undefined.c 165file arch/arm/arm/undefined.c
165# vectors.S gets included manually by Makefile.acorn26, since it needs 166# vectors.S gets included manually by Makefile.acorn26, since it needs
166# to be at the start of the text segment on those machines. 167# to be at the start of the text segment on those machines.
167file arch/arm/arm/vectors.S arm32 168file arch/arm/arm/vectors.S arm32
168 169
169# files common to arm32 implementations 170# files common to arm32 implementations
170file arch/arm/arm32/arm32_machdep.c arm32 171file arch/arm/arm32/arm32_machdep.c arm32
171file arch/arm/arm32/bus_dma.c arm32 172file arch/arm/arm32/bus_dma.c arm32
172file arch/arm/arm32/cpu.c arm32 & cpu 173file arch/arm/arm32/cpu.c arm32 & cpu
173file arch/arm/arm32/cpuswitch.S arm32 174file arch/arm/arm32/cpuswitch.S arm32
174file arch/arm/arm32/exception.S arm32 175file arch/arm/arm32/exception.S arm32
175file arch/arm/arm32/fault.c arm32 176file arch/arm/arm32/fault.c arm32
176file arch/arm/arm32/kobj_machdep.c arm32 & modular 177file arch/arm/arm32/kobj_machdep.c arm32 & modular
177file arch/arm/arm32/pmap.c arm32 178file arch/arm/arm32/pmap.c arm32
178file arch/arm/arm32/setcpsr.S arm32 179file arch/arm/arm32/setcpsr.S arm32
179file arch/arm/arm32/setstack.S arm32 180file arch/arm/arm32/setstack.S arm32
180file arch/arm/arm32/stubs.c arm32 181file arch/arm/arm32/stubs.c arm32
181file arch/arm/arm32/sys_machdep.c arm32 182file arch/arm/arm32/sys_machdep.c arm32
182file arch/arm/arm32/vm_machdep.c arm32 183file arch/arm/arm32/vm_machdep.c arm32
183file arch/arm/arm32/atomic.S arm32 184file arch/arm/arm32/atomic.S arm32
184 185
185# files less common to arm32 implementations... 186# files less common to arm32 implementations...
186file kern/kern_cctr.c arm11 187file kern/kern_cctr.c arm11
187file arch/arm/arm32/arm11_pmc.c arm11_pmc 188file arch/arm/arm32/arm11_pmc.c arm11_pmc
188file arch/arm/arm32/cortex_pmc.c cortex_pmc 189file arch/arm/arm32/cortex_pmc.c cortex_pmc
189 190
190# arm32 library functions 191# arm32 library functions
191file arch/arm/arm32/bcopy_page.S arm32 192file arch/arm/arm32/bcopy_page.S arm32
192 193
193# 194#
194include "compat/netbsd32/files.netbsd32" 195include "compat/netbsd32/files.netbsd32"
195file arch/arm/arm32/netbsd32_machdep.c arm32 & compat_netbsd32 196file arch/arm/arm32/netbsd32_machdep.c arm32 & compat_netbsd32
196 197
197# Linux binary compatibility (COMPAT_LINUX) 198# Linux binary compatibility (COMPAT_LINUX)
198include "compat/ossaudio/files.ossaudio" 199include "compat/ossaudio/files.ossaudio"
199include "compat/linux/files.linux" 200include "compat/linux/files.linux"
200include "compat/linux/arch/arm/files.linux_arm" 201include "compat/linux/arch/arm/files.linux_arm"
201file arch/arm/arm/linux_sigcode.S compat_linux 202file arch/arm/arm/linux_sigcode.S compat_linux
202file arch/arm/arm/linux_syscall.c compat_linux 203file arch/arm/arm/linux_syscall.c compat_linux
203file arch/arm/arm/linux_trap.c compat_linux 204file arch/arm/arm/linux_trap.c compat_linux

cvs diff -r1.119 -r1.120 src/sys/arch/arm/include/arm32/pmap.h

--- src/sys/arch/arm/include/arm32/pmap.h 2012/12/12 15:09:37 1.119
+++ src/sys/arch/arm/include/arm32/pmap.h 2013/06/12 21:34:12 1.120
@@ -1,952 +1,954 @@ @@ -1,952 +1,954 @@
1/* $NetBSD: pmap.h,v 1.119 2012/12/12 15:09:37 matt Exp $ */ 1/* $NetBSD: pmap.h,v 1.120 2013/06/12 21:34:12 matt Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc. 4 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Copyright (c) 1994,1995 Mark Brinicombe. 39 * Copyright (c) 1994,1995 Mark Brinicombe.
40 * All rights reserved. 40 * All rights reserved.
41 * 41 *
42 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions 43 * modification, are permitted provided that the following conditions
44 * are met: 44 * are met:
45 * 1. Redistributions of source code must retain the above copyright 45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer. 46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright 47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the 48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution. 49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software 50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement: 51 * must display the following acknowledgement:
52 * This product includes software developed by Mark Brinicombe 52 * This product includes software developed by Mark Brinicombe
53 * 4. The name of the author may not be used to endorse or promote products 53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission. 54 * derived from this software without specific prior written permission.
55 * 55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
58 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
59 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 59 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
60 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 60 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
61 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
62 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
63 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
64 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 64 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
65 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 */ 66 */
67 67
68#ifndef _ARM32_PMAP_H_ 68#ifndef _ARM32_PMAP_H_
69#define _ARM32_PMAP_H_ 69#define _ARM32_PMAP_H_
70 70
71#ifdef _KERNEL 71#ifdef _KERNEL
72 72
73#include <arm/cpuconf.h> 73#include <arm/cpuconf.h>
74#include <arm/arm32/pte.h> 74#include <arm/arm32/pte.h>
75#ifndef _LOCORE 75#ifndef _LOCORE
76#if defined(_KERNEL_OPT) 76#if defined(_KERNEL_OPT)
77#include "opt_arm32_pmap.h" 77#include "opt_arm32_pmap.h"
78#endif 78#endif
79#include <arm/cpufunc.h> 79#include <arm/cpufunc.h>
80#include <uvm/uvm_object.h> 80#include <uvm/uvm_object.h>
81#endif 81#endif
82 82
83/* 83/*
84 * a pmap describes a processes' 4GB virtual address space. this 84 * a pmap describes a processes' 4GB virtual address space. this
85 * virtual address space can be broken up into 4096 1MB regions which 85 * virtual address space can be broken up into 4096 1MB regions which
86 * are described by L1 PTEs in the L1 table. 86 * are described by L1 PTEs in the L1 table.
87 * 87 *
88 * There is a line drawn at KERNEL_BASE. Everything below that line 88 * There is a line drawn at KERNEL_BASE. Everything below that line
89 * changes when the VM context is switched. Everything above that line 89 * changes when the VM context is switched. Everything above that line
90 * is the same no matter which VM context is running. This is achieved 90 * is the same no matter which VM context is running. This is achieved
91 * by making the L1 PTEs for those slots above KERNEL_BASE reference 91 * by making the L1 PTEs for those slots above KERNEL_BASE reference
92 * kernel L2 tables. 92 * kernel L2 tables.
93 * 93 *
94 * The basic layout of the virtual address space thus looks like this: 94 * The basic layout of the virtual address space thus looks like this:
95 * 95 *
96 * 0xffffffff 96 * 0xffffffff
97 * . 97 * .
98 * . 98 * .
99 * . 99 * .
100 * KERNEL_BASE 100 * KERNEL_BASE
101 * -------------------- 101 * --------------------
102 * . 102 * .
103 * . 103 * .
104 * . 104 * .
105 * 0x00000000 105 * 0x00000000
106 */ 106 */
107 107
108/* 108/*
109 * The number of L2 descriptor tables which can be tracked by an l2_dtable. 109 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
110 * A bucket size of 16 provides for 16MB of contiguous virtual address 110 * A bucket size of 16 provides for 16MB of contiguous virtual address
111 * space per l2_dtable. Most processes will, therefore, require only two or 111 * space per l2_dtable. Most processes will, therefore, require only two or
112 * three of these to map their whole working set. 112 * three of these to map their whole working set.
113 */ 113 */
114#define L2_BUCKET_LOG2 4 114#define L2_BUCKET_LOG2 4
115#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) 115#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
116 116
117/* 117/*
118 * Given the above "L2-descriptors-per-l2_dtable" constant, the number 118 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
119 * of l2_dtable structures required to track all possible page descriptors 119 * of l2_dtable structures required to track all possible page descriptors
120 * mappable by an L1 translation table is given by the following constants: 120 * mappable by an L1 translation table is given by the following constants:
121 */ 121 */
122#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) 122#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
123#define L2_SIZE (1 << L2_LOG2) 123#define L2_SIZE (1 << L2_LOG2)
124 124
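
As a standalone sanity check of the sizing arithmetic described in the comments above (a sketch only; L1_S_SHIFT is assumed to be 20, i.e. 1MB L1 sections, which is not spelled out in this header):

    /*
     * Illustration of the l2_dtable sizing: 16 L2 tables of 1MB each per
     * l2_dtable (16MB), and enough l2_dtable slots to cover all 4GB.
     */
    #include <assert.h>

    #define L1_S_SHIFT      20                      /* assumed: 1MB sections */
    #define L2_BUCKET_LOG2  4
    #define L2_BUCKET_SIZE  (1 << L2_BUCKET_LOG2)   /* 16 L2 tables */
    #define L2_LOG2         ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
    #define L2_SIZE         (1 << L2_LOG2)          /* l2_dtable slots */

    int
    main(void)
    {
            /* one l2_dtable covers 16 x 1MB = 16MB of VA */
            assert(L2_BUCKET_SIZE * (1 << L1_S_SHIFT) == 16 * 1024 * 1024);
            /* 256 l2_dtable slots cover the whole 4GB address space */
            assert(L2_SIZE == 256);
            assert((unsigned long long)L2_SIZE * 16 * 1024 * 1024 == 1ULL << 32);
            return 0;
    }
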
125/* 125/*
126 * tell MI code that the cache is virtually-indexed. 126 * tell MI code that the cache is virtually-indexed.
127 * ARMv6 is physically-tagged but all others are virtually-tagged. 127 * ARMv6 is physically-tagged but all others are virtually-tagged.
128 */ 128 */
129#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 129#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
130#define PMAP_CACHE_VIPT 130#define PMAP_CACHE_VIPT
131#else 131#else
132#define PMAP_CACHE_VIVT 132#define PMAP_CACHE_VIVT
133#endif 133#endif
134 134
135#ifndef _LOCORE 135#ifndef _LOCORE
136 136
137struct l1_ttable; 137struct l1_ttable;
138struct l2_dtable; 138struct l2_dtable;
139 139
140/* 140/*
141 * Track cache/tlb occupancy using the following structure 141 * Track cache/tlb occupancy using the following structure
142 */ 142 */
143union pmap_cache_state { 143union pmap_cache_state {
144 struct { 144 struct {
145 union { 145 union {
146 uint8_t csu_cache_b[2]; 146 uint8_t csu_cache_b[2];
147 uint16_t csu_cache; 147 uint16_t csu_cache;
148 } cs_cache_u; 148 } cs_cache_u;
149 149
150 union { 150 union {
151 uint8_t csu_tlb_b[2]; 151 uint8_t csu_tlb_b[2];
152 uint16_t csu_tlb; 152 uint16_t csu_tlb;
153 } cs_tlb_u; 153 } cs_tlb_u;
154 } cs_s; 154 } cs_s;
155 uint32_t cs_all; 155 uint32_t cs_all;
156}; 156};
157#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0] 157#define cs_cache_id cs_s.cs_cache_u.csu_cache_b[0]
158#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1] 158#define cs_cache_d cs_s.cs_cache_u.csu_cache_b[1]
159#define cs_cache cs_s.cs_cache_u.csu_cache 159#define cs_cache cs_s.cs_cache_u.csu_cache
160#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0] 160#define cs_tlb_id cs_s.cs_tlb_u.csu_tlb_b[0]
161#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1] 161#define cs_tlb_d cs_s.cs_tlb_u.csu_tlb_b[1]
162#define cs_tlb cs_s.cs_tlb_u.csu_tlb 162#define cs_tlb cs_s.cs_tlb_u.csu_tlb
163 163
164/* 164/*
165 * Assigned to cs_all to force cacheops to work for a particular pmap 165 * Assigned to cs_all to force cacheops to work for a particular pmap
166 */ 166 */
167#define PMAP_CACHE_STATE_ALL 0xffffffffu 167#define PMAP_CACHE_STATE_ALL 0xffffffffu
168 168
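
A hedged sketch (not code from the tree) of how the cache/TLB state union and PMAP_CACHE_STATE_ALL are intended to be used; the function names are made up:

    /*
     * Illustrative only: skip a cache clean when the pmap cannot have
     * live data-cache entries, and force cache ops unconditionally by
     * assigning PMAP_CACHE_STATE_ALL (e.g. for the kernel pmap).
     */
    static inline void
    example_clean_dcache_if_live(struct pmap *pm, vaddr_t va, vsize_t len)
    {
            if (pm->pm_cstate.cs_cache_d != 0)      /* any D-cache occupancy? */
                    cpu_dcache_wb_range(va, len);
    }

    static inline void
    example_force_cacheops(struct pmap *pm)
    {
            pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
    }
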
169/* 169/*
170 * This structure is used by machine-dependent code to describe 170 * This structure is used by machine-dependent code to describe
171 * static mappings of devices, created at bootstrap time. 171 * static mappings of devices, created at bootstrap time.
172 */ 172 */
173struct pmap_devmap { 173struct pmap_devmap {
174 vaddr_t pd_va; /* virtual address */ 174 vaddr_t pd_va; /* virtual address */
175 paddr_t pd_pa; /* physical address */ 175 paddr_t pd_pa; /* physical address */
176 psize_t pd_size; /* size of region */ 176 psize_t pd_size; /* size of region */
177 vm_prot_t pd_prot; /* protection code */ 177 vm_prot_t pd_prot; /* protection code */
178 int pd_cache; /* cache attributes */ 178 int pd_cache; /* cache attributes */
179}; 179};
180 180
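
struct pmap_devmap is typically used as a board's static device-mapping table, handed to pmap_devmap_register()/pmap_devmap_bootstrap() (both declared further down). A hedged example; the addresses are placeholders, not a real board layout:

    /*
     * Illustrative devmap table: one 1MB, uncached, read/write mapping
     * of a device register block, terminated by an all-zero entry.
     */
    static const struct pmap_devmap example_devmap[] = {
            {
                    .pd_va    = 0xfd000000,         /* chosen virtual address */
                    .pd_pa    = 0x44e00000,         /* device physical base */
                    .pd_size  = 0x00100000,         /* 1MB, section-sized */
                    .pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
                    .pd_cache = PTE_NOCACHE,        /* device registers */
            },
            { 0, 0, 0, 0, 0 }                       /* terminator */
    };
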
181/* 181/*
182 * The pmap structure itself 182 * The pmap structure itself
183 */ 183 */
184struct pmap { 184struct pmap {
185 uint8_t pm_domain; 185 uint8_t pm_domain;
186 bool pm_remove_all; 186 bool pm_remove_all;
187 bool pm_activated; 187 bool pm_activated;
188 struct l1_ttable *pm_l1; 188 struct l1_ttable *pm_l1;
 189#ifndef ARM_HAS_VBAR
189 pd_entry_t *pm_pl1vec; 190 pd_entry_t *pm_pl1vec;
 191#endif
190 pd_entry_t pm_l1vec; 192 pd_entry_t pm_l1vec;
191 union pmap_cache_state pm_cstate; 193 union pmap_cache_state pm_cstate;
192 struct uvm_object pm_obj; 194 struct uvm_object pm_obj;
193 kmutex_t pm_obj_lock; 195 kmutex_t pm_obj_lock;
194#define pm_lock pm_obj.vmobjlock 196#define pm_lock pm_obj.vmobjlock
195 struct l2_dtable *pm_l2[L2_SIZE]; 197 struct l2_dtable *pm_l2[L2_SIZE];
196 struct pmap_statistics pm_stats; 198 struct pmap_statistics pm_stats;
197 LIST_ENTRY(pmap) pm_list; 199 LIST_ENTRY(pmap) pm_list;
198}; 200};
199 201
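
The #ifndef ARM_HAS_VBAR just added around pm_pl1vec is the heart of this change: when the CPU lets the kernel program the exception vector base directly, no per-pmap L1 entry for the vector page is needed any more. A rough sketch of the VBAR write itself (the function name is illustrative; only the cp15 c12,c0,0 access is architectural):

    /*
     * Sketch: program the exception vector base through VBAR instead of
     * mapping a vector page into every pmap.  Done once per CPU; a
     * context-synchronizing barrier (isb) should follow the write.
     */
    static inline void
    example_set_vbar(vaddr_t vector_base)
    {
            __asm volatile("mcr p15, 0, %0, c12, c0, 0" :: "r" (vector_base));
    }
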
200/* 202/*
201 * Physical / virtual address structure. In a number of places (particularly 203 * Physical / virtual address structure. In a number of places (particularly
202 * during bootstrapping) we need to keep track of the physical and virtual 204 * during bootstrapping) we need to keep track of the physical and virtual
203 * addresses of various pages 205 * addresses of various pages
204 */ 206 */
205typedef struct pv_addr { 207typedef struct pv_addr {
206 SLIST_ENTRY(pv_addr) pv_list; 208 SLIST_ENTRY(pv_addr) pv_list;
207 paddr_t pv_pa; 209 paddr_t pv_pa;
208 vaddr_t pv_va; 210 vaddr_t pv_va;
209 vsize_t pv_size; 211 vsize_t pv_size;
210 uint8_t pv_cache; 212 uint8_t pv_cache;
211 uint8_t pv_prot; 213 uint8_t pv_prot;
212} pv_addr_t; 214} pv_addr_t;
213typedef SLIST_HEAD(, pv_addr) pv_addrqh_t; 215typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
214 216
215extern pv_addrqh_t pmap_freeq; 217extern pv_addrqh_t pmap_freeq;
216extern pv_addr_t kernelstack; 218extern pv_addr_t kernelstack;
217extern pv_addr_t abtstack; 219extern pv_addr_t abtstack;
218extern pv_addr_t fiqstack; 220extern pv_addr_t fiqstack;
219extern pv_addr_t irqstack; 221extern pv_addr_t irqstack;
220extern pv_addr_t undstack; 222extern pv_addr_t undstack;
221extern pv_addr_t idlestack; 223extern pv_addr_t idlestack;
222extern pv_addr_t systempage; 224extern pv_addr_t systempage;
223extern pv_addr_t kernel_l1pt; 225extern pv_addr_t kernel_l1pt;
224 226
225/* 227/*
226 * Determine various modes for PTEs (user vs. kernel, cacheable 228 * Determine various modes for PTEs (user vs. kernel, cacheable
227 * vs. non-cacheable). 229 * vs. non-cacheable).
228 */ 230 */
229#define PTE_KERNEL 0 231#define PTE_KERNEL 0
230#define PTE_USER 1 232#define PTE_USER 1
231#define PTE_NOCACHE 0 233#define PTE_NOCACHE 0
232#define PTE_CACHE 1 234#define PTE_CACHE 1
233#define PTE_PAGETABLE 2 235#define PTE_PAGETABLE 2
234 236
235/* 237/*
236 * Flags that indicate attributes of pages or mappings of pages. 238 * Flags that indicate attributes of pages or mappings of pages.
237 * 239 *
238 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each 240 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
239 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual 241 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
240 * pv_entry's for each page. They live in the same "namespace" so 242 * pv_entry's for each page. They live in the same "namespace" so
241 * that we can clear multiple attributes at a time. 243 * that we can clear multiple attributes at a time.
242 * 244 *
243 * Note the "non-cacheable" flag generally means the page has 245 * Note the "non-cacheable" flag generally means the page has
244 * multiple mappings in a given address space. 246 * multiple mappings in a given address space.
245 */ 247 */
246#define PVF_MOD 0x01 /* page is modified */ 248#define PVF_MOD 0x01 /* page is modified */
247#define PVF_REF 0x02 /* page is referenced */ 249#define PVF_REF 0x02 /* page is referenced */
248#define PVF_WIRED 0x04 /* mapping is wired */ 250#define PVF_WIRED 0x04 /* mapping is wired */
249#define PVF_WRITE 0x08 /* mapping is writable */ 251#define PVF_WRITE 0x08 /* mapping is writable */
250#define PVF_EXEC 0x10 /* mapping is executable */ 252#define PVF_EXEC 0x10 /* mapping is executable */
251#ifdef PMAP_CACHE_VIVT 253#ifdef PMAP_CACHE_VIVT
252#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */ 254#define PVF_UNC 0x20 /* mapping is 'user' non-cacheable */
253#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */ 255#define PVF_KNC 0x40 /* mapping is 'kernel' non-cacheable */
254#define PVF_NC (PVF_UNC|PVF_KNC) 256#define PVF_NC (PVF_UNC|PVF_KNC)
255#endif 257#endif
256#ifdef PMAP_CACHE_VIPT 258#ifdef PMAP_CACHE_VIPT
257#define PVF_NC 0x20 /* mapping is 'kernel' non-cacheable */ 259#define PVF_NC 0x20 /* mapping is 'kernel' non-cacheable */
258#define PVF_MULTCLR 0x40 /* mapping is multi-colored */ 260#define PVF_MULTCLR 0x40 /* mapping is multi-colored */
259#endif 261#endif
260#define PVF_COLORED 0x80 /* page has or had a color */ 262#define PVF_COLORED 0x80 /* page has or had a color */
261#define PVF_KENTRY 0x0100 /* page entered via pmap_kenter_pa */ 263#define PVF_KENTRY 0x0100 /* page entered via pmap_kenter_pa */
262#define PVF_KMPAGE 0x0200 /* page is used for kmem */ 264#define PVF_KMPAGE 0x0200 /* page is used for kmem */
263#define PVF_DIRTY 0x0400 /* page may have dirty cache lines */ 265#define PVF_DIRTY 0x0400 /* page may have dirty cache lines */
264#define PVF_KMOD 0x0800 /* unmanaged page is modified */ 266#define PVF_KMOD 0x0800 /* unmanaged page is modified */
265#define PVF_KWRITE (PVF_KENTRY|PVF_WRITE) 267#define PVF_KWRITE (PVF_KENTRY|PVF_WRITE)
266#define PVF_DMOD (PVF_MOD|PVF_KMOD|PVF_KMPAGE) 268#define PVF_DMOD (PVF_MOD|PVF_KMOD|PVF_KMPAGE)
267 269
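
Because these flags share one bit namespace (see the comment above), several attributes can be tested or cleared with a single mask; a small illustrative sketch:

    /*
     * Illustrative only: test the combined "modified" state and clear
     * referenced+modified in one operation.
     */
    static inline bool
    example_page_modified(u_int pvh_attrs)
    {
            return (pvh_attrs & PVF_DMOD) != 0;     /* PVF_MOD|PVF_KMOD|PVF_KMPAGE */
    }

    static inline u_int
    example_clear_ref_mod(u_int pvh_attrs)
    {
            return pvh_attrs & ~(PVF_REF | PVF_MOD);
    }
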
268/* 270/*
269 * Commonly referenced structures 271 * Commonly referenced structures
270 */ 272 */
271extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */ 273extern int pmap_debug_level; /* Only exists if PMAP_DEBUG */
272extern int arm_poolpage_vmfreelist; 274extern int arm_poolpage_vmfreelist;
273 275
274/* 276/*
275 * Macros that we need to export 277 * Macros that we need to export
276 */ 278 */
277#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) 279#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
278#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) 280#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
279 281
280#define pmap_is_modified(pg) \ 282#define pmap_is_modified(pg) \
281 (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0) 283 (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
282#define pmap_is_referenced(pg) \ 284#define pmap_is_referenced(pg) \
283 (((pg)->mdpage.pvh_attrs & PVF_REF) != 0) 285 (((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
284#define pmap_is_page_colored_p(md) \ 286#define pmap_is_page_colored_p(md) \
285 (((md)->pvh_attrs & PVF_COLORED) != 0) 287 (((md)->pvh_attrs & PVF_COLORED) != 0)
286 288
287#define pmap_copy(dp, sp, da, l, sa) /* nothing */ 289#define pmap_copy(dp, sp, da, l, sa) /* nothing */
288 290
289#define pmap_phys_address(ppn) (arm_ptob((ppn))) 291#define pmap_phys_address(ppn) (arm_ptob((ppn)))
290u_int arm32_mmap_flags(paddr_t); 292u_int arm32_mmap_flags(paddr_t);
291#define ARM32_MMAP_WRITECOMBINE 0x40000000 293#define ARM32_MMAP_WRITECOMBINE 0x40000000
292#define ARM32_MMAP_CACHEABLE 0x20000000 294#define ARM32_MMAP_CACHEABLE 0x20000000
293#define pmap_mmap_flags(ppn) arm32_mmap_flags(ppn) 295#define pmap_mmap_flags(ppn) arm32_mmap_flags(ppn)
294 296
295/* 297/*
296 * Functions that we need to export 298 * Functions that we need to export
297 */ 299 */
298void pmap_procwr(struct proc *, vaddr_t, int); 300void pmap_procwr(struct proc *, vaddr_t, int);
299void pmap_remove_all(pmap_t); 301void pmap_remove_all(pmap_t);
300bool pmap_extract(pmap_t, vaddr_t, paddr_t *); 302bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
301 303
302#define PMAP_NEED_PROCWR 304#define PMAP_NEED_PROCWR
303#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */ 305#define PMAP_GROWKERNEL /* turn on pmap_growkernel interface */
304#define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */ 306#define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */
305 307
306#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 308#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
307#define PMAP_PREFER(hint, vap, sz, td) pmap_prefer((hint), (vap), (td)) 309#define PMAP_PREFER(hint, vap, sz, td) pmap_prefer((hint), (vap), (td))
308void pmap_prefer(vaddr_t, vaddr_t *, int); 310void pmap_prefer(vaddr_t, vaddr_t *, int);
309#endif 311#endif
310 312
311void pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t); 313void pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
312 314
313/* Functions we use internally. */ 315/* Functions we use internally. */
314#ifdef PMAP_STEAL_MEMORY 316#ifdef PMAP_STEAL_MEMORY
315void pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *); 317void pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
316void pmap_boot_pageadd(pv_addr_t *); 318void pmap_boot_pageadd(pv_addr_t *);
317vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *); 319vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
318#endif 320#endif
319void pmap_bootstrap(vaddr_t, vaddr_t); 321void pmap_bootstrap(vaddr_t, vaddr_t);
320 322
321void pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int); 323void pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
322int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int); 324int pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
323bool pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **); 325bool pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
324bool pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **); 326bool pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
325void pmap_set_pcb_pagedir(pmap_t, struct pcb *); 327void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
326 328
327void pmap_debug(int); 329void pmap_debug(int);
328void pmap_postinit(void); 330void pmap_postinit(void);
329 331
330void vector_page_setprot(int); 332void vector_page_setprot(int);
331 333
332const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t); 334const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
333const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t); 335const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
334 336
335/* Bootstrapping routines. */ 337/* Bootstrapping routines. */
336void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int); 338void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
337void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int); 339void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
338vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int); 340vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
339void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *); 341void pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
340void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *); 342void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
341void pmap_devmap_register(const struct pmap_devmap *); 343void pmap_devmap_register(const struct pmap_devmap *);
342 344
343/* 345/*
344 * Special page zero routine for use by the idle loop (no cache cleans).  346 * Special page zero routine for use by the idle loop (no cache cleans).
345 */ 347 */
346bool pmap_pageidlezero(paddr_t); 348bool pmap_pageidlezero(paddr_t);
347#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa)) 349#define PMAP_PAGEIDLEZERO(pa) pmap_pageidlezero((pa))
348 350
349/* 351/*
350 * used by dumpsys to record the PA of the L1 table 352 * used by dumpsys to record the PA of the L1 table
351 */ 353 */
352uint32_t pmap_kernel_L1_addr(void); 354uint32_t pmap_kernel_L1_addr(void);
353/* 355/*
354 * The current top of kernel VM 356 * The current top of kernel VM
355 */ 357 */
356extern vaddr_t pmap_curmaxkvaddr; 358extern vaddr_t pmap_curmaxkvaddr;
357 359
358/* 360/*
359 * Useful macros and constants  361 * Useful macros and constants
360 */ 362 */
361 363
362/* Virtual address to page table entry */ 364/* Virtual address to page table entry */
363static inline pt_entry_t * 365static inline pt_entry_t *
364vtopte(vaddr_t va) 366vtopte(vaddr_t va)
365{ 367{
366 pd_entry_t *pdep; 368 pd_entry_t *pdep;
367 pt_entry_t *ptep; 369 pt_entry_t *ptep;
368 370
369 if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false) 371 if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
370 return (NULL); 372 return (NULL);
371 return (ptep); 373 return (ptep);
372} 374}
373 375
374/* 376/*
375 * Virtual address to physical address 377 * Virtual address to physical address
376 */ 378 */
377static inline paddr_t 379static inline paddr_t
378vtophys(vaddr_t va) 380vtophys(vaddr_t va)
379{ 381{
380 paddr_t pa; 382 paddr_t pa;
381 383
382 if (pmap_extract(pmap_kernel(), va, &pa) == false) 384 if (pmap_extract(pmap_kernel(), va, &pa) == false)
383 return (0); /* XXXSCW: Panic? */ 385 return (0); /* XXXSCW: Panic? */
384 386
385 return (pa); 387 return (pa);
386} 388}
387 389
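
A hedged usage sketch of the two helpers above, e.g. from debugging code (the function name and message format are made up):

    /*
     * Illustrative only: show the kernel PTE and physical address backing
     * a kernel virtual address.
     */
    static void
    example_show_mapping(vaddr_t va)
    {
            pt_entry_t *ptep = vtopte(va);
            paddr_t pa = vtophys(va);

            if (ptep == NULL || pa == 0)
                    printf("va %#lx is not mapped\n", (unsigned long)va);
            else
                    printf("va %#lx -> pa %#lx (pte %#lx)\n", (unsigned long)va,
                        (unsigned long)pa, (unsigned long)*ptep);
    }
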
388/* 390/*
389 * The new pmap ensures that page-tables are always mapping Write-Thru. 391 * The new pmap ensures that page-tables are always mapping Write-Thru.
390 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs 392 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
391 * on every change. 393 * on every change.
392 * 394 *
393 * Unfortunately, not all CPUs have a write-through cache mode. So we 395 * Unfortunately, not all CPUs have a write-through cache mode. So we
394 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, 396 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
395 * and if there is the chance for PTE syncs to be needed, we define 397 * and if there is the chance for PTE syncs to be needed, we define
396 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) 398 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
397 * the code. 399 * the code.
398 */ 400 */
399extern int pmap_needs_pte_sync; 401extern int pmap_needs_pte_sync;
400#if defined(_KERNEL_OPT) 402#if defined(_KERNEL_OPT)
401/* 403/*
402 * StrongARM SA-1 caches do not have a write-through mode. So, on these, 404 * StrongARM SA-1 caches do not have a write-through mode. So, on these,
403 * we need to do PTE syncs. If only SA-1 is configured, then evaluate 405 * we need to do PTE syncs. If only SA-1 is configured, then evaluate
404 * this at compile time. 406 * this at compile time.
405 */ 407 */
406#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)  408#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
407#define PMAP_INCLUDE_PTE_SYNC 409#define PMAP_INCLUDE_PTE_SYNC
408#if (ARM_MMU_V6 > 0) 410#if (ARM_MMU_V6 > 0)
409#define PMAP_NEEDS_PTE_SYNC 1 411#define PMAP_NEEDS_PTE_SYNC 1
410#elif (ARM_MMU_SA1 == 0) 412#elif (ARM_MMU_SA1 == 0)
411#define PMAP_NEEDS_PTE_SYNC 0 413#define PMAP_NEEDS_PTE_SYNC 0
412#endif 414#endif
413#endif 415#endif
414#endif /* _KERNEL_OPT */ 416#endif /* _KERNEL_OPT */
415 417
416/* 418/*
417 * Provide a fallback in case we were not able to determine it at 419 * Provide a fallback in case we were not able to determine it at
418 * compile-time. 420 * compile-time.
419 */ 421 */
420#ifndef PMAP_NEEDS_PTE_SYNC 422#ifndef PMAP_NEEDS_PTE_SYNC
421#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync 423#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
422#define PMAP_INCLUDE_PTE_SYNC 424#define PMAP_INCLUDE_PTE_SYNC
423#endif 425#endif
424 426
425static inline void 427static inline void
426pmap_ptesync(pt_entry_t *ptep, size_t cnt) 428pmap_ptesync(pt_entry_t *ptep, size_t cnt)
427{ 429{
428 if (PMAP_NEEDS_PTE_SYNC) 430 if (PMAP_NEEDS_PTE_SYNC)
429 cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t)); 431 cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
430#if ARM_MMU_V7 > 0 432#if ARM_MMU_V7 > 0
431 __asm("dsb"); 433 __asm("dsb");
432#endif 434#endif
433} 435}
434 436
435#define PTE_SYNC(ptep) pmap_ptesync((ptep), 1) 437#define PTE_SYNC(ptep) pmap_ptesync((ptep), 1)
436#define PTE_SYNC_RANGE(ptep, cnt) pmap_ptesync((ptep), (cnt)) 438#define PTE_SYNC_RANGE(ptep, cnt) pmap_ptesync((ptep), (cnt))
437 439
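
A short sketch of the intended update-then-sync pattern (the helper name is made up):

    /*
     * Modify the PTE in memory, then PTE_SYNC() it so that possibly
     * write-back cached page-table memory is visible to the table walker
     * (plus a dsb on ARMv7, see pmap_ptesync() above).
     */
    static inline void
    example_update_pte(pt_entry_t *ptep, pt_entry_t npte)
    {
            *ptep = npte;
            PTE_SYNC(ptep);
    }
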
438#define l1pte_valid(pde) ((pde) != 0) 440#define l1pte_valid(pde) ((pde) != 0)
439#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) 441#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
440#define l1pte_supersection_p(pde) (l1pte_section_p(pde) \ 442#define l1pte_supersection_p(pde) (l1pte_section_p(pde) \
441 && ((pde) & L1_S_V6_SUPER) != 0) 443 && ((pde) & L1_S_V6_SUPER) != 0)
442#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) 444#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
443#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) 445#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
444 446
445#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) 447#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
446#define l2pte_valid(pte) (((pte) & L2_TYPE_MASK) != L2_TYPE_INV) 448#define l2pte_valid(pte) (((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
447#define l2pte_pa(pte) ((pte) & L2_S_FRAME) 449#define l2pte_pa(pte) ((pte) & L2_S_FRAME)
448#define l2pte_minidata(pte) (((pte) & \ 450#define l2pte_minidata(pte) (((pte) & \
449 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\ 451 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
450 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X))) 452 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
451 453
452/* L1 and L2 page table macros */ 454/* L1 and L2 page table macros */
453#define pmap_pde_v(pde) l1pte_valid(*(pde)) 455#define pmap_pde_v(pde) l1pte_valid(*(pde))
454#define pmap_pde_section(pde) l1pte_section_p(*(pde)) 456#define pmap_pde_section(pde) l1pte_section_p(*(pde))
455#define pmap_pde_supersection(pde) l1pte_supersection_p(*(pde)) 457#define pmap_pde_supersection(pde) l1pte_supersection_p(*(pde))
456#define pmap_pde_page(pde) l1pte_page_p(*(pde)) 458#define pmap_pde_page(pde) l1pte_page_p(*(pde))
457#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) 459#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
458 460
459#define pmap_pte_v(pte) l2pte_valid(*(pte)) 461#define pmap_pte_v(pte) l2pte_valid(*(pte))
460#define pmap_pte_pa(pte) l2pte_pa(*(pte)) 462#define pmap_pte_pa(pte) l2pte_pa(*(pte))
461 463
462/* Size of the kernel part of the L1 page table */ 464/* Size of the kernel part of the L1 page table */
463#define KERNEL_PD_SIZE \ 465#define KERNEL_PD_SIZE \
464 (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t)) 466 (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
465 467
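
For a concrete feel of KERNEL_PD_SIZE, here is a standalone arithmetic check using assumed values (KERNEL_BASE 0xc0000000, 16KB L1 table, 1MB sections, 4-byte pd_entry_t; none of these numbers come from this header):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            const uint32_t kernel_base   = 0xc0000000u;     /* assumed */
            const uint32_t l1_table_size = 16 * 1024;       /* 4096 PDEs */
            const uint32_t l1_s_shift    = 20;              /* 1MB sections */
            const uint32_t pde_size      = 4;

            /* PDEs below KERNEL_BASE: 0xc00 = 3072 entries = 12KB */
            uint32_t user_pd   = (kernel_base >> l1_s_shift) * pde_size;
            uint32_t kernel_pd = l1_table_size - user_pd;

            assert(user_pd == 12 * 1024);
            assert(kernel_pd == 4 * 1024);  /* KERNEL_PD_SIZE under these assumptions */
            return 0;
    }
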
466void bzero_page(vaddr_t); 468void bzero_page(vaddr_t);
467void bcopy_page(vaddr_t, vaddr_t); 469void bcopy_page(vaddr_t, vaddr_t);
468 470
469#ifdef FPU_VFP 471#ifdef FPU_VFP
470void bzero_page_vfp(vaddr_t); 472void bzero_page_vfp(vaddr_t);
471void bcopy_page_vfp(vaddr_t, vaddr_t); 473void bcopy_page_vfp(vaddr_t, vaddr_t);
472#endif 474#endif
473 475
474/************************* ARM MMU configuration *****************************/ 476/************************* ARM MMU configuration *****************************/
475 477
476#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 478#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
477void pmap_copy_page_generic(paddr_t, paddr_t); 479void pmap_copy_page_generic(paddr_t, paddr_t);
478void pmap_zero_page_generic(paddr_t); 480void pmap_zero_page_generic(paddr_t);
479 481
480void pmap_pte_init_generic(void); 482void pmap_pte_init_generic(void);
481#if defined(CPU_ARM8) 483#if defined(CPU_ARM8)
482void pmap_pte_init_arm8(void); 484void pmap_pte_init_arm8(void);
483#endif 485#endif
484#if defined(CPU_ARM9) 486#if defined(CPU_ARM9)
485void pmap_pte_init_arm9(void); 487void pmap_pte_init_arm9(void);
486#endif /* CPU_ARM9 */ 488#endif /* CPU_ARM9 */
487#if defined(CPU_ARM10) 489#if defined(CPU_ARM10)
488void pmap_pte_init_arm10(void); 490void pmap_pte_init_arm10(void);
489#endif /* CPU_ARM10 */ 491#endif /* CPU_ARM10 */
490#if defined(CPU_ARM11) /* ARM_MMU_V6 */ 492#if defined(CPU_ARM11) /* ARM_MMU_V6 */
491void pmap_pte_init_arm11(void); 493void pmap_pte_init_arm11(void);
492#endif /* CPU_ARM11 */ 494#endif /* CPU_ARM11 */
493#if defined(CPU_ARM11MPCORE) /* ARM_MMU_V6 */ 495#if defined(CPU_ARM11MPCORE) /* ARM_MMU_V6 */
494void pmap_pte_init_arm11mpcore(void); 496void pmap_pte_init_arm11mpcore(void);
495#endif 497#endif
496#if ARM_MMU_V7 == 1 498#if ARM_MMU_V7 == 1
497void pmap_pte_init_armv7(void); 499void pmap_pte_init_armv7(void);
498#endif /* ARM_MMU_V7 */ 500#endif /* ARM_MMU_V7 */
499#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 501#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
500 502
501#if ARM_MMU_SA1 == 1 503#if ARM_MMU_SA1 == 1
502void pmap_pte_init_sa1(void); 504void pmap_pte_init_sa1(void);
503#endif /* ARM_MMU_SA1 == 1 */ 505#endif /* ARM_MMU_SA1 == 1 */
504 506
505#if ARM_MMU_XSCALE == 1 507#if ARM_MMU_XSCALE == 1
506void pmap_copy_page_xscale(paddr_t, paddr_t); 508void pmap_copy_page_xscale(paddr_t, paddr_t);
507void pmap_zero_page_xscale(paddr_t); 509void pmap_zero_page_xscale(paddr_t);
508 510
509void pmap_pte_init_xscale(void); 511void pmap_pte_init_xscale(void);
510 512
511void xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t); 513void xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
512 514
513#define PMAP_UAREA(va) pmap_uarea(va) 515#define PMAP_UAREA(va) pmap_uarea(va)
514void pmap_uarea(vaddr_t); 516void pmap_uarea(vaddr_t);
515#endif /* ARM_MMU_XSCALE == 1 */ 517#endif /* ARM_MMU_XSCALE == 1 */
516 518
517extern pt_entry_t pte_l1_s_cache_mode; 519extern pt_entry_t pte_l1_s_cache_mode;
518extern pt_entry_t pte_l1_s_cache_mask; 520extern pt_entry_t pte_l1_s_cache_mask;
519 521
520extern pt_entry_t pte_l2_l_cache_mode; 522extern pt_entry_t pte_l2_l_cache_mode;
521extern pt_entry_t pte_l2_l_cache_mask; 523extern pt_entry_t pte_l2_l_cache_mask;
522 524
523extern pt_entry_t pte_l2_s_cache_mode; 525extern pt_entry_t pte_l2_s_cache_mode;
524extern pt_entry_t pte_l2_s_cache_mask; 526extern pt_entry_t pte_l2_s_cache_mask;
525 527
526extern pt_entry_t pte_l1_s_cache_mode_pt; 528extern pt_entry_t pte_l1_s_cache_mode_pt;
527extern pt_entry_t pte_l2_l_cache_mode_pt; 529extern pt_entry_t pte_l2_l_cache_mode_pt;
528extern pt_entry_t pte_l2_s_cache_mode_pt; 530extern pt_entry_t pte_l2_s_cache_mode_pt;
529 531
530extern pt_entry_t pte_l1_s_wc_mode; 532extern pt_entry_t pte_l1_s_wc_mode;
531extern pt_entry_t pte_l2_l_wc_mode; 533extern pt_entry_t pte_l2_l_wc_mode;
532extern pt_entry_t pte_l2_s_wc_mode; 534extern pt_entry_t pte_l2_s_wc_mode;
533 535
534extern pt_entry_t pte_l1_s_prot_u; 536extern pt_entry_t pte_l1_s_prot_u;
535extern pt_entry_t pte_l1_s_prot_w; 537extern pt_entry_t pte_l1_s_prot_w;
536extern pt_entry_t pte_l1_s_prot_ro; 538extern pt_entry_t pte_l1_s_prot_ro;
537extern pt_entry_t pte_l1_s_prot_mask; 539extern pt_entry_t pte_l1_s_prot_mask;
538 540
539extern pt_entry_t pte_l2_s_prot_u; 541extern pt_entry_t pte_l2_s_prot_u;
540extern pt_entry_t pte_l2_s_prot_w; 542extern pt_entry_t pte_l2_s_prot_w;
541extern pt_entry_t pte_l2_s_prot_ro; 543extern pt_entry_t pte_l2_s_prot_ro;
542extern pt_entry_t pte_l2_s_prot_mask; 544extern pt_entry_t pte_l2_s_prot_mask;
543 545
544extern pt_entry_t pte_l2_l_prot_u; 546extern pt_entry_t pte_l2_l_prot_u;
545extern pt_entry_t pte_l2_l_prot_w; 547extern pt_entry_t pte_l2_l_prot_w;
546extern pt_entry_t pte_l2_l_prot_ro; 548extern pt_entry_t pte_l2_l_prot_ro;
547extern pt_entry_t pte_l2_l_prot_mask; 549extern pt_entry_t pte_l2_l_prot_mask;
548 550
549extern pt_entry_t pte_l1_ss_proto; 551extern pt_entry_t pte_l1_ss_proto;
550extern pt_entry_t pte_l1_s_proto; 552extern pt_entry_t pte_l1_s_proto;
551extern pt_entry_t pte_l1_c_proto; 553extern pt_entry_t pte_l1_c_proto;
552extern pt_entry_t pte_l2_s_proto; 554extern pt_entry_t pte_l2_s_proto;
553 555
554extern void (*pmap_copy_page_func)(paddr_t, paddr_t); 556extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
555extern void (*pmap_zero_page_func)(paddr_t); 557extern void (*pmap_zero_page_func)(paddr_t);
556 558
557#endif /* !_LOCORE */ 559#endif /* !_LOCORE */
558 560
559/*****************************************************************************/ 561/*****************************************************************************/
560 562
561/* 563/*
562 * Definitions for MMU domains 564 * Definitions for MMU domains
563 */ 565 */
564#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ 566#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
565#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ 567#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
566 568
567/* 569/*
568 * These macros define the various bit masks in the PTE. 570 * These macros define the various bit masks in the PTE.
569 * 571 *
570 * We use these macros since we use different bits on different processor 572 * We use these macros since we use different bits on different processor
571 * models. 573 * models.
572 */ 574 */
573#define L1_S_PROT_U_generic (L1_S_AP(AP_U)) 575#define L1_S_PROT_U_generic (L1_S_AP(AP_U))
574#define L1_S_PROT_W_generic (L1_S_AP(AP_W)) 576#define L1_S_PROT_W_generic (L1_S_AP(AP_W))
575#define L1_S_PROT_RO_generic (0) 577#define L1_S_PROT_RO_generic (0)
576#define L1_S_PROT_MASK_generic (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO) 578#define L1_S_PROT_MASK_generic (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
577 579
578#define L1_S_PROT_U_xscale (L1_S_AP(AP_U)) 580#define L1_S_PROT_U_xscale (L1_S_AP(AP_U))
579#define L1_S_PROT_W_xscale (L1_S_AP(AP_W)) 581#define L1_S_PROT_W_xscale (L1_S_AP(AP_W))
580#define L1_S_PROT_RO_xscale (0) 582#define L1_S_PROT_RO_xscale (0)
581#define L1_S_PROT_MASK_xscale (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO) 583#define L1_S_PROT_MASK_xscale (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
582 584
583#define L1_S_PROT_U_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_U)) 585#define L1_S_PROT_U_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_U))
584#define L1_S_PROT_W_armv6 (L1_S_AP(AP_W)) 586#define L1_S_PROT_W_armv6 (L1_S_AP(AP_W))
585#define L1_S_PROT_RO_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_RO)) 587#define L1_S_PROT_RO_armv6 (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
586#define L1_S_PROT_MASK_armv6 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO) 588#define L1_S_PROT_MASK_armv6 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
587 589
588#define L1_S_PROT_U_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_U)) 590#define L1_S_PROT_U_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_U))
589#define L1_S_PROT_W_armv7 (L1_S_AP(AP_W)) 591#define L1_S_PROT_W_armv7 (L1_S_AP(AP_W))
590#define L1_S_PROT_RO_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_RO)) 592#define L1_S_PROT_RO_armv7 (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
591#define L1_S_PROT_MASK_armv7 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO) 593#define L1_S_PROT_MASK_armv7 (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
592 594
593#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) 595#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
594#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X)) 596#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
595#define L1_S_CACHE_MASK_armv6 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)) 597#define L1_S_CACHE_MASK_armv6 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
596#define L1_S_CACHE_MASK_armv7 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S) 598#define L1_S_CACHE_MASK_armv7 (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
597 599
598#define L2_L_PROT_U_generic (L2_AP(AP_U)) 600#define L2_L_PROT_U_generic (L2_AP(AP_U))
599#define L2_L_PROT_W_generic (L2_AP(AP_W)) 601#define L2_L_PROT_W_generic (L2_AP(AP_W))
600#define L2_L_PROT_RO_generic (0) 602#define L2_L_PROT_RO_generic (0)
601#define L2_L_PROT_MASK_generic (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO) 603#define L2_L_PROT_MASK_generic (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
602 604
603#define L2_L_PROT_U_xscale (L2_AP(AP_U)) 605#define L2_L_PROT_U_xscale (L2_AP(AP_U))
604#define L2_L_PROT_W_xscale (L2_AP(AP_W)) 606#define L2_L_PROT_W_xscale (L2_AP(AP_W))
605#define L2_L_PROT_RO_xscale (0) 607#define L2_L_PROT_RO_xscale (0)
606#define L2_L_PROT_MASK_xscale (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO) 608#define L2_L_PROT_MASK_xscale (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
607 609
608#define L2_L_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U)) 610#define L2_L_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U))
609#define L2_L_PROT_W_armv6n (L2_AP0(AP_W)) 611#define L2_L_PROT_W_armv6n (L2_AP0(AP_W))
610#define L2_L_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO)) 612#define L2_L_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO))
611#define L2_L_PROT_MASK_armv6n (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO) 613#define L2_L_PROT_MASK_armv6n (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
612 614
613#define L2_L_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U)) 615#define L2_L_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U))
614#define L2_L_PROT_W_armv7 (L2_AP0(AP_W)) 616#define L2_L_PROT_W_armv7 (L2_AP0(AP_W))
615#define L2_L_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO)) 617#define L2_L_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO))
616#define L2_L_PROT_MASK_armv7 (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO) 618#define L2_L_PROT_MASK_armv7 (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
617 619
618#define L2_L_CACHE_MASK_generic (L2_B|L2_C) 620#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
619#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X)) 621#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
620#define L2_L_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)) 622#define L2_L_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
621#define L2_L_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S) 623#define L2_L_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
622 624
623#define L2_S_PROT_U_generic (L2_AP(AP_U)) 625#define L2_S_PROT_U_generic (L2_AP(AP_U))
624#define L2_S_PROT_W_generic (L2_AP(AP_W)) 626#define L2_S_PROT_W_generic (L2_AP(AP_W))
625#define L2_S_PROT_RO_generic (0) 627#define L2_S_PROT_RO_generic (0)
626#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO) 628#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
627 629
628#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) 630#define L2_S_PROT_U_xscale (L2_AP0(AP_U))
629#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) 631#define L2_S_PROT_W_xscale (L2_AP0(AP_W))
630#define L2_S_PROT_RO_xscale (0) 632#define L2_S_PROT_RO_xscale (0)
631#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO) 633#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
632 634
633#define L2_S_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U)) 635#define L2_S_PROT_U_armv6n (L2_AP0(AP_R) | L2_AP0(AP_U))
634#define L2_S_PROT_W_armv6n (L2_AP0(AP_W)) 636#define L2_S_PROT_W_armv6n (L2_AP0(AP_W))
635#define L2_S_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO)) 637#define L2_S_PROT_RO_armv6n (L2_AP0(AP_R) | L2_AP0(AP_RO))
636#define L2_S_PROT_MASK_armv6n (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO) 638#define L2_S_PROT_MASK_armv6n (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
637 639
638#define L2_S_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U)) 640#define L2_S_PROT_U_armv7 (L2_AP0(AP_R) | L2_AP0(AP_U))
639#define L2_S_PROT_W_armv7 (L2_AP0(AP_W)) 641#define L2_S_PROT_W_armv7 (L2_AP0(AP_W))
640#define L2_S_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO)) 642#define L2_S_PROT_RO_armv7 (L2_AP0(AP_R) | L2_AP0(AP_RO))
641#define L2_S_PROT_MASK_armv7 (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO) 643#define L2_S_PROT_MASK_armv7 (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
642 644
643#define L2_S_CACHE_MASK_generic (L2_B|L2_C) 645#define L2_S_CACHE_MASK_generic (L2_B|L2_C)
644#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X)) 646#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
645#define L2_XS_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)) 647#define L2_XS_CACHE_MASK_armv6 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
646#define L2_S_CACHE_MASK_armv6n L2_XS_CACHE_MASK_armv6 648#define L2_S_CACHE_MASK_armv6n L2_XS_CACHE_MASK_armv6
647#ifdef ARMV6_EXTENDED_SMALL_PAGE 649#ifdef ARMV6_EXTENDED_SMALL_PAGE
648#define L2_S_CACHE_MASK_armv6c L2_XS_CACHE_MASK_armv6 650#define L2_S_CACHE_MASK_armv6c L2_XS_CACHE_MASK_armv6
649#else 651#else
650#define L2_S_CACHE_MASK_armv6c L2_S_CACHE_MASK_generic 652#define L2_S_CACHE_MASK_armv6c L2_S_CACHE_MASK_generic
651#endif 653#endif
652#define L2_S_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S) 654#define L2_S_CACHE_MASK_armv7 (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
653 655
654 656
655#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) 657#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
656#define L1_S_PROTO_xscale (L1_TYPE_S) 658#define L1_S_PROTO_xscale (L1_TYPE_S)
657#define L1_S_PROTO_armv6 (L1_TYPE_S) 659#define L1_S_PROTO_armv6 (L1_TYPE_S)
658#define L1_S_PROTO_armv7 (L1_TYPE_S) 660#define L1_S_PROTO_armv7 (L1_TYPE_S)
659 661
660#define L1_SS_PROTO_generic 0 662#define L1_SS_PROTO_generic 0
661#define L1_SS_PROTO_xscale 0 663#define L1_SS_PROTO_xscale 0
662#define L1_SS_PROTO_armv6 (L1_TYPE_S | L1_S_V6_SS) 664#define L1_SS_PROTO_armv6 (L1_TYPE_S | L1_S_V6_SS)
663#define L1_SS_PROTO_armv7 (L1_TYPE_S | L1_S_V6_SS) 665#define L1_SS_PROTO_armv7 (L1_TYPE_S | L1_S_V6_SS)
664 666
665#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) 667#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
666#define L1_C_PROTO_xscale (L1_TYPE_C) 668#define L1_C_PROTO_xscale (L1_TYPE_C)
667#define L1_C_PROTO_armv6 (L1_TYPE_C) 669#define L1_C_PROTO_armv6 (L1_TYPE_C)
668#define L1_C_PROTO_armv7 (L1_TYPE_C) 670#define L1_C_PROTO_armv7 (L1_TYPE_C)
669 671
670#define L2_L_PROTO (L2_TYPE_L) 672#define L2_L_PROTO (L2_TYPE_L)
671 673
672#define L2_S_PROTO_generic (L2_TYPE_S) 674#define L2_S_PROTO_generic (L2_TYPE_S)
673#define L2_S_PROTO_xscale (L2_TYPE_XS) 675#define L2_S_PROTO_xscale (L2_TYPE_XS)
674#ifdef ARMV6_EXTENDED_SMALL_PAGE 676#ifdef ARMV6_EXTENDED_SMALL_PAGE
675#define L2_S_PROTO_armv6c (L2_TYPE_XS) /* XP=0, extended small page */ 677#define L2_S_PROTO_armv6c (L2_TYPE_XS) /* XP=0, extended small page */
676#else 678#else
677#define L2_S_PROTO_armv6c (L2_TYPE_S) /* XP=0, subpage APs */ 679#define L2_S_PROTO_armv6c (L2_TYPE_S) /* XP=0, subpage APs */
678#endif 680#endif
679#define L2_S_PROTO_armv6n (L2_TYPE_S) /* with XP=1 */ 681#define L2_S_PROTO_armv6n (L2_TYPE_S) /* with XP=1 */
680#define L2_S_PROTO_armv7 (L2_TYPE_S) 682#define L2_S_PROTO_armv7 (L2_TYPE_S)
681 683
682/* 684/*
683 * User-visible names for the ones that vary with MMU class. 685 * User-visible names for the ones that vary with MMU class.
684 */ 686 */
685 687
686#if ARM_NMMUS > 1 688#if ARM_NMMUS > 1
687/* More than one MMU class configured; use variables. */ 689/* More than one MMU class configured; use variables. */
688#define L1_S_PROT_U pte_l1_s_prot_u 690#define L1_S_PROT_U pte_l1_s_prot_u
689#define L1_S_PROT_W pte_l1_s_prot_w 691#define L1_S_PROT_W pte_l1_s_prot_w
690#define L1_S_PROT_RO pte_l1_s_prot_ro 692#define L1_S_PROT_RO pte_l1_s_prot_ro
691#define L1_S_PROT_MASK pte_l1_s_prot_mask 693#define L1_S_PROT_MASK pte_l1_s_prot_mask
692 694
693#define L2_S_PROT_U pte_l2_s_prot_u 695#define L2_S_PROT_U pte_l2_s_prot_u
694#define L2_S_PROT_W pte_l2_s_prot_w 696#define L2_S_PROT_W pte_l2_s_prot_w
695#define L2_S_PROT_RO pte_l2_s_prot_ro 697#define L2_S_PROT_RO pte_l2_s_prot_ro
696#define L2_S_PROT_MASK pte_l2_s_prot_mask 698#define L2_S_PROT_MASK pte_l2_s_prot_mask
697 699
698#define L2_L_PROT_U pte_l2_l_prot_u 700#define L2_L_PROT_U pte_l2_l_prot_u
699#define L2_L_PROT_W pte_l2_l_prot_w 701#define L2_L_PROT_W pte_l2_l_prot_w
700#define L2_L_PROT_RO pte_l2_l_prot_ro 702#define L2_L_PROT_RO pte_l2_l_prot_ro
701#define L2_L_PROT_MASK pte_l2_l_prot_mask 703#define L2_L_PROT_MASK pte_l2_l_prot_mask
702 704
703#define L1_S_CACHE_MASK pte_l1_s_cache_mask 705#define L1_S_CACHE_MASK pte_l1_s_cache_mask
704#define L2_L_CACHE_MASK pte_l2_l_cache_mask 706#define L2_L_CACHE_MASK pte_l2_l_cache_mask
705#define L2_S_CACHE_MASK pte_l2_s_cache_mask 707#define L2_S_CACHE_MASK pte_l2_s_cache_mask
706 708
707#define L1_SS_PROTO pte_l1_ss_proto 709#define L1_SS_PROTO pte_l1_ss_proto
708#define L1_S_PROTO pte_l1_s_proto 710#define L1_S_PROTO pte_l1_s_proto
709#define L1_C_PROTO pte_l1_c_proto 711#define L1_C_PROTO pte_l1_c_proto
710#define L2_S_PROTO pte_l2_s_proto 712#define L2_S_PROTO pte_l2_s_proto
711 713
712#define pmap_copy_page(s, d) (*pmap_copy_page_func)((s), (d)) 714#define pmap_copy_page(s, d) (*pmap_copy_page_func)((s), (d))
713#define pmap_zero_page(d) (*pmap_zero_page_func)((d)) 715#define pmap_zero_page(d) (*pmap_zero_page_func)((d))
714#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 716#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
715#define L1_S_PROT_U L1_S_PROT_U_generic 717#define L1_S_PROT_U L1_S_PROT_U_generic
716#define L1_S_PROT_W L1_S_PROT_W_generic 718#define L1_S_PROT_W L1_S_PROT_W_generic
717#define L1_S_PROT_RO L1_S_PROT_RO_generic 719#define L1_S_PROT_RO L1_S_PROT_RO_generic
718#define L1_S_PROT_MASK L1_S_PROT_MASK_generic 720#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
719 721
720#define L2_S_PROT_U L2_S_PROT_U_generic 722#define L2_S_PROT_U L2_S_PROT_U_generic
721#define L2_S_PROT_W L2_S_PROT_W_generic 723#define L2_S_PROT_W L2_S_PROT_W_generic
722#define L2_S_PROT_RO L2_S_PROT_RO_generic 724#define L2_S_PROT_RO L2_S_PROT_RO_generic
723#define L2_S_PROT_MASK L2_S_PROT_MASK_generic 725#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
724 726
725#define L2_L_PROT_U L2_L_PROT_U_generic 727#define L2_L_PROT_U L2_L_PROT_U_generic
726#define L2_L_PROT_W L2_L_PROT_W_generic 728#define L2_L_PROT_W L2_L_PROT_W_generic
727#define L2_L_PROT_RO L2_L_PROT_RO_generic 729#define L2_L_PROT_RO L2_L_PROT_RO_generic
728#define L2_L_PROT_MASK L2_L_PROT_MASK_generic 730#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
729 731
730#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic 732#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
731#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic 733#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
732#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic 734#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
733 735
734#define L1_SS_PROTO L1_SS_PROTO_generic 736#define L1_SS_PROTO L1_SS_PROTO_generic
735#define L1_S_PROTO L1_S_PROTO_generic 737#define L1_S_PROTO L1_S_PROTO_generic
736#define L1_C_PROTO L1_C_PROTO_generic 738#define L1_C_PROTO L1_C_PROTO_generic
737#define L2_S_PROTO L2_S_PROTO_generic 739#define L2_S_PROTO L2_S_PROTO_generic
738 740
739#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d)) 741#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
740#define pmap_zero_page(d) pmap_zero_page_generic((d)) 742#define pmap_zero_page(d) pmap_zero_page_generic((d))
741#elif ARM_MMU_V6N != 0 743#elif ARM_MMU_V6N != 0
742#define L1_S_PROT_U L1_S_PROT_U_armv6 744#define L1_S_PROT_U L1_S_PROT_U_armv6
743#define L1_S_PROT_W L1_S_PROT_W_armv6 745#define L1_S_PROT_W L1_S_PROT_W_armv6
744#define L1_S_PROT_RO L1_S_PROT_RO_armv6 746#define L1_S_PROT_RO L1_S_PROT_RO_armv6
745#define L1_S_PROT_MASK L1_S_PROT_MASK_armv6 747#define L1_S_PROT_MASK L1_S_PROT_MASK_armv6
746 748
747#define L2_S_PROT_U L2_S_PROT_U_armv6n 749#define L2_S_PROT_U L2_S_PROT_U_armv6n
748#define L2_S_PROT_W L2_S_PROT_W_armv6n 750#define L2_S_PROT_W L2_S_PROT_W_armv6n
749#define L2_S_PROT_RO L2_S_PROT_RO_armv6n 751#define L2_S_PROT_RO L2_S_PROT_RO_armv6n
750#define L2_S_PROT_MASK L2_S_PROT_MASK_armv6n 752#define L2_S_PROT_MASK L2_S_PROT_MASK_armv6n
751 753
752#define L2_L_PROT_U L2_L_PROT_U_armv6n 754#define L2_L_PROT_U L2_L_PROT_U_armv6n
753#define L2_L_PROT_W L2_L_PROT_W_armv6n 755#define L2_L_PROT_W L2_L_PROT_W_armv6n
754#define L2_L_PROT_RO L2_L_PROT_RO_armv6n 756#define L2_L_PROT_RO L2_L_PROT_RO_armv6n
755#define L2_L_PROT_MASK L2_L_PROT_MASK_armv6n 757#define L2_L_PROT_MASK L2_L_PROT_MASK_armv6n
756 758
757#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv6 759#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv6
758#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv6 760#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv6
759#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv6n 761#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv6n
760 762
761/* These prototypes make writeable mappings, while the other MMU types 763/* These prototypes make writeable mappings, while the other MMU types
762 * make read-only mappings. */ 764 * make read-only mappings. */
763#define L1_SS_PROTO L1_SS_PROTO_armv6 765#define L1_SS_PROTO L1_SS_PROTO_armv6
764#define L1_S_PROTO L1_S_PROTO_armv6 766#define L1_S_PROTO L1_S_PROTO_armv6
765#define L1_C_PROTO L1_C_PROTO_armv6 767#define L1_C_PROTO L1_C_PROTO_armv6
766#define L2_S_PROTO L2_S_PROTO_armv6n 768#define L2_S_PROTO L2_S_PROTO_armv6n
767 769
768#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d)) 770#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
769#define pmap_zero_page(d) pmap_zero_page_generic((d)) 771#define pmap_zero_page(d) pmap_zero_page_generic((d))
770#elif ARM_MMU_V6C != 0 772#elif ARM_MMU_V6C != 0
771#define L1_S_PROT_U L1_S_PROT_U_generic 773#define L1_S_PROT_U L1_S_PROT_U_generic
772#define L1_S_PROT_W L1_S_PROT_W_generic 774#define L1_S_PROT_W L1_S_PROT_W_generic
773#define L1_S_PROT_RO L1_S_PROT_RO_generic 775#define L1_S_PROT_RO L1_S_PROT_RO_generic
774#define L1_S_PROT_MASK L1_S_PROT_MASK_generic 776#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
775 777
776#define L2_S_PROT_U L2_S_PROT_U_generic 778#define L2_S_PROT_U L2_S_PROT_U_generic
777#define L2_S_PROT_W L2_S_PROT_W_generic 779#define L2_S_PROT_W L2_S_PROT_W_generic
778#define L2_S_PROT_RO L2_S_PROT_RO_generic 780#define L2_S_PROT_RO L2_S_PROT_RO_generic
779#define L2_S_PROT_MASK L2_S_PROT_MASK_generic 781#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
780 782
781#define L2_L_PROT_U L2_L_PROT_U_generic 783#define L2_L_PROT_U L2_L_PROT_U_generic
782#define L2_L_PROT_W L2_L_PROT_W_generic 784#define L2_L_PROT_W L2_L_PROT_W_generic
783#define L2_L_PROT_RO L2_L_PROT_RO_generic 785#define L2_L_PROT_RO L2_L_PROT_RO_generic
784#define L2_L_PROT_MASK L2_L_PROT_MASK_generic 786#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
785 787
786#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic 788#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
787#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic 789#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
788#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic 790#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
789 791
790#define L1_SS_PROTO L1_SS_PROTO_generic 792#define L1_SS_PROTO L1_SS_PROTO_generic
791#define L1_S_PROTO L1_S_PROTO_generic 793#define L1_S_PROTO L1_S_PROTO_generic
792#define L1_C_PROTO L1_C_PROTO_generic 794#define L1_C_PROTO L1_C_PROTO_generic
793#define L2_S_PROTO L2_S_PROTO_generic 795#define L2_S_PROTO L2_S_PROTO_generic
794 796
795#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d)) 797#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
796#define pmap_zero_page(d) pmap_zero_page_generic((d)) 798#define pmap_zero_page(d) pmap_zero_page_generic((d))
797#elif ARM_MMU_XSCALE == 1 799#elif ARM_MMU_XSCALE == 1
798#define L1_S_PROT_U L1_S_PROT_U_generic 800#define L1_S_PROT_U L1_S_PROT_U_generic
799#define L1_S_PROT_W L1_S_PROT_W_generic 801#define L1_S_PROT_W L1_S_PROT_W_generic
800#define L1_S_PROT_RO L1_S_PROT_RO_generic 802#define L1_S_PROT_RO L1_S_PROT_RO_generic
801#define L1_S_PROT_MASK L1_S_PROT_MASK_generic 803#define L1_S_PROT_MASK L1_S_PROT_MASK_generic
802 804
803#define L2_S_PROT_U L2_S_PROT_U_xscale 805#define L2_S_PROT_U L2_S_PROT_U_xscale
804#define L2_S_PROT_W L2_S_PROT_W_xscale 806#define L2_S_PROT_W L2_S_PROT_W_xscale
805#define L2_S_PROT_RO L2_S_PROT_RO_xscale 807#define L2_S_PROT_RO L2_S_PROT_RO_xscale
806#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale 808#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
807 809
808#define L2_L_PROT_U L2_L_PROT_U_generic 810#define L2_L_PROT_U L2_L_PROT_U_generic
809#define L2_L_PROT_W L2_L_PROT_W_generic 811#define L2_L_PROT_W L2_L_PROT_W_generic
810#define L2_L_PROT_RO L2_L_PROT_RO_generic 812#define L2_L_PROT_RO L2_L_PROT_RO_generic
811#define L2_L_PROT_MASK L2_L_PROT_MASK_generic 813#define L2_L_PROT_MASK L2_L_PROT_MASK_generic
812 814
813#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale 815#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
814#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale 816#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
815#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale 817#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
816 818
817#define L1_SS_PROTO L1_SS_PROTO_xscale 819#define L1_SS_PROTO L1_SS_PROTO_xscale
818#define L1_S_PROTO L1_S_PROTO_xscale 820#define L1_S_PROTO L1_S_PROTO_xscale
819#define L1_C_PROTO L1_C_PROTO_xscale 821#define L1_C_PROTO L1_C_PROTO_xscale
820#define L2_S_PROTO L2_S_PROTO_xscale 822#define L2_S_PROTO L2_S_PROTO_xscale
821 823
822#define pmap_copy_page(s, d) pmap_copy_page_xscale((s), (d)) 824#define pmap_copy_page(s, d) pmap_copy_page_xscale((s), (d))
823#define pmap_zero_page(d) pmap_zero_page_xscale((d)) 825#define pmap_zero_page(d) pmap_zero_page_xscale((d))
824#elif ARM_MMU_V7 == 1 826#elif ARM_MMU_V7 == 1
825#define L1_S_PROT_U L1_S_PROT_U_armv7 827#define L1_S_PROT_U L1_S_PROT_U_armv7
826#define L1_S_PROT_W L1_S_PROT_W_armv7 828#define L1_S_PROT_W L1_S_PROT_W_armv7
827#define L1_S_PROT_RO L1_S_PROT_RO_armv7 829#define L1_S_PROT_RO L1_S_PROT_RO_armv7
828#define L1_S_PROT_MASK L1_S_PROT_MASK_armv7 830#define L1_S_PROT_MASK L1_S_PROT_MASK_armv7
829 831
830#define L2_S_PROT_U L2_S_PROT_U_armv7 832#define L2_S_PROT_U L2_S_PROT_U_armv7
831#define L2_S_PROT_W L2_S_PROT_W_armv7 833#define L2_S_PROT_W L2_S_PROT_W_armv7
832#define L2_S_PROT_RO L2_S_PROT_RO_armv7 834#define L2_S_PROT_RO L2_S_PROT_RO_armv7
833#define L2_S_PROT_MASK L2_S_PROT_MASK_armv7 835#define L2_S_PROT_MASK L2_S_PROT_MASK_armv7
834 836
835#define L2_L_PROT_U L2_L_PROT_U_armv7 837#define L2_L_PROT_U L2_L_PROT_U_armv7
836#define L2_L_PROT_W L2_L_PROT_W_armv7 838#define L2_L_PROT_W L2_L_PROT_W_armv7
837#define L2_L_PROT_RO L2_L_PROT_RO_armv7 839#define L2_L_PROT_RO L2_L_PROT_RO_armv7
838#define L2_L_PROT_MASK L2_L_PROT_MASK_armv7 840#define L2_L_PROT_MASK L2_L_PROT_MASK_armv7
839 841
840#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv7 842#define L1_S_CACHE_MASK L1_S_CACHE_MASK_armv7
841#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv7 843#define L2_L_CACHE_MASK L2_L_CACHE_MASK_armv7
842#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv7 844#define L2_S_CACHE_MASK L2_S_CACHE_MASK_armv7
843 845
844/* These prototypes make writeable mappings, while the other MMU types 846/* These prototypes make writeable mappings, while the other MMU types
845 * make read-only mappings. */ 847 * make read-only mappings. */
846#define L1_SS_PROTO L1_SS_PROTO_armv7 848#define L1_SS_PROTO L1_SS_PROTO_armv7
847#define L1_S_PROTO L1_S_PROTO_armv7 849#define L1_S_PROTO L1_S_PROTO_armv7
848#define L1_C_PROTO L1_C_PROTO_armv7 850#define L1_C_PROTO L1_C_PROTO_armv7
849#define L2_S_PROTO L2_S_PROTO_armv7 851#define L2_S_PROTO L2_S_PROTO_armv7
850 852
851#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d)) 853#define pmap_copy_page(s, d) pmap_copy_page_generic((s), (d))
852#define pmap_zero_page(d) pmap_zero_page_generic((d)) 854#define pmap_zero_page(d) pmap_zero_page_generic((d))
853#endif /* ARM_NMMUS > 1 */ 855#endif /* ARM_NMMUS > 1 */
854 856
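[Editorial note, not part of the diff] The block above chooses, at build time, whether the L1/L2 protection, cache and prototype fields are compile-time constants (exactly one MMU class configured) or run-time variables such as pte_l1_s_prot_u that the CPU setup code fills in at boot (more than one class configured). The following is only a minimal standalone sketch of that pattern; ARM_NMMUS, the pte_l2_s_prot_w global and the _armv7 value below are stand-ins mirroring the header, not anything you can link against the kernel.

#include <stdio.h>

#define ARM_NMMUS 1			/* pretend only one MMU class is configured */
#define L2_S_PROT_W_armv7 0x100		/* placeholder bit value, not the real encoding */

#if ARM_NMMUS > 1
/* Several MMU classes: a real global, filled in by CPU setup code at boot. */
unsigned int pte_l2_s_prot_w;
#define L2_S_PROT_W pte_l2_s_prot_w
#else
/* Exactly one class: a constant the compiler can fold into every user. */
#define L2_S_PROT_W L2_S_PROT_W_armv7
#endif

int
main(void)
{
	printf("L2_S_PROT_W = %#x\n", (unsigned int)L2_S_PROT_W);
	return 0;
}
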
855/* 857/*
856 * Macros to set and query the write permission on page descriptors. 858 * Macros to set and query the write permission on page descriptors.
857 */ 859 */
858#define l1pte_set_writable(pte) (((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W) 860#define l1pte_set_writable(pte) (((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
859#define l1pte_set_readonly(pte) (((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO) 861#define l1pte_set_readonly(pte) (((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
860#define l2pte_set_writable(pte) (((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W) 862#define l2pte_set_writable(pte) (((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
861#define l2pte_set_readonly(pte) (((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO) 863#define l2pte_set_readonly(pte) (((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
862 864
863#define l2pte_writable_p(pte) (((pte) & L2_S_PROT_W) == L2_S_PROT_W && \ 865#define l2pte_writable_p(pte) (((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
864 (L2_S_PROT_RO == 0 || \ 866 (L2_S_PROT_RO == 0 || \
865 ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO)) 867 ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
866 868
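[Editorial note, not part of the diff] A self-contained illustration of the macros above: switching a PTE between read-only and writeable clears one bit and sets the other, and l2pte_writable_p() must allow for L2_S_PROT_RO being 0, since some MMU classes encode read-only purely as the absence of the write bit. The bit values here are placeholders chosen for the example, not real ARM PTE encodings.

#include <stdio.h>

typedef unsigned int pt_entry_t;

#define L2_S_PROT_W	0x100	/* placeholder "writeable" bit */
#define L2_S_PROT_RO	0x200	/* placeholder "read-only" bit; 0 on some MMUs */

#define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
#define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

int
main(void)
{
	pt_entry_t pte = 0x40000000u | L2_S_PROT_RO;	/* fake read-only PTE */

	pte = l2pte_set_writable(pte);
	printf("writable? %d (pte %#x)\n", l2pte_writable_p(pte), pte);
	pte = l2pte_set_readonly(pte);
	printf("writable? %d (pte %#x)\n", l2pte_writable_p(pte), pte);
	return 0;
}
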
867/* 869/*
868 * These macros return various bits based on kernel/user and protection. 870 * These macros return various bits based on kernel/user and protection.
869 * Note that the compiler will usually fold these at compile time. 871 * Note that the compiler will usually fold these at compile time.
870 */ 872 */
871#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ 873#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
872 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO)) 874 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))
873 875
874#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \ 876#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
875 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO)) 877 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))
876 878
877#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ 879#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
878 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO)) 880 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
879 881
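[Editorial note, not part of the diff] For illustration, a standalone sketch of how the L2_S_PROT() composition above works: the kernel/user argument contributes the user-access bit, and the VM protection chooses between the write and read-only bits; with constant arguments the whole expression folds to a constant, which is the point of the comment above. The bit values are placeholders, not the real armv6/armv7 encodings.

#include <stdio.h>

#define PTE_KERNEL	0
#define PTE_USER	1
#define VM_PROT_WRITE	0x02	/* placeholder for the real VM_PROT_WRITE */

#define L2_S_PROT_U	0x020	/* placeholder "user accessible" bit */
#define L2_S_PROT_W	0x100	/* placeholder "writeable" bit */
#define L2_S_PROT_RO	0x200	/* placeholder "read-only" bit */

#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
    (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))

int
main(void)
{
	/* Kernel read/write mapping: just the write bit. */
	printf("kernel rw: %#x\n", L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE));
	/* User read-only mapping: user bit plus read-only bit. */
	printf("user ro:   %#x\n", L2_S_PROT(PTE_USER, 0));
	return 0;
}
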
880/* 882/*
881 * Macros to test if a mapping is mappable with an L1 SuperSection, 883 * Macros to test if a mapping is mappable with an L1 SuperSection,
882 * L1 Section, or an L2 Large Page mapping. 884 * L1 Section, or an L2 Large Page mapping.
883 */ 885 */
884#define L1_SS_MAPPABLE_P(va, pa, size) \ 886#define L1_SS_MAPPABLE_P(va, pa, size) \
885 ((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE) 887 ((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
886 888
887#define L1_S_MAPPABLE_P(va, pa, size) \ 889#define L1_S_MAPPABLE_P(va, pa, size) \
888 ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) 890 ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
889 891
890#define L2_L_MAPPABLE_P(va, pa, size) \ 892#define L2_L_MAPPABLE_P(va, pa, size) \
891 ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) 893 ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
892 894
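[Editorial note, not part of the diff] The three predicates above share one shape: a larger mapping may be used only if both the virtual and physical addresses are aligned to its size and at least one full unit of that size remains. A standalone sketch for the L1 section case; the 1MB section size is the usual ARM value, written out here only so the example runs.

#include <stdio.h>

#define L1_S_SIZE	0x00100000u		/* 1MB ARM section */
#define L1_S_OFFSET	(L1_S_SIZE - 1)

#define L1_S_MAPPABLE_P(va, pa, size) \
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

int
main(void)
{
	/* Both addresses 1MB aligned, 2MB left: a section mapping fits. */
	printf("%d\n", L1_S_MAPPABLE_P(0xc0100000u, 0x80100000u, 0x200000u));
	/* Physical address misaligned: fall back to smaller mappings. */
	printf("%d\n", L1_S_MAPPABLE_P(0xc0100000u, 0x80101000u, 0x200000u));
	return 0;
}
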
893#ifndef _LOCORE 895#ifndef _LOCORE
894/* 896/*
895 * Hooks for the pool allocator. 897 * Hooks for the pool allocator.
896 */ 898 */
897#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va)) 899#define POOL_VTOPHYS(va) vtophys((vaddr_t) (va))
898extern paddr_t physical_start, physical_end; 900extern paddr_t physical_start, physical_end;
899#ifdef PMAP_NEED_ALLOC_POOLPAGE 901#ifdef PMAP_NEED_ALLOC_POOLPAGE
900struct vm_page *arm_pmap_alloc_poolpage(int); 902struct vm_page *arm_pmap_alloc_poolpage(int);
901#define PMAP_ALLOC_POOLPAGE arm_pmap_alloc_poolpage 903#define PMAP_ALLOC_POOLPAGE arm_pmap_alloc_poolpage
902#endif 904#endif
903#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) 905#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
904#define PMAP_MAP_POOLPAGE(pa) \ 906#define PMAP_MAP_POOLPAGE(pa) \
905 ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE)) 907 ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE))
906#define PMAP_UNMAP_POOLPAGE(va) \ 908#define PMAP_UNMAP_POOLPAGE(va) \
907 ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start)) 909 ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start))
908#endif 910#endif
909 911
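[Editorial note, not part of the diff] The two pool-page macros above convert between a physical address and the corresponding address in the kernel's linear mapping of RAM by a constant offset, so no page-table walk or temporary mapping is needed. A self-contained sketch of that arithmetic; the KERNEL_BASE and physical_start values below are examples, not taken from any real kernel configuration.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t paddr_t;
typedef uint32_t vaddr_t;

static const vaddr_t kernel_base = 0x80000000u;	/* example KERNEL_BASE */
static const paddr_t phys_start = 0x40000000u;	/* example physical_start */

static vaddr_t
map_poolpage(paddr_t pa)
{
	/* PMAP_MAP_POOLPAGE(): physical address -> direct-mapped va. */
	return (vaddr_t)(pa - phys_start + kernel_base);
}

static paddr_t
unmap_poolpage(vaddr_t va)
{
	/* PMAP_UNMAP_POOLPAGE(): the inverse translation. */
	return (paddr_t)(va - kernel_base + phys_start);
}

int
main(void)
{
	paddr_t pa = 0x40123000u;
	vaddr_t va = map_poolpage(pa);

	/* The round trip is the identity: unmap(map(pa)) == pa. */
	printf("pa %#x -> va %#x -> pa %#x\n",
	    (unsigned)pa, (unsigned)va, (unsigned)unmap_poolpage(va));
	return 0;
}
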
910/* 912/*
911 * pmap-specific data store in the vm_page structure. 913 * pmap-specific data store in the vm_page structure.
912 */ 914 */
913#define __HAVE_VM_PAGE_MD 915#define __HAVE_VM_PAGE_MD
914struct vm_page_md { 916struct vm_page_md {
915 SLIST_HEAD(,pv_entry) pvh_list; /* pv_entry list */ 917 SLIST_HEAD(,pv_entry) pvh_list; /* pv_entry list */
916 int pvh_attrs; /* page attributes */ 918 int pvh_attrs; /* page attributes */
917 u_int uro_mappings; 919 u_int uro_mappings;
918 u_int urw_mappings; 920 u_int urw_mappings;
919 union { 921 union {
920 u_short s_mappings[2]; /* Assume kernel count <= 65535 */ 922 u_short s_mappings[2]; /* Assume kernel count <= 65535 */
921 u_int i_mappings; 923 u_int i_mappings;
922 } k_u; 924 } k_u;
923#define kro_mappings k_u.s_mappings[0] 925#define kro_mappings k_u.s_mappings[0]
924#define krw_mappings k_u.s_mappings[1] 926#define krw_mappings k_u.s_mappings[1]
925#define k_mappings k_u.i_mappings 927#define k_mappings k_u.i_mappings
926}; 928};
927 929
928/* 930/*
929 * Set the default color of each page. 931 * Set the default color of each page.
930 */ 932 */
931#if ARM_MMU_V6 > 0 933#if ARM_MMU_V6 > 0
932#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \ 934#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \
933 (pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask 935 (pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
934#else 936#else
935#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \ 937#define VM_MDPAGE_PVH_ATTRS_INIT(pg) \
936 (pg)->mdpage.pvh_attrs = 0 938 (pg)->mdpage.pvh_attrs = 0
937#endif 939#endif
938  940
939#define VM_MDPAGE_INIT(pg) \ 941#define VM_MDPAGE_INIT(pg) \
940do { \ 942do { \
941 SLIST_INIT(&(pg)->mdpage.pvh_list); \ 943 SLIST_INIT(&(pg)->mdpage.pvh_list); \
942 VM_MDPAGE_PVH_ATTRS_INIT(pg); \ 944 VM_MDPAGE_PVH_ATTRS_INIT(pg); \
943 (pg)->mdpage.uro_mappings = 0; \ 945 (pg)->mdpage.uro_mappings = 0; \
944 (pg)->mdpage.urw_mappings = 0; \ 946 (pg)->mdpage.urw_mappings = 0; \
945 (pg)->mdpage.k_mappings = 0; \ 947 (pg)->mdpage.k_mappings = 0; \
946} while (/*CONSTCOND*/0) 948} while (/*CONSTCOND*/0)
947 949
948#endif /* !_LOCORE */ 950#endif /* !_LOCORE */
949 951
950#endif /* _KERNEL */ 952#endif /* _KERNEL */
951 953
952#endif /* _ARM32_PMAP_H_ */ 954#endif /* _ARM32_PMAP_H_ */