Fri Jun 26 08:42:27 2020 UTC
Avoid unbounded stack usage warning


(skrll)
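The change that silences the warning is at line 498 of arm32_kernel_vm_init(): the on-stack array

	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM + 11];

becomes

	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];

KERNEL_L2PT_KERNEL_NUM is a const size_t computed at run time from kernel_size (lines 486-487), so the old declaration is a variable-length array and the compiler cannot put an upper bound on the function's stack frame (with GCC this is typically reported as "stack usage might be unbounded"). __arraycount(bmi->bmi_l2pts) is a compile-time constant, and the KASSERT at line 492 already guarantees that KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM fits inside bmi_l2pts, so sizing chunks by that fixed bound keeps the frame size static at the cost of a slightly larger, but bounded, array.

Below is a minimal standalone sketch of the same pattern, assuming nothing from the NetBSD tree: the names NELEM, struct slot, struct info, runtime_count, use_vla and use_fixed are illustrative only, and the flags shown (-Wvla, -Wstack-usage=) are GCC's, not necessarily what the kernel build uses.

	/* sketch.c -- compile with: cc -Wvla -Wstack-usage=4096 -c sketch.c */
	#include <stddef.h>

	#define NELEM(a)	(sizeof(a) / sizeof((a)[0]))	/* cf. __arraycount() */

	struct slot { unsigned long pa, va; };
	struct info { struct slot slots[16]; };	/* fixed bound, like bmi_l2pts */

	size_t runtime_count(void);		/* value only known at run time */

	size_t
	use_vla(void)
	{
		/*
		 * VLA: the frame size depends on runtime_count(), so the
		 * compiler warns that stack usage might be unbounded.
		 */
		struct slot chunks[runtime_count() + 11];

		return sizeof(chunks);
	}

	size_t
	use_fixed(const struct info *inf)
	{
		/* Sized by a compile-time constant: the frame size is static. */
		struct slot chunks[NELEM(inf->slots) + 11];

		return sizeof(chunks);
	}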
diff -r1.59 -r1.60 src/sys/arch/arm/arm32/arm32_kvminit.c

cvs diff -r1.59 -r1.60 src/sys/arch/arm/arm32/arm32_kvminit.c

--- src/sys/arch/arm/arm32/arm32_kvminit.c 2020/06/20 07:10:36 1.59
+++ src/sys/arch/arm/arm32/arm32_kvminit.c 2020/06/26 08:42:27 1.60
@@ -1,1024 +1,1024 @@
1 -/*	$NetBSD: arm32_kvminit.c,v 1.59 2020/06/20 07:10:36 skrll Exp $	*/
1 +/*	$NetBSD: arm32_kvminit.c,v 1.60 2020/06/26 08:42:27 skrll Exp $	*/
2 2
3/* 3/*
4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved. 4 * Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
5 * Written by Hiroyuki Bessho for Genetec Corporation. 5 * Written by Hiroyuki Bessho for Genetec Corporation.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
15 * 3. The name of Genetec Corporation may not be used to endorse or 15 * 3. The name of Genetec Corporation may not be used to endorse or
16 * promote products derived from this software without specific prior 16 * promote products derived from this software without specific prior
17 * written permission. 17 * written permission.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND 19 * THIS SOFTWARE IS PROVIDED BY GENETEC CORPORATION ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GENETEC CORPORATION
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 * 30 *
31 * Copyright (c) 2001 Wasabi Systems, Inc. 31 * Copyright (c) 2001 Wasabi Systems, Inc.
32 * All rights reserved. 32 * All rights reserved.
33 * 33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions 37 * modification, are permitted provided that the following conditions
38 * are met: 38 * are met:
39 * 1. Redistributions of source code must retain the above copyright 39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer. 40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright 41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the 42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution. 43 * documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software 44 * 3. All advertising materials mentioning features or use of this software
45 * must display the following acknowledgement: 45 * must display the following acknowledgement:
46 * This product includes software developed for the NetBSD Project by 46 * This product includes software developed for the NetBSD Project by
47 * Wasabi Systems, Inc. 47 * Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 * or promote products derived from this software without specific prior 49 * or promote products derived from this software without specific prior
50 * written permission. 50 * written permission.
51 * 51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 55 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE. 62 * POSSIBILITY OF SUCH DAMAGE.
63 * 63 *
64 * Copyright (c) 1997,1998 Mark Brinicombe. 64 * Copyright (c) 1997,1998 Mark Brinicombe.
65 * Copyright (c) 1997,1998 Causality Limited. 65 * Copyright (c) 1997,1998 Causality Limited.
66 * All rights reserved. 66 * All rights reserved.
67 * 67 *
68 * Redistribution and use in source and binary forms, with or without 68 * Redistribution and use in source and binary forms, with or without
69 * modification, are permitted provided that the following conditions 69 * modification, are permitted provided that the following conditions
70 * are met: 70 * are met:
71 * 1. Redistributions of source code must retain the above copyright 71 * 1. Redistributions of source code must retain the above copyright
72 * notice, this list of conditions and the following disclaimer. 72 * notice, this list of conditions and the following disclaimer.
73 * 2. Redistributions in binary form must reproduce the above copyright 73 * 2. Redistributions in binary form must reproduce the above copyright
74 * notice, this list of conditions and the following disclaimer in the 74 * notice, this list of conditions and the following disclaimer in the
75 * documentation and/or other materials provided with the distribution. 75 * documentation and/or other materials provided with the distribution.
76 * 3. All advertising materials mentioning features or use of this software 76 * 3. All advertising materials mentioning features or use of this software
77 * must display the following acknowledgement: 77 * must display the following acknowledgement:
78 * This product includes software developed by Mark Brinicombe 78 * This product includes software developed by Mark Brinicombe
79 * for the NetBSD Project. 79 * for the NetBSD Project.
80 * 4. The name of the company nor the name of the author may be used to 80 * 4. The name of the company nor the name of the author may be used to
81 * endorse or promote products derived from this software without specific 81 * endorse or promote products derived from this software without specific
82 * prior written permission. 82 * prior written permission.
83 * 83 *
84 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 84 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
85 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 85 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
86 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 86 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
87 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 87 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
88 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 88 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
89 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 89 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
90 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 90 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 91 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 92 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 93 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
94 * SUCH DAMAGE. 94 * SUCH DAMAGE.
95 * 95 *
96 * Copyright (c) 2007 Microsoft 96 * Copyright (c) 2007 Microsoft
97 * All rights reserved. 97 * All rights reserved.
98 * 98 *
99 * Redistribution and use in source and binary forms, with or without 99 * Redistribution and use in source and binary forms, with or without
100 * modification, are permitted provided that the following conditions 100 * modification, are permitted provided that the following conditions
101 * are met: 101 * are met:
102 * 1. Redistributions of source code must retain the above copyright 102 * 1. Redistributions of source code must retain the above copyright
103 * notice, this list of conditions and the following disclaimer. 103 * notice, this list of conditions and the following disclaimer.
104 * 2. Redistributions in binary form must reproduce the above copyright 104 * 2. Redistributions in binary form must reproduce the above copyright
105 * notice, this list of conditions and the following disclaimer in the 105 * notice, this list of conditions and the following disclaimer in the
106 * documentation and/or other materials provided with the distribution. 106 * documentation and/or other materials provided with the distribution.
107 * 3. All advertising materials mentioning features or use of this software 107 * 3. All advertising materials mentioning features or use of this software
108 * must display the following acknowledgement: 108 * must display the following acknowledgement:
109 * This product includes software developed by Microsoft 109 * This product includes software developed by Microsoft
110 * 110 *
111 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 111 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 112 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 113 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT, 114 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTERS BE LIABLE FOR ANY DIRECT,
115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 115 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 116 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 117 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 118 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 119 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 120 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
121 * SUCH DAMAGE. 121 * SUCH DAMAGE.
122 */ 122 */
123 123
124#include "opt_arm_debug.h" 124#include "opt_arm_debug.h"
125#include "opt_arm_start.h" 125#include "opt_arm_start.h"
126#include "opt_fdt.h" 126#include "opt_fdt.h"
127#include "opt_multiprocessor.h" 127#include "opt_multiprocessor.h"
128 128
129#include <sys/cdefs.h> 129#include <sys/cdefs.h>
130 -__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.59 2020/06/20 07:10:36 skrll Exp $");
130 +__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.60 2020/06/26 08:42:27 skrll Exp $");
131 131
132#include <sys/param.h> 132#include <sys/param.h>
133 133
134#include <sys/bus.h> 134#include <sys/bus.h>
135#include <sys/device.h> 135#include <sys/device.h>
136#include <sys/kernel.h> 136#include <sys/kernel.h>
137#include <sys/reboot.h> 137#include <sys/reboot.h>
138 138
139#include <dev/cons.h> 139#include <dev/cons.h>
140 140
141#include <uvm/uvm_extern.h> 141#include <uvm/uvm_extern.h>
142 142
143#include <arm/arm32/machdep.h> 143#include <arm/arm32/machdep.h>
144#include <arm/bootconfig.h> 144#include <arm/bootconfig.h>
145#include <arm/db_machdep.h> 145#include <arm/db_machdep.h>
146#include <arm/locore.h> 146#include <arm/locore.h>
147#include <arm/undefined.h> 147#include <arm/undefined.h>
148 148
149#if defined(FDT) 149#if defined(FDT)
150#include <arch/evbarm/fdt/platform.h> 150#include <arch/evbarm/fdt/platform.h>
151#include <arm/fdt/arm_fdtvar.h> 151#include <arm/fdt/arm_fdtvar.h>
152#endif 152#endif
153 153
154#ifdef MULTIPROCESSOR 154#ifdef MULTIPROCESSOR
155#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP 155#ifndef __HAVE_CPU_UAREA_ALLOC_IDLELWP
156#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack 156#error __HAVE_CPU_UAREA_ALLOC_IDLELWP required to not waste pages for idlestack
157#endif 157#endif
158#endif 158#endif
159 159
160#ifdef VERBOSE_INIT_ARM 160#ifdef VERBOSE_INIT_ARM
161#define VPRINTF(...) printf(__VA_ARGS__) 161#define VPRINTF(...) printf(__VA_ARGS__)
162#else 162#else
163#define VPRINTF(...) __nothing 163#define VPRINTF(...) __nothing
164#endif 164#endif
165 165
166struct bootmem_info bootmem_info; 166struct bootmem_info bootmem_info;
167 167
168extern void *msgbufaddr; 168extern void *msgbufaddr;
169paddr_t msgbufphys; 169paddr_t msgbufphys;
170paddr_t physical_start; 170paddr_t physical_start;
171paddr_t physical_end; 171paddr_t physical_end;
172 172
173extern char etext[]; 173extern char etext[];
174extern char __data_start[], _edata[]; 174extern char __data_start[], _edata[];
175extern char __bss_start[], __bss_end__[]; 175extern char __bss_start[], __bss_end__[];
176extern char _end[]; 176extern char _end[];
177 177
178/* Page tables for mapping kernel VM */ 178/* Page tables for mapping kernel VM */
179#define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */ 179#define KERNEL_L2PT_VMDATA_NUM 8 /* start with 32MB of KVM */
180 180
181u_long kern_vtopdiff __attribute__((__section__(".data"))); 181u_long kern_vtopdiff __attribute__((__section__(".data")));
182 182
183void 183void
184arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart) 184arm32_bootmem_init(paddr_t memstart, psize_t memsize, vsize_t kernelstart)
185{ 185{
186 struct bootmem_info * const bmi = &bootmem_info; 186 struct bootmem_info * const bmi = &bootmem_info;
187 pv_addr_t *pv = bmi->bmi_freeblocks; 187 pv_addr_t *pv = bmi->bmi_freeblocks;
188 188
189 /* 189 /*
190 * FDT/generic start fills in kern_vtopdiff early 190 * FDT/generic start fills in kern_vtopdiff early
191 */ 191 */
192#if defined(__HAVE_GENERIC_START) 192#if defined(__HAVE_GENERIC_START)
193 extern char KERNEL_BASE_virt[]; 193 extern char KERNEL_BASE_virt[];
194 extern char const __stop__init_memory[]; 194 extern char const __stop__init_memory[];
195 195
196 VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff); 196 VPRINTF("%s: kern_vtopdiff=%#lx\n", __func__, kern_vtopdiff);
197 197
198 vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt); 198 vaddr_t kstartva = trunc_page((vaddr_t)KERNEL_BASE_virt);
199 vaddr_t kendva = round_page((vaddr_t)__stop__init_memory); 199 vaddr_t kendva = round_page((vaddr_t)__stop__init_memory);
200 200
201 kernelstart = KERN_VTOPHYS(kstartva); 201 kernelstart = KERN_VTOPHYS(kstartva);
202 202
203 VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart); 203 VPRINTF("%s: kstartva=%#lx, kernelstart=%#lx\n", __func__, kstartva, kernelstart);
204#else 204#else
205 vaddr_t kendva = round_page((vaddr_t)_end); 205 vaddr_t kendva = round_page((vaddr_t)_end);
206 206
207#if defined(KERNEL_BASE_VOFFSET) 207#if defined(KERNEL_BASE_VOFFSET)
208 kern_vtopdiff = KERNEL_BASE_VOFFSET; 208 kern_vtopdiff = KERNEL_BASE_VOFFSET;
209#else 209#else
210 KASSERT(memstart == kernelstart); 210 KASSERT(memstart == kernelstart);
211 kern_vtopdiff = KERNEL_BASE + memstart; 211 kern_vtopdiff = KERNEL_BASE + memstart;
212#endif 212#endif
213#endif 213#endif
214 paddr_t kernelend = KERN_VTOPHYS(kendva); 214 paddr_t kernelend = KERN_VTOPHYS(kendva);
215 215
216 VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__, 216 VPRINTF("%s: memstart=%#lx, memsize=%#lx\n", __func__,
217 memstart, memsize); 217 memstart, memsize);
218 VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__, 218 VPRINTF("%s: kernelstart=%#lx, kernelend=%#lx\n", __func__,
219 kernelstart, kernelend); 219 kernelstart, kernelend);
220 220
221 physical_start = bmi->bmi_start = memstart; 221 physical_start = bmi->bmi_start = memstart;
222 physical_end = bmi->bmi_end = memstart + memsize; 222 physical_end = bmi->bmi_end = memstart + memsize;
223#ifndef ARM_HAS_LPAE 223#ifndef ARM_HAS_LPAE
224 if (physical_end == 0) { 224 if (physical_end == 0) {
225 physical_end = -PAGE_SIZE; 225 physical_end = -PAGE_SIZE;
226 memsize -= PAGE_SIZE; 226 memsize -= PAGE_SIZE;
227 bmi->bmi_end -= PAGE_SIZE; 227 bmi->bmi_end -= PAGE_SIZE;
228 VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n", 228 VPRINTF("%s: memsize shrunk by a page to avoid ending at 4GB\n",
229 __func__); 229 __func__);
230 } 230 }
231#endif 231#endif
232 physmem = memsize / PAGE_SIZE; 232 physmem = memsize / PAGE_SIZE;
233 233
234 /* 234 /*
235 * Let's record where the kernel lives. 235 * Let's record where the kernel lives.
236 */ 236 */
237 237
238 bmi->bmi_kernelstart = kernelstart; 238 bmi->bmi_kernelstart = kernelstart;
239 bmi->bmi_kernelend = kernelend; 239 bmi->bmi_kernelend = kernelend;
240 240
241#if defined(FDT) 241#if defined(FDT)
242 fdt_add_reserved_memory_range(bmi->bmi_kernelstart, 242 fdt_add_reserved_memory_range(bmi->bmi_kernelstart,
243 bmi->bmi_kernelend - bmi->bmi_kernelstart); 243 bmi->bmi_kernelend - bmi->bmi_kernelstart);
244#endif 244#endif
245 245
246 VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart, 246 VPRINTF("%s: kernel phys start %#lx end %#lx\n", __func__, kernelstart,
247 kernelend); 247 kernelend);
248 248
249#if 0 249#if 0
250 // XXX Makes RPI abort 250 // XXX Makes RPI abort
251 KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0); 251 KASSERT((kernelstart & (L2_S_SEGSIZE - 1)) == 0);
252#endif 252#endif
253 /* 253 /*
254 * Now the rest of the free memory must be after the kernel. 254 * Now the rest of the free memory must be after the kernel.
255 */ 255 */
256 pv->pv_pa = bmi->bmi_kernelend; 256 pv->pv_pa = bmi->bmi_kernelend;
257 pv->pv_va = KERN_PHYSTOV(pv->pv_pa); 257 pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
258 pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend; 258 pv->pv_size = bmi->bmi_end - bmi->bmi_kernelend;
259 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 259 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
260 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 260 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
261 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 261 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
262 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 262 pv->pv_pa + pv->pv_size - 1, pv->pv_va);
263 pv++; 263 pv++;
264 264
265 /* 265 /*
266 * Add a free block for any memory before the kernel. 266 * Add a free block for any memory before the kernel.
267 */ 267 */
268 if (bmi->bmi_start < bmi->bmi_kernelstart) { 268 if (bmi->bmi_start < bmi->bmi_kernelstart) {
269 pv->pv_pa = bmi->bmi_start; 269 pv->pv_pa = bmi->bmi_start;
270 pv->pv_va = KERN_PHYSTOV(pv->pv_pa); 270 pv->pv_va = KERN_PHYSTOV(pv->pv_pa);
271 pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa; 271 pv->pv_size = bmi->bmi_kernelstart - pv->pv_pa;
272 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE; 272 bmi->bmi_freepages += pv->pv_size / PAGE_SIZE;
273 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n", 273 VPRINTF("%s: adding %lu free pages: [%#lx..%#lx] (VA %#lx)\n",
274 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa, 274 __func__, pv->pv_size / PAGE_SIZE, pv->pv_pa,
275 pv->pv_pa + pv->pv_size - 1, pv->pv_va); 275 pv->pv_pa + pv->pv_size - 1, pv->pv_va);
276 pv++; 276 pv++;
277 } 277 }
278 278
279 bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks; 279 bmi->bmi_nfreeblocks = pv - bmi->bmi_freeblocks;
280 280
281 SLIST_INIT(&bmi->bmi_freechunks); 281 SLIST_INIT(&bmi->bmi_freechunks);
282 SLIST_INIT(&bmi->bmi_chunks); 282 SLIST_INIT(&bmi->bmi_chunks);
283} 283}
284 284
285static bool 285static bool
286concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv) 286concat_pvaddr(pv_addr_t *acc_pv, pv_addr_t *pv)
287{ 287{
288 if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa 288 if (acc_pv->pv_pa + acc_pv->pv_size == pv->pv_pa
289 && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va 289 && acc_pv->pv_va + acc_pv->pv_size == pv->pv_va
290 && acc_pv->pv_prot == pv->pv_prot 290 && acc_pv->pv_prot == pv->pv_prot
291 && acc_pv->pv_cache == pv->pv_cache) { 291 && acc_pv->pv_cache == pv->pv_cache) {
292#if 0 292#if 0
293 VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n", 293 VPRINTF("%s: appending pv %p (%#lx..%#lx) to %#lx..%#lx\n",
294 __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size, 294 __func__, pv, pv->pv_pa, pv->pv_pa + pv->pv_size,
295 acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size); 295 acc_pv->pv_pa, acc_pv->pv_pa + acc_pv->pv_size);
296#endif 296#endif
297 acc_pv->pv_size += pv->pv_size; 297 acc_pv->pv_size += pv->pv_size;
298 return true; 298 return true;
299 } 299 }
300 300
301 return false; 301 return false;
302} 302}
303 303
304static void 304static void
305add_pages(struct bootmem_info *bmi, pv_addr_t *pv) 305add_pages(struct bootmem_info *bmi, pv_addr_t *pv)
306{ 306{
307 pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks); 307 pv_addr_t **pvp = &SLIST_FIRST(&bmi->bmi_chunks);
308 while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) { 308 while ((*pvp) != NULL && (*pvp)->pv_va <= pv->pv_va) {
309 pv_addr_t * const pv0 = (*pvp); 309 pv_addr_t * const pv0 = (*pvp);
310 KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa); 310 KASSERT(SLIST_NEXT(pv0, pv_list) == NULL || pv0->pv_pa < SLIST_NEXT(pv0, pv_list)->pv_pa);
311 if (concat_pvaddr(pv0, pv)) { 311 if (concat_pvaddr(pv0, pv)) {
312 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 312 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
313 __func__, "appending", pv, 313 __func__, "appending", pv,
314 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 314 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
315 pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 315 pv0->pv_pa, pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
316 pv = SLIST_NEXT(pv0, pv_list); 316 pv = SLIST_NEXT(pv0, pv_list);
317 if (pv != NULL && concat_pvaddr(pv0, pv)) { 317 if (pv != NULL && concat_pvaddr(pv0, pv)) {
318 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n", 318 VPRINTF("%s: %s pv %p (%#lx..%#lx) to %#lx..%#lx\n",
319 __func__, "merging", pv, 319 __func__, "merging", pv,
320 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 320 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
321 pv0->pv_pa, 321 pv0->pv_pa,
322 pv0->pv_pa + pv0->pv_size - pv->pv_size - 1); 322 pv0->pv_pa + pv0->pv_size - pv->pv_size - 1);
323 SLIST_REMOVE_AFTER(pv0, pv_list); 323 SLIST_REMOVE_AFTER(pv0, pv_list);
324 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list); 324 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, pv, pv_list);
325 } 325 }
326 return; 326 return;
327 } 327 }
328 KASSERT(pv->pv_va != (*pvp)->pv_va); 328 KASSERT(pv->pv_va != (*pvp)->pv_va);
329 pvp = &SLIST_NEXT(*pvp, pv_list); 329 pvp = &SLIST_NEXT(*pvp, pv_list);
330 } 330 }
331 KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va); 331 KASSERT((*pvp) == NULL || pv->pv_va < (*pvp)->pv_va);
332 pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks); 332 pv_addr_t * const new_pv = SLIST_FIRST(&bmi->bmi_freechunks);
333 KASSERT(new_pv != NULL); 333 KASSERT(new_pv != NULL);
334 SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list); 334 SLIST_REMOVE_HEAD(&bmi->bmi_freechunks, pv_list);
335 *new_pv = *pv; 335 *new_pv = *pv;
336 SLIST_NEXT(new_pv, pv_list) = *pvp; 336 SLIST_NEXT(new_pv, pv_list) = *pvp;
337 (*pvp) = new_pv; 337 (*pvp) = new_pv;
338 338
339 VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ", 339 VPRINTF("%s: adding pv %p (pa %#lx, va %#lx, %lu pages) ",
340 __func__, new_pv, new_pv->pv_pa, new_pv->pv_va, 340 __func__, new_pv, new_pv->pv_pa, new_pv->pv_va,
341 new_pv->pv_size / PAGE_SIZE); 341 new_pv->pv_size / PAGE_SIZE);
342 if (SLIST_NEXT(new_pv, pv_list)) { 342 if (SLIST_NEXT(new_pv, pv_list)) {
343 VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa); 343 VPRINTF("before pa %#lx\n", SLIST_NEXT(new_pv, pv_list)->pv_pa);
344 } else { 344 } else {
345 VPRINTF("at tail\n"); 345 VPRINTF("at tail\n");
346 } 346 }
347} 347}
348 348
349static void 349static void
350valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages, 350valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
351 int prot, int cache, bool zero_p) 351 int prot, int cache, bool zero_p)
352{ 352{
353 size_t nbytes = npages * PAGE_SIZE; 353 size_t nbytes = npages * PAGE_SIZE;
354 pv_addr_t *free_pv = bmi->bmi_freeblocks; 354 pv_addr_t *free_pv = bmi->bmi_freeblocks;
355 size_t free_idx = 0; 355 size_t free_idx = 0;
356 static bool l1pt_found; 356 static bool l1pt_found;
357 357
358 KASSERT(npages > 0); 358 KASSERT(npages > 0);
359 359
360 /* 360 /*
361 * If we haven't allocated the kernel L1 page table and we are aligned 361 * If we haven't allocated the kernel L1 page table and we are aligned
362 * at a L1 table boundary, alloc the memory for it. 362 * at a L1 table boundary, alloc the memory for it.
363 */ 363 */
364 if (!l1pt_found 364 if (!l1pt_found
365 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0 365 && (free_pv->pv_pa & (L1_TABLE_SIZE - 1)) == 0
366 && free_pv->pv_size >= L1_TABLE_SIZE) { 366 && free_pv->pv_size >= L1_TABLE_SIZE) {
367 l1pt_found = true; 367 l1pt_found = true;
368 VPRINTF(" l1pt"); 368 VPRINTF(" l1pt");
369 369
370 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE, 370 valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
371 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 371 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
372 add_pages(bmi, &kernel_l1pt); 372 add_pages(bmi, &kernel_l1pt);
373 } 373 }
374 374
375 while (nbytes > free_pv->pv_size) { 375 while (nbytes > free_pv->pv_size) {
376 free_pv++; 376 free_pv++;
377 free_idx++; 377 free_idx++;
378 if (free_idx == bmi->bmi_nfreeblocks) { 378 if (free_idx == bmi->bmi_nfreeblocks) {
379 panic("%s: could not allocate %zu bytes", 379 panic("%s: could not allocate %zu bytes",
380 __func__, nbytes); 380 __func__, nbytes);
381 } 381 }
382 } 382 }
383 383
384 /* 384 /*
385 * As we allocate the memory, make sure that we don't walk over 385 * As we allocate the memory, make sure that we don't walk over
386 * our current first level translation table. 386 * our current first level translation table.
387 */ 387 */
388 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa); 388 KASSERT((armreg_ttbr_read() & ~(L1_TABLE_SIZE - 1)) != free_pv->pv_pa);
389 389
390#if defined(FDT) 390#if defined(FDT)
391 fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes); 391 fdt_add_reserved_memory_range(free_pv->pv_pa, nbytes);
392#endif 392#endif
393 pv->pv_pa = free_pv->pv_pa; 393 pv->pv_pa = free_pv->pv_pa;
394 pv->pv_va = free_pv->pv_va; 394 pv->pv_va = free_pv->pv_va;
395 pv->pv_size = nbytes; 395 pv->pv_size = nbytes;
396 pv->pv_prot = prot; 396 pv->pv_prot = prot;
397 pv->pv_cache = cache; 397 pv->pv_cache = cache;
398 398
399 /* 399 /*
400 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE 400 * If PTE_PAGETABLE uses the same cache modes as PTE_CACHE
401 * just use PTE_CACHE. 401 * just use PTE_CACHE.
402 */ 402 */
403 if (cache == PTE_PAGETABLE 403 if (cache == PTE_PAGETABLE
404 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt 404 && pte_l1_s_cache_mode == pte_l1_s_cache_mode_pt
405 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt 405 && pte_l2_l_cache_mode == pte_l2_l_cache_mode_pt
406 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt) 406 && pte_l2_s_cache_mode == pte_l2_s_cache_mode_pt)
407 pv->pv_cache = PTE_CACHE; 407 pv->pv_cache = PTE_CACHE;
408 408
409 free_pv->pv_pa += nbytes; 409 free_pv->pv_pa += nbytes;
410 free_pv->pv_va += nbytes; 410 free_pv->pv_va += nbytes;
411 free_pv->pv_size -= nbytes; 411 free_pv->pv_size -= nbytes;
412 if (free_pv->pv_size == 0) { 412 if (free_pv->pv_size == 0) {
413 --bmi->bmi_nfreeblocks; 413 --bmi->bmi_nfreeblocks;
414 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) { 414 for (; free_idx < bmi->bmi_nfreeblocks; free_idx++) {
415 free_pv[0] = free_pv[1]; 415 free_pv[0] = free_pv[1];
416 } 416 }
417 } 417 }
418 418
419 bmi->bmi_freepages -= npages; 419 bmi->bmi_freepages -= npages;
420 420
421 if (zero_p) 421 if (zero_p)
422 memset((void *)pv->pv_va, 0, nbytes); 422 memset((void *)pv->pv_va, 0, nbytes);
423} 423}
424 424
425void 425void
426arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase, 426arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
427 const struct pmap_devmap *devmap, bool mapallmem_p) 427 const struct pmap_devmap *devmap, bool mapallmem_p)
428{ 428{
429 struct bootmem_info * const bmi = &bootmem_info; 429 struct bootmem_info * const bmi = &bootmem_info;
430#ifdef MULTIPROCESSOR 430#ifdef MULTIPROCESSOR
431 const size_t cpu_num = arm_cpu_max; 431 const size_t cpu_num = arm_cpu_max;
432#else 432#else
433 const size_t cpu_num = 1; 433 const size_t cpu_num = 1;
434#endif 434#endif
435 435
436#ifdef ARM_HAS_VBAR 436#ifdef ARM_HAS_VBAR
437 const bool map_vectors_p = false; 437 const bool map_vectors_p = false;
438#elif defined(CPU_ARMV7) || defined(CPU_ARM11) 438#elif defined(CPU_ARMV7) || defined(CPU_ARM11)
439 const bool map_vectors_p = vectors == ARM_VECTORS_HIGH 439 const bool map_vectors_p = vectors == ARM_VECTORS_HIGH
440 || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0; 440 || (armreg_pfr1_read() & ARM_PFR1_SEC_MASK) == 0;
441#else 441#else
442 const bool map_vectors_p = true; 442 const bool map_vectors_p = true;
443#endif 443#endif
444 444
445#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS 445#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
446 KASSERT(mapallmem_p); 446 KASSERT(mapallmem_p);
447#ifdef ARM_MMU_EXTENDED 447#ifdef ARM_MMU_EXTENDED
448 /* 448 /*
449 * The direct map VA space ends at the start of the kernel VM space. 449 * The direct map VA space ends at the start of the kernel VM space.
450 */ 450 */
451 pmap_directlimit = kernel_vm_base; 451 pmap_directlimit = kernel_vm_base;
452#else 452#else
453 KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start); 453 KASSERT(kernel_vm_base - KERNEL_BASE >= physical_end - physical_start);
454#endif /* ARM_MMU_EXTENDED */ 454#endif /* ARM_MMU_EXTENDED */
455#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */ 455#endif /* __HAVE_MM_MD_DIRECT_MAPPED_PHYS */
456 456
457 /* 457 /*
458 * Calculate the number of L2 pages needed for mapping the 458 * Calculate the number of L2 pages needed for mapping the
459 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors, 459 * kernel + data + stuff. Assume 2 L2 pages for kernel, 1 for vectors,
460 * and 1 for IO 460 * and 1 for IO
461 */ 461 */
462 size_t kernel_size = bmi->bmi_kernelend; 462 size_t kernel_size = bmi->bmi_kernelend;
463 kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE); 463 kernel_size -= (bmi->bmi_kernelstart & -L2_S_SEGSIZE);
464 kernel_size += L1_TABLE_SIZE; 464 kernel_size += L1_TABLE_SIZE;
465 kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM; 465 kernel_size += PAGE_SIZE * KERNEL_L2PT_VMDATA_NUM;
466 if (map_vectors_p) { 466 if (map_vectors_p) {
467 kernel_size += PAGE_SIZE; /* L2PT for VECTORS */ 467 kernel_size += PAGE_SIZE; /* L2PT for VECTORS */
468 } 468 }
469 if (iovbase) { 469 if (iovbase) {
470 kernel_size += PAGE_SIZE; /* L2PT for IO */ 470 kernel_size += PAGE_SIZE; /* L2PT for IO */
471 } 471 }
472 kernel_size += 472 kernel_size +=
473 cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE 473 cpu_num * (ABT_STACK_SIZE + FIQ_STACK_SIZE + IRQ_STACK_SIZE
474 + UND_STACK_SIZE + UPAGES) * PAGE_SIZE; 474 + UND_STACK_SIZE + UPAGES) * PAGE_SIZE;
475 kernel_size += round_page(MSGBUFSIZE); 475 kernel_size += round_page(MSGBUFSIZE);
476 kernel_size += 0x10000; /* slop */ 476 kernel_size += 0x10000; /* slop */
477 if (!mapallmem_p) { 477 if (!mapallmem_p) {
478 kernel_size += PAGE_SIZE 478 kernel_size += PAGE_SIZE
479 * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE); 479 * ((kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE);
480 } 480 }
481 kernel_size = round_page(kernel_size); 481 kernel_size = round_page(kernel_size);
482 482
483 /* 483 /*
484 * Now we know how many L2 pages it will take. 484 * Now we know how many L2 pages it will take.
485 */ 485 */
486 const size_t KERNEL_L2PT_KERNEL_NUM = 486 const size_t KERNEL_L2PT_KERNEL_NUM =
487 round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE; 487 round_page(kernel_size + L2_S_SEGSIZE - 1) / L2_S_SEGSIZE;
488 488
489 VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n", 489 VPRINTF("%s: %zu L2 pages are needed to map %#zx kernel bytes\n",
490 __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size); 490 __func__, KERNEL_L2PT_KERNEL_NUM, kernel_size);
491 491
492 KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts)); 492 KASSERT(KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM < __arraycount(bmi->bmi_l2pts));
493 pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts; 493 pv_addr_t * const kernel_l2pt = bmi->bmi_l2pts;
494 pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM; 494 pv_addr_t * const vmdata_l2pt = kernel_l2pt + KERNEL_L2PT_KERNEL_NUM;
495 pv_addr_t msgbuf; 495 pv_addr_t msgbuf;
496 pv_addr_t text; 496 pv_addr_t text;
497 pv_addr_t data; 497 pv_addr_t data;
498 -	pv_addr_t chunks[KERNEL_L2PT_KERNEL_NUM + KERNEL_L2PT_VMDATA_NUM + 11];
498 +	pv_addr_t chunks[__arraycount(bmi->bmi_l2pts) + 11];
499#if ARM_MMU_XSCALE == 1 499#if ARM_MMU_XSCALE == 1
500 pv_addr_t minidataclean; 500 pv_addr_t minidataclean;
501#endif 501#endif
502 502
503 /* 503 /*
504 * We need to allocate some fixed page tables to get the kernel going. 504 * We need to allocate some fixed page tables to get the kernel going.
505 * 505 *
506 * We are going to allocate our bootstrap pages from the beginning of 506 * We are going to allocate our bootstrap pages from the beginning of
507 * the free space that we just calculated. We allocate one page 507 * the free space that we just calculated. We allocate one page
508 * directory and a number of page tables and store the physical 508 * directory and a number of page tables and store the physical
509 * addresses in the bmi_l2pts array in bootmem_info. 509 * addresses in the bmi_l2pts array in bootmem_info.
510 * 510 *
511 * The kernel page directory must be on a 16K boundary. The page 511 * The kernel page directory must be on a 16K boundary. The page
512 * tables must be on 4K boundaries. What we do is allocate the 512 * tables must be on 4K boundaries. What we do is allocate the
513 * page directory on the first 16K boundary that we encounter, and 513 * page directory on the first 16K boundary that we encounter, and
514 * the page tables on 4K boundaries otherwise. Since we allocate 514 * the page tables on 4K boundaries otherwise. Since we allocate
515 * at least 3 L2 page tables, we are guaranteed to encounter at 515 * at least 3 L2 page tables, we are guaranteed to encounter at
516 * least one 16K aligned region. 516 * least one 16K aligned region.
517 */ 517 */
518 518
519 VPRINTF("%s: allocating page tables for", __func__); 519 VPRINTF("%s: allocating page tables for", __func__);
520 for (size_t i = 0; i < __arraycount(chunks); i++) { 520 for (size_t i = 0; i < __arraycount(chunks); i++) {
521 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list); 521 SLIST_INSERT_HEAD(&bmi->bmi_freechunks, &chunks[i], pv_list);
522 } 522 }
523 523
524 kernel_l1pt.pv_pa = 0; 524 kernel_l1pt.pv_pa = 0;
525 kernel_l1pt.pv_va = 0; 525 kernel_l1pt.pv_va = 0;
526 526
527 /* 527 /*
528 * Allocate the L2 pages, but if we get to a page that is aligned for 528 * Allocate the L2 pages, but if we get to a page that is aligned for
529 * an L1 page table, we will allocate the pages for it first and then 529 * an L1 page table, we will allocate the pages for it first and then
530 * allocate the L2 page. 530 * allocate the L2 page.
531 */ 531 */
532 532
533 if (map_vectors_p) { 533 if (map_vectors_p) {
534 /* 534 /*
535 * First allocate L2 page for the vectors. 535 * First allocate L2 page for the vectors.
536 */ 536 */
537 VPRINTF(" vector"); 537 VPRINTF(" vector");
538 valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1, 538 valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
539 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 539 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
540 add_pages(bmi, &bmi->bmi_vector_l2pt); 540 add_pages(bmi, &bmi->bmi_vector_l2pt);
541 } 541 }
542 542
543 /* 543 /*
544 * Now allocate L2 pages for the kernel 544 * Now allocate L2 pages for the kernel
545 */ 545 */
546 VPRINTF(" kernel"); 546 VPRINTF(" kernel");
547 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) { 547 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
548 valloc_pages(bmi, &kernel_l2pt[idx], 1, 548 valloc_pages(bmi, &kernel_l2pt[idx], 1,
549 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 549 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
550 add_pages(bmi, &kernel_l2pt[idx]); 550 add_pages(bmi, &kernel_l2pt[idx]);
551 } 551 }
552 552
553 /* 553 /*
554 * Now allocate L2 pages for the initial kernel VA space. 554 * Now allocate L2 pages for the initial kernel VA space.
555 */ 555 */
556 VPRINTF(" vm"); 556 VPRINTF(" vm");
557 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) { 557 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
558 valloc_pages(bmi, &vmdata_l2pt[idx], 1, 558 valloc_pages(bmi, &vmdata_l2pt[idx], 1,
559 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 559 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
560 add_pages(bmi, &vmdata_l2pt[idx]); 560 add_pages(bmi, &vmdata_l2pt[idx]);
561 } 561 }
562 562
563 /* 563 /*
564 * If someone wanted a L2 page for I/O, allocate it now. 564 * If someone wanted a L2 page for I/O, allocate it now.
565 */ 565 */
566 if (iovbase) { 566 if (iovbase) {
567 VPRINTF(" io"); 567 VPRINTF(" io");
568 valloc_pages(bmi, &bmi->bmi_io_l2pt, 1, 568 valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
569 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true); 569 VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
570 add_pages(bmi, &bmi->bmi_io_l2pt); 570 add_pages(bmi, &bmi->bmi_io_l2pt);
571 } 571 }
572 572
573 VPRINTF("%s: allocating stacks\n", __func__); 573 VPRINTF("%s: allocating stacks\n", __func__);
574 574
575 /* Allocate stacks for all modes and CPUs */ 575 /* Allocate stacks for all modes and CPUs */
576 valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num, 576 valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
577 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 577 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
578 add_pages(bmi, &abtstack); 578 add_pages(bmi, &abtstack);
579 valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num, 579 valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
580 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 580 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
581 add_pages(bmi, &fiqstack); 581 add_pages(bmi, &fiqstack);
582 valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num, 582 valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
583 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 583 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
584 add_pages(bmi, &irqstack); 584 add_pages(bmi, &irqstack);
585 valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num, 585 valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
586 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 586 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
587 add_pages(bmi, &undstack); 587 add_pages(bmi, &undstack);
588 valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */ 588 valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */
589 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 589 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
590 add_pages(bmi, &idlestack); 590 add_pages(bmi, &idlestack);
591 valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */ 591 valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */
592 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true); 592 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
593 add_pages(bmi, &kernelstack); 593 add_pages(bmi, &kernelstack);
594 594
595 /* Allocate the message buffer from the end of memory. */ 595 /* Allocate the message buffer from the end of memory. */
596 const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE; 596 const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
597 valloc_pages(bmi, &msgbuf, msgbuf_pgs, 597 valloc_pages(bmi, &msgbuf, msgbuf_pgs,
598 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false); 598 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
599 add_pages(bmi, &msgbuf); 599 add_pages(bmi, &msgbuf);
600 msgbufphys = msgbuf.pv_pa; 600 msgbufphys = msgbuf.pv_pa;
601 msgbufaddr = (void *)msgbuf.pv_va; 601 msgbufaddr = (void *)msgbuf.pv_va;
602 602
603 if (map_vectors_p) { 603 if (map_vectors_p) {
604 /* 604 /*
605 * Allocate a page for the system vector page. 605 * Allocate a page for the system vector page.
606 * This page will just contain the system vectors and can be 606 * This page will just contain the system vectors and can be
607 * shared by all processes. 607 * shared by all processes.
608 */ 608 */
609 VPRINTF(" vector"); 609 VPRINTF(" vector");
610 610
611 valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, 611 valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
612 PTE_CACHE, true); 612 PTE_CACHE, true);
613 } 613 }
614 systempage.pv_va = vectors; 614 systempage.pv_va = vectors;
615 615
616 /* 616 /*
617 * If the caller needed a few extra pages for some reason, allocate 617 * If the caller needed a few extra pages for some reason, allocate
618 * them now. 618 * them now.
619 */ 619 */
620#if ARM_MMU_XSCALE == 1 620#if ARM_MMU_XSCALE == 1
621#if (ARM_NMMUS > 1) 621#if (ARM_NMMUS > 1)
622 if (xscale_use_minidata) 622 if (xscale_use_minidata)
623#endif 623#endif
624 valloc_pages(bmi, &minidataclean, 1, 624 valloc_pages(bmi, &minidataclean, 1,
625 VM_PROT_READ|VM_PROT_WRITE, 0, true); 625 VM_PROT_READ|VM_PROT_WRITE, 0, true);
626#endif 626#endif
627 627
628 /* 628 /*
629 * Ok we have allocated physical pages for the primary kernel 629 * Ok we have allocated physical pages for the primary kernel
630 * page tables and stacks. Let's just confirm that. 630 * page tables and stacks. Let's just confirm that.
631 */ 631 */
632 if (kernel_l1pt.pv_va == 0 632 if (kernel_l1pt.pv_va == 0
633 && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)) 633 && (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0))
634 panic("%s: Failed to allocate or align the kernel " 634 panic("%s: Failed to allocate or align the kernel "
635 "page directory", __func__); 635 "page directory", __func__);
636 636
637 VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n", 637 VPRINTF("Creating L1 page table at 0x%08lx/0x%08lx\n",
638 kernel_l1pt.pv_va, kernel_l1pt.pv_pa); 638 kernel_l1pt.pv_va, kernel_l1pt.pv_pa);
639 639
640 /* 640 /*
641 * Now we start construction of the L1 page table 641 * Now we start construction of the L1 page table
642 * We start by mapping the L2 page tables into the L1. 642 * We start by mapping the L2 page tables into the L1.
643 * This means that we can replace L1 mappings later on if necessary 643 * This means that we can replace L1 mappings later on if necessary
644 */ 644 */
645 vaddr_t l1pt_va = kernel_l1pt.pv_va; 645 vaddr_t l1pt_va = kernel_l1pt.pv_va;
646 paddr_t l1pt_pa = kernel_l1pt.pv_pa; 646 paddr_t l1pt_pa = kernel_l1pt.pv_pa;
647 647
648 if (map_vectors_p) { 648 if (map_vectors_p) {
649 /* Map the L2 pages tables in the L1 page table */ 649 /* Map the L2 pages tables in the L1 page table */
650 pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE, 650 pmap_link_l2pt(l1pt_va, systempage.pv_va & -L2_S_SEGSIZE,
651 &bmi->bmi_vector_l2pt); 651 &bmi->bmi_vector_l2pt);
652 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) " 652 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) "
653 "for VA %#lx\n (vectors)", 653 "for VA %#lx\n (vectors)",
654 __func__, bmi->bmi_vector_l2pt.pv_va, 654 __func__, bmi->bmi_vector_l2pt.pv_va,
655 bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va); 655 bmi->bmi_vector_l2pt.pv_pa, systempage.pv_va);
656 } 656 }
657 657
658 /* 658 /*
659 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel 659 * This enforces an alignment requirement of L2_S_SEGSIZE for kernel
660 * start PA 660 * start PA
661 */ 661 */
662 const vaddr_t kernel_base = 662 const vaddr_t kernel_base =
663 KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE); 663 KERN_PHYSTOV(bmi->bmi_kernelstart & -L2_S_SEGSIZE);
664 664
665 VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__, 665 VPRINTF("%s: kernel_base %lx KERNEL_L2PT_KERNEL_NUM %zu\n", __func__,
666 kernel_base, KERNEL_L2PT_KERNEL_NUM); 666 kernel_base, KERNEL_L2PT_KERNEL_NUM);
667 667
668 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) { 668 for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; idx++) {
669 pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE, 669 pmap_link_l2pt(l1pt_va, kernel_base + idx * L2_S_SEGSIZE,
670 &kernel_l2pt[idx]); 670 &kernel_l2pt[idx]);
671 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n", 671 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (kernel)\n",
672 __func__, kernel_l2pt[idx].pv_va, 672 __func__, kernel_l2pt[idx].pv_va,
673 kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE); 673 kernel_l2pt[idx].pv_pa, kernel_base + idx * L2_S_SEGSIZE);
674 } 674 }
675 675
676 VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__, 676 VPRINTF("%s: kernel_vm_base %lx KERNEL_L2PT_VMDATA_NUM %d\n", __func__,
677 kernel_vm_base, KERNEL_L2PT_VMDATA_NUM); 677 kernel_vm_base, KERNEL_L2PT_VMDATA_NUM);
678 678
679 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) { 679 for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; idx++) {
680 pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE, 680 pmap_link_l2pt(l1pt_va, kernel_vm_base + idx * L2_S_SEGSIZE,
681 &vmdata_l2pt[idx]); 681 &vmdata_l2pt[idx]);
682 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n", 682 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (vm)\n",
683 __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa, 683 __func__, vmdata_l2pt[idx].pv_va, vmdata_l2pt[idx].pv_pa,
684 kernel_vm_base + idx * L2_S_SEGSIZE); 684 kernel_vm_base + idx * L2_S_SEGSIZE);
685 } 685 }
686 if (iovbase) { 686 if (iovbase) {
687 pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt); 687 pmap_link_l2pt(l1pt_va, iovbase & -L2_S_SEGSIZE, &bmi->bmi_io_l2pt);
688 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n", 688 VPRINTF("%s: adding L2 pt (VA %#lx, PA %#lx) for VA %#lx (io)\n",
689 __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa, 689 __func__, bmi->bmi_io_l2pt.pv_va, bmi->bmi_io_l2pt.pv_pa,
690 iovbase & -L2_S_SEGSIZE); 690 iovbase & -L2_S_SEGSIZE);
691 } 691 }
692 692
693 /* update the top of the kernel VM */ 693 /* update the top of the kernel VM */
694 pmap_curmaxkvaddr = 694 pmap_curmaxkvaddr =
695 kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE); 695 kernel_vm_base + (KERNEL_L2PT_VMDATA_NUM * L2_S_SEGSIZE);
696 696
697 // This could be done earlier and then the kernel data and pages 697 // This could be done earlier and then the kernel data and pages
698 // allocated above would get merged (concatentated) 698 // allocated above would get merged (concatentated)
699 699
700 VPRINTF("Mapping kernel\n"); 700 VPRINTF("Mapping kernel\n");
701 701
702 extern char etext[]; 702 extern char etext[];
703 size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart; 703 size_t totalsize = bmi->bmi_kernelend - bmi->bmi_kernelstart;
704 size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart; 704 size_t textsize = KERN_VTOPHYS((uintptr_t)etext) - bmi->bmi_kernelstart;
705 705
706 textsize = (textsize + PGOFSET) & ~PGOFSET; 706 textsize = (textsize + PGOFSET) & ~PGOFSET;
707 707
708 /* start at offset of kernel in RAM */ 708 /* start at offset of kernel in RAM */
709 709
710 text.pv_pa = bmi->bmi_kernelstart; 710 text.pv_pa = bmi->bmi_kernelstart;
711 text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart); 711 text.pv_va = KERN_PHYSTOV(bmi->bmi_kernelstart);
712 text.pv_size = textsize; 712 text.pv_size = textsize;
713 text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE; 713 text.pv_prot = VM_PROT_READ | VM_PROT_EXECUTE;
714 text.pv_cache = PTE_CACHE; 714 text.pv_cache = PTE_CACHE;
715 715
716 VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n", 716 VPRINTF("%s: adding chunk for kernel text %#lx..%#lx (VA %#lx)\n",
717 __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va); 717 __func__, text.pv_pa, text.pv_pa + text.pv_size - 1, text.pv_va);
718 718
719 add_pages(bmi, &text); 719 add_pages(bmi, &text);
720 720
721 data.pv_pa = text.pv_pa + textsize; 721 data.pv_pa = text.pv_pa + textsize;
722 data.pv_va = text.pv_va + textsize; 722 data.pv_va = text.pv_va + textsize;
723 data.pv_size = totalsize - textsize; 723 data.pv_size = totalsize - textsize;
724 data.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 724 data.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
725 data.pv_cache = PTE_CACHE; 725 data.pv_cache = PTE_CACHE;
726 726
727 VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n", 727 VPRINTF("%s: adding chunk for kernel data/bss %#lx..%#lx (VA %#lx)\n",
728 __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va); 728 __func__, data.pv_pa, data.pv_pa + data.pv_size - 1, data.pv_va);
729 729
730 add_pages(bmi, &data); 730 add_pages(bmi, &data);
731 731
732 VPRINTF("Listing Chunks\n"); 732 VPRINTF("Listing Chunks\n");
733 733
734 pv_addr_t *lpv; 734 pv_addr_t *lpv;
735 SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) { 735 SLIST_FOREACH(lpv, &bmi->bmi_chunks, pv_list) {
736 VPRINTF("%s: pv %p: chunk VA %#lx..%#lx " 736 VPRINTF("%s: pv %p: chunk VA %#lx..%#lx "
737 "(PA %#lx, prot %d, cache %d)\n", 737 "(PA %#lx, prot %d, cache %d)\n",
738 __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1, 738 __func__, lpv, lpv->pv_va, lpv->pv_va + lpv->pv_size - 1,
739 lpv->pv_pa, lpv->pv_prot, lpv->pv_cache); 739 lpv->pv_pa, lpv->pv_prot, lpv->pv_cache);
740 } 740 }
741 VPRINTF("\nMapping Chunks\n"); 741 VPRINTF("\nMapping Chunks\n");
742 742
743 pv_addr_t cur_pv; 743 pv_addr_t cur_pv;
744 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks); 744 pv_addr_t *pv = SLIST_FIRST(&bmi->bmi_chunks);
745 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) { 745 if (!mapallmem_p || pv->pv_pa == bmi->bmi_start) {
746 cur_pv = *pv; 746 cur_pv = *pv;
747 KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va); 747 KASSERTMSG(cur_pv.pv_va >= KERNEL_BASE, "%#lx", cur_pv.pv_va);
748 pv = SLIST_NEXT(pv, pv_list); 748 pv = SLIST_NEXT(pv, pv_list);
749 } else { 749 } else {
750 cur_pv.pv_va = KERNEL_BASE; 750 cur_pv.pv_va = KERNEL_BASE;
751 cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va); 751 cur_pv.pv_pa = KERN_VTOPHYS(cur_pv.pv_va);
752 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa; 752 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_pa;
753 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 753 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
754 cur_pv.pv_cache = PTE_CACHE; 754 cur_pv.pv_cache = PTE_CACHE;
755 } 755 }
756 while (pv != NULL) { 756 while (pv != NULL) {
757 if (mapallmem_p) { 757 if (mapallmem_p) {
758 if (concat_pvaddr(&cur_pv, pv)) { 758 if (concat_pvaddr(&cur_pv, pv)) {
759 pv = SLIST_NEXT(pv, pv_list); 759 pv = SLIST_NEXT(pv, pv_list);
760 continue; 760 continue;
761 } 761 }
762 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) { 762 if (cur_pv.pv_pa + cur_pv.pv_size < pv->pv_pa) {
763 /* 763 /*
764 * See if we can extend the current pv to emcompass the 764 * See if we can extend the current pv to emcompass the
765 * hole, and if so do it and retry the concatenation. 765 * hole, and if so do it and retry the concatenation.
766 */ 766 */
767 if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE) 767 if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
768 && cur_pv.pv_cache == PTE_CACHE) { 768 && cur_pv.pv_cache == PTE_CACHE) {
769 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 769 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
770 continue; 770 continue;
771 } 771 }
772 772
773 /* 773 /*
774 * We couldn't so emit the current chunk and then 774 * We couldn't so emit the current chunk and then
775 */ 775 */
776 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 776 VPRINTF("%s: mapping chunk VA %#lx..%#lx "
777 "(PA %#lx, prot %d, cache %d)\n", 777 "(PA %#lx, prot %d, cache %d)\n",
778 __func__, 778 __func__,
779 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 779 cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
780 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 780 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
781 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 781 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
782 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 782 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
783 783
784 /* 784 /*
785 * set the current chunk to the hole and try again. 785 * set the current chunk to the hole and try again.
786 */ 786 */
787 cur_pv.pv_pa += cur_pv.pv_size; 787 cur_pv.pv_pa += cur_pv.pv_size;
788 cur_pv.pv_va += cur_pv.pv_size; 788 cur_pv.pv_va += cur_pv.pv_size;
789 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va; 789 cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
790 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 790 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
791 cur_pv.pv_cache = PTE_CACHE; 791 cur_pv.pv_cache = PTE_CACHE;
792 continue; 792 continue;
793 } 793 }
794 } 794 }
795 795
796 /* 796 /*
 797 * The new pv didn't concatenate, so emit the current one 797 * The new pv didn't concatenate, so emit the current one
798 * and use the new pv as the current pv. 798 * and use the new pv as the current pv.
799 */ 799 */
800 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 800 VPRINTF("%s: mapping chunk VA %#lx..%#lx "
801 "(PA %#lx, prot %d, cache %d)\n", 801 "(PA %#lx, prot %d, cache %d)\n",
802 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 802 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
803 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 803 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
804 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 804 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
805 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 805 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
806 cur_pv = *pv; 806 cur_pv = *pv;
807 pv = SLIST_NEXT(pv, pv_list); 807 pv = SLIST_NEXT(pv, pv_list);
808 } 808 }
809 809
810 /* 810 /*
 811 * If we are mapping all of memory, map whatever remains as well. 811 * If we are mapping all of memory, map whatever remains as well.
812 */ 812 */
813 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) { 813 if (mapallmem_p && cur_pv.pv_pa + cur_pv.pv_size < bmi->bmi_end) {
814 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE) 814 if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
815 && cur_pv.pv_cache == PTE_CACHE) { 815 && cur_pv.pv_cache == PTE_CACHE) {
816 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 816 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
817 } else { 817 } else {
818 KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base, 818 KASSERTMSG(cur_pv.pv_va + cur_pv.pv_size <= kernel_vm_base,
819 "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size, 819 "%#lx >= %#lx", cur_pv.pv_va + cur_pv.pv_size,
820 kernel_vm_base); 820 kernel_vm_base);
821 VPRINTF("%s: mapping chunk VA %#lx..%#lx " 821 VPRINTF("%s: mapping chunk VA %#lx..%#lx "
822 "(PA %#lx, prot %d, cache %d)\n", 822 "(PA %#lx, prot %d, cache %d)\n",
823 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 823 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
824 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 824 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
825 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 825 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
826 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 826 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
827 cur_pv.pv_pa += cur_pv.pv_size; 827 cur_pv.pv_pa += cur_pv.pv_size;
828 cur_pv.pv_va += cur_pv.pv_size; 828 cur_pv.pv_va += cur_pv.pv_size;
829 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa; 829 cur_pv.pv_size = bmi->bmi_end - cur_pv.pv_pa;
830 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE; 830 cur_pv.pv_prot = VM_PROT_READ | VM_PROT_WRITE;
831 cur_pv.pv_cache = PTE_CACHE; 831 cur_pv.pv_cache = PTE_CACHE;
832 } 832 }
833 } 833 }
834 834
835 /* 835 /*
836 * The amount we can direct map is limited by the start of the 836 * The amount we can direct map is limited by the start of the
837 * virtual part of the kernel address space. Don't overrun 837 * virtual part of the kernel address space. Don't overrun
838 * into it. 838 * into it.
839 */ 839 */
840 if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) { 840 if (mapallmem_p && cur_pv.pv_va + cur_pv.pv_size > kernel_vm_base) {
841 cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va; 841 cur_pv.pv_size = kernel_vm_base - cur_pv.pv_va;
842 } 842 }
843 843
844 /* 844 /*
845 * Now we map the final chunk. 845 * Now we map the final chunk.
846 */ 846 */
847 VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n", 847 VPRINTF("%s: mapping last chunk VA %#lx..%#lx (PA %#lx, prot %d, cache %d)\n",
848 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1, 848 __func__, cur_pv.pv_va, cur_pv.pv_va + cur_pv.pv_size - 1,
849 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache); 849 cur_pv.pv_pa, cur_pv.pv_prot, cur_pv.pv_cache);
850 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa, 850 pmap_map_chunk(l1pt_va, cur_pv.pv_va, cur_pv.pv_pa,
851 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache); 851 cur_pv.pv_size, cur_pv.pv_prot, cur_pv.pv_cache);
852 852
853 /* 853 /*
 854 * Now we map the pieces that aren't directly after the kernel. 854 * Now we map the pieces that aren't directly after the kernel.
855 */ 855 */
856 if (map_vectors_p) { 856 if (map_vectors_p) {
857 /* Map the vector page. */ 857 /* Map the vector page. */
858 pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa, 858 pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
859 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); 859 VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
860 } 860 }
861 861
862 /* Map the Mini-Data cache clean area. */ 862 /* Map the Mini-Data cache clean area. */
863#if ARM_MMU_XSCALE == 1 863#if ARM_MMU_XSCALE == 1
864#if (ARM_NMMUS > 1) 864#if (ARM_NMMUS > 1)
865 if (xscale_use_minidata) 865 if (xscale_use_minidata)
866#endif 866#endif
867 xscale_setup_minidata(l1pt_va, minidataclean.pv_va, 867 xscale_setup_minidata(l1pt_va, minidataclean.pv_va,
868 minidataclean.pv_pa); 868 minidataclean.pv_pa);
869#endif 869#endif
870 870
871 /* 871 /*
 872 * Map integrated peripherals at the same address in the first 872 * Map integrated peripherals at the same address in the first
 873 * level page table so that we can continue to use the console. 873 * level page table so that we can continue to use the console.
874 */ 874 */
875 if (devmap) 875 if (devmap)
876 pmap_devmap_bootstrap(l1pt_va, devmap); 876 pmap_devmap_bootstrap(l1pt_va, devmap);
877 877
878 /* Tell the user about where all the bits and pieces live. */ 878 /* Tell the user about where all the bits and pieces live. */
879 VPRINTF("%22s Physical Virtual Num\n", " "); 879 VPRINTF("%22s Physical Virtual Num\n", " ");
880 VPRINTF("%22s Starting Ending Starting Ending Pages\n", " "); 880 VPRINTF("%22s Starting Ending Starting Ending Pages\n", " ");
881 881
882#ifdef VERBOSE_INIT_ARM 882#ifdef VERBOSE_INIT_ARM
883 static const char mem_fmt[] = 883 static const char mem_fmt[] =
884 "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n"; 884 "%20s: 0x%08lx 0x%08lx 0x%08lx 0x%08lx %u\n";
885 static const char mem_fmt_nov[] = 885 static const char mem_fmt_nov[] =
886 "%20s: 0x%08lx 0x%08lx %zu\n"; 886 "%20s: 0x%08lx 0x%08lx %zu\n";
887#endif 887#endif
888 888
889#if 0 889#if 0
890 // XXX Doesn't make sense if kernel not at bottom of RAM 890 // XXX Doesn't make sense if kernel not at bottom of RAM
891 VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1, 891 VPRINTF(mem_fmt, "SDRAM", bmi->bmi_start, bmi->bmi_end - 1,
892 KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1), 892 KERN_PHYSTOV(bmi->bmi_start), KERN_PHYSTOV(bmi->bmi_end - 1),
893 (int)physmem); 893 (int)physmem);
894#endif 894#endif
895 VPRINTF(mem_fmt, "text section", 895 VPRINTF(mem_fmt, "text section",
896 text.pv_pa, text.pv_pa + text.pv_size - 1, 896 text.pv_pa, text.pv_pa + text.pv_size - 1,
897 text.pv_va, text.pv_va + text.pv_size - 1, 897 text.pv_va, text.pv_va + text.pv_size - 1,
898 (int)(text.pv_size / PAGE_SIZE)); 898 (int)(text.pv_size / PAGE_SIZE));
899 VPRINTF(mem_fmt, "data section", 899 VPRINTF(mem_fmt, "data section",
900 KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata), 900 KERN_VTOPHYS((vaddr_t)__data_start), KERN_VTOPHYS((vaddr_t)_edata),
901 (vaddr_t)__data_start, (vaddr_t)_edata, 901 (vaddr_t)__data_start, (vaddr_t)_edata,
902 (int)((round_page((vaddr_t)_edata) 902 (int)((round_page((vaddr_t)_edata)
903 - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE)); 903 - trunc_page((vaddr_t)__data_start)) / PAGE_SIZE));
904 VPRINTF(mem_fmt, "bss section", 904 VPRINTF(mem_fmt, "bss section",
905 KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__), 905 KERN_VTOPHYS((vaddr_t)__bss_start), KERN_VTOPHYS((vaddr_t)__bss_end__),
906 (vaddr_t)__bss_start, (vaddr_t)__bss_end__, 906 (vaddr_t)__bss_start, (vaddr_t)__bss_end__,
907 (int)((round_page((vaddr_t)__bss_end__) 907 (int)((round_page((vaddr_t)__bss_end__)
908 - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE)); 908 - trunc_page((vaddr_t)__bss_start)) / PAGE_SIZE));
909 VPRINTF(mem_fmt, "L1 page directory", 909 VPRINTF(mem_fmt, "L1 page directory",
910 kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1, 910 kernel_l1pt.pv_pa, kernel_l1pt.pv_pa + L1_TABLE_SIZE - 1,
911 kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1, 911 kernel_l1pt.pv_va, kernel_l1pt.pv_va + L1_TABLE_SIZE - 1,
912 L1_TABLE_SIZE / PAGE_SIZE); 912 L1_TABLE_SIZE / PAGE_SIZE);
913 VPRINTF(mem_fmt, "ABT stack (CPU 0)", 913 VPRINTF(mem_fmt, "ABT stack (CPU 0)",
914 abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 914 abtstack.pv_pa, abtstack.pv_pa + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
915 abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1, 915 abtstack.pv_va, abtstack.pv_va + (ABT_STACK_SIZE * PAGE_SIZE) - 1,
916 ABT_STACK_SIZE); 916 ABT_STACK_SIZE);
917 VPRINTF(mem_fmt, "FIQ stack (CPU 0)", 917 VPRINTF(mem_fmt, "FIQ stack (CPU 0)",
918 fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 918 fiqstack.pv_pa, fiqstack.pv_pa + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
919 fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1, 919 fiqstack.pv_va, fiqstack.pv_va + (FIQ_STACK_SIZE * PAGE_SIZE) - 1,
920 FIQ_STACK_SIZE); 920 FIQ_STACK_SIZE);
921 VPRINTF(mem_fmt, "IRQ stack (CPU 0)", 921 VPRINTF(mem_fmt, "IRQ stack (CPU 0)",
922 irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 922 irqstack.pv_pa, irqstack.pv_pa + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
923 irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1, 923 irqstack.pv_va, irqstack.pv_va + (IRQ_STACK_SIZE * PAGE_SIZE) - 1,
924 IRQ_STACK_SIZE); 924 IRQ_STACK_SIZE);
925 VPRINTF(mem_fmt, "UND stack (CPU 0)", 925 VPRINTF(mem_fmt, "UND stack (CPU 0)",
926 undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1, 926 undstack.pv_pa, undstack.pv_pa + (UND_STACK_SIZE * PAGE_SIZE) - 1,
927 undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1, 927 undstack.pv_va, undstack.pv_va + (UND_STACK_SIZE * PAGE_SIZE) - 1,
928 UND_STACK_SIZE); 928 UND_STACK_SIZE);
929 VPRINTF(mem_fmt, "IDLE stack (CPU 0)", 929 VPRINTF(mem_fmt, "IDLE stack (CPU 0)",
930 idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 930 idlestack.pv_pa, idlestack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
931 idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1, 931 idlestack.pv_va, idlestack.pv_va + (UPAGES * PAGE_SIZE) - 1,
932 UPAGES); 932 UPAGES);
933 VPRINTF(mem_fmt, "SVC stack", 933 VPRINTF(mem_fmt, "SVC stack",
934 kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1, 934 kernelstack.pv_pa, kernelstack.pv_pa + (UPAGES * PAGE_SIZE) - 1,
935 kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1, 935 kernelstack.pv_va, kernelstack.pv_va + (UPAGES * PAGE_SIZE) - 1,
936 UPAGES); 936 UPAGES);
937 VPRINTF(mem_fmt, "Message Buffer", 937 VPRINTF(mem_fmt, "Message Buffer",
938 msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1, 938 msgbuf.pv_pa, msgbuf.pv_pa + (msgbuf_pgs * PAGE_SIZE) - 1,
939 msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1, 939 msgbuf.pv_va, msgbuf.pv_va + (msgbuf_pgs * PAGE_SIZE) - 1,
940 (int)msgbuf_pgs); 940 (int)msgbuf_pgs);
941 if (map_vectors_p) { 941 if (map_vectors_p) {
942 VPRINTF(mem_fmt, "Exception Vectors", 942 VPRINTF(mem_fmt, "Exception Vectors",
943 systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1, 943 systempage.pv_pa, systempage.pv_pa + PAGE_SIZE - 1,
944 systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1, 944 systempage.pv_va, systempage.pv_va + PAGE_SIZE - 1,
945 1); 945 1);
946 } 946 }
947 for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) { 947 for (size_t i = 0; i < bmi->bmi_nfreeblocks; i++) {
948 pv = &bmi->bmi_freeblocks[i]; 948 pv = &bmi->bmi_freeblocks[i];
949 949
950 VPRINTF(mem_fmt_nov, "Free Memory", 950 VPRINTF(mem_fmt_nov, "Free Memory",
951 pv->pv_pa, pv->pv_pa + pv->pv_size - 1, 951 pv->pv_pa, pv->pv_pa + pv->pv_size - 1,
952 pv->pv_size / PAGE_SIZE); 952 pv->pv_size / PAGE_SIZE);
953 } 953 }
954 /* 954 /*
955 * Now we have the real page tables in place so we can switch to them. 955 * Now we have the real page tables in place so we can switch to them.
956 * Once this is done we will be running with the REAL kernel page 956 * Once this is done we will be running with the REAL kernel page
957 * tables. 957 * tables.
958 */ 958 */
959 959
960 VPRINTF("TTBR0=%#x", armreg_ttbr_read()); 960 VPRINTF("TTBR0=%#x", armreg_ttbr_read());
961#ifdef _ARM_ARCH_6 961#ifdef _ARM_ARCH_6
962 VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x", 962 VPRINTF(" TTBR1=%#x TTBCR=%#x CONTEXTIDR=%#x",
963 armreg_ttbr1_read(), armreg_ttbcr_read(), 963 armreg_ttbr1_read(), armreg_ttbcr_read(),
964 armreg_contextidr_read()); 964 armreg_contextidr_read());
965#endif 965#endif
966 VPRINTF("\n"); 966 VPRINTF("\n");
967 967
968 /* Switch tables */ 968 /* Switch tables */
969 VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa); 969 VPRINTF("switching to new L1 page table @%#lx...\n", l1pt_pa);
970 970
971 cpu_ttb = l1pt_pa; 971 cpu_ttb = l1pt_pa;
972 972
973 cpu_domains(DOMAIN_DEFAULT); 973 cpu_domains(DOMAIN_DEFAULT);
974 974
975 cpu_idcache_wbinv_all(); 975 cpu_idcache_wbinv_all();
976 976
977#ifdef __HAVE_GENERIC_START 977#ifdef __HAVE_GENERIC_START
978 978
979 /* 979 /*
980 * Turn on caches and set SCTLR/ACTLR 980 * Turn on caches and set SCTLR/ACTLR
981 */ 981 */
982 cpu_setup(boot_args); 982 cpu_setup(boot_args);
983#endif 983#endif
984 984
985 VPRINTF(" ttb"); 985 VPRINTF(" ttb");
986 986
987#ifdef ARM_MMU_EXTENDED 987#ifdef ARM_MMU_EXTENDED
988 /* 988 /*
989 * TTBCR should have been initialized by the MD start code. 989 * TTBCR should have been initialized by the MD start code.
990 */ 990 */
991 KASSERT((armreg_contextidr_read() & 0xff) == 0); 991 KASSERT((armreg_contextidr_read() & 0xff) == 0);
992 KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N)); 992 KASSERT(armreg_ttbcr_read() == __SHIFTIN(1, TTBCR_S_N));
993 /* 993 /*
994 * Disable lookups via TTBR0 until there is an activated pmap. 994 * Disable lookups via TTBR0 until there is an activated pmap.
995 */ 995 */
996 armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0); 996 armreg_ttbcr_write(armreg_ttbcr_read() | TTBCR_S_PD0);
997 cpu_setttb(l1pt_pa, KERNEL_PID); 997 cpu_setttb(l1pt_pa, KERNEL_PID);
998 arm_isb(); 998 arm_isb();
999#else 999#else
1000 cpu_setttb(l1pt_pa, true); 1000 cpu_setttb(l1pt_pa, true);
1001#endif 1001#endif
1002 1002
1003 cpu_tlb_flushID(); 1003 cpu_tlb_flushID();
1004 1004
1005#ifdef ARM_MMU_EXTENDED 1005#ifdef ARM_MMU_EXTENDED
1006 VPRINTF("\nsctlr=%#x actlr=%#x\n", 1006 VPRINTF("\nsctlr=%#x actlr=%#x\n",
1007 armreg_sctlr_read(), armreg_auxctl_read()); 1007 armreg_sctlr_read(), armreg_auxctl_read());
1008#else 1008#else
1009 VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read()); 1009 VPRINTF(" (TTBR0=%#x)", armreg_ttbr_read());
1010#endif 1010#endif
1011 1011
1012#ifdef MULTIPROCESSOR 1012#ifdef MULTIPROCESSOR
1013#ifndef __HAVE_GENERIC_START 1013#ifndef __HAVE_GENERIC_START
1014 /* 1014 /*
 1015 * Kick the secondaries to load the TTB, after which they'll go 1015 * Kick the secondaries to load the TTB, after which they'll go
 1016 * back to sleep to wait for the final kick that hatches them. 1016 * back to sleep to wait for the final kick that hatches them.
1017 */ 1017 */
1018 VPRINTF(" hatchlings"); 1018 VPRINTF(" hatchlings");
1019 cpu_boot_secondary_processors(); 1019 cpu_boot_secondary_processors();
1020#endif 1020#endif
1021#endif 1021#endif
1022 1022
1023 VPRINTF(" OK\n"); 1023 VPRINTF(" OK\n");
1024} 1024}
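
For readers following the chunk-coalescing loop above: the sketch below illustrates, in simplified form, the kind of merge test a helper like concat_pvaddr() performs before the loop falls back to emitting the current run with pmap_map_chunk(). It is a minimal stand-alone sketch, not the NetBSD implementation: struct pv_chunk and chunk_concat() are hypothetical names, the field set is reduced to what the diff above uses, and the real concat_pvaddr() may apply different or additional criteria.

    #include <stdbool.h>

    /* Simplified stand-ins for the kernel's pv_addr_t bookkeeping. */
    typedef unsigned long paddr_t;   /* physical address */
    typedef unsigned long vaddr_t;   /* virtual address */
    typedef unsigned long psize_t;   /* size in bytes */

    struct pv_chunk {                /* hypothetical; mirrors the fields used above */
            vaddr_t pv_va;           /* virtual start of the run */
            paddr_t pv_pa;           /* physical start of the run */
            psize_t pv_size;         /* length of the run in bytes */
            int     pv_prot;         /* protection bits */
            int     pv_cache;        /* cache attribute */
    };

    /*
     * Merge "next" into "cur" when it begins exactly where "cur" ends,
     * both physically and virtually, and the protection and cache
     * attributes match.  Returns true on success, leaving "cur"
     * covering both runs.
     */
    static bool
    chunk_concat(struct pv_chunk *cur, const struct pv_chunk *next)
    {
            if (cur->pv_pa + cur->pv_size != next->pv_pa ||
                cur->pv_va + cur->pv_size != next->pv_va ||
                cur->pv_prot != next->pv_prot ||
                cur->pv_cache != next->pv_cache)
                    return false;
            cur->pv_size += next->pv_size;
            return true;
    }

Coalescing runs before handing them to pmap_map_chunk() keeps the number of mapping calls small and gives the pmap code larger, better-aligned extents to work with, which is why the loop above only emits a chunk when a merge or hole extension is impossible.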