Wed Jan 8 05:41:07 2020 UTC
fix panic when loading a kernel module with modload(8).

>panic: kernel diagnostic assertion "!pmap_extract(pmap_kernel(), loopva, NULL)" failed: file "../../../../uvm/uvm_km.c", line 674 loopva=0xffffffc001000000

The space allocated by bootpage_alloc() is used only as physical pages
for pagetable pages, so there is no need to map it into KVA.
Consequently, kernend_extra should never have consumed any KVA space.


(ryo)
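
[Editor's note] To illustrate the VA layout change, here is a minimal C
sketch; kern_vm_start is a hypothetical helper, and the macros are
illustrative stand-ins for the kernel's 2MB L2 block rounding:

	#include <stdint.h>

	/* Illustrative stand-ins for the kernel's L2 block macros. */
	#define L2_SIZE			(2UL * 1024 * 1024)
	#define L2_ROUND_BLOCK(va)	(((va) + L2_SIZE - 1) & ~(L2_SIZE - 1))

	/*
	 * kernend_extra bytes come from bootpage_alloc() and back
	 * pagetable pages only: they need physical backing, but no
	 * KVA mapping.
	 */
	uint64_t
	kern_vm_start(uint64_t kernend, uint64_t kernend_extra)
	{
		/* before: the extra pages pushed the kernel VM area up */
		/* return L2_ROUND_BLOCK(kernend + kernend_extra); */
		(void)kernend_extra;

		/* after: kernel VA ends with the image; the extra pages
		 * exist only as physical memory */
		return L2_ROUND_BLOCK(kernend);
	}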
diff -r1.36 -r1.37 src/sys/arch/aarch64/aarch64/aarch64_machdep.c
diff -r1.49 -r1.50 src/sys/arch/aarch64/aarch64/locore.S

cvs diff -r1.36 -r1.37 src/sys/arch/aarch64/aarch64/aarch64_machdep.c

--- src/sys/arch/aarch64/aarch64/aarch64_machdep.c 2019/12/30 15:54:55 1.36
+++ src/sys/arch/aarch64/aarch64/aarch64_machdep.c 2020/01/08 05:41:07 1.37
@@ -1,14 +1,14 @@
-/*	$NetBSD: aarch64_machdep.c,v 1.36 2019/12/30 15:54:55 skrll Exp $	*/
+/*	$NetBSD: aarch64_machdep.c,v 1.37 2020/01/08 05:41:07 ryo Exp $	*/
 
 /*-
  * Copyright (c) 2014 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas of 3am Software Foundry.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -20,27 +20,27 @@
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.36 2019/12/30 15:54:55 skrll Exp $");
+__KERNEL_RCSID(1, "$NetBSD: aarch64_machdep.c,v 1.37 2020/01/08 05:41:07 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
 #include "opt_kernhist.h"
 #include "opt_modular.h"
 #include "opt_fdt.h"
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/asan.h>
 #include <sys/bus.h>
 #include <sys/core.h>
 #include <sys/conf.h>
@@ -89,27 +89,27 @@ const pcu_ops_t * const pcu_ops_md_defs[
 struct vm_map *phys_map;
 
 #ifdef MODULAR
 vaddr_t module_start, module_end;
 static struct vm_map module_map_store;
 #endif
 
 /* XXX */
 vaddr_t physical_start;
 vaddr_t physical_end;
 /* filled in before cleaning bss. keep in .data */
 u_long kern_vtopdiff __attribute__((__section__(".data")));
 
-long kernend_extra;	/* extra memory allocated from round_page(_end[]) */
+long kernend_extra;	/* extra physicalmemory allocated from round_page(_end[]) */
 
 /* dump configuration */
 int	cpu_dump(void);
 int	cpu_dumpsize(void);
 u_long	cpu_dump_mempagecnt(void);
 
 uint32_t dumpmag = 0x8fca0101;	/* magic number for savecore */
 int	dumpsize = 0;		/* also for savecore */
 long	dumplo = 0;
 
 void
 cpu_kernel_vm_init(uint64_t memory_start __unused, uint64_t memory_size __unused)
 {
@@ -217,27 +217,27 @@ cpu_kernel_vm_init(uint64_t memory_start
 	/*
 	 * at this point, whole kernel image is mapped as "rwx".
 	 * permission should be changed to:
 	 *
 	 *    text     rwx => r-x
 	 *    rodata   rwx => r--
 	 *    data     rwx => rw-
 	 *
 	 * kernel image has mapped by L2 block. (2Mbyte)
 	 */
 	pmapboot_protect(L2_TRUNC_BLOCK(kernstart),
 	    L2_TRUNC_BLOCK(data_start), VM_PROT_WRITE);
 	pmapboot_protect(L2_ROUND_BLOCK(rodata_start),
-	    L2_ROUND_BLOCK(kernend + kernend_extra), VM_PROT_EXECUTE);
+	    L2_ROUND_BLOCK(kernend), VM_PROT_EXECUTE);
 
 	aarch64_tlbi_all();
 
 	VPRINTF("%s: kernel phys start %lx end %lx+%lx\n", __func__,
 	    kernstart_phys, kernend_phys, kernend_extra);
 	fdt_add_reserved_memory_range(kernstart_phys,
 	    kernend_phys - kernstart_phys + kernend_extra);
 }
 
 
 
 /*
  * Upper region: 0xffff_ffff_ffff_ffff  Top of virtual memory
@@ -277,27 +277,27 @@ initarm_common(vaddr_t kvm_base, vsize_t
 	struct pcb *pcb;
 	struct trapframe *tf;
 	psize_t memsize_total;
 	vaddr_t kernstart, kernend;
 	vaddr_t kernstart_l2 __unused, kernend_l2;	/* L2 table 2MB aligned */
 	vaddr_t kernelvmstart;
 	int i;
 
 	cputype = cpu_idnum();	/* for compatible arm */
 
 	kernstart = trunc_page((vaddr_t)__kernel_text);
 	kernend = round_page((vaddr_t)_end);
 	kernstart_l2 = L2_TRUNC_BLOCK(kernstart);
-	kernend_l2 = L2_ROUND_BLOCK(kernend + kernend_extra);
+	kernend_l2 = L2_ROUND_BLOCK(kernend);
 	kernelvmstart = kernend_l2;
 
 #ifdef MODULAR
 	/*
 	 * aarch64 compiler (gcc & llvm) uses R_AARCH_CALL26/R_AARCH_JUMP26
 	 * for function calling/jumping.
 	 * (at this time, both compilers doesn't support -mlong-calls)
 	 * therefore kernel modules should be loaded within maximum 26bit word,
 	 * or +-128MB from kernel.
 	 */
 #define MODULE_RESERVED_MAX	(1024 * 1024 * 128)
 #define MODULE_RESERVED_SIZE	(1024 * 1024 * 32)	/* good enough? */
 	module_start = kernelvmstart;
@@ -331,55 +331,55 @@ initarm_common(vaddr_t kvm_base, vsize_t
 		msgbufaddr = bootconfig.dram[i].address +
 		    ptoa(bootconfig.dram[i].pages);
 		break;
 	}
 	KASSERT(msgbufaddr != 0);	/* no space for msgbuf */
 	initmsgbuf((void *)AARCH64_PA_TO_KVA(msgbufaddr), MSGBUFSIZE);
 
 	VPRINTF(
 	    "------------------------------------------\n"
 	    "kern_vtopdiff         = 0x%016lx\n"
 	    "physical_start        = 0x%016lx\n"
 	    "kernel_start_phys     = 0x%016lx\n"
 	    "kernel_end_phys       = 0x%016lx\n"
+	    "pagetables_start_phys = 0x%016lx\n"
+	    "pagetables_end_phys   = 0x%016lx\n"
 	    "msgbuf                = 0x%016lx\n"
 	    "physical_end          = 0x%016lx\n"
 	    "VM_MIN_KERNEL_ADDRESS = 0x%016lx\n"
 	    "kernel_start_l2       = 0x%016lx\n"
 	    "kernel_start          = 0x%016lx\n"
 	    "kernel_end            = 0x%016lx\n"
-	    "pagetables            = 0x%016lx\n"
-	    "pagetables_end        = 0x%016lx\n"
 	    "kernel_end_l2         = 0x%016lx\n"
 #ifdef MODULAR
 	    "module_start          = 0x%016lx\n"
 	    "module_end            = 0x%016lx\n"
 #endif
 	    "(kernel va area)\n"
 	    "(devmap va area)      = 0x%016lx\n"
 	    "VM_MAX_KERNEL_ADDRESS = 0x%016lx\n"
 	    "------------------------------------------\n",
 	    kern_vtopdiff,
 	    physical_start,
 	    kernstart_phys,
 	    kernend_phys,
+	    round_page(kernend_phys),
+	    round_page(kernend_phys) + kernend_extra,
 	    msgbufaddr,
 	    physical_end,
 	    VM_MIN_KERNEL_ADDRESS,
 	    kernstart_l2,
 	    kernstart,
 	    kernend,
-	    round_page(kernend),
-	    round_page(kernend) + kernend_extra,
 	    kernend_l2,
 #ifdef MODULAR
 	    module_start,
 	    module_end,
 #endif
 	    VM_KERNEL_IO_ADDRESS,
 	    VM_MAX_KERNEL_ADDRESS);
 
 #ifdef DDB
 	db_machdep_init();
 #endif
 
 	uvm_md_init();
@@ -543,27 +543,27 @@ mm_md_kernacc(void *ptr, vm_prot_t prot,
 
 	vaddr_t kernstart = trunc_page((vaddr_t)__kernel_text);
 	vaddr_t kernend = round_page((vaddr_t)_end);
 	paddr_t kernstart_phys = KERN_VTOPHYS(kernstart);
 	vaddr_t data_start = (vaddr_t)__data_start;
 	vaddr_t rodata_start = (vaddr_t)__rodata_start;
 	vsize_t rosize = kernend - rodata_start;
 
 	const vaddr_t v = (vaddr_t)ptr;
 
 #define IN_RANGE(addr,sta,end)	(((sta) <= (addr)) && ((addr) < (end)))
 
 	*handled = false;
-	if (IN_RANGE(v, kernstart, kernend + kernend_extra)) {
+	if (IN_RANGE(v, kernstart, kernend)) {
 		*handled = true;
 		if ((v < data_start) && (prot & VM_PROT_WRITE))
 			return EFAULT;
 	} else if (IN_RANGE(v, AARCH64_KSEG_START, AARCH64_KSEG_END)) {
 		/*
 		 * if defined PMAP_MAP_POOLPAGE, direct mapped address (KSEG)
 		 * will be appeared as kvm(3) address.
 		 */
 		paddr_t pa = AARCH64_KVA_TO_PA(v);
 		if (in_dram_p(pa, 0)) {
 			*handled = true;
 			if (IN_RANGE(pa, kernstart_phys,
 			    kernstart_phys + rosize) &&
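
[Editor's note] On the MODULE_RESERVED_* values in the @@ -277 hunk
above: the R_AARCH64_CALL26/R_AARCH64_JUMP26 relocations used by B/BL
encode a signed 26-bit word offset, i.e. +-2^25 instructions of 4 bytes
each, which is +-128MB; hence modules must sit within 128MB of the
kernel. A minimal sketch of that range check (hypothetical helper, not
NetBSD code):

	#include <stdint.h>

	#define AARCH64_INSN_SIZE	4	/* bytes per instruction */
	/* 2^25 instructions * 4 bytes = 128MB */
	#define BRANCH_RANGE	((1L << 25) * AARCH64_INSN_SIZE)

	/* Can code at 'from' reach 'to' with a single B/BL? */
	static int
	in_branch_range(uintptr_t from, uintptr_t to)
	{
		intptr_t off = (intptr_t)(to - from);

		return (-BRANCH_RANGE <= off && off < BRANCH_RANGE);
	}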

cvs diff -r1.49 -r1.50 src/sys/arch/aarch64/aarch64/locore.S

--- src/sys/arch/aarch64/aarch64/locore.S 2019/12/28 17:19:43 1.49
+++ src/sys/arch/aarch64/aarch64/locore.S 2020/01/08 05:41:07 1.50
@@ -1,14 +1,14 @@
-/* $NetBSD: locore.S,v 1.49 2019/12/28 17:19:43 jmcneill Exp $ */
+/* $NetBSD: locore.S,v 1.50 2020/01/08 05:41:07 ryo Exp $ */
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -28,27 +28,27 @@
 
 #include "opt_arm_debug.h"
 #include "opt_console.h"
 #include "opt_cpuoptions.h"
 #include "opt_ddb.h"
 #include "opt_fdt.h"
 #include "opt_kasan.h"
 #include "opt_multiprocessor.h"
 
 #include <aarch64/asm.h>
 #include <aarch64/hypervisor.h>
 #include "assym.h"
 
-RCSID("$NetBSD: locore.S,v 1.49 2019/12/28 17:19:43 jmcneill Exp $")
+RCSID("$NetBSD: locore.S,v 1.50 2020/01/08 05:41:07 ryo Exp $")
 
 #ifdef AARCH64_DEVICE_MEM_STRONGLY_ORDERED
 #define MAIR_DEVICE_MEM		MAIR_DEVICE_nGnRnE
 #else
 #define MAIR_DEVICE_MEM		MAIR_DEVICE_nGnRE
 #endif
 #define MAIR_DEVICE_MEM_SO	MAIR_DEVICE_nGnRnE
 
 /*#define DEBUG_LOCORE			/* debug print */
 /*#define DEBUG_LOCORE_PRINT_LOCK	/* avoid mixing AP's output */
 /*#define DEBUG_MMU			/* dump MMU table */
 
 #define LOCORE_EL2
@@ -838,27 +838,26 @@ init_mmutable:
 	cbnz	x0, init_mmutable_error
 #endif
 
 	VPRINT("Creating KVA=PA tables\n")
 	mov	x7, x26			/* pr func */
 	adr	x6, bootpage_alloc	/* allocator */
 	mov	x5, xzr			/* flags = 0 */
 	mov	x4, #LX_BLKPAG_ATTR_NORMAL_WB|LX_BLKPAG_AP_RW	/* attr */
 	orr	x4, x4, #LX_BLKPAG_UXN
 	mov	x3, #L2_SIZE		/* blocksize */
 	adr	x1, start		/* pa = start */
 	ADDR	x2, _end
 	sub	x2, x2, x1		/* size = _end - start */
-	add	x2, x2, #BOOTPAGE_ALLOC_MAX	/* for bootpage_alloc() */
 	ldr	x0, =start		/* va */
 	bl	pmapboot_enter
 	cbnz	x0, init_mmutable_error
 
 	VPRINT("OK\n");
 	mov	x0, xzr
 	b	init_mmutable_done
 init_mmutable_error:
 	mvn	x0, xzr
 init_mmutable_done:
 	ldp	x26, lr, [sp], #16
 	ret
 
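
[Editor's note] The removed "add x2, x2, #BOOTPAGE_ALLOC_MAX" is the
locore side of the fix: the boot-time KVA=PA mapping now covers only the
kernel image, while the bootpage_alloc() arena keeps its physical
backing without a KVA window. Roughly, in C (a sketch of the size
argument, not the actual assembly):

	#include <stddef.h>

	extern char start[], _end[];	/* linker symbols, as in locore.S */

	/* Illustrative: size of the boot KVA=PA mapping for pmapboot_enter. */
	static size_t
	boot_mapping_size(void)
	{
		/* after the fix: map the kernel image only */
		return (size_t)(_end - start);
		/*
		 * before: the code added BOOTPAGE_ALLOC_MAX here, which
		 * also mapped the bootpage_alloc() arena into KVA; per the
		 * commit message, those leftover entries are what the
		 * pmap_extract() KASSERT tripped over at modload time.
		 */
	}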