Mon Dec 30 15:42:40 2019 UTC ()
Remove unnecessary brackets and unwrap a conditional.  Same code before
and after.


(skrll)
diff -r1.58 -r1.59 src/sys/arch/aarch64/aarch64/pmap.c

cvs diff -r1.58 -r1.59 src/sys/arch/aarch64/aarch64/pmap.c (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/pmap.c 2019/12/28 17:19:43 1.58
+++ src/sys/arch/aarch64/aarch64/pmap.c 2019/12/30 15:42:39 1.59
@@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.58 2019/12/28 17:19:43 jmcneill Exp $ */ 1/* $NetBSD: pmap.c,v 1.59 2019/12/30 15:42:39 skrll Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.58 2019/12/28 17:19:43 jmcneill Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.59 2019/12/30 15:42:39 skrll Exp $");
31 31
32#include "opt_arm_debug.h" 32#include "opt_arm_debug.h"
33#include "opt_ddb.h" 33#include "opt_ddb.h"
34#include "opt_multiprocessor.h" 34#include "opt_multiprocessor.h"
35#include "opt_pmap.h" 35#include "opt_pmap.h"
36#include "opt_uvmhist.h" 36#include "opt_uvmhist.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/types.h> 39#include <sys/types.h>
40#include <sys/kmem.h> 40#include <sys/kmem.h>
41#include <sys/vmem.h> 41#include <sys/vmem.h>
42#include <sys/atomic.h> 42#include <sys/atomic.h>
43#include <sys/asan.h> 43#include <sys/asan.h>
@@ -313,28 +313,27 @@ pmap_devmap_bootstrap(vaddr_t l0pt, cons
313 313
314 VPRINTF("%s:\n", __func__); 314 VPRINTF("%s:\n", __func__);
315 for (i = 0; table[i].pd_size != 0; i++) { 315 for (i = 0; table[i].pd_size != 0; i++) {
316 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n", 316 VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
317 table[i].pd_pa, 317 table[i].pd_pa,
318 table[i].pd_pa + table[i].pd_size - 1, 318 table[i].pd_pa + table[i].pd_size - 1,
319 table[i].pd_va); 319 table[i].pd_va);
320 va = table[i].pd_va; 320 va = table[i].pd_va;
321 321
322 KASSERT((VM_KERNEL_IO_ADDRESS <= va) && 322 KASSERT((VM_KERNEL_IO_ADDRESS <= va) &&
323 (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE))); 323 (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE)));
324 324
325 /* update and check virtual_devmap_addr */ 325 /* update and check virtual_devmap_addr */
326 if ((virtual_devmap_addr == 0) || 326 if (virtual_devmap_addr == 0 || virtual_devmap_addr > va) {
327 (virtual_devmap_addr > va)) { 
328 virtual_devmap_addr = va; 327 virtual_devmap_addr = va;
329 } 328 }
330 329
331 pmap_map_chunk( 330 pmap_map_chunk(
332 table[i].pd_va, 331 table[i].pd_va,
333 table[i].pd_pa, 332 table[i].pd_pa,
334 table[i].pd_size, 333 table[i].pd_size,
335 table[i].pd_prot, 334 table[i].pd_prot,
336 table[i].pd_flags); 335 table[i].pd_flags);
337 } 336 }
338 337
339 pmap_devmap_bootstrap_done = true; 338 pmap_devmap_bootstrap_done = true;
340} 339}
@@ -413,27 +412,27 @@ pmap_bootstrap(vaddr_t vstart, vaddr_t v
413 paddr_t l0pa; 412 paddr_t l0pa;
414 413
415 PMAP_HIST_INIT(); /* init once */ 414 PMAP_HIST_INIT(); /* init once */
416 415
417 UVMHIST_FUNC(__func__); 416 UVMHIST_FUNC(__func__);
418 UVMHIST_CALLED(pmaphist); 417 UVMHIST_CALLED(pmaphist);
419 418
420#if 0 419#if 0
421 /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */ 420 /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */
422 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE; 421 uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE;
423#endif 422#endif
424 423
425 /* devmap already uses last of va? */ 424 /* devmap already uses last of va? */
426 if ((virtual_devmap_addr != 0) && (virtual_devmap_addr < vend)) 425 if (virtual_devmap_addr != 0 && virtual_devmap_addr < vend)
427 vend = virtual_devmap_addr; 426 vend = virtual_devmap_addr;
428 427
429 virtual_avail = vstart; 428 virtual_avail = vstart;
430 virtual_end = vend; 429 virtual_end = vend;
431 pmap_maxkvaddr = vstart; 430 pmap_maxkvaddr = vstart;
432 431
433 aarch64_tlbi_all(); 432 aarch64_tlbi_all();
434 433
435 l0pa = reg_ttbr1_el1_read(); 434 l0pa = reg_ttbr1_el1_read();
436 l0 = (void *)AARCH64_PA_TO_KVA(l0pa); 435 l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
437 436
438 memset(&kernel_pmap, 0, sizeof(kernel_pmap)); 437 memset(&kernel_pmap, 0, sizeof(kernel_pmap));
439 kpm = pmap_kernel(); 438 kpm = pmap_kernel();