Sat Jul 4 16:58:11 2020 UTC ()
Use tlen for the temporary length variable instead of l, which is
conventionally used for struct lwp *.

No binary changes.


(rin)
diff -r1.82 -r1.83 src/sys/arch/aarch64/aarch64/pmap.c

cvs diff -r1.82 -r1.83 src/sys/arch/aarch64/aarch64/pmap.c (expand / switch to unified diff)

--- src/sys/arch/aarch64/aarch64/pmap.c 2020/07/02 13:01:11 1.82
+++ src/sys/arch/aarch64/aarch64/pmap.c 2020/07/04 16:58:11 1.83
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.82 2020/07/02 13:01:11 rin Exp $ */ 1/* $NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org> 4 * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions 8 * modification, are permitted provided that the following conditions
9 * are met: 9 * are met:
10 * 1. Redistributions of source code must retain the above copyright 10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer. 11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright 12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the 13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution. 14 * documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@ @@ -17,27 +17,27 @@
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 19 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 24 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE. 26 * POSSIBILITY OF SUCH DAMAGE.
27 */ 27 */
28 28
29#include <sys/cdefs.h> 29#include <sys/cdefs.h>
30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.82 2020/07/02 13:01:11 rin Exp $"); 30__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.83 2020/07/04 16:58:11 rin Exp $");
31 31
32#include "opt_arm_debug.h" 32#include "opt_arm_debug.h"
33#include "opt_ddb.h" 33#include "opt_ddb.h"
34#include "opt_multiprocessor.h" 34#include "opt_multiprocessor.h"
35#include "opt_pmap.h" 35#include "opt_pmap.h"
36#include "opt_uvmhist.h" 36#include "opt_uvmhist.h"
37 37
38#include <sys/param.h> 38#include <sys/param.h>
39#include <sys/types.h> 39#include <sys/types.h>
40#include <sys/kmem.h> 40#include <sys/kmem.h>
41#include <sys/vmem.h> 41#include <sys/vmem.h>
42#include <sys/atomic.h> 42#include <sys/atomic.h>
43#include <sys/asan.h> 43#include <sys/asan.h>
@@ -941,35 +941,35 @@ pmap_icache_sync_range(pmap_t pm, vaddr_ @@ -941,35 +941,35 @@ pmap_icache_sync_range(pmap_t pm, vaddr_
941 * Synchronize caches corresponding to [addr, addr+len) in p. 941 * Synchronize caches corresponding to [addr, addr+len) in p.
942 * 942 *
943 */ 943 */
944void 944void
945pmap_procwr(struct proc *p, vaddr_t sva, int len) 945pmap_procwr(struct proc *p, vaddr_t sva, int len)
946{ 946{
947 947
948 if (__predict_true(p == curproc)) 948 if (__predict_true(p == curproc))
949 cpu_icache_sync_range(sva, len); 949 cpu_icache_sync_range(sva, len);
950 else { 950 else {
951 struct pmap *pm = p->p_vmspace->vm_map.pmap; 951 struct pmap *pm = p->p_vmspace->vm_map.pmap;
952 paddr_t pa; 952 paddr_t pa;
953 vaddr_t va, eva; 953 vaddr_t va, eva;
954 int l; 954 int tlen;
955 955
956 for (va = sva; len > 0; va = eva, len -= l) { 956 for (va = sva; len > 0; va = eva, len -= tlen) {
957 eva = uimin(va + len, trunc_page(va + PAGE_SIZE)); 957 eva = uimin(va + len, trunc_page(va + PAGE_SIZE));
958 l = eva - va; 958 tlen = eva - va;
959 if (!pmap_extract(pm, va, &pa)) 959 if (!pmap_extract(pm, va, &pa))
960 continue; 960 continue;
961 va = AARCH64_PA_TO_KVA(pa); 961 va = AARCH64_PA_TO_KVA(pa);
962 cpu_icache_sync_range(va, l); 962 cpu_icache_sync_range(va, tlen);
963 } 963 }
964 } 964 }
965} 965}
966 966
967static pt_entry_t 967static pt_entry_t
968_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask, 968_pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
969 bool user) 969 bool user)
970{ 970{
971 vm_prot_t masked; 971 vm_prot_t masked;
972 pt_entry_t xn; 972 pt_entry_t xn;
973 973
974 masked = prot & protmask; 974 masked = prot & protmask;
975 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP); 975 pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);