Mon Jun 1 02:42:24 2020 UTC ()
no need to make the PTE writable to do icache_sync; it is enough for it to be accessible.


(ryo)
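In short: PTE_ICACHE_SYNC_PAGE() no longer has to rewrite the access-permission bits to RW (and temporarily forbid execute) before syncing; setting the Access Flag so the mapping is merely accessible is sufficient for cpu_icache_sync_range(). Condensed from the diff below (all names as in pmap.c):

	/* rev 1.75: temporarily grant RW access, set AF, and mask execute before the icache sync */
	tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
	tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
	tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
	atomic_swap_64((ptep), tpte);

	/* rev 1.76: only set the Access Flag; the page just has to be accessible */
	atomic_swap_64((ptep), (pte) | LX_BLKPAG_AF);

Both versions then invalidate the TLB entry and call cpu_icache_sync_range(); the caller installs the final PTE afterwards.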
cvs diff -r1.75 -r1.76 src/sys/arch/aarch64/aarch64/pmap.c

--- src/sys/arch/aarch64/aarch64/pmap.c 2020/05/15 05:39:15 1.75
+++ src/sys/arch/aarch64/aarch64/pmap.c 2020/06/01 02:42:24 1.76
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.75 2020/05/15 05:39:15 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.76 2020/06/01 02:42:24 ryo Exp $	*/
 
 /*
  * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
@@ -17,27 +17,27 @@
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.75 2020/05/15 05:39:15 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.76 2020/06/01 02:42:24 ryo Exp $");
 
 #include "opt_arm_debug.h"
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
 #include "opt_pmap.h"
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/kmem.h>
 #include <sys/vmem.h>
 #include <sys/atomic.h>
 #include <sys/asan.h>
@@ -161,37 +161,33 @@ PMAP_COUNTER(unwire_failure, "pmap_unwir
 			if ((asid) == 0) \
 				aarch64_tlbi_by_va_ll((va)); \
 			else \
 				aarch64_tlbi_by_asid_va_ll((asid), (va)); \
 		} else { \
 			if ((asid) == 0) \
 				aarch64_tlbi_by_va((va)); \
 			else \
 				aarch64_tlbi_by_asid_va((asid), (va)); \
 		} \
 	} while (0/*CONSTCOND*/)
 
 /*
- * aarch64 require write permission in pte to invalidate instruction cache.
- * changing pte to writable temporarly before cpu_icache_sync_range().
+ * require access permission in pte to invalidate instruction cache.
+ * change the pte to accessible temporarly before cpu_icache_sync_range().
  * this macro modifies PTE (*ptep). need to update PTE after this.
  */
 #define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \
 	do { \
-		pt_entry_t tpte; \
-		tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \
-		tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \
-		tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \
-		atomic_swap_64((ptep), tpte); \
+		atomic_swap_64((ptep), (pte) | LX_BLKPAG_AF); \
 		AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \
 		cpu_icache_sync_range((va), PAGE_SIZE); \
 	} while (0/*CONSTCOND*/)
 
 #define VM_PAGE_TO_PP(pg)	(&(pg)->mdpage.mdpg_pp)
 
 struct pv_entry {
 	LIST_ENTRY(pv_entry) pv_link;
 	struct pmap *pv_pmap;
 	vaddr_t pv_va;
 	paddr_t pv_pa;		/* debug */
 	pt_entry_t *pv_ptep;	/* for fast pte lookup */
 };
@@ -1318,27 +1314,27 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 		}
 
 #ifdef UVMHIST
 		opte = pte;
 #endif
 		executable = l3pte_executable(pte, user);
 		pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);
 
 		if (!executable && (prot & VM_PROT_EXECUTE)) {
 			/* non-exec -> exec */
 			UVMHIST_LOG(pmaphist, "icache_sync: "
 			    "pm=%p, va=%016lx, pte: %016lx -> %016lx",
 			    pm, va, opte, pte);
-			if (!l3pte_writable(pte)) {
+			if (!l3pte_readable(pte)) {
 				PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
 			} else {
 				atomic_swap_64(ptep, pte);
 				AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
 				cpu_icache_sync_range(va, PAGE_SIZE);
 			}
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
 		}
 	}
@@ -1879,27 +1875,27 @@ _pmap_enter(struct pmap *pm, vaddr_t va,
 	if (flags & PMAP_WIRED)
 		attr |= LX_BLKPAG_OS_WIRED;
 #ifdef MULTIPROCESSOR
 	attr |= LX_BLKPAG_SH_IS;
 #endif
 
 	pte = pa | attr;
 
 	if (need_sync_icache) {
 		/* non-exec -> exec */
 		UVMHIST_LOG(pmaphist,
 		    "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx",
 		    pm, va, opte, pte);
-		if (!l3pte_writable(pte)) {
+		if (!l3pte_readable(pte)) {
 			PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only);
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va ,true);
 		} else {
 			atomic_swap_64(ptep, pte);
 			AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
 			cpu_icache_sync_range(va, PAGE_SIZE);
 		}
 	} else {
 		atomic_swap_64(ptep, pte);
 		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
 	}
 
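Correspondingly, both call sites (pmap_protect() and _pmap_enter()) now test readability rather than writability before deciding whether the temporary mapping is needed. The common pattern, condensed from the two hunks above (ll here stands for the last argument, which is true or l3only depending on the call site):

	if (!l3pte_readable(pte)) {
		/* not yet accessible: sync via a temporary AF-set mapping, then install pte */
		PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll);
		atomic_swap_64(ptep, pte);
		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, ll);
	} else {
		/* already accessible: install pte first, then sync the icache directly */
		atomic_swap_64(ptep, pte);
		AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, ll);
		cpu_icache_sync_range(va, PAGE_SIZE);
	}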