Thu Apr 18 18:14:22 2024 UTC (21d)
Pull up following revision(s) (requested by skrll in ticket #666):

	sys/arch/arm/arm32/pmap.c: revision 1.443

port-arm/58135: reproducible pmap KASSERT failure for armv7 with NFS root

Don't unconditionally set XN in pmap_clearbit - only set it if a mapping
exists and VM_PROT_EXEC is being cleared.

I've simplified the #ifdefs in the patch from the PR.


(martin)
diff -r1.437.4.3 -r1.437.4.4 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.437.4.3 -r1.437.4.4 src/sys/arch/arm/arm32/pmap.c (expand / switch to unified diff)

--- src/sys/arch/arm/arm32/pmap.c 2023/12/14 17:43:10 1.437.4.3
+++ src/sys/arch/arm/arm32/pmap.c 2024/04/18 18:14:22 1.437.4.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.437.4.3 2023/12/14 17:43:10 martin Exp $ */ 1/* $NetBSD: pmap.c,v 1.437.4.4 2024/04/18 18:14:22 martin Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -183,27 +183,27 @@ @@ -183,27 +183,27 @@
183 183
184#include "opt_arm_debug.h" 184#include "opt_arm_debug.h"
185#include "opt_cpuoptions.h" 185#include "opt_cpuoptions.h"
186#include "opt_ddb.h" 186#include "opt_ddb.h"
187#include "opt_efi.h" 187#include "opt_efi.h"
188#include "opt_lockdebug.h" 188#include "opt_lockdebug.h"
189#include "opt_multiprocessor.h" 189#include "opt_multiprocessor.h"
190 190
191#ifdef MULTIPROCESSOR 191#ifdef MULTIPROCESSOR
192#define _INTR_PRIVATE 192#define _INTR_PRIVATE
193#endif 193#endif
194 194
195#include <sys/cdefs.h> 195#include <sys/cdefs.h>
196__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.437.4.3 2023/12/14 17:43:10 martin Exp $"); 196__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.437.4.4 2024/04/18 18:14:22 martin Exp $");
197 197
198#include <sys/param.h> 198#include <sys/param.h>
199#include <sys/types.h> 199#include <sys/types.h>
200 200
201#include <sys/asan.h> 201#include <sys/asan.h>
202#include <sys/atomic.h> 202#include <sys/atomic.h>
203#include <sys/bus.h> 203#include <sys/bus.h>
204#include <sys/cpu.h> 204#include <sys/cpu.h>
205#include <sys/intr.h> 205#include <sys/intr.h>
206#include <sys/kernel.h> 206#include <sys/kernel.h>
207#include <sys/kernhist.h> 207#include <sys/kernhist.h>
208#include <sys/kmem.h> 208#include <sys/kmem.h>
209#include <sys/pool.h> 209#include <sys/pool.h>
@@ -2320,35 +2320,30 @@ pmap_vac_me_harder(struct vm_page_md *md @@ -2320,35 +2320,30 @@ pmap_vac_me_harder(struct vm_page_md *md
2320 2320
2321/* 2321/*
2322 * Modify pte bits for all ptes corresponding to the given physical address. 2322 * Modify pte bits for all ptes corresponding to the given physical address.
2323 * We use `maskbits' rather than `clearbits' because we're always passing 2323 * We use `maskbits' rather than `clearbits' because we're always passing
2324 * constants and the latter would require an extra inversion at run-time. 2324 * constants and the latter would require an extra inversion at run-time.
2325 */ 2325 */
2326static void 2326static void
2327pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits) 2327pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
2328{ 2328{
2329 struct pv_entry *pv; 2329 struct pv_entry *pv;
2330#ifdef PMAP_CACHE_VIPT 2330#ifdef PMAP_CACHE_VIPT
2331 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs); 2331 const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
2332 bool need_syncicache = false; 2332 bool need_syncicache = false;
2333#ifdef ARM_MMU_EXTENDED 2333#ifndef ARM_MMU_EXTENDED
2334 const u_int execbits = (maskbits & PVF_EXEC) ? L2_XS_XN : 0; 
2335#else 
2336 const u_int execbits = 0; 
2337 bool need_vac_me_harder = false; 2334 bool need_vac_me_harder = false;
2338#endif 2335#endif
2339#else 2336#endif /* PMAP_CACHE_VIPT */
2340 const u_int execbits = 0; 
2341#endif 
2342 2337
2343 UVMHIST_FUNC(__func__); 2338 UVMHIST_FUNC(__func__);
2344 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx", 2339 UVMHIST_CALLARGS(maphist, "md %#jx pa %#jx maskbits %#jx",
2345 (uintptr_t)md, pa, maskbits, 0); 2340 (uintptr_t)md, pa, maskbits, 0);
2346 2341
2347#ifdef PMAP_CACHE_VIPT 2342#ifdef PMAP_CACHE_VIPT
2348 /* 2343 /*
2349 * If we might want to sync the I-cache and we've modified it, 2344 * If we might want to sync the I-cache and we've modified it,
2350 * then we know we definitely need to sync or discard it. 2345 * then we know we definitely need to sync or discard it.
2351 */ 2346 */
2352 if (want_syncicache) { 2347 if (want_syncicache) {
2353 if (md->pvh_attrs & PVF_MOD) { 2348 if (md->pvh_attrs & PVF_MOD) {
2354 need_syncicache = true; 2349 need_syncicache = true;
@@ -2411,29 +2406,34 @@ pmap_clearbit(struct vm_page_md *md, pad @@ -2411,29 +2406,34 @@ pmap_clearbit(struct vm_page_md *md, pad
2411 pmap_destroy(pm); 2406 pmap_destroy(pm);
2412 /* Restart from the beginning. */ 2407 /* Restart from the beginning. */
2413 pmap_acquire_page_lock(md); 2408 pmap_acquire_page_lock(md);
2414 pv = SLIST_FIRST(&md->pvh_list); 2409 pv = SLIST_FIRST(&md->pvh_list);
2415 continue; 2410 continue;
2416 } 2411 }
2417 pv->pv_flags &= ~maskbits; 2412 pv->pv_flags &= ~maskbits;
2418 2413
2419 struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va); 2414 struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
2420 KASSERTMSG(l2b != NULL, "%#lx", va); 2415 KASSERTMSG(l2b != NULL, "%#lx", va);
2421 2416
2422 pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)]; 2417 pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
2423 const pt_entry_t opte = *ptep; 2418 const pt_entry_t opte = *ptep;
2424 pt_entry_t npte = opte | execbits; 2419 pt_entry_t npte = opte;
 2420
 2421#if defined(ARM_MMU_EXTENDED)
 2422 if ((maskbits & PVF_EXEC) != 0 && l2pte_valid_p(opte)) {
 2423 KASSERT((opte & L2_TYPE_S) != 0);
 2424 npte |= L2_XS_XN;
 2425 }
2425 2426
2426#ifdef ARM_MMU_EXTENDED 
2427 KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG)); 2427 KASSERT((opte & L2_XS_nG) == (pm == pmap_kernel() ? 0 : L2_XS_nG));
2428#endif 2428#endif
2429 2429
2430 UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx", 2430 UVMHIST_LOG(maphist, "pv %#jx pm %#jx va %#jx flag %#jx",
2431 (uintptr_t)pv, (uintptr_t)pm, va, oflags); 2431 (uintptr_t)pv, (uintptr_t)pm, va, oflags);
2432 2432
2433 if (maskbits & (PVF_WRITE|PVF_MOD)) { 2433 if (maskbits & (PVF_WRITE|PVF_MOD)) {
2434#ifdef PMAP_CACHE_VIVT 2434#ifdef PMAP_CACHE_VIVT
2435 if ((oflags & PVF_NC)) { 2435 if ((oflags & PVF_NC)) {
2436 /* 2436 /*
2437 * Entry is not cacheable: 2437 * Entry is not cacheable:
2438 * 2438 *
2439 * Don't turn caching on again if this is a 2439 * Don't turn caching on again if this is a