Sat Jan 25 16:19:30 2020 UTC ()
A fix and an optimisation to pmap_l1tt_free
- in the !__HAVE_MM_MD_DIRECT_MAPPED_PHYS case pass UVM_KMF_WIRED so that
  the mappings are removed and the KVA is released.  Fixes the KASSERT
  seen in the automated test runs.
- in the __HAVE_MM_MD_DIRECT_MAPPED_PHYS case we can work out pa much
  easier than calling pmap_extract.


(skrll)
diff -r1.381 -r1.382 src/sys/arch/arm/arm32/pmap.c

cvs diff -r1.381 -r1.382 src/sys/arch/arm/arm32/pmap.c (expand / switch to unified diff)

--- src/sys/arch/arm/arm32/pmap.c 2020/01/19 10:59:56 1.381
+++ src/sys/arch/arm/arm32/pmap.c 2020/01/25 16:19:29 1.382
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.381 2020/01/19 10:59:56 skrll Exp $ */ 1/* $NetBSD: pmap.c,v 1.382 2020/01/25 16:19:29 skrll Exp $ */
2 2
3/* 3/*
4 * Copyright 2003 Wasabi Systems, Inc. 4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -211,27 +211,27 @@ @@ -211,27 +211,27 @@
211#include <sys/bus.h> 211#include <sys/bus.h>
212#include <sys/atomic.h> 212#include <sys/atomic.h>
213#include <sys/kernhist.h> 213#include <sys/kernhist.h>
214 214
215#include <uvm/uvm.h> 215#include <uvm/uvm.h>
216#include <uvm/pmap/pmap_pvt.h> 216#include <uvm/pmap/pmap_pvt.h>
217 217
218#include <arm/locore.h> 218#include <arm/locore.h>
219 219
220#ifdef DDB 220#ifdef DDB
221#include <arm/db_machdep.h> 221#include <arm/db_machdep.h>
222#endif 222#endif
223 223
224__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.381 2020/01/19 10:59:56 skrll Exp $"); 224__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.382 2020/01/25 16:19:29 skrll Exp $");
225 225
226//#define PMAP_DEBUG 226//#define PMAP_DEBUG
227#ifdef PMAP_DEBUG 227#ifdef PMAP_DEBUG
228 228
229/* XXX need to get rid of all refs to this */ 229/* XXX need to get rid of all refs to this */
230int pmap_debug_level = 0; 230int pmap_debug_level = 0;
231 231
232/* 232/*
233 * for switching to potentially finer grained debugging 233 * for switching to potentially finer grained debugging
234 */ 234 */
235#define PDB_FOLLOW 0x0001 235#define PDB_FOLLOW 0x0001
236#define PDB_INIT 0x0002 236#define PDB_INIT 0x0002
237#define PDB_ENTER 0x0004 237#define PDB_ENTER 0x0004
@@ -6617,32 +6617,33 @@ pmap_l1tt_alloc(struct pool *pp, int fla @@ -6617,32 +6617,33 @@ pmap_l1tt_alloc(struct pool *pp, int fla
6617 KASSERT(ok); 6617 KASSERT(ok);
6618 KASSERT(va >= KERNEL_BASE); 6618 KASSERT(va >= KERNEL_BASE);
6619#endif 6619#endif
6620 6620
6621 return (void *)va; 6621 return (void *)va;
6622} 6622}
6623 6623
6624static void 6624static void
6625pmap_l1tt_free(struct pool *pp, void *v) 6625pmap_l1tt_free(struct pool *pp, void *v)
6626{ 6626{
6627 vaddr_t va = (vaddr_t)v; 6627 vaddr_t va = (vaddr_t)v;
6628 6628
6629#if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS) 6629#if !defined( __HAVE_MM_MD_DIRECT_MAPPED_PHYS)
6630 uvm_km_free(kernel_map, va, L1TT_SIZE, 0); 6630 uvm_km_free(kernel_map, va, L1TT_SIZE, UVM_KMF_WIRED);
6631#else 6631#else
6632 paddr_t pa; 6632#if defined(KERNEL_BASE_VOFFSET)
6633 6633 paddr_t pa = va - KERNEL_BASE_VOFFSET;
6634 bool ok = pmap_extract(pmap_kernel(), va, &pa); 6634#else
6635 KASSERT(ok); 6635 paddr_t pa = va - KERNEL_BASE + physical_start;
 6636#endif
6636 const paddr_t epa = pa + L1TT_SIZE; 6637 const paddr_t epa = pa + L1TT_SIZE;
6637 6638
6638 for (; pa < epa; pa += PAGE_SIZE) { 6639 for (; pa < epa; pa += PAGE_SIZE) {
6639 struct vm_page *pg = PHYS_TO_VM_PAGE(pa); 6640 struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
6640 uvm_pagefree(pg); 6641 uvm_pagefree(pg);
6641 } 6642 }
6642#endif 6643#endif
6643} 6644}
6644#endif 6645#endif
6645 6646
6646/* 6647/*
6647 * pmap_postinit() 6648 * pmap_postinit()
6648 * 6649 *