Sun May 30 01:41:45 2021 UTC
Track the PT pages allocated to a pmap in the pmap itself.


(thorpej)
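The idea: every PT page already has a struct vm_page, so each one is now threaded onto a per-pmap list (pm_ptpages) at allocation time, and teardown can walk that list instead of re-deriving pages from PTE physical addresses. Below is a small userland sketch of the same queue(3) LIST pattern; the struct names are simplified stand-ins for the kernel's struct pmap and struct vm_page, and calloc/free stand in for the physpage allocator:

#include <sys/queue.h>	/* LIST_* macros, as used by the kernel */
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's struct vm_page / struct pmap. */
struct vm_page {
	LIST_ENTRY(vm_page) pageq;	/* cf. pg->pageq.list in the diff */
};

LIST_HEAD(pmap_pagelist, vm_page);	/* cf. struct pmap_pagelist */

struct pmap {
	struct pmap_pagelist pm_ptpages;	/* PT pages owned by this pmap */
};

/* Allocation path: hang the new PT page off its owning pmap. */
static struct vm_page *
ptpage_alloc(struct pmap *pmap)
{
	/* calloc() stands in for pmap_physpage_alloc(). */
	struct vm_page *pg = calloc(1, sizeof(*pg));

	if (pg == NULL)
		return NULL;
	LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq);
	return pg;
}

/* Free path: the entry carries its own linkage; no lookup by PA needed. */
static void
ptpage_free(struct vm_page *pg)
{
	LIST_REMOVE(pg, pageq);
	free(pg);
}

int
main(void)
{
	struct pmap pmap;

	LIST_INIT(&pmap.pm_ptpages);	/* cf. LIST_INIT in pmap_create() */
	struct vm_page *pg = ptpage_alloc(&pmap);
	if (pg == NULL)
		return 1;
	ptpage_free(pg);
	printf("list empty again: %d\n", LIST_EMPTY(&pmap.pm_ptpages));
	return 0;
}

LIST_REMOVE() needs only the entry's own back-pointer, not the list head, so unhooking a page in the free path costs a couple of stores; that is what makes per-pmap tracking essentially free here.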
diff -r1.283 -r1.284 src/sys/arch/alpha/alpha/pmap.c
diff -r1.89 -r1.90 src/sys/arch/alpha/include/pmap.h

cvs diff -r1.283 -r1.284 src/sys/arch/alpha/alpha/pmap.c

--- src/sys/arch/alpha/alpha/pmap.c 2021/05/30 01:24:19 1.283
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/30 01:41:45 1.284
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.283 2021/05/30 01:24:19 thorpej Exp $	*/
+/*	$NetBSD: pmap.c,v 1.284 2021/05/30 01:41:45 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
  * and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@
  *	this module may delay invalidate or reduced protection
  *	operations until such time as they are actually
  *	necessary.  This module is given full information as
  *	to which processors are currently using which maps,
  *	and to when physical maps must be made correct.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_sysv.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.283 2021/05/30 01:24:19 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.284 2021/05/30 01:41:45 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/buf.h>
 #include <sys/evcnt.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
 
 #include <uvm/uvm.h>
@@ -1071,63 +1071,48 @@ pmap_tlb_shootnow(const struct pmap_tlb_
 void
 pmap_tlb_shootdown_ipi(struct cpu_info * const ci,
 
     struct trapframe * const tf __unused)
 {
 	KASSERT(tlb_context != NULL);
 	pmap_tlb_invalidate(tlb_context, ci);
 	if (atomic_and_ulong_nv(&tlb_pending, ~(1UL << ci->ci_cpuid)) == 0) {
 		atomic_store_release(&tlb_context, NULL);
 	}
 }
 #endif /* MULTIPROCESSOR */
 
-static void
-pmap_tlb_physpage_free(paddr_t const ptpa,
-    struct pmap_tlb_context * const tlbctx)
-{
-	struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa);
-
-	KASSERT(pg != NULL);
-
-#ifdef DEBUG
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	KDASSERT(md->pvh_refcnt == 0);
-#endif
-
-	LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list);
-}
-
 static __inline void
 pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx)
 {
 	pmap_pagelist_free(&tlbctx->t_freeptq);
 }
 
 /*
  * Internal routines
  */
 static void	alpha_protection_init(void);
 static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool,
 		    pv_entry_t *,
 		    struct pmap_tlb_context *);
 static void	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t,
 		    struct pmap_tlb_context *);
 
 /*
  * PT page management functions.
  */
-static int	pmap_ptpage_alloc(pt_entry_t *, int);
-static void	pmap_ptpage_free(pt_entry_t *, struct pmap_tlb_context *);
+static int	pmap_ptpage_alloc(pmap_t, pt_entry_t *, int);
+static void	pmap_ptpage_free(pmap_t, pt_entry_t *,
+		    struct pmap_tlb_context *);
 static void	pmap_l3pt_delref(pmap_t, vaddr_t, pt_entry_t *,
 		    struct pmap_tlb_context *);
 static void	pmap_l2pt_delref(pmap_t, pt_entry_t *, pt_entry_t *,
 		    struct pmap_tlb_context *);
 static void	pmap_l1pt_delref(pmap_t, pt_entry_t *);
 
 static void	*pmap_l1pt_alloc(struct pool *, int);
 static void	pmap_l1pt_free(struct pool *, void *);
 
 static struct pool_allocator pmap_l1pt_allocator = {
 	pmap_l1pt_alloc, pmap_l1pt_free, 0,
 };
 
@@ -1388,26 +1373,27 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
 	 * This must block any interrupt from which a TLB shootdown
 	 * could be issued, but must NOT block IPIs.
 	 */
 	mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM);
 
 	/*
 	 * Initialize kernel pmap.  Note that all kernel mappings
 	 * have PG_ASM set, so the ASN doesn't really matter for
 	 * the kernel pmap.  Also, since the kernel pmap always
 	 * references kernel_lev1map, it always has an invalid ASN
 	 * generation.
 	 */
 	memset(pmap_kernel(), 0, sizeof(struct pmap));
+	LIST_INIT(&pmap_kernel()->pm_ptpages);
 	atomic_store_relaxed(&pmap_kernel()->pm_count, 1);
 	/* Kernel pmap does not have per-CPU info. */
 	TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
 
 	/*
 	 * Set up lwp0's PCB such that the ptbr points to the right place
 	 * and has the kernel pmap's (really unused) ASN.
 	 */
 	pcb = lwp_getpcb(&lwp0);
 	pcb->pcb_hw.apcb_ptbr =
 	    ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT;
 	pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL;
 
@@ -1571,26 +1557,27 @@ pmap_t
 pmap_create(void)
 {
 	pmap_t pmap;
 	pt_entry_t *lev1map;
 	int i;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
 		printf("pmap_create()\n");
 #endif
 
 	pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
 	memset(pmap, 0, sizeof(*pmap));
+	LIST_INIT(&pmap->pm_ptpages);
 
 	atomic_store_relaxed(&pmap->pm_count, 1);
 
  try_again:
 	rw_enter(&pmap_growkernel_lock, RW_READER);
 
 	lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
 	if (__predict_false(lev1map == NULL)) {
 		rw_exit(&pmap_growkernel_lock);
 		(void) kpause("pmap_create", false, hz >> 2, NULL);
 		goto try_again;
 	}
 
@@ -2095,51 +2082,51 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 
 		KASSERT(va < VM_MAXUSER_ADDRESS);
 		KASSERT(lev1map != kernel_lev1map);
 
 		/*
 		 * Check to see if the level 1 PTE is valid, and
 		 * allocate a new level 2 page table page if it's not.
 		 * A reference will be added to the level 2 table when
 		 * the level 3 table is created.
 		 */
 		l1pte = pmap_l1pte(lev1map, va);
 		if (pmap_pte_v(l1pte) == 0) {
 			pmap_physpage_addref(l1pte);
-			error = pmap_ptpage_alloc(l1pte, PGU_L2PT);
+			error = pmap_ptpage_alloc(pmap, l1pte, PGU_L2PT);
 			if (error) {
 				pmap_l1pt_delref(pmap, l1pte);
 				if (flags & PMAP_CANFAIL)
 					goto out;
 				panic("pmap_enter: unable to create L2 PT "
 				    "page");
 			}
 #ifdef DEBUG
 			if (pmapdebug & PDB_PTPAGE)
 				printf("pmap_enter: new level 2 table at "
 				    "0x%lx\n", pmap_pte_pa(l1pte));
 #endif
 		}
 
 		/*
 		 * Check to see if the level 2 PTE is valid, and
 		 * allocate a new level 3 page table page if it's not.
 		 * A reference will be added to the level 3 table when
 		 * the mapping is validated.
 		 */
 		l2pte = pmap_l2pte(lev1map, va, l1pte);
 		if (pmap_pte_v(l2pte) == 0) {
 			pmap_physpage_addref(l2pte);
-			error = pmap_ptpage_alloc(l2pte, PGU_L3PT);
+			error = pmap_ptpage_alloc(pmap, l2pte, PGU_L3PT);
 			if (error) {
 				/* unlocks pmap */
 				pmap_enter_l2pt_delref(pmap, l1pte, l2pte);
 				if (flags & PMAP_CANFAIL) {
 					PMAP_LOCK(pmap);
 					goto out;
 				}
 				panic("pmap_enter: unable to create L3 PT "
 				    "page");
 			}
 #ifdef DEBUG
 			if (pmapdebug & PDB_PTPAGE)
 				printf("pmap_enter: new level 3 table at "
@@ -3642,70 +3629,81 @@ pmap_l1pt_free(struct pool *pp, void *v)
 
 	pmap_physpage_free(ALPHA_K0SEG_TO_PHYS((vaddr_t) v));
 }
 
 /*
  * pmap_ptpage_alloc:
  *
  *	Allocate a level 2 or level 3 page table page for a user
  *	pmap, and initialize the PTE that references it.
  *
  *	Note: the pmap must already be locked.
  */
 static int
-pmap_ptpage_alloc(pt_entry_t * const pte, int const usage)
+pmap_ptpage_alloc(pmap_t pmap, pt_entry_t * const pte, int const usage)
 {
 	/*
 	 * Allocate the page table page.
 	 */
 	struct vm_page * const pg = pmap_physpage_alloc(usage);
 	if (__predict_false(pg == NULL)) {
 		return ENOMEM;
 	}
 
+	LIST_INSERT_HEAD(&pmap->pm_ptpages, pg, pageq.list);
+
 	/*
 	 * Initialize the referencing PTE.
 	 */
 	const pt_entry_t npte = ((VM_PAGE_TO_PHYS(pg) >> PGSHIFT) << PG_SHIFT) |
 	    PG_V | PG_KRE | PG_KWE | PG_WIRED;
 
 	atomic_store_relaxed(pte, npte);
 
 	return (0);
 }
 
 /*
  * pmap_ptpage_free:
  *
  *	Free the level 2 or level 3 page table page referenced
  *	be the provided PTE.
  *
  *	Note: the pmap must already be locked.
  */
 static void
-pmap_ptpage_free(pt_entry_t * const pte, struct pmap_tlb_context * const tlbctx)
+pmap_ptpage_free(pmap_t pmap, pt_entry_t * const pte,
+    struct pmap_tlb_context * const tlbctx)
 {
 
 	/*
 	 * Extract the physical address of the page from the PTE
 	 * and clear the entry.
 	 */
 	const paddr_t ptpa = pmap_pte_pa(pte);
 	atomic_store_relaxed(pte, PG_NV);
 
+	struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa);
+	KASSERT(pg != NULL);
+
 #ifdef DEBUG
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+	KDASSERT(md->pvh_refcnt == 0);
+
 	pmap_zero_page(ptpa);
 #endif
-	pmap_tlb_physpage_free(ptpa, tlbctx);
+
+	LIST_REMOVE(pg, pageq.list);
+	LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list);
 }
 
 /*
  * pmap_l3pt_delref:
  *
  *	Delete a reference on a level 3 PT page.  If the reference drops
  *	to zero, free it.
  *
  *	Note: the pmap must already be locked.
  */
 static void
 pmap_l3pt_delref(pmap_t pmap, vaddr_t va, pt_entry_t *l3pte,
 	struct pmap_tlb_context * const tlbctx)
@@ -3725,27 +3723,27 @@ pmap_l3pt_delref(pmap_t pmap, vaddr_t va
 		/*
 		 * No more mappings; we can free the level 3 table.
 		 */
 #ifdef DEBUG
 		if (pmapdebug & PDB_PTPAGE)
 			printf("pmap_l3pt_delref: freeing level 3 table at "
 			    "0x%lx\n", pmap_pte_pa(l2pte));
 #endif
 		/*
 		 * You can pass NULL if you know the last refrence won't
 		 * be dropped.
 		 */
 		KASSERT(tlbctx != NULL);
-		pmap_ptpage_free(l2pte, tlbctx);
+		pmap_ptpage_free(pmap, l2pte, tlbctx);
 
 		/*
 		 * We've freed a level 3 table, so we must invalidate
 		 * any now-stale TLB entries for the corresponding VPT
 		 * VA range.  Easiest way to guarantee this is to hit
 		 * all of the user TLB entries.
 		 */
 		pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx);
 
 		/*
 		 * We've freed a level 3 table, so delete the reference
 		 * on the level 2 table.
 		 */
@@ -3776,27 +3774,27 @@ pmap_l2pt_delref(pmap_t pmap, pt_entry_t
 		/*
 		 * No more mappings in this segment; we can free the
 		 * level 2 table.
 		 */
 #ifdef DEBUG
 		if (pmapdebug & PDB_PTPAGE)
 			printf("pmap_l2pt_delref: freeing level 2 table at "
 			    "0x%lx\n", pmap_pte_pa(l1pte));
 #endif
 		/*
 		 * You can pass NULL if you know the last refrence won't
 		 * be dropped.
 		 */
 		KASSERT(tlbctx != NULL);
-		pmap_ptpage_free(l1pte, tlbctx);
+		pmap_ptpage_free(pmap, l1pte, tlbctx);
 
 		/*
 		 * We've freed a level 2 table, so we must invalidate
 		 * any now-stale TLB entries for the corresponding VPT
 		 * VA range.  Easiest way to guarantee this is to hit
 		 * all of the user TLB entries.
 		 */
 		pmap_tlb_shootdown_all_user(pmap, PG_V, tlbctx);
 
 		/*
 		 * We've freed a level 2 table, so delete the reference
 		 * on the level 1 table.

cvs diff -r1.89 -r1.90 src/sys/arch/alpha/include/pmap.h

--- src/sys/arch/alpha/include/pmap.h 2021/05/30 00:34:27 1.89
+++ src/sys/arch/alpha/include/pmap.h 2021/05/30 01:41:45 1.90
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.h,v 1.89 2021/05/30 00:34:27 thorpej Exp $	*/
+/*	$NetBSD: pmap.h,v 1.90 2021/05/30 01:41:45 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -140,28 +140,28 @@ struct pmap_percpu {
 	unsigned long pmc_asngen;	/* ASN generation number */
 	unsigned int pmc_needisync;	/* CPU needes isync */
 	unsigned int pmc_pad1;
 	pt_entry_t *pmc_lev1map;	/* level 1 map */
 	unsigned long pmc_padN[(COHERENCY_UNIT / 8) - 4];
 };
 
 struct pmap {	/* pmaps are aligned to COHERENCY_UNIT boundaries */
 	/* pmaps are locked by hashed mutexes */
 	unsigned long		pm_cpus;	/* [ 0] CPUs using pmap */
 	struct pmap_statistics	pm_stats;	/* [ 8] statistics */
 	unsigned int		pm_count;	/* [24] reference count */
 	unsigned int		__pm_spare0;	/* [28] spare field */
-	unsigned long		__pm_spare1;	/* [32] spare field */
-	unsigned long		__pm_spare2;	/* [40] spare field */
+	struct pmap_pagelist	pm_ptpages;	/* [32] list of PT pages */
+	unsigned long		__pm_spare1;	/* [40] spare field */
 	TAILQ_ENTRY(pmap)	pm_list;	/* [48] list of all pmaps */
 	/* -- COHERENCY_UNIT boundary -- */
 	struct pmap_percpu	pm_percpu[];	/* [64] per-CPU data */
 						/* variable length */
 };
 
 #define	PMAP_SIZEOF(x)							\
 	(ALIGN(offsetof(struct pmap, pm_percpu[(x)])))
 
 #define	PMAP_ASN_KERNEL		0	/* kernel-reserved ASN */
 #define	PMAP_ASN_FIRST_USER	1	/* first user ASN */
 #define	PMAP_ASNGEN_INVALID	0	/* reserved (invalid) ASN generation */
 #define	PMAP_ASNGEN_INITIAL	1	/* first valid generatation */
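On the pmap.h side, pm_ptpages (a one-pointer LIST_HEAD) drops into the 8-byte spare slot at offset [32], so pm_list stays at [48] and pm_percpu[] stays on the COHERENCY_UNIT boundary. Below is a compile-checkable mirror of the bracketed offsets, assuming LP64 and 64-byte cache lines; the member types are placeholders and the flexible pm_percpu[] array is omitted:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */
#include <sys/queue.h>	/* LIST_HEAD, TAILQ_ENTRY */

#define COHERENCY_UNIT	64	/* assumed cache-line size */

struct vm_page;				/* opaque; list linkage only */
LIST_HEAD(pmap_pagelist, vm_page);	/* one pointer: 8 bytes on LP64 */

struct pmap_statistics {		/* stand-in: two longs, 16 bytes */
	long	resident_count;
	long	wired_count;
};

/* Mirror of the fixed-size header of struct pmap shown above. */
struct pmap {
	unsigned long		pm_cpus;	/* [ 0] */
	struct pmap_statistics	pm_stats;	/* [ 8] */
	unsigned int		pm_count;	/* [24] */
	unsigned int		__pm_spare0;	/* [28] */
	struct pmap_pagelist	pm_ptpages;	/* [32] replaces old spare */
	unsigned long		__pm_spare1;	/* [40] */
	TAILQ_ENTRY(pmap)	pm_list;	/* [48] two pointers */
	/* pm_percpu[] flexible array omitted from this sketch */
};

static_assert(offsetof(struct pmap, pm_ptpages) == 32,
    "pm_ptpages occupies the old [32] spare slot");
static_assert(offsetof(struct pmap, pm_list) == 48,
    "pm_list offset unchanged");
static_assert(sizeof(struct pmap) == COHERENCY_UNIT,
    "fixed header fills exactly one cache line");

Because the new member is exactly the size of the spare it replaces, no other offset moves and the per-CPU data that follows keeps its cache-line alignment.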