Sun May 30 00:34:27 2021 UTC
Define a pmap_pagelist LIST_HEAD and use it where we used ad hoc LIST_HEADs
of vm_page structures.  Define and use a generic routine to free such a list
back to UVM.
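
For illustration, the pattern this enables (queue pages while locks are
held, then hand the whole batch back to UVM in one call) looks roughly
like the sketch below.  pmap_pagelist_free() and the pageq.list linkage
are the real ones from the diff; the surrounding caller is hypothetical:

	struct pmap_pagelist freeq;	/* LIST_HEAD(pmap_pagelist, vm_page) */
	struct vm_page *pg;

	LIST_INIT(&freeq);
	/* ... while locked, defer each dead PT page instead of freeing: */
	LIST_INSERT_HEAD(&freeq, pg, pageq.list);
	/* ... once the locks are dropped, drain the whole batch: */
	pmap_pagelist_free(&freeq);	/* uvm_pagefree() on each page */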

In pmap_remove_internal(), KASSERT that no PT pages are queued up to be
freed when removing mappings from the kernel pmap.
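
Concretely, the pmap_tlb_ptpage_drain() call in the kernel-pmap path
becomes an assertion, since kernel PT pages are never freed and so can
never appear on the TLB context's free queue (shown in context in the
pmap.c hunk below):

	pmap_tlb_shootnow(tlbctx);
	/* kernel PT pages are never freed. */
	KASSERT(LIST_EMPTY(&tlbctx->t_freeptq));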


(thorpej)
diff -r1.281 -r1.282 src/sys/arch/alpha/alpha/pmap.c
diff -r1.88 -r1.89 src/sys/arch/alpha/include/pmap.h

cvs diff -r1.281 -r1.282 src/sys/arch/alpha/alpha/pmap.c

--- src/sys/arch/alpha/alpha/pmap.c 2021/05/29 23:27:22 1.281
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/30 00:34:27 1.282
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.281 2021/05/29 23:27:22 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.282 2021/05/30 00:34:27 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
  * and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@
  *	this module may delay invalidate or reduced protection
  *	operations until such time as they are actually
  *	necessary.  This module is given full information as
  *	to which processors are currently using which maps,
  *	and to when physical maps must be made correct.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_sysv.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.281 2021/05/29 23:27:22 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.282 2021/05/30 00:34:27 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/buf.h>
 #include <sys/evcnt.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
 
 #include <uvm/uvm.h>
@@ -422,26 +422,41 @@ pmap_activation_lock(pmap_t const pmap)
 #define	PMAP_UNLOCK(pmap)	mutex_exit(pmap_pmap_lock(pmap))
 
 #define	PMAP_ACT_LOCK(pmap)	mutex_spin_enter(pmap_activation_lock(pmap))
 #define	PMAP_ACT_TRYLOCK(pmap)	mutex_tryenter(pmap_activation_lock(pmap))
 #define	PMAP_ACT_UNLOCK(pmap)	mutex_spin_exit(pmap_activation_lock(pmap))
 
 #if defined(MULTIPROCESSOR)
 #define	pmap_all_cpus()		cpus_running
 #else
 #define	pmap_all_cpus()		~0UL
 #endif /* MULTIPROCESSOR */
 
 /*
+ * Generic routine for freeing pages on a pmap_pagelist back to
+ * the system.
+ */
+static void
+pmap_pagelist_free(struct pmap_pagelist * const list)
+{
+	struct vm_page *pg;
+
+	while ((pg = LIST_FIRST(list)) != NULL) {
+		LIST_REMOVE(pg, pageq.list);
+		uvm_pagefree(pg);
+	}
+}
+
+/*
  * TLB management.
  *
  * TLB invalidations need to be performed on local and remote CPUs
  * whenever parts of the PTE that the hardware or PALcode understands
  * changes.  In order amortize the cost of these operations, we will
  * queue up to 8 addresses to invalidate in a batch.  Any more than
  * that, and we will hit the entire TLB.
  *
  * Some things that add complexity:
  *
  * ==> ASNs. A CPU may have valid TLB entries for other than the current
  *     address spaace.  We can only invalidate TLB entries for the current
  *     address space, so when asked to invalidate a VA for the non-current
@@ -516,27 +531,27 @@ pmap_activation_lock(pmap_t const pmap)
 #define	TLB_CTX_INC_COUNT(ctx)		(ctx)->t_addrdata[0]++
 #define	TLB_CTX_SET_ALLVA(ctx)		(ctx)->t_addrdata[0] |= TLB_CTX_ALLVA
 
 #define	TLB_CTX_FLAGS(ctx)		((ctx)->t_addrdata[1] & PAGE_MASK)
 #define	TLB_CTX_SET_FLAG(ctx, f)	(ctx)->t_addrdata[1] |= (f)
 
 #define	TLB_CTX_VA(ctx, i)		((ctx)->t_addrdata[(i)] & ~PAGE_MASK)
 #define	TLB_CTX_SETVA(ctx, i, va)					\
 	(ctx)->t_addrdata[(i)] = (va) | ((ctx)->t_addrdata[(i)] & PAGE_MASK)
 
 struct pmap_tlb_context {
 	uintptr_t	t_addrdata[TLB_CTX_MAXVA];
 	pmap_t		t_pmap;
-	LIST_HEAD(, vm_page) t_freeptq;
+	struct pmap_pagelist t_freeptq;
 };
 
 static struct {
 	kmutex_t	lock;
 	struct evcnt	events;
 } tlb_shootdown __cacheline_aligned;
 #define	tlb_lock	tlb_shootdown.lock
 #define	tlb_evcnt	tlb_shootdown.events
 #if defined(MULTIPROCESSOR)
 static const struct pmap_tlb_context *tlb_context __cacheline_aligned;
 static unsigned long tlb_pending __cacheline_aligned;
 #endif /* MULTIPROCESSOR */
@@ -1072,35 +1087,30 @@ pmap_tlb_physpage_free(paddr_t const ptp
 {
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(ptpa);
 
 	KASSERT(pg != NULL);
 
 #ifdef DEBUG
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	KDASSERT(md->pvh_refcnt == 0);
 #endif
 
 	LIST_INSERT_HEAD(&tlbctx->t_freeptq, pg, pageq.list);
 }
 
-static void
+static __inline void
 pmap_tlb_ptpage_drain(struct pmap_tlb_context * const tlbctx)
 {
-	struct vm_page *pg;
-
-	while ((pg = LIST_FIRST(&tlbctx->t_freeptq)) != NULL) {
-		LIST_REMOVE(pg, pageq.list);
-		uvm_pagefree(pg);
-	}
+	pmap_pagelist_free(&tlbctx->t_freeptq);
 }
 
 /*
  * Internal routines
  */
 static void	alpha_protection_init(void);
 static pt_entry_t pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, bool,
 				      pv_entry_t *,
 				      struct pmap_tlb_context *);
 static void	pmap_changebit(struct vm_page *, pt_entry_t, pt_entry_t,
 			       struct pmap_tlb_context *);
 
 /*
@@ -1710,27 +1720,28 @@ pmap_remove_internal(pmap_t pmap, vaddr_
 			l3pte = PMAP_KERNEL_PTE(sva);
 			if (pmap_pte_v(l3pte)) {
 				pte_bits = pmap_remove_mapping(pmap, sva,
 				    l3pte, true, NULL, tlbctx);
 				pmap_tlb_shootdown(pmap, sva, pte_bits,
 				    tlbctx);
 			}
 			sva += PAGE_SIZE;
 		}
 
 		PMAP_MAP_TO_HEAD_UNLOCK();
 		PMAP_UNLOCK(pmap);
 		pmap_tlb_shootnow(tlbctx);
-		pmap_tlb_ptpage_drain(tlbctx);
+		/* kernel PT pages are never freed. */
+		KASSERT(LIST_EMPTY(&tlbctx->t_freeptq));
 		TLB_COUNT(reason_remove_kernel);
 
 		return;
 	}
 
 	pt_entry_t * const lev1map = pmap_lev1map(pmap);
 
 	KASSERT(sva < VM_MAXUSER_ADDRESS);
 	KASSERT(eva <= VM_MAXUSER_ADDRESS);
 	KASSERT(lev1map != kernel_lev1map);
 
 	PMAP_MAP_TO_HEAD_LOCK();
 	PMAP_LOCK(pmap);

cvs diff -r1.88 -r1.89 src/sys/arch/alpha/include/pmap.h

--- src/sys/arch/alpha/include/pmap.h 2021/05/29 23:27:22 1.88
+++ src/sys/arch/alpha/include/pmap.h 2021/05/30 00:34:27 1.89
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.h,v 1.88 2021/05/29 23:27:22 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.89 2021/05/30 00:34:27 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -121,26 +121,29 @@
  * If we ever support processor numbers higher than 63, we'll have to
  * rethink the CPU mask.
  *
  * Note pm_asn and pm_asngen are arrays allocated in pmap_create().
  * Their size is based on the PCS count from the HWRPB, and indexed
  * by processor ID (from `whami').  This is all padded to COHERENCY_UNIT
  * to avoid false sharing.
  *
  * The kernel pmap is a special case; since the kernel uses only ASM
  * mappings and uses a reserved ASN to keep the TLB clean, we don't
  * allocate any ASN info for the kernel pmap at all.
  * arrays which hold enough for ALPHA_MAXPROCS.
  */
+
+LIST_HEAD(pmap_pagelist, vm_page);
+
 struct pmap_percpu {
 	unsigned int		pmc_asn;	/* address space number */
 	unsigned int		pmc_pad0;
 	unsigned long		pmc_asngen;	/* ASN generation number */
 	unsigned int		pmc_needisync;	/* CPU needes isync */
 	unsigned int		pmc_pad1;
 	pt_entry_t		*pmc_lev1map;	/* level 1 map */
 	unsigned long		pmc_padN[(COHERENCY_UNIT / 8) - 4];
 };
 
 struct pmap {	/* pmaps are aligned to COHERENCY_UNIT boundaries */
 	/* pmaps are locked by hashed mutexes */
 	unsigned long		pm_cpus;	/* [ 0] CPUs using pmap */