Sat May 29 21:54:51 2021 UTC
Rename pmap_asn_info to pmap_percpu, and pmap::pm_asni to pmap::pm_percpu.
No functional change.


(thorpej)
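For reference, the rename is purely mechanical; the old and new identifiers map one-to-one (see the pmap.h diff below), and the layout and COHERENCY_UNIT padding are unchanged:

	struct pmap_asn_info   ->  struct pmap_percpu
	pma_asn                ->  pmc_asn
	pma_pad0               ->  pmc_pad0
	pma_asngen             ->  pmc_asngen
	pma_padN               ->  pmc_padN
	pmap::pm_asni[]        ->  pmap::pm_percpu[]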
diff -r1.278 -r1.279 src/sys/arch/alpha/alpha/pmap.c
diff -r1.85 -r1.86 src/sys/arch/alpha/include/pmap.h

cvs diff -r1.278 -r1.279 src/sys/arch/alpha/alpha/pmap.c

--- src/sys/arch/alpha/alpha/pmap.c 2021/05/24 03:43:24 1.278
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/29 21:54:50 1.279
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
  * and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@
  * this module may delay invalidate or reduced protection
  * operations until such time as they are actually
  * necessary.  This module is given full information as
  * to which processors are currently using which maps,
  * and to when physical maps must be made correct.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_sysv.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/buf.h>
 #include <sys/evcnt.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
 
 #include <uvm/uvm.h>
@@ -255,29 +255,29 @@ int pmap_pv_lowat __read_mostly = PMAP_
  * List of all pmaps, used to update them when e.g. additional kernel
  * page tables are allocated.  This list is kept LRU-ordered by
  * pmap_activate().
  */
 static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned;
 
 /*
  * The pools from which pmap structures and sub-structures are allocated.
  */
 static struct pool_cache pmap_pmap_cache __read_mostly;
 static struct pool_cache pmap_l1pt_cache __read_mostly;
 static struct pool_cache pmap_pv_cache __read_mostly;
 
-CTASSERT(offsetof(struct pmap, pm_asni[0]) == COHERENCY_UNIT);
+CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT);
 CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES);
-CTASSERT(sizeof(struct pmap_asn_info) == COHERENCY_UNIT);
+CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT);
 
 /*
  * Address Space Numbers.
  *
  * On many implementations of the Alpha architecture, the TLB entries and
  * I-cache blocks are tagged with a unique number within an implementation-
  * specified range.  When a process context becomes active, the ASN is used
  * to match TLB entries; if a TLB entry for a particular VA does not match
  * the current ASN, it is ignored (one could think of the processor as
  * having a collection of <max ASN> separate TLBs).  This allows operating
  * system software to skip the TLB flush that would otherwise be necessary
  * at context switch time.
  *
@@ -870,27 +870,27 @@ pmap_tlb_invalidate(const struct pmap_tl
 	/*
 	 * For CPUs that don't implement ASNs, the SWPCTX call
	 * does all of the TLB invalidation work for us.
	 */
	if (__predict_false(pmap_max_asn == 0)) {
		return;
	}
 
	/*
	 * We cannot directly invalidate the TLB in this case,
	 * so force allocation of a new ASN when the pmap becomes
	 * active again.
	 */
-	pmap->pm_asni[ci->ci_cpuid].pma_asngen = PMAP_ASNGEN_INVALID;
+	pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID;
	atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask);
 
	/*
	 * This isn't strictly necessary; when we allocate a
	 * new ASN, we're going to clear this bit and skip
	 * syncing the I-stream.  But we will keep this bit
	 * of accounting for internal consistency.
	 */
	if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) {
		atomic_or_ulong(&pmap->pm_needisync, cpu_mask);
	}
	return;
	}
@@ -1570,28 +1570,28 @@ pmap_create(void)
 #endif
 
	pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
	memset(pmap, 0, sizeof(*pmap));
 
	atomic_store_relaxed(&pmap->pm_count, 1);
 
	/*
	 * There are only kernel mappings at this point; give the pmap
	 * the kernel ASN.  This will be initialized to correct values
	 * when the pmap is activated.
	 */
	for (i = 0; i < pmap_ncpuids; i++) {
-		pmap->pm_asni[i].pma_asn = PMAP_ASN_KERNEL;
-		pmap->pm_asni[i].pma_asngen = PMAP_ASNGEN_INVALID;
+		pmap->pm_percpu[i].pmc_asn = PMAP_ASN_KERNEL;
+		pmap->pm_percpu[i].pmc_asngen = PMAP_ASNGEN_INVALID;
	}
 
  try_again:
	rw_enter(&pmap_growkernel_lock, RW_READER);
 
	pmap->pm_lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
	if (__predict_false(pmap->pm_lev1map == NULL)) {
		rw_exit(&pmap_growkernel_lock);
		(void) kpause("pmap_create", false, hz >> 2, NULL);
		goto try_again;
	}
 
	mutex_enter(&pmap_all_pmaps_lock);
@@ -3804,44 +3804,44 @@ pmap_asn_alloc(pmap_t const pmap, struct
 #ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ASN))
		printf("pmap_asn_alloc(%p)\n", pmap);
 #endif
 
	KASSERT(pmap != pmap_kernel());
	KASSERT(pmap->pm_lev1map != kernel_lev1map);
	KASSERT(kpreempt_disabled());
 
	/* No work to do if the the CPU does not implement ASNs. */
	if (pmap_max_asn == 0)
		return 0;
 
-	struct pmap_asn_info * const pma = &pmap->pm_asni[ci->ci_cpuid];
+	struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid];
 
	/*
	 * Hopefully, we can continue using the one we have...
	 *
	 * N.B. the generation check will fail the first time
	 * any pmap is activated on a given CPU, because we start
	 * the generation counter at 1, but initialize pmaps with
	 * 0; this forces the first ASN allocation to occur.
	 */
-	if (pma->pma_asngen == ci->ci_asn_gen) {
+	if (pmc->pmc_asngen == ci->ci_asn_gen) {
 #ifdef DEBUG
		if (pmapdebug & PDB_ASN)
			printf("pmap_asn_alloc: same generation, keeping %u\n",
-			    pma->pma_asn);
+			    pmc->pmc_asn);
 #endif
		TLB_COUNT(asn_reuse);
-		return pma->pma_asn;
+		return pmc->pmc_asn;
	}
 
	/*
	 * Need to assign a new ASN.  Grab the next one, incrementing
	 * the generation number if we have to.
	 */
	if (ci->ci_next_asn > pmap_max_asn) {
		/*
		 * Invalidate all non-PG_ASM TLB entries and the
		 * I-cache, and bump the generation number.
		 */
		ALPHA_TBIAP();
		alpha_pal_imb();
@@ -3864,30 +3864,30 @@ pmap_asn_alloc(pmap_t const pmap, struct
	 * So, we don't bother.
	 */
	KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID);
 #ifdef DEBUG
	if (pmapdebug & PDB_ASN)
		printf("pmap_asn_alloc: generation bumped to %lu\n",
		    ci->ci_asn_ge);
 #endif
	}
 
	/*
	 * Assign the new ASN and validate the generation number.
	 */
-	pma->pma_asn = ci->ci_next_asn++;
-	pma->pma_asngen = ci->ci_asn_gen;
+	pmc->pmc_asn = ci->ci_next_asn++;
+	pmc->pmc_asngen = ci->ci_asn_gen;
	TLB_COUNT(asn_assign);
 
	/*
	 * We have a new ASN, so we can skip any pending I-stream sync
	 * on the way back out to user space.
	 */
	atomic_and_ulong(&pmap->pm_needisync, ~(1UL << ci->ci_cpuid));
 
 #ifdef DEBUG
	if (pmapdebug & PDB_ASN)
		printf("pmap_asn_alloc: assigning %u to pmap %p\n",
-		    pma->pma_asn, pmap);
+		    pmc->pmc_asn, pmap);
 #endif
-	return pma->pma_asn;
+	return pmc->pmc_asn;
 }

cvs diff -r1.85 -r1.86 src/sys/arch/alpha/include/pmap.h

--- src/sys/arch/alpha/include/pmap.h 2021/05/24 03:43:24 1.85
+++ src/sys/arch/alpha/include/pmap.h 2021/05/29 21:54:51 1.86
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.h,v 1.85 2021/05/24 03:43:24 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.86 2021/05/29 21:54:51 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -121,49 +121,49 @@
  * If we ever support processor numbers higher than 63, we'll have to
  * rethink the CPU mask.
  *
  * Note pm_asn and pm_asngen are arrays allocated in pmap_create().
  * Their size is based on the PCS count from the HWRPB, and indexed
  * by processor ID (from `whami').  This is all padded to COHERENCY_UNIT
  * to avoid false sharing.
  *
  * The kernel pmap is a special case; since the kernel uses only ASM
  * mappings and uses a reserved ASN to keep the TLB clean, we don't
  * allocate any ASN info for the kernel pmap at all.
  * arrays which hold enough for ALPHA_MAXPROCS.
  */
-struct pmap_asn_info {
-	unsigned int		pma_asn;	/* address space number */
-	unsigned int		pma_pad0;
-	unsigned long		pma_asngen;	/* ASN generation number */
-	unsigned long		pma_padN[(COHERENCY_UNIT / 8) - 2];
+struct pmap_percpu {
+	unsigned int		pmc_asn;	/* address space number */
+	unsigned int		pmc_pad0;
+	unsigned long		pmc_asngen;	/* ASN generation number */
+	unsigned long		pmc_padN[(COHERENCY_UNIT / 8) - 2];
 };
 
 struct pmap {	/* pmaps are aligned to COHERENCY_UNIT boundaries */
	/* pmaps are locked by hashed mutexes */
	pt_entry_t		*pm_lev1map;	/* [ 0] level 1 map */
	unsigned long		pm_cpus;	/* [ 8] CPUs using pmap */
	unsigned long		pm_needisync;	/* [16] CPUs needing isync */
	struct pmap_statistics	pm_stats;	/* [32] statistics */
	unsigned int		pm_count;	/* [40] reference count */
	unsigned int		__pm_spare;	/* [44] spare field */
	TAILQ_ENTRY(pmap)	pm_list;	/* [48] list of all pmaps */
	/* -- COHERENCY_UNIT boundary -- */
-	struct pmap_asn_info	pm_asni[];	/* [64] ASN information */
+	struct pmap_percpu	pm_percpu[];	/* [64] per-CPU data */
						/* variable length */
 };
 
 #define	PMAP_SIZEOF(x)							\
-	(ALIGN(offsetof(struct pmap, pm_asni[(x)])))
+	(ALIGN(offsetof(struct pmap, pm_percpu[(x)])))
 
 #define	PMAP_ASN_KERNEL		0	/* kernel-reserved ASN */
 #define	PMAP_ASN_FIRST_USER	1	/* first user ASN */
 #define	PMAP_ASNGEN_INVALID	0	/* reserved (invalid) ASN generation */
 #define	PMAP_ASNGEN_INITIAL	1	/* first valid generatation */
 
 /*
  * For each struct vm_page, there is a list of all currently valid virtual
  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
  */
 typedef struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry on list */
	struct pmap *pv_pmap;		/* pmap where mapping lies */
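As a closing note on the layout: the CTASSERTs in the pmap.c hunk above pin this arrangement down: pm_percpu[] must begin exactly one COHERENCY_UNIT into struct pmap and each struct pmap_percpu must occupy exactly one COHERENCY_UNIT, so every CPU's ASN state sits on its own cache line, and PMAP_SIZEOF() sizes the variable-length allocation from the CPU count. A small standalone illustration of that arithmetic (not kernel code; the COHERENCY_UNIT value and the stand-in fixed part are assumptions for the example, and the ALIGN() wrapper is omitted):

#include <stddef.h>
#include <stdio.h>

#define COHERENCY_UNIT	64	/* assumed cache-line size for this example */

struct pmap_percpu {		/* same layout as in pmap.h above */
	unsigned int	pmc_asn;
	unsigned int	pmc_pad0;
	unsigned long	pmc_asngen;
	unsigned long	pmc_padN[(COHERENCY_UNIT / 8) - 2];
};

struct pmap_sketch {		/* stand-in for struct pmap */
	char	pm_fixed[COHERENCY_UNIT];	/* fixed members fill the first cache line */
	struct pmap_percpu pm_percpu[];		/* one entry per CPU, variable length */
};

/* Same idea as PMAP_SIZEOF(), minus the ALIGN() wrapper. */
#define PMAP_SIZEOF_SKETCH(x)	(offsetof(struct pmap_sketch, pm_percpu[(x)]))

int
main(void)
{
	/* On an LP64 target: 64 fixed bytes + 4 CPUs * 64 bytes = 320. */
	printf("sizeof(struct pmap_percpu) = %zu\n", sizeof(struct pmap_percpu));
	printf("PMAP_SIZEOF_SKETCH(4)      = %zu\n", PMAP_SIZEOF_SKETCH(4));
	return 0;
}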