| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 | | 4 | * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020 |
5 | * The NetBSD Foundation, Inc. | | 5 | * The NetBSD Foundation, Inc. |
6 | * All rights reserved. | | 6 | * All rights reserved. |
7 | * | | 7 | * |
8 | * This code is derived from software contributed to The NetBSD Foundation | | 8 | * This code is derived from software contributed to The NetBSD Foundation |
9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, | | 9 | * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, |
10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, | | 10 | * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius, |
11 | * and by Chris G. Demetriou. | | 11 | * and by Chris G. Demetriou. |
12 | * | | 12 | * |
13 | * Redistribution and use in source and binary forms, with or without | | 13 | * Redistribution and use in source and binary forms, with or without |
14 | * modification, are permitted provided that the following conditions | | 14 | * modification, are permitted provided that the following conditions |
| @@ -125,27 +125,27 @@ | | | @@ -125,27 +125,27 @@ |
125 | * this module may delay invalidation or protection-reduction | | 125 | * this module may delay invalidation or protection-reduction |
126 | * operations until such time as they are actually | | 126 | * operations until such time as they are actually |
127 | * necessary. This module is given full information as | | 127 | * necessary. This module is given full information as |
128 | * to which processors are currently using which maps, | | 128 | * to which processors are currently using which maps, |
129 | * and to when physical maps must be made correct. | | 129 | * and to when physical maps must be made correct. |
130 | */ | | 130 | */ |
131 | | | 131 | |
132 | #include "opt_lockdebug.h" | | 132 | #include "opt_lockdebug.h" |
133 | #include "opt_sysv.h" | | 133 | #include "opt_sysv.h" |
134 | #include "opt_multiprocessor.h" | | 134 | #include "opt_multiprocessor.h" |
135 | | | 135 | |
136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ | | 136 | #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ |
137 | | | 137 | |
138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.278 2021/05/24 03:43:24 thorpej Exp $"); | | 138 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.279 2021/05/29 21:54:50 thorpej Exp $"); |
139 | | | 139 | |
140 | #include <sys/param.h> | | 140 | #include <sys/param.h> |
141 | #include <sys/systm.h> | | 141 | #include <sys/systm.h> |
142 | #include <sys/kernel.h> | | 142 | #include <sys/kernel.h> |
143 | #include <sys/proc.h> | | 143 | #include <sys/proc.h> |
144 | #include <sys/malloc.h> | | 144 | #include <sys/malloc.h> |
145 | #include <sys/pool.h> | | 145 | #include <sys/pool.h> |
146 | #include <sys/buf.h> | | 146 | #include <sys/buf.h> |
147 | #include <sys/evcnt.h> | | 147 | #include <sys/evcnt.h> |
148 | #include <sys/atomic.h> | | 148 | #include <sys/atomic.h> |
149 | #include <sys/cpu.h> | | 149 | #include <sys/cpu.h> |
150 | | | 150 | |
151 | #include <uvm/uvm.h> | | 151 | #include <uvm/uvm.h> |
| @@ -255,29 +255,29 @@ int pmap_pv_lowat __read_mostly = PMAP_ | | | @@ -255,29 +255,29 @@ int pmap_pv_lowat __read_mostly = PMAP_ |
255 | * List of all pmaps, used to update them when e.g. additional kernel | | 255 | * List of all pmaps, used to update them when e.g. additional kernel |
256 | * page tables are allocated. This list is kept LRU-ordered by | | 256 | * page tables are allocated. This list is kept LRU-ordered by |
257 | * pmap_activate(). | | 257 | * pmap_activate(). |
258 | */ | | 258 | */ |
259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; | | 259 | static TAILQ_HEAD(, pmap) pmap_all_pmaps __cacheline_aligned; |
260 | | | 260 | |
261 | /* | | 261 | /* |
262 | * The pools from which pmap structures and sub-structures are allocated. | | 262 | * The pools from which pmap structures and sub-structures are allocated. |
263 | */ | | 263 | */ |
264 | static struct pool_cache pmap_pmap_cache __read_mostly; | | 264 | static struct pool_cache pmap_pmap_cache __read_mostly; |
265 | static struct pool_cache pmap_l1pt_cache __read_mostly; | | 265 | static struct pool_cache pmap_l1pt_cache __read_mostly; |
266 | static struct pool_cache pmap_pv_cache __read_mostly; | | 266 | static struct pool_cache pmap_pv_cache __read_mostly; |
267 | | | 267 | |
268 | CTASSERT(offsetof(struct pmap, pm_asni[0]) == COHERENCY_UNIT); | | 268 | CTASSERT(offsetof(struct pmap, pm_percpu[0]) == COHERENCY_UNIT); |
269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); | | 269 | CTASSERT(PMAP_SIZEOF(ALPHA_MAXPROCS) < ALPHA_PGBYTES); |
270 | CTASSERT(sizeof(struct pmap_asn_info) == COHERENCY_UNIT); | | 270 | CTASSERT(sizeof(struct pmap_percpu) == COHERENCY_UNIT); |
271 | | | 271 | |
272 | /* | | 272 | /* |
273 | * Address Space Numbers. | | 273 | * Address Space Numbers. |
274 | * | | 274 | * |
275 | * On many implementations of the Alpha architecture, the TLB entries and | | 275 | * On many implementations of the Alpha architecture, the TLB entries and |
276 | * I-cache blocks are tagged with a unique number within an implementation- | | 276 | * I-cache blocks are tagged with a unique number within an implementation- |
277 | * specified range. When a process context becomes active, the ASN is used | | 277 | * specified range. When a process context becomes active, the ASN is used |
278 | * to match TLB entries; if a TLB entry for a particular VA does not match | | 278 | * to match TLB entries; if a TLB entry for a particular VA does not match |
279 | * the current ASN, it is ignored (one could think of the processor as | | 279 | * the current ASN, it is ignored (one could think of the processor as |
280 | * having a collection of <max ASN> separate TLBs). This allows operating | | 280 | * having a collection of <max ASN> separate TLBs). This allows operating |
281 | * system software to skip the TLB flush that would otherwise be necessary | | 281 | * system software to skip the TLB flush that would otherwise be necessary |
282 | * at context switch time. | | 282 | * at context switch time. |
283 | * | | 283 | * |
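
The comment above describes the hardware match rule in prose. As an illustrative aside, a software model of that rule might look like the sketch below; every name in it is invented for the example, and on real hardware this comparison is performed by the TLB itself, not by kernel code.

/*
 * Illustrative model of the TLB match rule described above.
 * All names are invented for this sketch: "global" stands in for a
 * PG_ASM-style address-space-match entry, which ignores the ASN;
 * every other entry must match both the VA and the current ASN.
 */
struct tlb_entry_model {
	unsigned long	va;		/* virtual address tag */
	unsigned int	asn;		/* ASN the entry was loaded under */
	int		global;		/* non-zero for PG_ASM-style entries */
};

static int
tlb_entry_matches(const struct tlb_entry_model *te, unsigned long va,
    unsigned int current_asn)
{
	if (te->va != va)
		return 0;
	return te->global || te->asn == current_asn;
}
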
| @@ -870,27 +870,27 @@ pmap_tlb_invalidate(const struct pmap_tl | | | @@ -870,27 +870,27 @@ pmap_tlb_invalidate(const struct pmap_tl |
870 | /* | | 870 | /* |
871 | * For CPUs that don't implement ASNs, the SWPCTX call | | 871 | * For CPUs that don't implement ASNs, the SWPCTX call |
872 | * does all of the TLB invalidation work for us. | | 872 | * does all of the TLB invalidation work for us. |
873 | */ | | 873 | */ |
874 | if (__predict_false(pmap_max_asn == 0)) { | | 874 | if (__predict_false(pmap_max_asn == 0)) { |
875 | return; | | 875 | return; |
876 | } | | 876 | } |
877 | | | 877 | |
878 | /* | | 878 | /* |
879 | * We cannot directly invalidate the TLB in this case, | | 879 | * We cannot directly invalidate the TLB in this case, |
880 | * so force allocation of a new ASN when the pmap becomes | | 880 | * so force allocation of a new ASN when the pmap becomes |
881 | * active again. | | 881 | * active again. |
882 | */ | | 882 | */ |
883 | pmap->pm_asni[ci->ci_cpuid].pma_asngen = PMAP_ASNGEN_INVALID; | | 883 | pmap->pm_percpu[ci->ci_cpuid].pmc_asngen = PMAP_ASNGEN_INVALID; |
884 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); | | 884 | atomic_and_ulong(&pmap->pm_cpus, ~cpu_mask); |
885 | | | 885 | |
886 | /* | | 886 | /* |
887 | * This isn't strictly necessary; when we allocate a | | 887 | * This isn't strictly necessary; when we allocate a |
888 | * new ASN, we're going to clear this bit and skip | | 888 | * new ASN, we're going to clear this bit and skip |
889 | * syncing the I-stream. But we will keep this bit | | 889 | * syncing the I-stream. But we will keep this bit |
890 | * of accounting for internal consistency. | | 890 | * of accounting for internal consistency. |
891 | */ | | 891 | */ |
892 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { | | 892 | if (TLB_CTX_FLAGS(tlbctx) & TLB_CTX_F_IMB) { |
893 | atomic_or_ulong(&pmap->pm_needisync, cpu_mask); | | 893 | atomic_or_ulong(&pmap->pm_needisync, cpu_mask); |
894 | } | | 894 | } |
895 | return; | | 895 | return; |
896 | } | | 896 | } |
| @@ -1570,28 +1570,28 @@ pmap_create(void) | | | @@ -1570,28 +1570,28 @@ pmap_create(void) |
1570 | #endif | | 1570 | #endif |
1571 | | | 1571 | |
1572 | pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK); | | 1572 | pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK); |
1573 | memset(pmap, 0, sizeof(*pmap)); | | 1573 | memset(pmap, 0, sizeof(*pmap)); |
1574 | | | 1574 | |
1575 | atomic_store_relaxed(&pmap->pm_count, 1); | | 1575 | atomic_store_relaxed(&pmap->pm_count, 1); |
1576 | | | 1576 | |
1577 | /* | | 1577 | /* |
1578 | * There are only kernel mappings at this point; give the pmap | | 1578 | * There are only kernel mappings at this point; give the pmap |
1579 | * the kernel ASN. This will be initialized to correct values | | 1579 | * the kernel ASN. This will be initialized to correct values |
1580 | * when the pmap is activated. | | 1580 | * when the pmap is activated. |
1581 | */ | | 1581 | */ |
1582 | for (i = 0; i < pmap_ncpuids; i++) { | | 1582 | for (i = 0; i < pmap_ncpuids; i++) { |
1583 | pmap->pm_asni[i].pma_asn = PMAP_ASN_KERNEL; | | 1583 | pmap->pm_percpu[i].pmc_asn = PMAP_ASN_KERNEL; |
1584 | pmap->pm_asni[i].pma_asngen = PMAP_ASNGEN_INVALID; | | 1584 | pmap->pm_percpu[i].pmc_asngen = PMAP_ASNGEN_INVALID; |
1585 | } | | 1585 | } |
1586 | | | 1586 | |
1587 | try_again: | | 1587 | try_again: |
1588 | rw_enter(&pmap_growkernel_lock, RW_READER); | | 1588 | rw_enter(&pmap_growkernel_lock, RW_READER); |
1589 | | | 1589 | |
1590 | pmap->pm_lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT); | | 1590 | pmap->pm_lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT); |
1591 | if (__predict_false(pmap->pm_lev1map == NULL)) { | | 1591 | if (__predict_false(pmap->pm_lev1map == NULL)) { |
1592 | rw_exit(&pmap_growkernel_lock); | | 1592 | rw_exit(&pmap_growkernel_lock); |
1593 | (void) kpause("pmap_create", false, hz >> 2, NULL); | | 1593 | (void) kpause("pmap_create", false, hz >> 2, NULL); |
1594 | goto try_again; | | 1594 | goto try_again; |
1595 | } | | 1595 | } |
1596 | | | 1596 | |
1597 | mutex_enter(&pmap_all_pmaps_lock); | | 1597 | mutex_enter(&pmap_all_pmaps_lock); |
| @@ -3804,44 +3804,44 @@ pmap_asn_alloc(pmap_t const pmap, struct | | | @@ -3804,44 +3804,44 @@ pmap_asn_alloc(pmap_t const pmap, struct |
3804 | #ifdef DEBUG | | 3804 | #ifdef DEBUG |
3805 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) | | 3805 | if (pmapdebug & (PDB_FOLLOW|PDB_ASN)) |
3806 | printf("pmap_asn_alloc(%p)\n", pmap); | | 3806 | printf("pmap_asn_alloc(%p)\n", pmap); |
3807 | #endif | | 3807 | #endif |
3808 | | | 3808 | |
3809 | KASSERT(pmap != pmap_kernel()); | | 3809 | KASSERT(pmap != pmap_kernel()); |
3810 | KASSERT(pmap->pm_lev1map != kernel_lev1map); | | 3810 | KASSERT(pmap->pm_lev1map != kernel_lev1map); |
3811 | KASSERT(kpreempt_disabled()); | | 3811 | KASSERT(kpreempt_disabled()); |
3812 | | | 3812 | |
3813 | /* No work to do if the CPU does not implement ASNs. */ | | 3813 | /* No work to do if the CPU does not implement ASNs. */ |
3814 | if (pmap_max_asn == 0) | | 3814 | if (pmap_max_asn == 0) |
3815 | return 0; | | 3815 | return 0; |
3816 | | | 3816 | |
3817 | struct pmap_asn_info * const pma = &pmap->pm_asni[ci->ci_cpuid]; | | 3817 | struct pmap_percpu * const pmc = &pmap->pm_percpu[ci->ci_cpuid]; |
3818 | | | 3818 | |
3819 | /* | | 3819 | /* |
3820 | * Hopefully, we can continue using the one we have... | | 3820 | * Hopefully, we can continue using the one we have... |
3821 | * | | 3821 | * |
3822 | * N.B. the generation check will fail the first time | | 3822 | * N.B. the generation check will fail the first time |
3823 | * any pmap is activated on a given CPU, because we start | | 3823 | * any pmap is activated on a given CPU, because we start |
3824 | * the generation counter at 1, but initialize pmaps with | | 3824 | * the generation counter at 1, but initialize pmaps with |
3825 | * 0; this forces the first ASN allocation to occur. | | 3825 | * 0; this forces the first ASN allocation to occur. |
3826 | */ | | 3826 | */ |
3827 | if (pma->pma_asngen == ci->ci_asn_gen) { | | 3827 | if (pmc->pmc_asngen == ci->ci_asn_gen) { |
3828 | #ifdef DEBUG | | 3828 | #ifdef DEBUG |
3829 | if (pmapdebug & PDB_ASN) | | 3829 | if (pmapdebug & PDB_ASN) |
3830 | printf("pmap_asn_alloc: same generation, keeping %u\n", | | 3830 | printf("pmap_asn_alloc: same generation, keeping %u\n", |
3831 | pma->pma_asn); | | 3831 | pmc->pmc_asn); |
3832 | #endif | | 3832 | #endif |
3833 | TLB_COUNT(asn_reuse); | | 3833 | TLB_COUNT(asn_reuse); |
3834 | return pma->pma_asn; | | 3834 | return pmc->pmc_asn; |
3835 | } | | 3835 | } |
3836 | | | 3836 | |
3837 | /* | | 3837 | /* |
3838 | * Need to assign a new ASN. Grab the next one, incrementing | | 3838 | * Need to assign a new ASN. Grab the next one, incrementing |
3839 | * the generation number if we have to. | | 3839 | * the generation number if we have to. |
3840 | */ | | 3840 | */ |
3841 | if (ci->ci_next_asn > pmap_max_asn) { | | 3841 | if (ci->ci_next_asn > pmap_max_asn) { |
3842 | /* | | 3842 | /* |
3843 | * Invalidate all non-PG_ASM TLB entries and the | | 3843 | * Invalidate all non-PG_ASM TLB entries and the |
3844 | * I-cache, and bump the generation number. | | 3844 | * I-cache, and bump the generation number. |
3845 | */ | | 3845 | */ |
3846 | ALPHA_TBIAP(); | | 3846 | ALPHA_TBIAP(); |
3847 | alpha_pal_imb(); | | 3847 | alpha_pal_imb(); |
| @@ -3864,30 +3864,30 @@ pmap_asn_alloc(pmap_t const pmap, struct | | | @@ -3864,30 +3864,30 @@ pmap_asn_alloc(pmap_t const pmap, struct |
3864 | * So, we don't bother. | | 3864 | * So, we don't bother. |
3865 | */ | | 3865 | */ |
3866 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); | | 3866 | KASSERT(ci->ci_asn_gen != PMAP_ASNGEN_INVALID); |
3867 | #ifdef DEBUG | | 3867 | #ifdef DEBUG |
3868 | if (pmapdebug & PDB_ASN) | | 3868 | if (pmapdebug & PDB_ASN) |
3869 | printf("pmap_asn_alloc: generation bumped to %lu\n", | | 3869 | printf("pmap_asn_alloc: generation bumped to %lu\n", |
3870 | ci->ci_asn_gen); | | 3870 | ci->ci_asn_gen); |
3871 | #endif | | 3871 | #endif |
3872 | } | | 3872 | } |
3873 | | | 3873 | |
3874 | /* | | 3874 | /* |
3875 | * Assign the new ASN and validate the generation number. | | 3875 | * Assign the new ASN and validate the generation number. |
3876 | */ | | 3876 | */ |
3877 | pma->pma_asn = ci->ci_next_asn++; | | 3877 | pmc->pmc_asn = ci->ci_next_asn++; |
3878 | pma->pma_asngen = ci->ci_asn_gen; | | 3878 | pmc->pmc_asngen = ci->ci_asn_gen; |
3879 | TLB_COUNT(asn_assign); | | 3879 | TLB_COUNT(asn_assign); |
3880 | | | 3880 | |
3881 | /* | | 3881 | /* |
3882 | * We have a new ASN, so we can skip any pending I-stream sync | | 3882 | * We have a new ASN, so we can skip any pending I-stream sync |
3883 | * on the way back out to user space. | | 3883 | * on the way back out to user space. |
3884 | */ | | 3884 | */ |
3885 | atomic_and_ulong(&pmap->pm_needisync, ~(1UL << ci->ci_cpuid)); | | 3885 | atomic_and_ulong(&pmap->pm_needisync, ~(1UL << ci->ci_cpuid)); |
3886 | | | 3886 | |
3887 | #ifdef DEBUG | | 3887 | #ifdef DEBUG |
3888 | if (pmapdebug & PDB_ASN) | | 3888 | if (pmapdebug & PDB_ASN) |
3889 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", | | 3889 | printf("pmap_asn_alloc: assigning %u to pmap %p\n", |
3890 | pma->pma_asn, pmap); | | 3890 | pmc->pmc_asn, pmap); |
3891 | #endif | | 3891 | #endif |
3892 | return pma->pma_asn; | | 3892 | return pmc->pmc_asn; |
3893 | } | | 3893 | } |
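
To make the per-CPU bookkeeping that this revision renames (struct pmap_asn_info / pm_asni / pma_* becoming struct pmap_percpu / pm_percpu / pmc_*) easier to follow in isolation, here is a minimal sketch of the ASN lifecycle that pmap_asn_alloc() implements. Everything below is invented for illustration: the type and function names are placeholders, the invalidate hook stands in for ALPHA_TBIAP()/alpha_pal_imb(), and the generation-wraparound and I-stream-sync handling visible in the real function are omitted.

/*
 * A minimal, freestanding sketch of the ASN allocation scheme used by
 * pmap_asn_alloc() above.  All names are invented for illustration.
 */
struct asn_state {		/* per-pmap, per-CPU (cf. struct pmap_percpu) */
	unsigned int	asn;
	unsigned long	asngen;
};

struct cpu_asn_state {		/* per-CPU (cf. struct cpu_info) */
	unsigned int	next_asn;
	unsigned long	asn_gen;
	unsigned int	max_asn;
};

static unsigned int
asn_alloc_sketch(struct asn_state *as, struct cpu_asn_state *cpu,
    void (*invalidate_user_tlb_and_icache)(void))
{
	/* Same generation: the previously assigned ASN is still valid. */
	if (as->asngen == cpu->asn_gen)
		return as->asn;

	/* Out of ASNs: flush non-global entries and start a new generation. */
	if (cpu->next_asn > cpu->max_asn) {
		(*invalidate_user_tlb_and_icache)();
		cpu->next_asn = 1;	/* ASN 0 stays reserved for the kernel */
		cpu->asn_gen++;
	}

	as->asn = cpu->next_asn++;
	as->asngen = cpu->asn_gen;
	return as->asn;
}

As with pmap_asn_alloc(), a caller would be expected to run this with preemption disabled, once per activation of the pmap on the CPU.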