Sun May 30 13:34:21 2021 UTC
Keep track of a pmap's PV entries with a list hanging off the pmap.


(thorpej)
diff -r1.287 -r1.288 src/sys/arch/alpha/alpha/pmap.c
diff -r1.93 -r1.94 src/sys/arch/alpha/include/pmap.h
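
The change is small but structural: every pv_entry now sits on two lists at once, the existing per-page chain through pv_next and a new LIST hanging off the owning pmap (pm_pvents, linked through pv_link), so a pmap can visit its own mappings directly instead of searching per-page chains. Below is a minimal userland sketch of that pattern using the <sys/queue.h> LIST macros and the field names from the diff; the toy struct members and the main() driver are illustrative stand-ins, not the kernel code.

    /*
     * Minimal sketch: each pv_entry is on two lists at once -- the
     * per-page chain (pv_next) and, new in this commit, a LIST hanging
     * off the owning pmap (pm_pvents / pv_link).  Toy stand-ins for
     * the real structs in pmap.h.
     */
    #include <sys/queue.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pv_entry {
            struct pv_entry *pv_next;          /* per-page chain */
            LIST_ENTRY(pv_entry) pv_link;      /* owning pmap's list */
            unsigned long pv_va;               /* mapped VA */
    };

    struct pmap {
            LIST_HEAD(, pv_entry) pm_pvents;   /* all PV entries of this pmap */
    };

    int
    main(void)
    {
            struct pmap pm;
            struct pv_entry *pv;

            LIST_INIT(&pm.pm_pvents);

            for (unsigned long va = 0x1000; va <= 0x3000; va += 0x1000) {
                    pv = calloc(1, sizeof(*pv));
                    pv->pv_va = va;
                    LIST_INSERT_HEAD(&pm.pm_pvents, pv, pv_link);
            }

            /* The pmap can enumerate its mappings without a page walk. */
            LIST_FOREACH(pv, &pm.pm_pvents, pv_link)
                    printf("pv at va 0x%lx\n", pv->pv_va);

            return 0;
    }

Insertion and removal are O(1): a LIST_ENTRY carries a back-pointer to the previous link, which is what lets pmap_pv_remove() below unlink an entry from the pmap-side list without any search.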

cvs diff -r1.287 -r1.288 src/sys/arch/alpha/alpha/pmap.c

--- src/sys/arch/alpha/alpha/pmap.c 2021/05/30 06:41:19 1.287
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/30 13:34:21 1.288
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
  *	The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center, by Andrew Doran and Mindaugas Rasiukevicius,
  * and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -125,27 +125,27 @@
  *	this module may delay invalidate or reduced protection
  *	operations until such time as they are actually
  *	necessary.  This module is given full information as
  *	to which processors are currently using which maps,
  *	and to when physical maps must be made correct.
  */
 
 #include "opt_lockdebug.h"
 #include "opt_sysv.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/proc.h>
 #include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/buf.h>
 #include <sys/evcnt.h>
 #include <sys/atomic.h>
 #include <sys/cpu.h>
 
 #include <uvm/uvm.h>
@@ -1374,26 +1374,27 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
 	 * could be issued, but must NOT block IPIs.
 	 */
 	mutex_init(&tlb_lock, MUTEX_SPIN, IPL_VM);
 
 	/*
 	 * Initialize kernel pmap.  Note that all kernel mappings
 	 * have PG_ASM set, so the ASN doesn't really matter for
 	 * the kernel pmap.  Also, since the kernel pmap always
 	 * references kernel_lev1map, it always has an invalid ASN
 	 * generation.
 	 */
 	memset(pmap_kernel(), 0, sizeof(struct pmap));
 	LIST_INIT(&pmap_kernel()->pm_ptpages);
+	LIST_INIT(&pmap_kernel()->pm_pvents);
 	atomic_store_relaxed(&pmap_kernel()->pm_count, 1);
 	/* Kernel pmap does not have per-CPU info. */
 	TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
 
 	/*
 	 * Set up lwp0's PCB such that the ptbr points to the right place
 	 * and has the kernel pmap's (really unused) ASN.
 	 */
 	pcb = lwp_getpcb(&lwp0);
 	pcb->pcb_hw.apcb_ptbr =
 	    ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT;
 	pcb->pcb_hw.apcb_asn = PMAP_ASN_KERNEL;
 
@@ -1558,26 +1559,27 @@ pmap_create(void)
 {
 	pmap_t pmap;
 	pt_entry_t *lev1map;
 	int i;
 
 #ifdef DEBUG
 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
 		printf("pmap_create()\n");
 #endif
 
 	pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
 	memset(pmap, 0, sizeof(*pmap));
 	LIST_INIT(&pmap->pm_ptpages);
+	LIST_INIT(&pmap->pm_pvents);
 
 	atomic_store_relaxed(&pmap->pm_count, 1);
 
  try_again:
 	rw_enter(&pmap_growkernel_lock, RW_READER);
 
 	lev1map = pool_cache_get(&pmap_l1pt_cache, PR_NOWAIT);
 	if (__predict_false(lev1map == NULL)) {
 		rw_exit(&pmap_growkernel_lock);
 		(void) kpause("pmap_create", false, hz >> 2, NULL);
 		goto try_again;
 	}
 
@@ -3268,26 +3270,27 @@ pmap_pv_enter(pmap_t pmap, struct vm_pag
 			printf("pmap = %p, va = 0x%lx\n", pmap, va);
 			panic("pmap_pv_enter: already in pv table");
 		}
 		}
 	}
 #endif
 
 	/*
 	 * ...and put it in the list.
 	 */
 	uintptr_t const attrs = md->pvh_listx & PGA_ATTRS;
 	newpv->pv_next = (struct pv_entry *)(md->pvh_listx & ~PGA_ATTRS);
 	md->pvh_listx = (uintptr_t)newpv | attrs;
+	LIST_INSERT_HEAD(&pmap->pm_pvents, newpv, pv_link);
 
 	if (dolock) {
 		mutex_exit(lock);
 	}
 
 	return 0;
 }
 
 /*
  * pmap_pv_remove:
  *
  *	Remove a physical->virtual entry from the pv_table.
  */
@@ -3306,28 +3309,35 @@ pmap_pv_remove(pmap_t pmap, struct vm_pa
 		lock = NULL;	/* XXX stupid gcc */
 	}
 
 	/*
 	 * Find the entry to remove.
 	 */
 	for (pvp = (struct pv_entry **)&md->pvh_listx, pv = VM_MDPAGE_PVS(pg);
 	     pv != NULL; pvp = &pv->pv_next, pv = *pvp)
 		if (pmap == pv->pv_pmap && va == pv->pv_va)
 			break;
 
 	KASSERT(pv != NULL);
 
+	/*
+	 * The page attributes are in the lower 2 bits of the first
+	 * PV entry pointer.  Rather than comparing the pointer address
+	 * and branching, we just always preserve what might be there
+	 * (either attribute bits or zero bits).
+	 */
 	*pvp = (pv_entry_t)((uintptr_t)pv->pv_next |
 	    (((uintptr_t)*pvp) & PGA_ATTRS));
+	LIST_REMOVE(pv, pv_link);
 
 	if (dolock) {
 		mutex_exit(lock);
 	}
 
 	if (opvp != NULL)
 		*opvp = pv;
 	else
 		pmap_pv_free(pv);
 }
 
 /*
  * pmap_pv_page_alloc:
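
The new comment block in pmap_pv_remove() documents the subtle part: the per-page list head (pvh_listx) doubles as storage for the page's attribute bits, so the unlink rewrites *pvp with the successor pointer while unconditionally preserving whatever PGA_ATTRS bits the slot held. Here is a hedged userland sketch of that pointer-tagging idea; only the PGA_* values and the masking expression mirror the diff, while the structs and the malloc driver are invented for illustration.

    /*
     * Illustrative sketch (not kernel code): because pv_entry pointers
     * are at least 4-byte aligned, the low 2 bits of the list head can
     * carry the page's MODIFIED/REFERENCED attributes, and an unlink
     * simply preserves whatever low bits sit in the slot it rewrites.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PGA_MODIFIED   0x01UL
    #define PGA_REFERENCED 0x02UL
    #define PGA_ATTRS      (PGA_MODIFIED | PGA_REFERENCED)

    struct pv_entry {
            struct pv_entry *pv_next;
    };

    int
    main(void)
    {
            struct pv_entry *a = malloc(sizeof(*a));
            struct pv_entry *b = malloc(sizeof(*b));

            /* Head slot holds the first entry plus attribute bits: a -> b. */
            b->pv_next = NULL;
            a->pv_next = b;
            uintptr_t listx = (uintptr_t)a | PGA_MODIFIED;

            /* Unlink 'a' from the head slot, preserving the low bits. */
            uintptr_t *pvp = &listx;
            struct pv_entry *pv = (struct pv_entry *)(*pvp & ~PGA_ATTRS);
            *pvp = (uintptr_t)pv->pv_next | (*pvp & PGA_ATTRS);

            printf("head now %p, attrs 0x%lx\n",
                (void *)(listx & ~PGA_ATTRS), listx & PGA_ATTRS);
            free(a);
            free(b);
            return 0;
    }

In the kernel code the same expression works whether *pvp is the tagged head slot in the page metadata or an untagged pv_next field further down the chain, which is why it masks unconditionally instead of branching.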

cvs diff -r1.93 -r1.94 src/sys/arch/alpha/include/pmap.h

--- src/sys/arch/alpha/include/pmap.h 2021/05/30 06:41:19 1.93
+++ src/sys/arch/alpha/include/pmap.h 2021/05/30 13:34:21 1.94
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.h,v 1.93 2021/05/30 06:41:19 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.94 2021/05/30 13:34:21 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
  * NASA Ames Research Center and by Chris G. Demetriou.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
@@ -141,47 +141,48 @@ struct pmap_percpu {
 	unsigned int pmc_needisync;	/* CPU needes isync */
 	unsigned int pmc_pad1;
 	pt_entry_t *pmc_lev1map;	/* level 1 map */
 	unsigned long pmc_padN[(COHERENCY_UNIT / 8) - 4];
 };
 
 struct pmap {	/* pmaps are aligned to COHERENCY_UNIT boundaries */
 	/* pmaps are locked by hashed mutexes */
 	unsigned long		pm_cpus;	/* [ 0] CPUs using pmap */
 	struct pmap_statistics	pm_stats;	/* [ 8] statistics */
 	unsigned int		pm_count;	/* [24] reference count */
 	unsigned int		__pm_spare0;	/* [28] spare field */
 	struct pmap_pagelist	pm_ptpages;	/* [32] list of PT pages */
-	unsigned long		__pm_spare1;	/* [40] spare field */
+	LIST_HEAD(, pv_entry)	pm_pvents;	/* [40] list of PV entries */
 	TAILQ_ENTRY(pmap)	pm_list;	/* [48] list of all pmaps */
 	/* -- COHERENCY_UNIT boundary -- */
 	struct pmap_percpu	pm_percpu[];	/* [64] per-CPU data */
 	/* variable length */
 };
 
 #define PMAP_SIZEOF(x)							\
 	(ALIGN(offsetof(struct pmap, pm_percpu[(x)])))
 
 #define PMAP_ASN_KERNEL		0	/* kernel-reserved ASN */
 #define PMAP_ASN_FIRST_USER	1	/* first user ASN */
 #define PMAP_ASNGEN_INVALID	0	/* reserved (invalid) ASN generation */
 #define PMAP_ASNGEN_INITIAL	1	/* first valid generatation */
 
 /*
  * For each struct vm_page, there is a list of all currently valid virtual
  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
  */
 typedef struct pv_entry {
-	struct pv_entry	*pv_next;	/* next pv_entry on list */
+	struct pv_entry	*pv_next;	/* next pv_entry on page list */
+	LIST_ENTRY(pv_entry) pv_link;	/* link on owning pmap's list */
 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
 	vaddr_t		pv_va;		/* virtual address for mapping */
 	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
 } *pv_entry_t;
 
 /* attrs in pvh_listx */
 #define PGA_MODIFIED		0x01UL	/* modified */
 #define PGA_REFERENCED		0x02UL	/* referenced */
 #define PGA_ATTRS		(PGA_MODIFIED | PGA_REFERENCED)
 
 /* pvh_usage */
 #define PGU_NORMAL		0	/* free or normal use */
 #define PGU_PVENT		1	/* PV entries */
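
One nicety visible in the header hunk: the new pm_pvents list head is the same size as the __pm_spare1 field it replaces, so none of the bracketed byte offsets move and pm_percpu[] stays on its COHERENCY_UNIT (64-byte) boundary. The following is a small compile-time sketch of that layout invariant, assuming an LP64 target and using toy stand-ins for the types defined elsewhere (pmap_statistics, the PT page list, the per-CPU array element).

    /*
     * Sketch checking the bracketed offsets from the struct comments
     * at compile time on LP64.  Stand-in types, not the real header.
     */
    #include <sys/queue.h>
    #include <stddef.h>
    #include <assert.h>

    struct pmap_statistics { long resident_count, wired_count; };
    struct pv_entry;
    struct vm_page;

    struct pmap {
            unsigned long           pm_cpus;        /* [ 0] */
            struct pmap_statistics  pm_stats;       /* [ 8] */
            unsigned int            pm_count;       /* [24] */
            unsigned int            __pm_spare0;    /* [28] */
            LIST_HEAD(, vm_page)    pm_ptpages;     /* [32] */
            LIST_HEAD(, pv_entry)   pm_pvents;      /* [40] was __pm_spare1 */
            TAILQ_ENTRY(pmap)       pm_list;        /* [48] */
            char                    pm_percpu[];    /* [64] */
    };

    static_assert(offsetof(struct pmap, pm_pvents) == 40, "pm_pvents moved");
    static_assert(offsetof(struct pmap, pm_list) == 48, "pm_list moved");
    static_assert(offsetof(struct pmap, pm_percpu) == 64, "boundary broken");

    int main(void) { return 0; }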