Sun May 30 13:34:21 2021 UTC ()
Keep track of a pmap's PV entries with a list hanging off the pmap.

(thorpej)
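The change itself is mechanical: struct pmap gains a pm_pvents list head (reusing a spare slot), each pv_entry gains a pv_link entry, the list is initialized wherever a pmap is set up, and entries are linked and unlinked on the PV enter and remove paths. A minimal userland sketch of the same BSD-style <sys/queue.h> pattern; the field names pm_pvents and pv_link match the diff, everything else here is invented for illustration:

	#include <sys/queue.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-ins for the kernel's structures. */
	struct demo_pv {
		LIST_ENTRY(demo_pv) pv_link;	/* link on the owning pmap's list */
		unsigned long pv_va;		/* the mapped virtual address */
	};

	struct demo_pmap {
		LIST_HEAD(, demo_pv) pm_pvents;	/* list of this pmap's PV entries */
	};

	int
	main(void)
	{
		struct demo_pmap pm;
		struct demo_pv *pv;

		LIST_INIT(&pm.pm_pvents);	/* done once at pmap creation */

		/* Entering a mapping: link the new PV entry onto its pmap. */
		for (unsigned long va = 0x1000; va <= 0x3000; va += 0x1000) {
			if ((pv = malloc(sizeof(*pv))) == NULL)
				abort();
			pv->pv_va = va;
			LIST_INSERT_HEAD(&pm.pm_pvents, pv, pv_link);
		}

		/* The payoff: every PV entry is reachable from the pmap alone. */
		LIST_FOREACH(pv, &pm.pm_pvents, pv_link)
			printf("pv for va %#lx\n", pv->pv_va);

		/* Removing a mapping: LIST_REMOVE() needs only the entry
		 * itself, never the list head, which suits a removal path
		 * that has just the pv_entry in hand. */
		while ((pv = LIST_FIRST(&pm.pm_pvents)) != NULL) {
			LIST_REMOVE(pv, pv_link);
			free(pv);
		}
		return 0;
	}

LIST (rather than SLIST) is the natural choice here precisely because LIST_REMOVE is O(1) from the entry alone, as exercised above.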
diff -r1.287 -r1.288 src/sys/arch/alpha/alpha/pmap.c
diff -r1.93 -r1.94 src/sys/arch/alpha/include/pmap.h

cvs diff -r1.287 -r1.288 src/sys/arch/alpha/alpha/pmap.c
--- src/sys/arch/alpha/alpha/pmap.c 2021/05/30 06:41:19 1.287
+++ src/sys/arch/alpha/alpha/pmap.c 2021/05/30 13:34:21 1.288
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007, 2008, 2020
@@ -135,7 +135,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.287 2021/05/30 06:41:19 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.288 2021/05/30 13:34:21 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -1384,6 +1384,7 @@
 	 */
 	memset(pmap_kernel(), 0, sizeof(struct pmap));
 	LIST_INIT(&pmap_kernel()->pm_ptpages);
+	LIST_INIT(&pmap_kernel()->pm_pvents);
 	atomic_store_relaxed(&pmap_kernel()->pm_count, 1);
 	/* Kernel pmap does not have per-CPU info. */
 	TAILQ_INSERT_TAIL(&pmap_all_pmaps, pmap_kernel(), pm_list);
@@ -1568,6 +1569,7 @@
 	pmap = pool_cache_get(&pmap_pmap_cache, PR_WAITOK);
 	memset(pmap, 0, sizeof(*pmap));
 	LIST_INIT(&pmap->pm_ptpages);
+	LIST_INIT(&pmap->pm_pvents);
 
 	atomic_store_relaxed(&pmap->pm_count, 1);
 
@@ -3278,6 +3280,7 @@
 	uintptr_t const attrs = md->pvh_listx & PGA_ATTRS;
 	newpv->pv_next = (struct pv_entry *)(md->pvh_listx & ~PGA_ATTRS);
 	md->pvh_listx = (uintptr_t)newpv | attrs;
+	LIST_INSERT_HEAD(&pmap->pm_pvents, newpv, pv_link);
 
 	if (dolock) {
 		mutex_exit(lock);
@@ -3316,8 +3319,15 @@
 
 	KASSERT(pv != NULL);
 
+	/*
+	 * The page attributes are in the lower 2 bits of the first
+	 * PV entry pointer.  Rather than comparing the pointer address
+	 * and branching, we just always preserve what might be there
+	 * (either attribute bits or zero bits).
+	 */
 	*pvp = (pv_entry_t)((uintptr_t)pv->pv_next |
 			    (((uintptr_t)*pvp) & PGA_ATTRS));
+	LIST_REMOVE(pv, pv_link);
 
 	if (dolock) {
 		mutex_exit(lock);

cvs diff -r1.93 -r1.94 src/sys/arch/alpha/include/pmap.h
--- src/sys/arch/alpha/include/pmap.h 2021/05/30 06:41:19 1.93
+++ src/sys/arch/alpha/include/pmap.h 2021/05/30 13:34:21 1.94
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.93 2021/05/30 06:41:19 thorpej Exp $ */
+/* $NetBSD: pmap.h,v 1.94 2021/05/30 13:34:21 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
@@ -151,7 +151,7 @@
 	unsigned int		pm_count;	/* [24] reference count */
 	unsigned int		__pm_spare0;	/* [28] spare field */
 	struct pmap_pagelist	pm_ptpages;	/* [32] list of PT pages */
-	unsigned long		__pm_spare1;	/* [40] spare field */
+	LIST_HEAD(, pv_entry)	pm_pvents;	/* [40] list of PV entries */
 	TAILQ_ENTRY(pmap)	pm_list;	/* [48] list of all pmaps */
 	/* -- COHERENCY_UNIT boundary -- */
 	struct pmap_percpu	pm_percpu[];	/* [64] per-CPU data */
@@ -171,7 +171,8 @@
  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
  */
 typedef struct pv_entry {
-	struct pv_entry	*pv_next;	/* next pv_entry on list */
+	struct pv_entry	*pv_next;	/* next pv_entry on page list */
+	LIST_ENTRY(pv_entry) pv_link;	/* link on owning pmap's list */
 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
 	vaddr_t		pv_va;		/* virtual address for mapping */
 	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
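The pmap.h hunk can repurpose the spare word at offset [40] without disturbing the annotated layout, because a <sys/queue.h> LIST_HEAD is just a struct wrapping a single pointer: on LP64 targets such as alpha it occupies the same 8 bytes as the unsigned long it replaces, so the offsets that follow and the COHERENCY_UNIT boundary at [64] are unchanged. (struct pv_entry itself does grow, by the two pointers of a LIST_ENTRY.) A quick compile-time check of that size assumption, with illustrative type names:

	#include <sys/queue.h>

	struct dummy;			/* incomplete element type is fine */
	LIST_HEAD(demo_head, dummy);	/* struct demo_head { struct dummy *lh_first; } */

	_Static_assert(sizeof(struct demo_head) == sizeof(unsigned long),
	    "LIST_HEAD must occupy the same space as the spare field it replaces");

	int
	main(void)
	{
		return 0;
	}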