Sat Oct 30 17:20:44 2010 UTC
Use VM_PAGE_TO_MD() to locate struct vm_page_md.  No functional
changes.


(uebayasi)
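
This revision replaces direct pg->mdpage field access with a file-local
VM_PAGE_TO_MD() accessor.  A minimal sketch of the pattern, using only the
macro and field names that appear in the diff below (at this revision the
macro is private to pmap.c, not a shared header definition):

	#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)

	/* Before: callers reached into the page structure directly. */
	mutex_enter(&pg->mdpage.pvh_lock);

	/* After: look up the machine-dependent part once, then use it. */
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
	mutex_enter(&md->pvh_lock);

Keeping every access behind the macro means a later change to where struct
vm_page_md lives only has to touch the macro definition, not each use site.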
diff -r1.76 -r1.77 src/sys/arch/hppa/hppa/pmap.c

--- src/sys/arch/hppa/hppa/pmap.c 2010/06/21 14:43:34 1.76
+++ src/sys/arch/hppa/hppa/pmap.c 2010/10/30 17:20:43 1.77
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.76 2010/06/21 14:43:34 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.77 2010/10/30 17:20:43 uebayasi Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matthew Fredette.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
@@ -55,52 +55,54 @@
  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
  * THE POSSIBILITY OF SUCH DAMAGE.
  */
 /*
  * References:
  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
  * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
  *    Hewlett-Packard, February 1994, Third Edition
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.76 2010/06/21 14:43:34 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.77 2010/10/30 17:20:43 uebayasi Exp $");
 
 #include "opt_cputype.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/malloc.h>
 #include <sys/proc.h>
 
 #include <uvm/uvm.h>
 
 #include <machine/reg.h>
 #include <machine/psl.h>
 #include <machine/cpu.h>
 #include <machine/pmap.h>
 #include <machine/pte.h>
 #include <machine/cpufunc.h>
 #include <machine/iomod.h>
 
 #include <hppa/hppa/hpt.h>
 #include <hppa/hppa/machdep.h>
 
 #if defined(DDB)
 #include <ddb/db_output.h>
 #endif
 
+#define	VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
+
 #ifdef PMAPDEBUG
 
 #define	static	/**/
 #define	inline	/**/
 
 #define	DPRINTF(l,s)	do {		\
 	if ((pmapdebug & (l)) == (l))	\
 		printf s;		\
 } while(0)
 
 #define	PDB_FOLLOW	0x00000001
 #define	PDB_INIT	0x00000002
 #define	PDB_ENTER	0x00000004
@@ -487,47 +489,50 @@ pmap_dump_table(pa_space_t space, vaddr_
 			snprintb(buf, sizeof(buf), TLB_BITS,
 			    TLB_PROT(pte & PAGE_MASK));
 			db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
 			    buf);
 		}
 		va += PAGE_SIZE;
 	} while (va != 0);
 }
 
 void
 pmap_dump_pv(paddr_t pa)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	struct pv_entry *pve;
 
 	pg = PHYS_TO_VM_PAGE(pa);
-	mutex_enter(&pg->mdpage.pvh_lock);
-	db_printf("pg %p attr 0x%08x aliases %d\n", pg, pg->mdpage.pvh_attrs,
-	    pg->mdpage.pvh_aliases);
-	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
+	md = VM_PAGE_TO_MD(pg);
+	mutex_enter(&md->pvh_lock);
+	db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
+	    md->pvh_aliases);
+	for (pve = md->pvh_list; pve; pve = pve->pv_next)
 		db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
 		    pve->pv_va & PV_VAMASK);
-	mutex_exit(&pg->mdpage.pvh_lock);
+	mutex_exit(&md->pvh_lock);
 }
 #endif
 
 int
 pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve;
 	int ret = 0;
 
 	/* check for non-equ aliased mappings */
-	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
 		vaddr_t pva = pve->pv_va & PV_VAMASK;
 
 		pte |= pmap_vp_find(pve->pv_pmap, pva);
 		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
 		    (pte & PTE_PROT(TLB_WRITE))) {
 
 			DPRINTF(PDB_FOLLOW|PDB_ALIAS,
 			    ("%s: aliased writable mapping 0x%x:0x%lx\n",
 			    __func__, pve->pv_pmap->pm_space, pve->pv_va));
 			ret++;
 		}
 	}
 
@@ -556,46 +561,49 @@ pmap_pv_free(struct pv_entry *pv)
 {
 
 	if (pv->pv_ptp)
 		pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK,
 		    pv->pv_ptp);
 
 	pool_put(&pmap_pv_pool, pv);
 }
 
 static inline void
 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
     vaddr_t va, struct vm_page *pdep, u_int flags)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
 	    __func__, pg, pve, pm, va, pdep, flags));
 
-	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+	KASSERT(mutex_owned(&md->pvh_lock));
 
 	pve->pv_pmap = pm;
 	pve->pv_va = va | flags;
 	pve->pv_ptp = pdep;
-	pve->pv_next = pg->mdpage.pvh_list;
-	pg->mdpage.pvh_list = pve;
+	pve->pv_next = md->pvh_list;
+	md->pvh_list = pve;
 }
 
 static inline struct pv_entry *
 pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry **pve, *pv;
 
-	KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+	KASSERT(mutex_owned(&md->pvh_lock));
 
-	for (pv = *(pve = &pg->mdpage.pvh_list);
+	for (pv = *(pve = &md->pvh_list);
 	    pv; pv = *(pve = &(*pve)->pv_next))
 		if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
 			*pve = pv->pv_next;
 			break;
 		}
 	return (pv);
 }
 
 #define	FIRST_16M	atop(16 * 1024 * 1024)
 
 static void
 pmap_page_physload(paddr_t spa, paddr_t epa)
 {
@@ -1099,41 +1107,42 @@ pmap_destroy(pmap_t pmap)
 			continue;
 
 		DPRINTF(PDB_FOLLOW, ("%s(%p): stray ptp "
 		    "0x%lx w/ %d ents:", __func__, pmap, VM_PAGE_TO_PHYS(pg),
 		    pg->wire_count - 1));
 
 		pde = (pt_entry_t *)VM_PAGE_TO_PHYS(pg);
 		epde = (pt_entry_t *)(VM_PAGE_TO_PHYS(pg) + PAGE_SIZE);
 		for (; pde < epde; pde++) {
 			if (*pde == 0)
 				continue;
 
 			sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
-			for (haggis = sheep->mdpage.pvh_list; haggis != NULL; )
+			struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
+			for (haggis = md->pvh_list; haggis != NULL; )
 				if (haggis->pv_pmap == pmap) {
 
 					DPRINTF(PDB_FOLLOW, (" 0x%lx",
 					    haggis->pv_va));
 
 					pmap_remove(pmap,
 					    haggis->pv_va & PV_VAMASK,
 					    haggis->pv_va + PAGE_SIZE);
 
 					/*
 					 * exploit the sacred knowledge of
 					 * lambeous ozzmosis
 					 */
-					haggis = sheep->mdpage.pvh_list;
+					haggis = md->pvh_list;
 				} else
 					haggis = haggis->pv_next;
 		}
 		DPRINTF(PDB_FOLLOW, ("\n"));
 	}
 #endif
 	pmap_sdir_set(pmap->pm_space, 0);
 	mutex_enter(&pmap->pm_lock);
 	pmap_pagefree(pmap->pm_pdir_pg);
 	mutex_exit(&pmap->pm_lock);
 	mutex_destroy(&pmap->pm_lock);
 	pmap->pm_pdir_pg = NULL;
 	pool_put(&pmap_pool, pmap);
@@ -1196,58 +1205,60 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 		pmap_pte_flush(pmap, va, pte);
 		if (wired && !(pte & PTE_PROT(TLB_WIRED)))
 			pmap->pm_stats.wired_count++;
 		else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
 			pmap->pm_stats.wired_count--;
 
 		if (PTE_PAGE(pte) == pa) {
 			DPRINTF(PDB_FOLLOW|PDB_ENTER,
 			    ("%s: same page\n", __func__));
 			goto enter;
 		}
 
 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-		mutex_enter(&pg->mdpage.pvh_lock);
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+		mutex_enter(&md->pvh_lock);
 		pve = pmap_pv_remove(pg, pmap, va);
-		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
-		mutex_exit(&pg->mdpage.pvh_lock);
+		md->pvh_attrs |= pmap_pvh_attrs(pte);
+		mutex_exit(&md->pvh_lock);
 	} else {
 		DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
 		    __func__, va, pa));
 		pte = PTE_PROT(TLB_REFTRAP);
 		pve = NULL;
 		pmap->pm_stats.resident_count++;
 		if (wired)
 			pmap->pm_stats.wired_count++;
 		if (ptp)
 			ptp->wire_count++;
 	}
 
 	if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
 		if (!pve && !(pve = pmap_pv_alloc())) {
 			if (flags & PMAP_CANFAIL) {
-				mutex_exit(&pg->mdpage.pvh_lock);
+				mutex_exit(&md->pvh_lock);
 				PMAP_UNLOCK(pmap);
 				return (ENOMEM);
 			}
 			panic("%s: no pv entries available", __func__);
 		}
 		pte |= PTE_PROT(pmap_prot(pmap, prot));
-		mutex_enter(&pg->mdpage.pvh_lock);
+		mutex_enter(&md->pvh_lock);
 		if (pmap_check_alias(pg, va, pte))
 			pmap_page_remove_locked(pg);
 		pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
-		mutex_exit(&pg->mdpage.pvh_lock);
+		mutex_exit(&md->pvh_lock);
 	} else if (pve) {
 		pmap_pv_free(pve);
 	}
 
 enter:
 	/* preserve old ref & mod */
 	pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
 	    (pte & PTE_PROT(TLB_UNCACHEABLE|TLB_DIRTY|TLB_REFTRAP));
 	if (wired)
 		pte |= PTE_PROT(TLB_WIRED);
 	pmap_pte_set(pde, va, pte);
 
 	PMAP_UNLOCK(pmap);
@@ -1294,33 +1305,34 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 			 * in case of non-complete pde fill
 			 */
 			pmap_pte_flush(pmap, sva, pte);
 			if (pte & PTE_PROT(TLB_WIRED))
 				pmap->pm_stats.wired_count--;
 			pmap->pm_stats.resident_count--;
 
 			/* iff properly accounted pde will be dropped anyway */
 			if (!batch)
 				pmap_pte_set(pde, sva, 0);
 
 			if (pmap_initialized &&
 			    (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
+				struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-				mutex_enter(&pg->mdpage.pvh_lock);
+				mutex_enter(&md->pvh_lock);
 
 				pve = pmap_pv_remove(pg, pmap, sva);
-				pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+				md->pvh_attrs |= pmap_pvh_attrs(pte);
 
-				mutex_exit(&pg->mdpage.pvh_lock);
+				mutex_exit(&md->pvh_lock);
 
 				if (pve != NULL)
 					pmap_pv_free(pve);
 			}
 		}
 	}
 
 	PMAP_UNLOCK(pmap);
 
 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
 }
 
 
@@ -1350,81 +1362,84 @@ pmap_write_protect(pmap_t pmap, vaddr_t
 		}
 		if ((pte = pmap_pte_get(pde, sva))) {
 
 			DPRINTF(PDB_PMAP,
 			    ("%s: va=0x%lx pte=0x%x\n", __func__, sva, pte));
 			/*
 			 * Determine if mapping is changing.
 			 * If not, nothing to do.
 			 */
 			if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot)
 				continue;
 
 			pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-			mutex_enter(&pg->mdpage.pvh_lock);
-			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
-			mutex_exit(&pg->mdpage.pvh_lock);
+			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+			mutex_enter(&md->pvh_lock);
+			md->pvh_attrs |= pmap_pvh_attrs(pte);
+			mutex_exit(&md->pvh_lock);
 
 			pmap_pte_flush(pmap, sva, pte);
 			pte &= ~PTE_PROT(TLB_AR_MASK);
 			pte |= pteprot;
 			pmap_pte_set(pde, sva, pte);
 		}
 	}
 
 	PMAP_UNLOCK(pmap);
 }
 
 void
 pmap_page_remove(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-	mutex_enter(&pg->mdpage.pvh_lock);
+	mutex_enter(&md->pvh_lock);
 	pmap_page_remove_locked(pg);
-	mutex_exit(&pg->mdpage.pvh_lock);
+	mutex_exit(&md->pvh_lock);
 }
 
 void
 pmap_page_remove_locked(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve, *npve, **pvp;
 
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
 
-	if (pg->mdpage.pvh_list == NULL)
+	if (md->pvh_list == NULL)
 		return;
 
-	pvp = &pg->mdpage.pvh_list;
-	for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
+	pvp = &md->pvh_list;
+	for (pve = md->pvh_list; pve; pve = npve) {
 		pmap_t pmap = pve->pv_pmap;
 		vaddr_t va = pve->pv_va & PV_VAMASK;
 		volatile pt_entry_t *pde;
 		pt_entry_t pte;
 
 		PMAP_LOCK(pmap);
 
 		pde = pmap_pde_get(pmap->pm_pdir, va);
 		pte = pmap_pte_get(pde, va);
 
 		npve = pve->pv_next;
 		/*
 		 * If this was an unmanaged mapping, it must be preserved. Move
 		 * it back on the list and advance the end-of-list pointer.
 		 */
 		if (pve->pv_va & PV_KENTER) {
 			*pvp = pve;
 			pvp = &pve->pv_next;
 		} else
-			pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+			md->pvh_attrs |= pmap_pvh_attrs(pte);
 
 		pmap_pte_flush(pmap, va, pte);
 		if (pte & PTE_PROT(TLB_WIRED))
 			pmap->pm_stats.wired_count--;
 		pmap->pm_stats.resident_count--;
 
 		if (!(pve->pv_va & PV_KENTER)) {
 			pmap_pte_set(pde, va, 0);
 			pmap_pv_free(pve);
 		}
 		PMAP_UNLOCK(pmap);
 	}
 	*pvp = NULL;
@@ -1460,99 +1475,101 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 			pte &= ~PTE_PROT(TLB_WIRED);
 			pmap->pm_stats.wired_count--;
 			pmap_pte_set(pde, va, pte);
 		}
 	}
 	PMAP_UNLOCK(pmap);
 
 	DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: leaving\n", __func__));
 }
 
 bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve;
 	int res;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS,
 	    ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
 
 	KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
 	KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
 
-	mutex_enter(&pg->mdpage.pvh_lock);
+	mutex_enter(&md->pvh_lock);
 
 	/* preserve other bits */
-	res = pg->mdpage.pvh_attrs & (set | clear);
-	pg->mdpage.pvh_attrs ^= res;
+	res = md->pvh_attrs & (set | clear);
+	md->pvh_attrs ^= res;
 
-	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
 		pmap_t pmap = pve->pv_pmap;
 		vaddr_t va = pve->pv_va & PV_VAMASK;
 		volatile pt_entry_t *pde;
 		pt_entry_t opte, pte;
 
 		if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
 			opte = pte = pmap_pte_get(pde, va);
 #ifdef PMAPDEBUG
 			if (!pte) {
 				DPRINTF(PDB_FOLLOW|PDB_BITS,
 				    ("%s: zero pte for 0x%lx\n", __func__,
 				    va));
 				continue;
 			}
 #endif
 			pte &= ~clear;
 			pte |= set;
 
 			if (!(pve->pv_va & PV_KENTER)) {
-				pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+				md->pvh_attrs |= pmap_pvh_attrs(pte);
 				res |= pmap_pvh_attrs(opte);
 			}
 
 			if (opte != pte) {
 				pmap_pte_flush(pmap, va, opte);
 				pmap_pte_set(pde, va, pte);
 			}
 		}
 	}
-	mutex_exit(&pg->mdpage.pvh_lock);
+	mutex_exit(&md->pvh_lock);
 
 	return ((res & (clear | set)) != 0);
 }
 
 bool
 pmap_testbit(struct vm_page *pg, u_int bit)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve;
 	pt_entry_t pte;
 	int ret;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
 
-	mutex_enter(&pg->mdpage.pvh_lock);
+	mutex_enter(&md->pvh_lock);
 
-	for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
+	for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
 	    pve = pve->pv_next) {
 		pmap_t pm = pve->pv_pmap;
 
 		pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK);
 		if (pve->pv_va & PV_KENTER)
 			continue;
 
-		pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+		md->pvh_attrs |= pmap_pvh_attrs(pte);
 	}
-	ret = ((pg->mdpage.pvh_attrs & bit) != 0);
-	mutex_exit(&pg->mdpage.pvh_lock);
+	ret = ((md->pvh_attrs & bit) != 0);
+	mutex_exit(&md->pvh_lock);
 
 	return ret;
 }
 
 /*
  * pmap_extract(pmap, va, pap)
  *	fills in the physical address corresponding to the
  *	virtual address specified by pmap and va into the
  *	storage pointed to by pap and returns true if the
  *	virtual address is mapped. returns false in not mapped.
  */
 bool
 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
@@ -1591,32 +1608,33 @@ pmap_activate(struct lwp *l)
 
 	/* space is cached for the copy{in,out}'s pleasure */
 	pcb->pcb_space = space;
 	fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb));
 
 	if (p == curproc)
 		mtctl(pmap->pm_pid, CR_PIDR2);
 }
 
 
 static inline void
 pmap_flush_page(struct vm_page *pg, bool purge)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve;
 
 	DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge));
 
 	/* purge cache for all possible mappings for the pa */
-	for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
+	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
 		vaddr_t va = pve->pv_va & PV_VAMASK;
 		pa_space_t sp = pve->pv_pmap->pm_space;
 
 		if (purge)
 			pdcache(sp, va, PAGE_SIZE);
 		else
 			fdcache(sp, va, PAGE_SIZE);
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
 		ficache(sp, va, PAGE_SIZE);
 		pdtlb(sp, va);
 		pitlb(sp, va);
 #endif
@@ -1624,52 +1642,52 @@ pmap_flush_page(struct vm_page *pg, bool
 }
 
 /*
  * pmap_zero_page(pa)
  *
  * Zeros the specified page.
  */
 void
 pmap_zero_page(paddr_t pa)
 {
 
 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa));
 
-	KASSERT(PHYS_TO_VM_PAGE(pa)->mdpage.pvh_list == NULL);
+	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
 
 	memset((void *)pa, 0, PAGE_SIZE);
 	fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
 
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
 	ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
 	pdtlb(HPPA_SID_KERNEL, pa);
 	pitlb(HPPA_SID_KERNEL, pa);
 #endif
 }
 
 /*
  * pmap_copy_page(src, dst)
  *
  * pmap_copy_page copies the source page to the destination page.
  */
 void
 pmap_copy_page(paddr_t spa, paddr_t dpa)
 {
 	struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa);
 
 	DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa));
 
-	KASSERT(PHYS_TO_VM_PAGE(dpa)->mdpage.pvh_list == NULL);
+	KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
 
 	pmap_flush_page(srcpg, false);
 
 	memcpy((void *)dpa, (void *)spa, PAGE_SIZE);
 
 	pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
 	fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
 	ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
 	ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
 	pdtlb(HPPA_SID_KERNEL, spa);
 	pdtlb(HPPA_SID_KERNEL, dpa);
@@ -1707,44 +1725,46 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	    pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
 	if (pa >= HPPA_IOBEGIN || (flags & PMAP_NOCACHE))
 		pte |= PTE_PROT(TLB_UNCACHEABLE);
 	pmap_kernel()->pm_stats.wired_count++;
 	pmap_kernel()->pm_stats.resident_count++;
 	if (opte)
 		pmap_pte_flush(pmap_kernel(), va, opte);
 
 	if (pmap_initialized) {
 		struct vm_page *pg;
 
 		pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
 		if (pg != NULL) {
+			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
 			KASSERT(pa < HPPA_IOBEGIN);
 
 			struct pv_entry *pve;
 
 			pve = pmap_pv_alloc();
 			if (!pve)
 				panic("%s: no pv entries available",
 				    __func__);
 			DPRINTF(PDB_FOLLOW|PDB_ENTER,
 			    ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
 			    va, pa, pte));
 
-			mutex_enter(&pg->mdpage.pvh_lock);
+			mutex_enter(&md->pvh_lock);
 			if (pmap_check_alias(pg, va, pte))
 				pmap_page_remove_locked(pg);
 			pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
 			    PV_KENTER);
-			mutex_exit(&pg->mdpage.pvh_lock);
+			mutex_exit(&md->pvh_lock);
 		}
 	}
 	pmap_pte_set(pde, va, pte);
 
 	DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__));
 
 #ifdef PMAPDEBUG
 	pmapdebug = opmapdebug;
 #endif /* PMAPDEBUG */
 }
 
 void
 pmap_kremove(vaddr_t va, vsize_t size)
@@ -1793,32 +1813,33 @@ pmap_kremove(vaddr_t va, vsize_t size)
 				continue;
 			}
 		}
 		if (!(pte = pmap_pte_get(pde, va))) {
 			DPRINTF(PDB_FOLLOW|PDB_REMOVE,
 			    ("%s: unmapping unmapped 0x%lx\n", __func__,
 			    va));
 			continue;
 		}
 
 		pmap_pte_flush(pmap, va, pte);
 		pmap_pte_set(pde, va, 0);
 		if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
+			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 
-			mutex_enter(&pg->mdpage.pvh_lock);
+			mutex_enter(&md->pvh_lock);
 
 			pve = pmap_pv_remove(pg, pmap, va);
 
-			mutex_exit(&pg->mdpage.pvh_lock);
+			mutex_exit(&md->pvh_lock);
 			if (pve != NULL)
 				pmap_pv_free(pve);
 		}
 	}
 	DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
 
 #ifdef PMAPDEBUG
 	pmapdebug = opmapdebug;
 #endif /* PMAPDEBUG */
 }
 
 #if defined(USE_HPT)
 #if defined(DDB)