Sat Oct 30 18:30:55 2010 UTC
Use VM_PAGE_TO_MD() to locate struct vm_page_md.  No functional
changes.


(uebayasi)
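The change is purely mechanical: every direct reference to pg->mdpage now goes
through an accessor macro.  A minimal sketch of the pattern follows, with the
struct layout abbreviated to the fields actually visible in this diff (the
motivation given at the end is the usual rationale for such accessors, not
something spelled out in the commit message):

    /* MI page descriptor; each port embeds its machine-dependent
     * per-page data as the mdpage member.  Other fields elided;
     * illustrative only. */
    struct vm_page {
            /* ... MI fields ... */
            struct vm_page_md mdpage;   /* MD data: pv list head, etc. */
    };

    /* The accessor introduced by this commit: */
    #define VM_PAGE_TO_MD(pg)   (&(pg)->mdpage)

    /* Converted call sites cache the result once per block: */
    struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
    pv = &md->mdpg_pvh;                 /* was: &pg->mdpage.mdpg_pvh */

Routing every lookup through one macro means struct vm_page_md could later be
located somewhere other than inside struct vm_page by changing a single
definition instead of every call site.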
cvs diff -r1.265 -r1.266 src/sys/arch/sparc64/sparc64/pmap.c

--- src/sys/arch/sparc64/sparc64/pmap.c 2010/07/29 10:54:50 1.265
+++ src/sys/arch/sparc64/sparc64/pmap.c 2010/10/30 18:30:55 1.266
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.265 2010/07/29 10:54:50 hannken Exp $	*/
+/*	$NetBSD: pmap.c,v 1.266 2010/10/30 18:30:55 uebayasi Exp $	*/
 /*
  *
  * Copyright (C) 1996-1999 Eduardo Horvath.
  * All rights reserved.
  *
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
@@ -16,27 +16,27 @@
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */

 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.265 2010/07/29 10:54:50 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.266 2010/10/30 18:30:55 uebayasi Exp $");

 #undef	NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define	HWREF

 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"

 #include <sys/param.h>
 #include <sys/malloc.h>
 #include <sys/queue.h>
 #include <sys/systm.h>
 #include <sys/msgbuf.h>
 #include <sys/pool.h>
@@ -63,26 +63,28 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.2
 #ifdef DDB
 #include <machine/db_machdep.h>
 #include <ddb/db_command.h>
 #include <ddb/db_sym.h>
 #include <ddb/db_variables.h>
 #include <ddb/db_extern.h>
 #include <ddb/db_access.h>
 #include <ddb/db_output.h>
 #else
 #define Debugger()
 #define db_printf	printf
 #endif

+#define VM_PAGE_TO_MD(pg)	(&(pg)->mdpage)
+
 #define	MEG		(1<<20) /* 1MB */
 #define	KB		(1<<10)	/* 1KB */

 paddr_t cpu0paddr;/* contigious phys memory preallocated for cpus */

 /* These routines are in assembly to allow access thru physical mappings */
 extern int64_t pseg_get_real(struct pmap *, vaddr_t);
 extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);

 /*
  * Diatribe on ref/mod counting:
  *
  * First of all, ref/mod info must be non-volatile.  Hence we need to keep it
@@ -1437,27 +1439,27 @@ pmap_destroy(struct pmap *pm)
 #else
 	if (pmap_ctx(pm)) {
 		mutex_enter(&curcpu()->ci_ctx_lock);
 		ctx_free(pm, curcpu());
 		mutex_exit(&curcpu()->ci_ctx_lock);
 	}
 #endif

 	/* we could be a little smarter and leave pages zeroed */
 	for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
 		KASSERT((pg->flags & PG_MARKER) == 0);
 		nextpg = TAILQ_NEXT(pg, listq.queue);
 		TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
-		KASSERT(pg->mdpage.mdpg_pvh.pv_pmap == NULL);
+		KASSERT(VM_PAGE_TO_MD(pg)->mdpg_pvh.pv_pmap == NULL);
 		dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
 		uvm_pagefree(pg);
 	}
 	pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
 	UVM_OBJ_DESTROY(&pm->pm_obj);
 	pool_cache_put(&pmap_cache, pm);
 }

 /*
  * Copy the range specified by src_addr/len
  * from the source map to the range dst_addr/len
  * in the destination map.
  *
@@ -1704,27 +1706,29 @@ pmap_enter(struct pmap *pm, vaddr_t va,
 		if (opa != pa) {
 			opg = PHYS_TO_VM_PAGE(opa);
 			if (opg != NULL) {
 				npv = pmap_remove_pv(pm, va, opg);
 			}
 		}
 	}

 	/*
 	 * Construct the TTE.
 	 */
 	pg = PHYS_TO_VM_PAGE(pa);
 	if (pg) {
-		pvh = &pg->mdpage.mdpg_pvh;
+		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+		pvh = &md->mdpg_pvh;
 		uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
 #ifdef DIAGNOSTIC
 		if ((flags & VM_PROT_ALL) & ~prot)
 			panic("pmap_enter: access_type exceeds prot");
 #endif
 		/*
 		 * If we don't have the traphandler do it,
 		 * set the ref/mod bits now.
 		 */
 		if (flags & VM_PROT_ALL)
 			pvh->pv_va |= PV_REF;
 		if (flags & VM_PROT_WRITE)
 			pvh->pv_va |= PV_MOD;
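Note the shape of the converted call sites in the hunk above: the macro result
is cached in a block-scoped "struct vm_page_md * const md", so the lookup is
written once and the pointer cannot be reseated within the block.  Since
VM_PAGE_TO_MD() merely takes the address of an embedded member, the generated
code is identical, which is what makes this a safe, no-functional-change
substitution.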
@@ -2122,28 +2126,30 @@ pmap_protect(struct pmap *pm, vaddr_t sv
 		if ((data & TLB_V) == 0) {
 			continue;
 		}

 		pa = data & TLB_PA_MASK;
 		DPRINTF(PDB_CHANGEPROT|PDB_REF,
 		    ("pmap_protect: va=%08x data=%08llx "
 		    "seg=%08x pte=%08x\n",
 		    (u_int)sva, (long long)pa, (int)va_to_seg(sva),
 		    (int)va_to_pte(sva)));

 		pg = PHYS_TO_VM_PAGE(pa);
 		if (pg) {
+			struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
 			/* Save REF/MOD info */
-			pv = &pg->mdpage.mdpg_pvh;
+			pv = &md->mdpg_pvh;
 			if (data & TLB_ACCESS)
 				pv->pv_va |= PV_REF;
 			if (data & TLB_MODIFY)
 				pv->pv_va |= PV_MOD;
 		}

 		/* Just do the pmap and TSB, not the pv_list */
 		if ((prot & VM_PROT_WRITE) == 0)
 			data &= ~(TLB_W|TLB_REAL_W);
 		if ((prot & VM_PROT_EXECUTE) == 0)
 			data &= ~(TLB_EXEC);

 		rv = pseg_set(pm, sva, data, 0);
@@ -2462,39 +2468,40 @@ ptelookup_va(vaddr_t va)
 #define TSBBASEMASK	(0xffffffffffffe000LL << tsbsize)

 	tsbptr = (((va >> 9) & 0xfffffffffffffff0LL) & ~TSBBASEMASK);
 	return (tsbptr / sizeof(pte_t));
 }

 /*
  * Do whatever is needed to sync the MOD/REF flags
  */

 bool
 pmap_clear_modify(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	int rv;
 	int changed = 0;
 #ifdef DEBUG
 	int modified = 0;

 	DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify(%p)\n", pg));

 	modified = pmap_is_modified(pg);
 #endif
 	mutex_enter(&pmap_lock);
 	/* Clear all mappings */
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 #ifdef DEBUG
 	if (pv->pv_va & PV_MOD)
 		pv->pv_va |= PV_WE;	/* Remember this was modified */
 #endif
 	if (pv->pv_va & PV_MOD) {
 		changed |= 1;
 		pv->pv_va &= ~PV_MOD;
 	}
 #ifdef DEBUG
 	if (pv->pv_next && !pv->pv_pmap) {
 		printf("pmap_clear_modify: npv but no pmap for pv %p\n", pv);
 		Debugger();
 	}
@@ -2543,40 +2550,41 @@ pmap_clear_modify(struct vm_page *pg)
 	    (changed ? "was modified" : "was not modified")));
 	if (modified != changed) {
 		printf("pmap_clear_modify: modified %d changed %d\n",
 		    modified, changed);
 		Debugger();
 	} else return (modified);
 #endif
 	return (changed);
 }

 bool
 pmap_clear_reference(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	int rv;
 	int changed = 0;
 #ifdef DEBUG
 	int referenced = 0;
 #endif

 	mutex_enter(&pmap_lock);
 #ifdef DEBUG
 	DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg));
 	referenced = pmap_is_referenced_locked(pg);
 #endif
 	/* Clear all references */
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 	if (pv->pv_va & PV_REF) {
 		changed |= 1;
 		pv->pv_va &= ~PV_REF;
 	}
 #ifdef DEBUG
 	if (pv->pv_next && !pv->pv_pmap) {
 		printf("pmap_clear_reference: npv but no pmap for pv %p\n", pv);
 		Debugger();
 	}
 #endif
 	if (pv->pv_pmap != NULL) {
 		for (; pv; pv = pv->pv_next) {
 			int64_t data;
@@ -2609,56 +2617,57 @@ pmap_clear_reference(struct vm_page *pg)
 				tsb_invalidate(va, pmap);
 				tlb_flush_pte(va, pmap);
 			}
 			if (pv->pv_va & PV_REF) {
 				changed |= 1;
 				pv->pv_va &= ~PV_REF;
 			}
 		}
 	}
 	dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
 	pv_check();
 #ifdef DEBUG
 	if (pmap_is_referenced_locked(pg)) {
-		pv = &pg->mdpage.mdpg_pvh;
+		pv = &md->mdpg_pvh;
 		printf("pmap_clear_reference(): %p still referenced "
 		    "(pmap = %p, ctx = %d)\n", pg, pv->pv_pmap,
 		    pv->pv_pmap ? pmap_ctx(pv->pv_pmap) : 0);
 		Debugger();
 	}
 	DPRINTF(PDB_CHANGEPROT|PDB_REF,
 	    ("pmap_clear_reference: pg %p %s\n", pg,
 	    (changed ? "was referenced" : "was not referenced")));
 	if (referenced != changed) {
 		printf("pmap_clear_reference: referenced %d changed %d\n",
 		    referenced, changed);
 		Debugger();
 	} else {
 		mutex_exit(&pmap_lock);
 		return (referenced);
 	}
 #endif
 	mutex_exit(&pmap_lock);
 	return (changed);
 }

 bool
 pmap_is_modified(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 	bool res = false;

 	/* Check if any mapping has been modified */
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 	if (pv->pv_va & PV_MOD)
 		res = true;
 #ifdef HWREF
 #ifdef DEBUG
 	if (pv->pv_next && !pv->pv_pmap) {
 		printf("pmap_is_modified: npv but no pmap for pv %p\n", pv);
 		Debugger();
 	}
 #endif
 	if (!res && pv->pv_pmap != NULL) {
 		mutex_enter(&pmap_lock);
 		for (npv = pv; !res && npv && npv->pv_pmap;
 		    npv = npv->pv_next) {
@@ -2688,33 +2697,34 @@ pmap_is_modified(struct vm_page *pg)

 	DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_is_modified(%p) = %d\n", pg,
 	    res));
 	pv_check();
 	return res;
 }

 /*
  * Variant of pmap_is_reference() where caller already holds pmap_lock
  */
 static bool
 pmap_is_referenced_locked(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 	bool res = false;

 	KASSERT(mutex_owned(&pmap_lock));

 	/* Check if any mapping has been referenced */
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 	if (pv->pv_va & PV_REF)
 		return true;

 #ifdef HWREF
 #ifdef DEBUG
 	if (pv->pv_next && !pv->pv_pmap) {
 		printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
 		Debugger();
 	}
 #endif
 	if (pv->pv_pmap == NULL)
 		return false;

@@ -2736,31 +2746,32 @@ pmap_is_referenced_locked(struct vm_page
 	if (res)
 		pv->pv_va |= PV_REF;
 #endif

 	DPRINTF(PDB_CHANGEPROT|PDB_REF,
 	    ("pmap_is_referenced(%p) = %d\n", pg, res));
 	pv_check();
 	return res;
 }

 bool
 pmap_is_referenced(struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	bool res = false;

 	/* Check if any mapping has been referenced */
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 	if (pv->pv_va & PV_REF)
 		return true;

 #ifdef HWREF
 #ifdef DEBUG
 	if (pv->pv_next && !pv->pv_pmap) {
 		printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
 		Debugger();
 	}
 #endif
 	if (pv->pv_pmap != NULL) {
 		mutex_enter(&pmap_lock);
 		res = pmap_is_referenced_locked(pg);
@@ -2812,39 +2823,40 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 	pv_check();
 	mutex_exit(&pmap_lock);
 }

 /*
  * Lower the protection on the specified physical page.
  *
  * Never enable writing as it will break COW
  */

 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	int64_t clear, set;
 	int64_t data = 0;
 	int rv;
 	pv_entry_t pv, npv, freepv = NULL;
 	struct pmap *pmap;
 	vaddr_t va;
 	bool needflush = FALSE;

 	DPRINTF(PDB_CHANGEPROT,
 	    ("pmap_page_protect: pg %p prot %x\n", pg, prot));

 	mutex_enter(&pmap_lock);
-	pv = &pg->mdpage.mdpg_pvh;
+	pv = &md->mdpg_pvh;
 	if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
 		/* copy_on_write */

 		set = TLB_V;
 		clear = TLB_REAL_W|TLB_W;
 		if (VM_PROT_EXECUTE & prot)
 			set |= TLB_EXEC;
 		else
 			clear |= TLB_EXEC;
 		if (VM_PROT_EXECUTE == prot)
 			set |= TLB_EXEC_ONLY;

 #ifdef DEBUG
@@ -3182,31 +3194,32 @@ ctx_free(struct pmap *pm, struct cpu_inf
 }

 /*
  * Enter the pmap and virtual address into the
  * physical to virtual map table.
  *
  * We enter here with the pmap locked.
  */

 void
 pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
 	      pv_entry_t npv)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pvh;

 	KASSERT(mutex_owned(&pmap_lock));

-	pvh = &pg->mdpage.mdpg_pvh;
+	pvh = &md->mdpg_pvh;
 	DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n",
 	    pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next));
 	if (pvh->pv_pmap == NULL) {

 		/*
 		 * No entries yet, use header as the first entry
 		 */
 		DPRINTF(PDB_ENTER, ("pmap_enter: first pv: pmap %p va %lx\n",
 		    pmap, va));
 		ENTER_STAT(firstpv);
 		PV_SETVA(pvh, va);
 		pvh->pv_pmap = pmap;
 		pvh->pv_next = NULL;
@@ -3243,32 +3256,33 @@ pmap_enter_pv(struct pmap *pmap, vaddr_t
 		if (!npv->pv_next) {
 			ENTER_STAT(secondpv);
 		}
 	}
 }

 /*
  * Remove a physical to virtual address translation.
  */

 pv_entry_t
 pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
 {
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pv_entry_t pvh, npv, pv;
 	int64_t data = 0;

 	KASSERT(mutex_owned(&pmap_lock));

-	pvh = &pg->mdpage.mdpg_pvh;
+	pvh = &md->mdpg_pvh;

 	DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap,
 	    (void *)(u_long)va, pg));
 	pv_check();

 	/*
 	 * Remove page from the PV table.
 	 * If it is the first entry on the list, it is actually
 	 * in the header and we must copy the following entry up
 	 * to the header.  Otherwise we must search the list for
 	 * the entry.  In either case we free the now unused entry.
 	 */
 	if (pmap == pvh->pv_pmap && PV_MATCH(pvh, va)) {
@@ -3319,36 +3333,38 @@ pmap_remove_pv(struct pmap *pmap, vaddr_
 	pv_check();
 	return npv;
 }

 /*
  * pmap_page_cache:
  *
  * Change all mappings of a page to cached/uncached.
  */
 void
 pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	pv_entry_t pv;
 	vaddr_t va;
 	int rv;

 	KASSERT(mutex_owned(&pmap_lock));

 	DPRINTF(PDB_ENTER, ("pmap_page_uncache(%llx)\n",
 	    (unsigned long long)pa));
 	pg = PHYS_TO_VM_PAGE(pa);
-	pv = &pg->mdpage.mdpg_pvh;
+	md = VM_PAGE_TO_MD(pg);
+	pv = &md->mdpg_pvh;
 	while (pv) {
 		va = pv->pv_va & PV_VAMASK;
 		if (pv->pv_va & PV_NC) {
 			int64_t data;

 			/* Non-cached -- I/O mapping */
 			data = pseg_get(pv->pv_pmap, va);
 			KASSERT(data & TLB_V);
 			rv = pseg_set(pv->pv_pmap, va,
 			    data & ~(TLB_CV|TLB_CP), 0);
 			if (rv & 1)
 				panic("pmap_page_cache: pseg_set needs"
 				    " spare! rv=%d\n", rv);
@@ -3421,39 +3437,41 @@ pmap_free_page_noflush(paddr_t pa)
 {
 	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);

 	uvm_pagefree(pg);
 }

 #ifdef DDB

 void db_dump_pv(db_expr_t, int, db_expr_t, const char *);
 void
 db_dump_pv(db_expr_t addr, int have_addr, db_expr_t count, const char *modif)
 {
 	struct vm_page *pg;
+	struct vm_page_md *md;
 	struct pv_entry *pv;

 	if (!have_addr) {
 		db_printf("Need addr for pv\n");
 		return;
 	}

 	pg = PHYS_TO_VM_PAGE((paddr_t)addr);
 	if (pg == NULL) {
 		db_printf("page is not managed\n");
 		return;
 	}
-	for (pv = &pg->mdpage.mdpg_pvh; pv; pv = pv->pv_next)
+	md = VM_PAGE_TO_MD(pg);
+	for (pv = &md->mdpg_pvh; pv; pv = pv->pv_next)
 		db_printf("pv@%p: next=%p pmap=%p va=0x%llx\n",
 		    pv, pv->pv_next, pv->pv_pmap,
 		    (unsigned long long)pv->pv_va);
 }

 #endif

 #ifdef DEBUG
 /*
  * Test ref/modify handling. */
 void pmap_testout(void);
 void
 pmap_testout(void)