Sun Jun 27 12:26:33 2021 UTC
PR 55325: unify both pvo pools (for managed and unmanaged pages).
Analysis by rin, fix suggested by chs.


(martin)
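In outline: rev 1.105 kept two pvo-entry pools and recovered the owning pool
per entry, while rev 1.106 funnels every pvo_entry through one pool with a
single backing allocator. A condensed sketch, paraphrasing the diff below
(declarations abbreviated, all other pmap code elided; not a drop-in patch):

/* rev 1.105: two pools; the owning pool is looked up per entry at free time. */
struct pool pmap_upvo_pool;	/* pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pvo entries for managed pages */

static struct pool *
pmap_pvo_pl(struct pvo_entry *pvo)
{
	return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
}

/* rev 1.106: one pool and one allocator serve both managed and unmanaged
 * pages, so the managed/unmanaged distinction disappears from alloc/free. */
struct pool pmap_pvo_pool;	/* pool for pvo entries */

static struct pool_allocator pmap_pool_allocator = {
	.pa_alloc = pmap_pool_alloc,	/* reuse a cached page, else uvm_pagealloc_strat() */
	.pa_free = pmap_pool_free,	/* push the page back on the cache list */
	.pa_pagesz = 0,
};

void
pmap_pvo_free(struct pvo_entry *pvo)
{
	pool_put(&pmap_pvo_pool, pvo);	/* was: pool_put(pmap_pvo_pl(pvo), pvo) */
}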
diff -r1.105 -r1.106 src/sys/arch/powerpc/oea/pmap.c


--- src/sys/arch/powerpc/oea/pmap.c 2021/03/12 18:10:00 1.105
+++ src/sys/arch/powerpc/oea/pmap.c 2021/06/27 12:26:33 1.106
@@ -1,14 +1,14 @@
-/*	$NetBSD: pmap.c,v 1.105 2021/03/12 18:10:00 thorpej Exp $	*/
+/*	$NetBSD: pmap.c,v 1.106 2021/06/27 12:26:33 martin Exp $	*/
 /*-
  * Copyright (c) 2001 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
  *
  * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
  * of Kyma Systems LLC.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -53,27 +53,27 @@
  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.105 2021/03/12 18:10:00 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.106 2021/06/27 12:26:33 martin Exp $");
 
 #define PMAP_NOOPNAMES
 
 #ifdef _KERNEL_OPT
 #include "opt_altivec.h"
 #include "opt_multiprocessor.h"
 #include "opt_pmap.h"
 #include "opt_ppcarch.h"
 #endif
 
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/pool.h>
@@ -162,28 +162,27 @@ static u_int mem_cnt, avail_cnt;
 #define pmap_protect	PMAPNAME(protect)
 #define pmap_unwire	PMAPNAME(unwire)
 #define pmap_page_protect	PMAPNAME(page_protect)
 #define pmap_query_bit	PMAPNAME(query_bit)
 #define pmap_clear_bit	PMAPNAME(clear_bit)
 
 #define pmap_activate	PMAPNAME(activate)
 #define pmap_deactivate	PMAPNAME(deactivate)
 
 #define pmap_pinit	PMAPNAME(pinit)
 #define pmap_procwr	PMAPNAME(procwr)
 
 #define pmap_pool	PMAPNAME(pool)
-#define pmap_upvo_pool	PMAPNAME(upvo_pool)
-#define pmap_mpvo_pool	PMAPNAME(mpvo_pool)
+#define pmap_pvo_pool	PMAPNAME(pvo_pool)
 #define pmap_pvo_table	PMAPNAME(pvo_table)
 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
 #define pmap_pte_print	PMAPNAME(pte_print)
 #define pmap_pteg_check	PMAPNAME(pteg_check)
 #define pmap_print_mmruregs	PMAPNAME(print_mmuregs)
 #define pmap_print_pte	PMAPNAME(print_pte)
 #define pmap_pteg_dist	PMAPNAME(pteg_dist)
 #endif
 #if defined(DEBUG) || defined(PMAPCHECK)
 #define pmap_pvo_verify	PMAPNAME(pvo_verify)
 #define pmapcheck	PMAPNAME(check)
 #endif
 #if defined(DEBUG) || defined(PMAPDEBUG)
@@ -323,59 +322,46 @@ struct pvo_entry {
 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
 #define PVO_PTEGIDX_SET(pvo,i)	\
 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
 #define PVO_WHERE(pvo,w)	\
 	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
 	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
 
 TAILQ_HEAD(pvo_tqhead, pvo_entry);
 struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
 static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
 static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
 
 struct pool pmap_pool;		/* pool for pmap structures */
-struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
-struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */
+struct pool pmap_pvo_pool;	/* pool for pvo entries */
 
 /*
  * We keep a cache of unmanaged pages to be used for pvo entries for
  * unmanaged pages.
  */
 struct pvo_page {
 	SIMPLEQ_ENTRY(pvo_page) pvop_link;
 };
 SIMPLEQ_HEAD(pvop_head, pvo_page);
-static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
-static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
-static u_long pmap_upvop_free;
-static u_long pmap_upvop_maxfree;
-static u_long pmap_mpvop_free;
-static u_long pmap_mpvop_maxfree;
-
-static void *pmap_pool_ualloc(struct pool *, int);
-static void *pmap_pool_malloc(struct pool *, int);
-
-static void pmap_pool_ufree(struct pool *, void *);
-static void pmap_pool_mfree(struct pool *, void *);
-
-static struct pool_allocator pmap_pool_mallocator = {
-	.pa_alloc = pmap_pool_malloc,
-	.pa_free = pmap_pool_mfree,
-	.pa_pagesz = 0,
-};
-
-static struct pool_allocator pmap_pool_uallocator = {
-	.pa_alloc = pmap_pool_ualloc,
-	.pa_free = pmap_pool_ufree,
+static struct pvop_head pmap_pvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_pvop_head);
+static u_long pmap_pvop_free;
+static u_long pmap_pvop_maxfree;
+
+static void *pmap_pool_alloc(struct pool *, int);
+static void pmap_pool_free(struct pool *, void *);
+
+static struct pool_allocator pmap_pool_allocator = {
+	.pa_alloc = pmap_pool_alloc,
+	.pa_free = pmap_pool_free,
 	.pa_pagesz = 0,
 };
 
 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
 void pmap_pte_print(volatile struct pte *);
 void pmap_pteg_check(void);
 void pmap_pteg_dist(void);
 void pmap_print_pte(pmap_t, vaddr_t);
 void pmap_print_mmuregs(void);
 #endif
 
 #if defined(DEBUG) || defined(PMAPCHECK)
 #ifdef PMAPCHECK
@@ -1091,34 +1077,28 @@ pmap_real_memory(paddr_t *start, psize_t
 			return;
 		}
 	}
 	*size = 0;
 }
 
 /*
  * Initialize anything else for pmap handling.
  * Called during vm_init().
  */
 void
 pmap_init(void)
 {
-	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
-	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
-	    &pmap_pool_mallocator, IPL_NONE);
-
-	pool_setlowat(&pmap_mpvo_pool, 1008);
 
 	pmap_initialized = 1;
-
 }
 
 /*
  * How much virtual space does the kernel get?
  */
 void
 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 {
 	/*
 	 * For now, reserve one segment (minus some overhead) for kernel
 	 * virtual memory
 	 */
 	*start = VM_MIN_KERNEL_ADDRESS;
@@ -1522,33 +1502,26 @@ pmap_pvo_reclaim(struct pmap *pm)
 		pvoh = &pmap_pvo_table[idx];
 		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
 			if (!PVO_WIRED_P(pvo)) {
 				pmap_pvo_remove(pvo, -1, NULL);
 				pmap_pvo_reclaim_nextidx = idx;
 				PMAPCOUNT(pvos_reclaimed);
 				return pvo;
 			}
 		}
 	}
 	return NULL;
 }
 
-static struct pool *
-pmap_pvo_pl(struct pvo_entry *pvo)
-{
-
-	return PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool;
-}
-
 /*
  * This returns whether this is the first mapping of a page.
  */
 int
 pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
 	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
 {
 	struct pvo_entry *pvo;
 	struct pvo_tqhead *pvoh;
 	register_t msr;
 	int ptegidx;
 	int i;
 	int poolflags = PR_NOWAIT;
@@ -1593,29 +1566,27 @@ pmap_pvo_enter(pmap_t pm, struct pool *p
 			PMAPCOUNT(mappings_replaced);
 			pmap_pvo_remove(pvo, -1, NULL);
 			break;
 		}
 	}
 
 	/*
 	 * If we aren't overwriting an mapping, try to allocate
 	 */
 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
 	--pmap_pvo_enter_depth;
 #endif
 	pmap_interrupts_restore(msr);
-	if (pvo) {
-		KASSERT(pmap_pvo_pl(pvo) == pl);
-	} else {
+	if (pvo == NULL) {
 		pvo = pool_get(pl, poolflags);
 	}
 	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
 
 #ifdef DEBUG
 	/*
 	 * Exercise pmap_pvo_reclaim() a little.
 	 */
 	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
 	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
 	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
 		pool_put(pl, pvo);
 		pvo = NULL;
@@ -1801,27 +1772,27 @@ pmap_pvo_remove(struct pvo_entry *pvo, i
 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
 	if (pvol) {
 		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
 	}
 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
 	pmap_pvo_remove_depth--;
 #endif
 }
 
 void
 pmap_pvo_free(struct pvo_entry *pvo)
 {
 
-	pool_put(pmap_pvo_pl(pvo), pvo);
+	pool_put(&pmap_pvo_pool, pvo);
 }
 
 void
 pmap_pvo_free_list(struct pvo_head *pvol)
 {
 	struct pvo_entry *pvo, *npvo;
 
 	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
 		npvo = LIST_NEXT(pvo, pvo_vlink);
 		LIST_REMOVE(pvo, pvo_vlink);
 		pmap_pvo_free(pvo);
 	}
 }
@@ -1873,41 +1844,38 @@ pvo_clear_exec(struct pvo_entry *pvo)
 	}
 #endif
 }
 
 /*
  * Insert physical page at pa into the given pmap at virtual address va.
  */
 int
 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
 	struct mem_region *mp;
 	struct pvo_head *pvo_head;
 	struct vm_page *pg;
-	struct pool *pl;
 	register_t pte_lo;
 	int error;
 	u_int was_exec = 0;
 
 	PMAP_LOCK();
 
 	if (__predict_false(!pmap_initialized)) {
 		pvo_head = &pmap_pvo_kunmanaged;
-		pl = &pmap_upvo_pool;
 		pg = NULL;
 		was_exec = PTE_EXEC;
 	} else {
 		pvo_head = pa_to_pvoh(pa, &pg);
-		pl = &pmap_mpvo_pool;
 	}
 
 	DPRINTFN(ENTER,
 	    "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
 	    pm, va, pa, prot, flags);
 
 	/*
 	 * If this is a managed page, and it's the first reference to the
 	 * page clear the execness of the page.  Otherwise fetch the execness.
 	 */
 	if (pg != NULL)
 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
 
@@ -1951,27 +1919,27 @@ pmap_enter(pmap_t pm, vaddr_t va, paddr_
 		pte_lo |= PTE_CHG;
 	if (flags & VM_PROT_ALL)
 		pte_lo |= PTE_REF;
 
 	/*
 	 * We need to know if this page can be executable
 	 */
 	flags |= (prot & VM_PROT_EXECUTE);
 
 	/*
 	 * Record mapping for later back-translation and pte spilling.
 	 * This will overwrite any existing mapping.
 	 */
-	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
+	error = pmap_pvo_enter(pm, &pmap_pvo_pool, pvo_head, va, pa, pte_lo, flags);
 
-	/* 
+	/*
 	 * Flush the real page from the instruction cache if this page is
 	 * mapped executable and cacheable and has not been flushed since
 	 * the last time it was modified.
 	 */
 	if (error == 0 &&
 	    (flags & VM_PROT_EXECUTE) &&
 	    (pte_lo & PTE_I) == 0 &&
 	    was_exec == 0) {
 		DPRINTFN(ENTER, " %s", "syncicache");
 		PMAPCOUNT(exec_synced);
 		pmap_syncicache(pa, PAGE_SIZE);
@@ -2031,27 +1999,27 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	if (((mfpvr() >> 16) & 0xffff) == MPC603e)
 		pte_lo = PTE_M;
 #endif
 	}
 
 	if (prot & VM_PROT_WRITE)
 		pte_lo |= PTE_BW;
 	else
 		pte_lo |= PTE_BR;
 
 	/*
 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
 	 */
-	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
+	error = pmap_pvo_enter(pmap_kernel(), &pmap_pvo_pool,
 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
 
 	if (error != 0)
 		panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
 		    va, pa, error);
 
 	PMAP_UNLOCK();
 }
 
 void
 pmap_kremove(vaddr_t va, vsize_t len)
 {
 	if (va < VM_MIN_KERNEL_ADDRESS)
@@ -2831,108 +2799,71 @@ pmap_pvo_verify(void)
 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
 		struct pvo_entry *pvo;
 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
 			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
 				panic("pmap_pvo_verify: invalid pvo %p "
 				    "on list %#x", pvo, ptegidx);
 			pmap_pvo_check(pvo);
 		}
 	}
 	splx(s);
 }
 #endif /* PMAPCHECK */
 
-
 void *
-pmap_pool_ualloc(struct pool *pp, int flags)
+pmap_pool_alloc(struct pool *pp, int flags)
 {
 	struct pvo_page *pvop;
+	struct vm_page *pg;
 
 	if (uvm.page_init_done != true) {
 		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
 	}
 
 	PMAP_LOCK();
-	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
-	if (pvop != NULL) {
-		pmap_upvop_free--;
-		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
-		PMAP_UNLOCK();
-		return pvop;
-	}
-	PMAP_UNLOCK();
-	return pmap_pool_malloc(pp, flags);
-}
-
-void *
-pmap_pool_malloc(struct pool *pp, int flags)
-{
-	struct pvo_page *pvop;
-	struct vm_page *pg;
-
-	PMAP_LOCK();
-	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
+	pvop = SIMPLEQ_FIRST(&pmap_pvop_head);
 	if (pvop != NULL) {
-		pmap_mpvop_free--;
-		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
+		pmap_pvop_free--;
+		SIMPLEQ_REMOVE_HEAD(&pmap_pvop_head, pvop_link);
 		PMAP_UNLOCK();
 		return pvop;
 	}
 	PMAP_UNLOCK();
  again:
 	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
 	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
 	if (__predict_false(pg == NULL)) {
 		if (flags & PR_WAITOK) {
 			uvm_wait("plpg");
 			goto again;
 		} else {
 			return (0);
 		}
 	}
 	KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
 	return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
 }
 
 void
-pmap_pool_ufree(struct pool *pp, void *va)
-{
-	struct pvo_page *pvop;
-#if 0
-	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
-		pmap_pool_mfree(va, size, tag);
-		return;
-	}
-#endif
-	PMAP_LOCK();
-	pvop = va;
-	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
-	pmap_upvop_free++;
-	if (pmap_upvop_free > pmap_upvop_maxfree)
-		pmap_upvop_maxfree = pmap_upvop_free;
-	PMAP_UNLOCK();
-}
-
-void
-pmap_pool_mfree(struct pool *pp, void *va)
+pmap_pool_free(struct pool *pp, void *va)
 {
 	struct pvo_page *pvop;
 
 	PMAP_LOCK();
 	pvop = va;
-	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
-	pmap_mpvop_free++;
-	if (pmap_mpvop_free > pmap_mpvop_maxfree)
-		pmap_mpvop_maxfree = pmap_mpvop_free;
+	SIMPLEQ_INSERT_HEAD(&pmap_pvop_head, pvop, pvop_link);
+	pmap_pvop_free++;
+	if (pmap_pvop_free > pmap_pvop_maxfree)
+		pmap_pvop_maxfree = pmap_pvop_free;
 	PMAP_UNLOCK();
 #if 0
 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
 #endif
 }
 
 /*
  * This routine in bootstraping to steal to-be-managed memory (which will
  * then be unmanaged).  We use it to grab from the first 256MB for our
  * pmap needs and above 256MB for other stuff.
  */
 vaddr_t
 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
@@ -3457,34 +3388,34 @@ pmap_bootstrap1(paddr_t kernelstart, pad
 			    uvm_physseg_get_avail_start(bank);
 			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
 			    bank,
 			    ptoa(uvm_physseg_get_avail_start(bank)),
 			    ptoa(uvm_physseg_get_avail_end(bank)),
 			    ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
 		}
 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
 		    pbuf, cnt);
 	}
 #endif
 
-	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
-	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
-	    &pmap_pool_uallocator, IPL_VM);
+	pool_init(&pmap_pvo_pool, sizeof(struct pvo_entry),
+	    sizeof(struct pvo_entry), 0, 0, "pmap_pvopl",
+	    &pmap_pool_allocator, IPL_VM);
 
-	pool_setlowat(&pmap_upvo_pool, 252);
+	pool_setlowat(&pmap_pvo_pool, 1008);
 
 	pool_init(&pmap_pool, sizeof(struct pmap),
-	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
+	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_allocator,
 	    IPL_NONE);
 
 #if defined(PMAP_NEED_MAPKERNEL)
 	{
 		struct pmap *pm = pmap_kernel();
 #if defined(PMAP_NEED_FULL_MAPKERNEL)
 		extern int etext[], kernel_text[];
 		vaddr_t va, va_etext = (paddr_t) etext;
 #endif
 		paddr_t pa, pa_end;
 		register_t sr;
 		struct pte pt;
 		unsigned int ptegidx;