Fri Dec 2 16:33:10 2011 UTC
adapt sparc64.  compile tested.


(yamt)
diff -r1.55 -r1.55.2.1 src/sys/arch/sparc64/include/pmap.h
diff -r1.275 -r1.275.2.1 src/sys/arch/sparc64/sparc64/pmap.c
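
In short: struct pmap no longer embeds a uvm_object for bookkeeping. The pm_obj / pm_obj_lock members and the pm_lock / pm_refs aliases are replaced by a plain pm_refs reference count and a pm_ptps list of page-table pages, and pmap_create(), pmap_destroy() and pmap_enter() are adjusted to initialize, drain and populate that list directly.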

cvs diff -r1.55 -r1.55.2.1 src/sys/arch/sparc64/include/pmap.h

--- src/sys/arch/sparc64/include/pmap.h 2011/10/06 06:55:34 1.55
+++ src/sys/arch/sparc64/include/pmap.h 2011/12/02 16:33:09 1.55.2.1
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.h,v 1.55 2011/10/06 06:55:34 mrg Exp $ */
+/* $NetBSD: pmap.h,v 1.55.2.1 2011/12/02 16:33:09 yamt Exp $ */
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
  * Copyright (C) 1995, 1996 TooLs GmbH.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
@@ -108,30 +108,28 @@ extern struct page_size_map page_size_ma
  */
 
 #define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
 #define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
 #define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)
 
 #ifdef MULTIPROCESSOR
 #define PMAP_LIST_MAXNUMCPU	CPUSET_MAXNUMCPU
 #else
 #define PMAP_LIST_MAXNUMCPU	1
 #endif
 
 struct pmap {
-	struct uvm_object pm_obj;
-	kmutex_t pm_obj_lock;
-#define pm_lock pm_obj.vmobjlock
-#define pm_refs pm_obj.uo_refs
+	unsigned int pm_refs;
+	TAILQ_HEAD(, vm_page) pm_ptps;
 	LIST_ENTRY(pmap) pm_list[PMAP_LIST_MAXNUMCPU]; /* per cpu ctx used list */
 
 	struct pmap_statistics pm_stats;
 
 	/*
 	 * We record the context used on any cpu here. If the context
 	 * is actually present in the TLB, it will be the plain context
 	 * number. If the context is allocated, but has been flushed
 	 * from the tlb, the number will be negative.
 	 * If this pmap has no context allocated on that cpu, the entry
	 * will be 0.
	 */
	int pm_ctx[PMAP_LIST_MAXNUMCPU];	/* Current context per cpu */
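
The bookkeeping pattern behind the new fields is small: pmap_create() initializes pm_refs and pm_ptps, pmap_enter() appends each newly allocated page-table page to pm_ptps, and pmap_destroy() drops the reference count and, on the last reference, drains the list. Below is a minimal, self-contained userland sketch of that pattern using <sys/queue.h>; struct fake_page, struct fake_pmap and the helper functions are illustrative stand-ins, not kernel APIs.

/*
 * Sketch of the new bookkeeping: an explicit reference count plus a
 * TAILQ of page-table pages, instead of an embedded uvm_object.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
        TAILQ_ENTRY(fake_page) pageq;           /* plays the role of pageq.queue */
};

struct fake_pmap {
        unsigned int pm_refs;                   /* was pm_obj.uo_refs */
        TAILQ_HEAD(, fake_page) pm_ptps;        /* was pm_obj.memq */
};

static void
fake_pmap_create(struct fake_pmap *pm)
{
        /* as in the new pmap_create() */
        pm->pm_refs = 1;
        TAILQ_INIT(&pm->pm_ptps);
}

static void
fake_pmap_add_ptp(struct fake_pmap *pm, struct fake_page *pg)
{
        /* as in the new pmap_enter(): remember each page-table page */
        TAILQ_INSERT_TAIL(&pm->pm_ptps, pg, pageq);
}

static void
fake_pmap_destroy(struct fake_pmap *pm)
{
        struct fake_page *pg;

        if (--pm->pm_refs > 0)          /* the kernel uses atomic_dec_uint_nv() */
                return;
        /* as in the new pmap_destroy(): pop pages off the list and free them */
        while ((pg = TAILQ_FIRST(&pm->pm_ptps)) != NULL) {
                TAILQ_REMOVE(&pm->pm_ptps, pg, pageq);
                free(pg);
        }
}

int
main(void)
{
        struct fake_pmap pm;

        fake_pmap_create(&pm);
        fake_pmap_add_ptp(&pm, calloc(1, sizeof(struct fake_page)));
        fake_pmap_add_ptp(&pm, calloc(1, sizeof(struct fake_page)));
        fake_pmap_destroy(&pm);
        printf("all page-table pages freed\n");
        return 0;
}

The kernel code uses atomic_dec_uint_nv() for the reference count and uvm_pagefree() plus a dcache flush for the pages; the sketch only mirrors the list handling.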

cvs diff -r1.275 -r1.275.2.1 src/sys/arch/sparc64/sparc64/pmap.c

--- src/sys/arch/sparc64/sparc64/pmap.c 2011/07/12 07:51:34 1.275
+++ src/sys/arch/sparc64/sparc64/pmap.c 2011/12/02 16:33:09 1.275.2.1
@@ -1,14 +1,14 @@
-/* $NetBSD: pmap.c,v 1.275 2011/07/12 07:51:34 mrg Exp $ */
+/* $NetBSD: pmap.c,v 1.275.2.1 2011/12/02 16:33:09 yamt Exp $ */
 /*
  *
  * Copyright (C) 1996-1999 Eduardo Horvath.
  * All rights reserved.
  *
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
@@ -16,27 +16,27 @@
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.275 2011/07/12 07:51:34 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.275.2.1 2011/12/02 16:33:09 yamt Exp $");
 
 #undef NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define HWREF
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
 
 #include <sys/param.h>
 #include <sys/malloc.h>
 #include <sys/queue.h>
 #include <sys/systm.h>
 #include <sys/msgbuf.h>
 #include <sys/pool.h>
@@ -1369,30 +1369,28 @@ pmap_growkernel(vaddr_t maxkvaddr)
  * Create and return a physical map.
  */
 struct pmap *
 pmap_create(void)
 {
 	struct pmap *pm;
 
 	DPRINTF(PDB_CREATE, ("pmap_create()\n"));
 
 	pm = pool_cache_get(&pmap_cache, PR_WAITOK);
 	memset(pm, 0, sizeof *pm);
 	DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
 
-	mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
-	uvm_obj_init(&pm->pm_obj, NULL, false, 1);
-	uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
-
+	pm->pm_refs = 1;
+	TAILQ_INIT(&pm->pm_ptps);
 	if (pm != pmap_kernel()) {
 		while (!pmap_get_page(&pm->pm_physaddr)) {
 			uvm_wait("pmap_create");
 		}
 		pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr;
 	}
 	DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm)));
 	return pm;
 }
 
 /*
  * Add a reference to the given pmap.
  */
@@ -1406,27 +1404,27 @@ pmap_reference(struct pmap *pm)
 /*
  * Retire the given pmap from service.
  * Should only be called if the map contains no valid mappings.
  */
 void
 pmap_destroy(struct pmap *pm)
 {
 #ifdef MULTIPROCESSOR
 	struct cpu_info *ci;
 	sparc64_cpuset_t pmap_cpus_active;
 #else
 #define pmap_cpus_active 0
 #endif
-	struct vm_page *pg, *nextpg;
+	struct vm_page *pg;
 
 	if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
 		return;
 	}
 	DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
 #ifdef MULTIPROCESSOR
 	CPUSET_CLEAR(pmap_cpus_active);
 	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
 		/* XXXMRG: Move the lock inside one or both tests? */
 		mutex_enter(&ci->ci_ctx_lock);
 		if (CPUSET_HAS(cpus_active, ci->ci_index)) {
 			if (pm->pm_ctx[ci->ci_index] > 0) {
 				CPUSET_ADD(pmap_cpus_active, ci->ci_index);
@@ -1434,42 +1432,38 @@ pmap_destroy(struct pmap *pm)
 			}
 		}
 		mutex_exit(&ci->ci_ctx_lock);
 	}
 #else
 	if (pmap_ctx(pm)) {
 		mutex_enter(&curcpu()->ci_ctx_lock);
 		ctx_free(pm, curcpu());
 		mutex_exit(&curcpu()->ci_ctx_lock);
 	}
 #endif
 
 	/* we could be a little smarter and leave pages zeroed */
-	for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
+	while ((pg = TAILQ_FIRST(&pm->pm_ptps)) != NULL) {
 #ifdef DIAGNOSTIC
 		struct vm_page_md *md = VM_PAGE_TO_MD(pg);
 #endif
 
-		KASSERT((pg->flags & PG_MARKER) == 0);
-		nextpg = TAILQ_NEXT(pg, listq.queue);
-		TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
+		TAILQ_REMOVE(&pm->pm_ptps, pg, pageq.queue);
 		KASSERT(md->mdpg_pvh.pv_pmap == NULL);
 		dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
 		uvm_pagefree(pg);
 	}
 	pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
 
-	uvm_obj_destroy(&pm->pm_obj, false);
-	mutex_destroy(&pm->pm_obj_lock);
 	pool_cache_put(&pmap_cache, pm);
 }
 
 /*
  * Copy the range specified by src_addr/len
  * from the source map to the range dst_addr/len
  * in the destination map.
  *
  * This routine is only advisory and need not do anything.
  */
 void
 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, vsize_t len, vaddr_t src_addr)
 {
@@ -1799,39 +1793,39 @@ pmap_enter(struct pmap *pm, vaddr_t va, 
 	if (wired)
 		tte.data |= TLB_TSB_LOCK;
 	ptp = 0;
 
  retry:
 	i = pseg_set(pm, va, tte.data, ptp);
 	if (i & 4) {
 		/* ptp used as L3 */
 		KASSERT(ptp != 0);
 		KASSERT((i & 3) == 0);
 		ptpg = PHYS_TO_VM_PAGE(ptp);
 		if (ptpg) {
 			ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
-			TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
+			TAILQ_INSERT_TAIL(&pm->pm_ptps, ptpg, pageq.queue);
 		} else {
 			KASSERT(pm == pmap_kernel());
 		}
 	}
 	if (i & 2) {
 		/* ptp used as L2 */
 		KASSERT(ptp != 0);
 		KASSERT((i & 4) == 0);
 		ptpg = PHYS_TO_VM_PAGE(ptp);
 		if (ptpg) {
 			ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
-			TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
+			TAILQ_INSERT_TAIL(&pm->pm_ptps, ptpg, pageq.queue);
 		} else {
 			KASSERT(pm == pmap_kernel());
 		}
 	}
 	if (i & 1) {
 		KASSERT((i & 4) == 0);
 		ptp = 0;
 		if (!pmap_get_page(&ptp)) {
 			mutex_exit(&pmap_lock);
 			if (flags & PMAP_CANFAIL) {
 				if (npv != NULL) {
 					/* free it */
 					npv->pv_next = freepv;