| @@ -1,14 +1,14 @@ | | | @@ -1,14 +1,14 @@ |
1 | /* $NetBSD: pmap.c,v 1.340 2010/04/26 09:26:25 martin Exp $ */ | | 1 | /* $NetBSD: pmap.c,v 1.341 2011/02/15 09:56:32 mrg Exp $ */ |
2 | | | 2 | |
3 | /* | | 3 | /* |
4 | * Copyright (c) 1996 | | 4 | * Copyright (c) 1996 |
5 | * The President and Fellows of Harvard College. All rights reserved. | | 5 | * The President and Fellows of Harvard College. All rights reserved. |
6 | * Copyright (c) 1992, 1993 | | 6 | * Copyright (c) 1992, 1993 |
7 | * The Regents of the University of California. All rights reserved. | | 7 | * The Regents of the University of California. All rights reserved. |
8 | * | | 8 | * |
9 | * This software was developed by the Computer Systems Engineering group | | 9 | * This software was developed by the Computer Systems Engineering group |
10 | * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and | | 10 | * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and |
11 | * contributed to Berkeley. | | 11 | * contributed to Berkeley. |
12 | * | | 12 | * |
13 | * All advertising materials mentioning features or use of this software | | 13 | * All advertising materials mentioning features or use of this software |
14 | * must display the following acknowledgement: | | 14 | * must display the following acknowledgement: |
| @@ -46,27 +46,27 @@ | | | @@ -46,27 +46,27 @@ |
46 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 46 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
47 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 47 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
48 | * SUCH DAMAGE. | | 48 | * SUCH DAMAGE. |
49 | * | | 49 | * |
50 | * @(#)pmap.c 8.4 (Berkeley) 2/5/94 | | 50 | * @(#)pmap.c 8.4 (Berkeley) 2/5/94 |
51 | * | | 51 | * |
52 | */ | | 52 | */ |
53 | | | 53 | |
54 | /* | | 54 | /* |
55 | * SPARC physical map management code. | | 55 | * SPARC physical map management code. |
56 | */ | | 56 | */ |
57 | | | 57 | |
58 | #include <sys/cdefs.h> | | 58 | #include <sys/cdefs.h> |
59 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.340 2010/04/26 09:26:25 martin Exp $"); | | 59 | __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.341 2011/02/15 09:56:32 mrg Exp $"); |
60 | | | 60 | |
61 | #include "opt_ddb.h" | | 61 | #include "opt_ddb.h" |
62 | #include "opt_kgdb.h" | | 62 | #include "opt_kgdb.h" |
63 | #include "opt_sparc_arch.h" | | 63 | #include "opt_sparc_arch.h" |
64 | | | 64 | |
65 | #include <sys/param.h> | | 65 | #include <sys/param.h> |
66 | #include <sys/systm.h> | | 66 | #include <sys/systm.h> |
67 | #include <sys/device.h> | | 67 | #include <sys/device.h> |
68 | #include <sys/proc.h> | | 68 | #include <sys/proc.h> |
69 | #include <sys/queue.h> | | 69 | #include <sys/queue.h> |
70 | #include <sys/pool.h> | | 70 | #include <sys/pool.h> |
71 | #include <sys/exec.h> | | 71 | #include <sys/exec.h> |
72 | #include <sys/core.h> | | 72 | #include <sys/core.h> |
| @@ -808,40 +808,44 @@ updatepte4m(vaddr_t va, int *pte, int bi | | | @@ -808,40 +808,44 @@ updatepte4m(vaddr_t va, int *pte, int bi |
808 | swapval = (oldval & ~bic) | bis; | | 808 | swapval = (oldval & ~bic) | bis; |
809 | swap(vpte, swapval); | | 809 | swap(vpte, swapval); |
810 | | | 810 | |
811 | if (__predict_true(can_lock)) | | 811 | if (__predict_true(can_lock)) |
812 | mutex_spin_exit(&demap_lock); | | 812 | mutex_spin_exit(&demap_lock); |
813 | | | 813 | |
814 | return (oldval); | | 814 | return (oldval); |
815 | } | | 815 | } |
816 | | | 816 | |
817 | inline void | | 817 | inline void |
818 | setpgt4m(int *ptep, int pte) | | 818 | setpgt4m(int *ptep, int pte) |
819 | { | | 819 | { |
820 | | | 820 | |
| | | 821 | kpreempt_disable(); |
821 | swap(ptep, pte); | | 822 | swap(ptep, pte); |
| | | 823 | kpreempt_enable(); |
822 | } | | 824 | } |
823 | | | 825 | |
824 | inline void | | 826 | inline void |
825 | setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, | | 827 | setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, |
826 | u_int cpuset) | | 828 | u_int cpuset) |
827 | { | | 829 | { |
828 | | | 830 | |
829 | #if defined(MULTIPROCESSOR) | | 831 | #if defined(MULTIPROCESSOR) |
830 | updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); | | 832 | updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); |
831 | #else | | 833 | #else |
| | | 834 | kpreempt_disable(); |
832 | if (__predict_true(pageflush)) | | 835 | if (__predict_true(pageflush)) |
833 | tlb_flush_page(va, ctx, 0); | | 836 | tlb_flush_page(va, ctx, 0); |
834 | setpgt4m(ptep, pte); | | 837 | setpgt4m(ptep, pte); |
| | | 838 | kpreempt_enable(); |
835 | #endif /* MULTIPROCESSOR */ | | 839 | #endif /* MULTIPROCESSOR */ |
836 | } | | 840 | } |
837 | | | 841 | |
838 | /* Set the page table entry for va to pte. */ | | 842 | /* Set the page table entry for va to pte. */ |
839 | void | | 843 | void |
840 | setpte4m(vaddr_t va, int pte) | | 844 | setpte4m(vaddr_t va, int pte) |
841 | { | | 845 | { |
842 | struct pmap *pm; | | 846 | struct pmap *pm; |
843 | struct regmap *rp; | | 847 | struct regmap *rp; |
844 | struct segmap *sp; | | 848 | struct segmap *sp; |
845 | | | 849 | |
846 | #ifdef DEBUG | | 850 | #ifdef DEBUG |
847 | if (getcontext4m() != 0) | | 851 | if (getcontext4m() != 0) |
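
This hunk is the core of the change: every software PTE store is now bracketed by kpreempt_disable()/kpreempt_enable(). On the uniprocessor path of setpgt4m_va(), the TLB flush and the PTE write both act on whatever CPU the thread happens to be running on; a preemption between the two could migrate the thread, flushing one CPU's TLB while the stale translation survives on the other. A minimal standalone sketch of that ordering follows; the *_stub names and set_pte_va() are illustrative placeholders, not the pmap.c symbols.

#include <stdatomic.h>

/* Stubs standing in for the kernel primitives (illustrative only). */
static _Atomic int nopreempt;                     /* a per-LWP counter in the real kernel */
static void kpreempt_disable_stub(void) { atomic_fetch_add(&nopreempt, 1); }
static void kpreempt_enable_stub(void)  { atomic_fetch_sub(&nopreempt, 1); }
static void tlb_flush_page_stub(unsigned long va) { (void)va; /* demap on this CPU */ }

/*
 * Update a PTE and keep the local TLB coherent.  The bracket pins the
 * thread to one CPU so the flush and the store hit the same MMU.
 */
static void
set_pte_va(unsigned long va, int *ptep, int newpte)
{
	kpreempt_disable_stub();
	tlb_flush_page_stub(va);  /* 1. drop the old translation on this CPU */
	*ptep = newpte;           /* 2. publish the new PTE (the real code
	                           *    uses the SPARC swap instruction)     */
	kpreempt_enable_stub();
}
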
| @@ -6237,43 +6241,43 @@ pmap_enk4m(struct pmap *pm, vaddr_t va, | | | @@ -6237,43 +6241,43 @@ pmap_enk4m(struct pmap *pm, vaddr_t va, |
6237 | struct regmap *rp; | | 6241 | struct regmap *rp; |
6238 | struct segmap *sp; | | 6242 | struct segmap *sp; |
6239 | int error = 0; | | 6243 | int error = 0; |
6240 | | | 6244 | |
6241 | #ifdef DEBUG | | 6245 | #ifdef DEBUG |
6242 | if (va < KERNBASE) | | 6246 | if (va < KERNBASE) |
6243 | panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va); | | 6247 | panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va); |
6244 | #endif | | 6248 | #endif |
6245 | vr = VA_VREG(va); | | 6249 | vr = VA_VREG(va); |
6246 | vs = VA_VSEG(va); | | 6250 | vs = VA_VSEG(va); |
6247 | rp = &pm->pm_regmap[vr]; | | 6251 | rp = &pm->pm_regmap[vr]; |
6248 | sp = &rp->rg_segmap[vs]; | | 6252 | sp = &rp->rg_segmap[vs]; |
6249 | | | 6253 | |
| | | 6254 | kpreempt_disable(); |
6250 | s = splvm(); /* XXX way too conservative */ | | 6255 | s = splvm(); /* XXX way too conservative */ |
6251 | PMAP_LOCK(); | | 6256 | PMAP_LOCK(); |
6252 | | | 6257 | |
6253 | if (rp->rg_seg_ptps == NULL) /* enter new region */ | | 6258 | if (rp->rg_seg_ptps == NULL) /* enter new region */ |
6254 | panic("pmap_enk4m: missing kernel region table for va 0x%lx",va); | | 6259 | panic("pmap_enk4m: missing kernel region table for va 0x%lx",va); |
6255 | | | 6260 | |
6256 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; | | 6261 | tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; |
6257 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { | | 6262 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { |
6258 | | | 6263 | |
6259 | /* old mapping exists, and is of the same pa type */ | | 6264 | /* old mapping exists, and is of the same pa type */ |
6260 | | | 6265 | |
6261 | if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) { | | 6266 | if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) { |
6262 | /* just changing protection and/or wiring */ | | 6267 | /* just changing protection and/or wiring */ |
6263 | pmap_changeprot4m(pm, va, prot, flags); | | 6268 | pmap_changeprot4m(pm, va, prot, flags); |
6264 | PMAP_UNLOCK(); | | 6269 | error = 0; |
6265 | splx(s); | | 6270 | goto out; |
6266 | return (0); | | | |
6267 | } | | 6271 | } |
6268 | | | 6272 | |
6269 | if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { | | 6273 | if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { |
6270 | struct vm_page *opg; | | 6274 | struct vm_page *opg; |
6271 | #ifdef DEBUG | | 6275 | #ifdef DEBUG |
6272 | printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, " | | 6276 | printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, " |
6273 | "oldpte 0x%x\n", va, pteproto, tpte); | | 6277 | "oldpte 0x%x\n", va, pteproto, tpte); |
6274 | #endif | | 6278 | #endif |
6275 | /* | | 6279 | /* |
6276 | * Switcheroo: changing pa for this va. | | 6280 | * Switcheroo: changing pa for this va. |
6277 | * If old pa was managed, remove from pvlist. | | 6281 | * If old pa was managed, remove from pvlist. |
6278 | * If old page was cached, flush cache. | | 6282 | * If old page was cached, flush cache. |
6279 | */ | | 6283 | */ |
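
In pmap_enk4m(), the early return for a protection-only change can no longer unwind with just PMAP_UNLOCK() and splx(): kpreempt_disable() is now taken at entry, so bailing out directly would leave preemption disabled for good. Rewriting the path as `error = 0; goto out;` sends every exit through a single unwind point that releases in reverse order of acquisition (pmap_enu4m() below gets the same treatment). A minimal sketch of that shape, with placeholder lock, spl, and preemption names rather than the real pmap.c macros:

#include <errno.h>

/* Placeholder primitives (illustrative only). */
static void preempt_block(void)    {}              /* ~ kpreempt_disable() */
static void preempt_unblock(void)  {}              /* ~ kpreempt_enable()  */
static int  raise_spl(void)        { return 0; }   /* ~ splvm()            */
static void lower_spl(int s)       { (void)s; }    /* ~ splx(s)            */
static void take_lock(void)        {}              /* ~ PMAP_LOCK()        */
static void drop_lock(void)        {}              /* ~ PMAP_UNLOCK()      */
static int  prot_change_only(void) { return 1; }
static int  alloc_pv_entry(void)   { return 0; }   /* 0 = success */

static int
enter_mapping(void)
{
	int s, error = 0;

	preempt_block();                /* acquired first ...             */
	s = raise_spl();
	take_lock();

	if (prot_change_only())         /* fast path: protection change   */
		goto out;               /* error stays 0                  */
	if (alloc_pv_entry() != 0) {
		error = ENOMEM;         /* PMAP_CANFAIL-style failure     */
		goto out;
	}
	/* ... install the new PTE here ... */
out:
	drop_lock();                    /* ... released last-in-first-out */
	lower_spl(s);
	preempt_unblock();
	return error;
}
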
| @@ -6299,26 +6303,27 @@ printf("pmap_enk4m: changing existing va | | | @@ -6299,26 +6303,27 @@ printf("pmap_enk4m: changing existing va |
6299 | * If the new mapping is for a managed PA, enter into pvlist. | | 6303 | * If the new mapping is for a managed PA, enter into pvlist. |
6300 | */ | | 6304 | */ |
6301 | if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) { | | 6305 | if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) { |
6302 | if ((flags & PMAP_CANFAIL) != 0) | | 6306 | if ((flags & PMAP_CANFAIL) != 0) |
6303 | goto out; | | 6307 | goto out; |
6304 | panic("pmap_enter: cannot allocate PV entry"); | | 6308 | panic("pmap_enter: cannot allocate PV entry"); |
6305 | } | | 6309 | } |
6306 | | | 6310 | |
6307 | setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto); | | 6311 | setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto); |
6308 | pm->pm_stats.resident_count++; | | 6312 | pm->pm_stats.resident_count++; |
6309 | out: | | 6313 | out: |
6310 | PMAP_UNLOCK(); | | 6314 | PMAP_UNLOCK(); |
6311 | splx(s); | | 6315 | splx(s); |
| | | 6316 | kpreempt_enable(); |
6312 | return (error); | | 6317 | return (error); |
6313 | } | | 6318 | } |
6314 | | | 6319 | |
6315 | /* enter new (or change existing) user mapping */ | | 6320 | /* enter new (or change existing) user mapping */ |
6316 | int | | 6321 | int |
6317 | pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags, | | 6322 | pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags, |
6318 | struct vm_page *pg, int pteproto) | | 6323 | struct vm_page *pg, int pteproto) |
6319 | { | | 6324 | { |
6320 | int vr, vs, *pte, tpte, s; | | 6325 | int vr, vs, *pte, tpte, s; |
6321 | int error = 0; | | 6326 | int error = 0; |
6322 | struct regmap *rp; | | 6327 | struct regmap *rp; |
6323 | struct segmap *sp; | | 6328 | struct segmap *sp; |
6324 | bool owired; | | 6329 | bool owired; |
| @@ -6428,29 +6433,28 @@ pmap_enu4m(struct pmap *pm, vaddr_t va, | | | @@ -6428,29 +6433,28 @@ pmap_enu4m(struct pmap *pm, vaddr_t va, |
6428 | /* | | 6433 | /* |
6429 | * Might be a change: fetch old pte | | 6434 | * Might be a change: fetch old pte |
6430 | */ | | 6435 | */ |
6431 | tpte = pte[VA_SUN4M_VPG(va)]; | | 6436 | tpte = pte[VA_SUN4M_VPG(va)]; |
6432 | | | 6437 | |
6433 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { | | 6438 | if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { |
6434 | | | 6439 | |
6435 | /* old mapping exists, and is of the same pa type */ | | 6440 | /* old mapping exists, and is of the same pa type */ |
6436 | if ((tpte & SRMMU_PPNMASK) == | | 6441 | if ((tpte & SRMMU_PPNMASK) == |
6437 | (pteproto & SRMMU_PPNMASK)) { | | 6442 | (pteproto & SRMMU_PPNMASK)) { |
6438 | /* just changing prot and/or wiring */ | | 6443 | /* just changing prot and/or wiring */ |
6439 | /* caller should call this directly: */ | | 6444 | /* caller should call this directly: */ |
6440 | pmap_changeprot4m(pm, va, prot, flags); | | 6445 | pmap_changeprot4m(pm, va, prot, flags); |
6441 | PMAP_UNLOCK(); | | 6446 | error = 0; |
6442 | splx(s); | | 6447 | goto out; |
6443 | return (0); | | | |
6444 | } | | 6448 | } |
6445 | /* | | 6449 | /* |
6446 | * Switcheroo: changing pa for this va. | | 6450 | * Switcheroo: changing pa for this va. |
6447 | * If old pa was managed, remove from pvlist. | | 6451 | * If old pa was managed, remove from pvlist. |
6448 | * If old page was cached, flush cache. | | 6452 | * If old page was cached, flush cache. |
6449 | */ | | 6453 | */ |
6450 | #ifdef DEBUG | | 6454 | #ifdef DEBUG |
6451 | if (pmapdebug & PDB_SWITCHMAP) | | 6455 | if (pmapdebug & PDB_SWITCHMAP) |
6452 | printf("%s[%d]: pmap_enu: changing existing " | | 6456 | printf("%s[%d]: pmap_enu: changing existing " |
6453 | "va 0x%x: pte 0x%x=>0x%x\n", | | 6457 | "va 0x%x: pte 0x%x=>0x%x\n", |
6454 | curproc->p_comm, curproc->p_pid, | | 6458 | curproc->p_comm, curproc->p_pid, |
6455 | (int)va, tpte, pteproto); | | 6459 | (int)va, tpte, pteproto); |
6456 | #endif | | 6460 | #endif |
| @@ -6541,26 +6545,27 @@ void | | | @@ -6541,26 +6545,27 @@ void |
6541 | pmap_kremove4m(vaddr_t va, vsize_t len) | | 6545 | pmap_kremove4m(vaddr_t va, vsize_t len) |
6542 | { | | 6546 | { |
6543 | struct pmap *pm = pmap_kernel(); | | 6547 | struct pmap *pm = pmap_kernel(); |
6544 | struct regmap *rp; | | 6548 | struct regmap *rp; |
6545 | struct segmap *sp; | | 6549 | struct segmap *sp; |
6546 | vaddr_t endva, nva; | | 6550 | vaddr_t endva, nva; |
6547 | int vr, vs; | | 6551 | int vr, vs; |
6548 | int tpte, perpage, npg, s; | | 6552 | int tpte, perpage, npg, s; |
6549 | | | 6553 | |
6550 | /* | | 6554 | /* |
6551 | * The kernel pmap doesn't need to be locked, but the demap lock | | 6555 | * The kernel pmap doesn't need to be locked, but the demap lock |
6552 | * in updatepte() requires interrupt protection. | | 6556 | * in updatepte() requires interrupt protection. |
6553 | */ | | 6557 | */ |
| | | 6558 | kpreempt_disable(); |
6554 | s = splvm(); | | 6559 | s = splvm(); |
6555 | | | 6560 | |
6556 | endva = va + len; | | 6561 | endva = va + len; |
6557 | for (; va < endva; va = nva) { | | 6562 | for (; va < endva; va = nva) { |
6558 | /* do one virtual segment at a time */ | | 6563 | /* do one virtual segment at a time */ |
6559 | vr = VA_VREG(va); | | 6564 | vr = VA_VREG(va); |
6560 | vs = VA_VSEG(va); | | 6565 | vs = VA_VSEG(va); |
6561 | nva = VSTOVA(vr, vs + 1); | | 6566 | nva = VSTOVA(vr, vs + 1); |
6562 | if (nva == 0 || nva > endva) { | | 6567 | if (nva == 0 || nva > endva) { |
6563 | nva = endva; | | 6568 | nva = endva; |
6564 | } | | 6569 | } |
6565 | | | 6570 | |
6566 | rp = &pm->pm_regmap[vr]; | | 6571 | rp = &pm->pm_regmap[vr]; |
| @@ -6586,116 +6591,122 @@ pmap_kremove4m(vaddr_t va, vsize_t len) | | | @@ -6586,116 +6591,122 @@ pmap_kremove4m(vaddr_t va, vsize_t len) |
6586 | if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) | | 6591 | if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) |
6587 | continue; | | 6592 | continue; |
6588 | | | 6593 | |
6589 | if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { | | 6594 | if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { |
6590 | /* if cacheable, flush page as needed */ | | 6595 | /* if cacheable, flush page as needed */ |
6591 | if (perpage && (tpte & SRMMU_PG_C)) | | 6596 | if (perpage && (tpte & SRMMU_PG_C)) |
6592 | cache_flush_page(va, 0); | | 6597 | cache_flush_page(va, 0); |
6593 | } | | 6598 | } |
6594 | setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], | | 6599 | setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], |
6595 | SRMMU_TEINVALID, 1, 0, CPUSET_ALL); | | 6600 | SRMMU_TEINVALID, 1, 0, CPUSET_ALL); |
6596 | } | | 6601 | } |
6597 | } | | 6602 | } |
6598 | splx(s); | | 6603 | splx(s); |
| | | 6604 | kpreempt_enable(); |
6599 | } | | 6605 | } |
6600 | | | 6606 | |
6601 | /* | | 6607 | /* |
6602 | * Change protection on a range of kernel addresses. | | 6608 | * Change protection on a range of kernel addresses. |
6603 | */ | | 6609 | */ |
6604 | void | | 6610 | void |
6605 | pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot) | | 6611 | pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot) |
6606 | { | | 6612 | { |
6607 | struct pmap *pm = pmap_kernel(); | | 6613 | struct pmap *pm = pmap_kernel(); |
6608 | int pte, newprot, s; | | 6614 | int pte, newprot, s; |
6609 | struct regmap *rp; | | 6615 | struct regmap *rp; |
6610 | struct segmap *sp; | | 6616 | struct segmap *sp; |
6611 | | | 6617 | |
6612 | size = roundup(size,NBPG); | | 6618 | size = roundup(size,NBPG); |
6613 | newprot = pte_kprot4m(prot); | | 6619 | newprot = pte_kprot4m(prot); |
6614 | | | 6620 | |
6615 | /* | | 6621 | /* |
6616 | * The kernel pmap doesn't need to be locked, but the demap lock | | 6622 | * The kernel pmap doesn't need to be locked, but the demap lock |
6617 | * in updatepte() requires interrupt protection. | | 6623 | * in updatepte() requires interrupt protection. |
6618 | */ | | 6624 | */ |
| | | 6625 | kpreempt_disable(); |
6619 | s = splvm(); | | 6626 | s = splvm(); |
6620 | | | 6627 | |
6621 | while (size > 0) { | | 6628 | while (size > 0) { |
6622 | rp = &pm->pm_regmap[VA_VREG(va)]; | | 6629 | rp = &pm->pm_regmap[VA_VREG(va)]; |
6623 | sp = &rp->rg_segmap[VA_VSEG(va)]; | | 6630 | sp = &rp->rg_segmap[VA_VSEG(va)]; |
6624 | pte = sp->sg_pte[VA_SUN4M_VPG(va)]; | | 6631 | pte = sp->sg_pte[VA_SUN4M_VPG(va)]; |
6625 | | | 6632 | |
6626 | /* | | 6633 | /* |
6627 | * Flush cache if page has been referenced to | | 6634 | * Flush cache if page has been referenced to |
6628 | * avoid stale protection bits in the cache tags. | | 6635 | * avoid stale protection bits in the cache tags. |
6629 | */ | | 6636 | */ |
6630 | if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) == | | 6637 | if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) == |
6631 | (SRMMU_PG_C|PG_SUN4M_OBMEM)) | | 6638 | (SRMMU_PG_C|PG_SUN4M_OBMEM)) |
6632 | cache_flush_page(va, 0); | | 6639 | cache_flush_page(va, 0); |
6633 | | | 6640 | |
6634 | setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], | | 6641 | setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], |
6635 | (pte & ~SRMMU_PROT_MASK) | newprot, | | 6642 | (pte & ~SRMMU_PROT_MASK) | newprot, |
6636 | 1, pm->pm_ctxnum, PMAP_CPUSET(pm)); | | 6643 | 1, pm->pm_ctxnum, PMAP_CPUSET(pm)); |
6637 | | | 6644 | |
6638 | va += NBPG; | | 6645 | va += NBPG; |
6639 | size -= NBPG; | | 6646 | size -= NBPG; |
6640 | } | | 6647 | } |
6641 | splx(s); | | 6648 | splx(s); |
| | | 6649 | kpreempt_enable(); |
6642 | } | | 6650 | } |
6643 | #endif /* SUN4M || SUN4D */ | | 6651 | #endif /* SUN4M || SUN4D */ |
6644 | | | 6652 | |
6645 | /* | | 6653 | /* |
6646 | * Clear the wiring attribute for a map/virtual-address pair. | | 6654 | * Clear the wiring attribute for a map/virtual-address pair. |
6647 | */ | | 6655 | */ |
6648 | /* ARGSUSED */ | | 6656 | /* ARGSUSED */ |
6649 | void | | 6657 | void |
6650 | pmap_unwire(struct pmap *pm, vaddr_t va) | | 6658 | pmap_unwire(struct pmap *pm, vaddr_t va) |
6651 | { | | 6659 | { |
6652 | int vr, vs, *ptep; | | 6660 | int vr, vs, *ptep; |
6653 | struct regmap *rp; | | 6661 | struct regmap *rp; |
6654 | struct segmap *sp; | | 6662 | struct segmap *sp; |
6655 | bool owired; | | 6663 | bool owired; |
6656 | | | 6664 | |
| | | 6665 | kpreempt_disable(); |
6657 | vr = VA_VREG(va); | | 6666 | vr = VA_VREG(va); |
6658 | vs = VA_VSEG(va); | | 6667 | vs = VA_VSEG(va); |
6659 | rp = &pm->pm_regmap[vr]; | | 6668 | rp = &pm->pm_regmap[vr]; |
6660 | sp = &rp->rg_segmap[vs]; | | 6669 | sp = &rp->rg_segmap[vs]; |
6661 | | | 6670 | |
6662 | owired = false; | | 6671 | owired = false; |
6663 | if (CPU_HAS_SUNMMU) { | | 6672 | if (CPU_HAS_SUNMMU) { |
6664 | ptep = &sp->sg_pte[VA_VPG(va)]; | | 6673 | ptep = &sp->sg_pte[VA_VPG(va)]; |
6665 | owired = *ptep & PG_WIRED; | | 6674 | owired = *ptep & PG_WIRED; |
6666 | *ptep &= ~PG_WIRED; | | 6675 | *ptep &= ~PG_WIRED; |
6667 | } | | 6676 | } |
6668 | if (CPU_HAS_SRMMU) { | | 6677 | if (CPU_HAS_SRMMU) { |
6669 | owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va)); | | 6678 | owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va)); |
6670 | sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va)); | | 6679 | sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va)); |
6671 | } | | 6680 | } |
6672 | if (!owired) { | | 6681 | if (!owired) { |
6673 | pmap_stats.ps_useless_changewire++; | | 6682 | pmap_stats.ps_useless_changewire++; |
 | | 6683 | kpreempt_enable(); |
6674 | return; | | 6684 | return; |
6675 | } | | 6685 | } |
6676 | | | 6686 | |
6677 | pm->pm_stats.wired_count--; | | 6687 | pm->pm_stats.wired_count--; |
6678 | #if defined(SUN4) || defined(SUN4C) | | 6688 | #if defined(SUN4) || defined(SUN4C) |
6679 | if (CPU_HAS_SUNMMU && --sp->sg_nwired <= 0) { | | 6689 | if (CPU_HAS_SUNMMU && --sp->sg_nwired <= 0) { |
6680 | #ifdef DIAGNOSTIC | | 6690 | #ifdef DIAGNOSTIC |
6681 | if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0) | | 6691 | if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0) |
6682 | panic("pmap_unwire: pm %p, va %lx: nleft=%d, nwired=%d", | | 6692 | panic("pmap_unwire: pm %p, va %lx: nleft=%d, nwired=%d", |
6683 | pm, va, sp->sg_npte, sp->sg_nwired); | | 6693 | pm, va, sp->sg_npte, sp->sg_nwired); |
6684 | #endif | | 6694 | #endif |
6685 | if (sp->sg_pmeg != seginval) | | 6695 | if (sp->sg_pmeg != seginval) |
6686 | mmu_pmeg_unlock(sp->sg_pmeg); | | 6696 | mmu_pmeg_unlock(sp->sg_pmeg); |
6687 | } | | 6697 | } |
6688 | #endif /* SUN4 || SUN4C */ | | 6698 | #endif /* SUN4 || SUN4C */ |
| | | 6699 | kpreempt_enable(); |
6689 | } | | 6700 | } |
6690 | | | 6701 | |
6691 | /* | | 6702 | /* |
6692 | * Extract the physical page address associated | | 6703 | * Extract the physical page address associated |
6693 | * with the given map/virtual_address pair. | | 6704 | * with the given map/virtual_address pair. |
6694 | * GRR, the vm code knows; we should not have to do this! | | 6705 | * GRR, the vm code knows; we should not have to do this! |
6695 | */ | | 6706 | */ |
6696 | | | 6707 | |
6697 | #if defined(SUN4) || defined(SUN4C) | | 6708 | #if defined(SUN4) || defined(SUN4C) |
6698 | bool | | 6709 | bool |
6699 | pmap_extract4_4c(struct pmap *pm, vaddr_t va, paddr_t *pap) | | 6710 | pmap_extract4_4c(struct pmap *pm, vaddr_t va, paddr_t *pap) |
6700 | { | | 6711 | { |
6701 | int vr, vs; | | 6712 | int vr, vs; |
| @@ -7078,26 +7089,27 @@ pmap_copy_page4_4c(paddr_t src, paddr_t | | | @@ -7078,26 +7089,27 @@ pmap_copy_page4_4c(paddr_t src, paddr_t |
7078 | /* | | 7089 | /* |
7079 | * Fill the given MI physical page with zero bytes. | | 7090 | * Fill the given MI physical page with zero bytes. |
7080 | * | | 7091 | * |
7081 | * We avoid stomping on the cache. | | 7092 | * We avoid stomping on the cache. |
7082 | * XXX might be faster to use destination's context and allow cache to fill? | | 7093 | * XXX might be faster to use destination's context and allow cache to fill? |
7083 | */ | | 7094 | */ |
7084 | void | | 7095 | void |
7085 | pmap_zero_page4m(paddr_t pa) | | 7096 | pmap_zero_page4m(paddr_t pa) |
7086 | { | | 7097 | { |
7087 | struct vm_page *pg; | | 7098 | struct vm_page *pg; |
7088 | void *va; | | 7099 | void *va; |
7089 | int pte; | | 7100 | int pte; |
7090 | | | 7101 | |
| | | 7102 | kpreempt_disable(); |
7091 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 7103 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
7092 | /* | | 7104 | /* |
7093 | * The following VAC flush might not be necessary since the | | 7105 | * The following VAC flush might not be necessary since the |
7094 | * page is being cleared because it is about to be allocated, | | 7106 | * page is being cleared because it is about to be allocated, |
7095 | * i.e., is in use by no one. | | 7107 | * i.e., is in use by no one. |
7096 | * In the case of a physical cache, a flush (or just an | | 7108 | * In the case of a physical cache, a flush (or just an |
7097 | * invalidate, if possible) is usually necessary when using | | 7109 | * invalidate, if possible) is usually necessary when using |
7098 | * uncached access to clear it. | | 7110 | * uncached access to clear it. |
7099 | */ | | 7111 | */ |
7100 | if (CACHEINFO.c_vactype != VAC_NONE) | | 7112 | if (CACHEINFO.c_vactype != VAC_NONE) |
7101 | pv_flushcache4m(pg); | | 7113 | pv_flushcache4m(pg); |
7102 | else | | 7114 | else |
7103 | pcache_flush_page(pa, 1); | | 7115 | pcache_flush_page(pa, 1); |
| @@ -7105,106 +7117,112 @@ pmap_zero_page4m(paddr_t pa) | | | @@ -7105,106 +7117,112 @@ pmap_zero_page4m(paddr_t pa) |
7105 | pte = SRMMU_TEPTE | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); | | 7117 | pte = SRMMU_TEPTE | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); |
7106 | if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) | | 7118 | if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) |
7107 | pte |= SRMMU_PG_C; | | 7119 | pte |= SRMMU_PG_C; |
7108 | | | 7120 | |
7109 | va = cpuinfo.vpage[0]; | | 7121 | va = cpuinfo.vpage[0]; |
7110 | setpgt4m(cpuinfo.vpage_pte[0], pte); | | 7122 | setpgt4m(cpuinfo.vpage_pte[0], pte); |
7111 | qzero(va, NBPG); | | 7123 | qzero(va, NBPG); |
7112 | /* | | 7124 | /* |
7113 | * Remove temporary mapping (which is kernel-only, so the | | 7125 | * Remove temporary mapping (which is kernel-only, so the |
7114 | * context used for TLB flushing does not matter) | | 7126 | * context used for TLB flushing does not matter) |
7115 | */ | | 7127 | */ |
7116 | sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); | | 7128 | sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); |
7117 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); | | 7129 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); |
| | | 7130 | kpreempt_enable(); |
7118 | } | | 7131 | } |
7119 | | | 7132 | |
7120 | /* | | 7133 | /* |
7121 | * Viking/MXCC specific version of pmap_zero_page | | 7134 | * Viking/MXCC specific version of pmap_zero_page |
7122 | */ | | 7135 | */ |
7123 | void | | 7136 | void |
7124 | pmap_zero_page_viking_mxcc(paddr_t pa) | | 7137 | pmap_zero_page_viking_mxcc(paddr_t pa) |
7125 | { | | 7138 | { |
7126 | u_int offset; | | 7139 | u_int offset; |
7127 | u_int stream_data_addr = MXCC_STREAM_DATA; | | 7140 | u_int stream_data_addr = MXCC_STREAM_DATA; |
7128 | uint64_t v = (uint64_t)pa; | | 7141 | uint64_t v = (uint64_t)pa; |
7129 | | | 7142 | |
| | | 7143 | kpreempt_disable(); |
7130 | /* Load MXCC stream data register with 0 (bottom 32 bytes only) */ | | 7144 | /* Load MXCC stream data register with 0 (bottom 32 bytes only) */ |
7131 | stda(stream_data_addr+0, ASI_CONTROL, 0); | | 7145 | stda(stream_data_addr+0, ASI_CONTROL, 0); |
7132 | stda(stream_data_addr+8, ASI_CONTROL, 0); | | 7146 | stda(stream_data_addr+8, ASI_CONTROL, 0); |
7133 | stda(stream_data_addr+16, ASI_CONTROL, 0); | | 7147 | stda(stream_data_addr+16, ASI_CONTROL, 0); |
7134 | stda(stream_data_addr+24, ASI_CONTROL, 0); | | 7148 | stda(stream_data_addr+24, ASI_CONTROL, 0); |
7135 | | | 7149 | |
7136 | /* Then write the stream data register to each block in the page */ | | 7150 | /* Then write the stream data register to each block in the page */ |
7137 | v |= MXCC_STREAM_C; | | 7151 | v |= MXCC_STREAM_C; |
7138 | for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { | | 7152 | for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { |
7139 | stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset); | | 7153 | stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset); |
7140 | } | | 7154 | } |
| | | 7155 | kpreempt_enable(); |
7141 | } | | 7156 | } |
7142 | | | 7157 | |
7143 | /* | | 7158 | /* |
7144 | * HyperSPARC/RT625 specific version of pmap_zero_page | | 7159 | * HyperSPARC/RT625 specific version of pmap_zero_page |
7145 | */ | | 7160 | */ |
7146 | void | | 7161 | void |
7147 | pmap_zero_page_hypersparc(paddr_t pa) | | 7162 | pmap_zero_page_hypersparc(paddr_t pa) |
7148 | { | | 7163 | { |
7149 | struct vm_page *pg; | | 7164 | struct vm_page *pg; |
7150 | void *va; | | 7165 | void *va; |
7151 | int pte; | | 7166 | int pte; |
7152 | int offset; | | 7167 | int offset; |
7153 | | | 7168 | |
| | | 7169 | kpreempt_disable(); |
7154 | /* | | 7170 | /* |
7155 | * We still have to map the page, since ASI_BLOCKFILL | | 7171 | * We still have to map the page, since ASI_BLOCKFILL |
7156 | * takes virtual addresses. This also means we have to | | 7172 | * takes virtual addresses. This also means we have to |
7157 | * consider cache aliasing; therefore we still need | | 7173 | * consider cache aliasing; therefore we still need |
7158 | * to flush the cache here. All we gain is the speed-up | | 7174 | * to flush the cache here. All we gain is the speed-up |
7159 | * in zero-fill loop itself.. | | 7175 | * in zero-fill loop itself.. |
7160 | */ | | 7176 | */ |
7161 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { | | 7177 | if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { |
7162 | /* | | 7178 | /* |
7163 | * The following might not be necessary since the page | | 7179 | * The following might not be necessary since the page |
7164 | * is being cleared because it is about to be allocated, | | 7180 | * is being cleared because it is about to be allocated, |
7165 | * i.e., is in use by no one. | | 7181 | * i.e., is in use by no one. |
7166 | */ | | 7182 | */ |
7167 | if (CACHEINFO.c_vactype != VAC_NONE) | | 7183 | if (CACHEINFO.c_vactype != VAC_NONE) |
7168 | pv_flushcache4m(pg); | | 7184 | pv_flushcache4m(pg); |
7169 | } | | 7185 | } |
7170 | pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); | | 7186 | pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); |
7171 | | | 7187 | |
7172 | va = cpuinfo.vpage[0]; | | 7188 | va = cpuinfo.vpage[0]; |
7173 | setpgt4m(cpuinfo.vpage_pte[0], pte); | | 7189 | setpgt4m(cpuinfo.vpage_pte[0], pte); |
7174 | for (offset = 0; offset < NBPG; offset += 32) { | | 7190 | for (offset = 0; offset < NBPG; offset += 32) { |
7175 | sta((char *)va + offset, ASI_BLOCKFILL, 0); | | 7191 | sta((char *)va + offset, ASI_BLOCKFILL, 0); |
7176 | } | | 7192 | } |
7177 | /* Remove temporary mapping */ | | 7193 | /* Remove temporary mapping */ |
7178 | sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); | | 7194 | sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); |
7179 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); | | 7195 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); |
| | | 7196 | kpreempt_enable(); |
7180 | } | | 7197 | } |
7181 | | | 7198 | |
7182 | /* | | 7199 | /* |
7183 | * Copy the given MI physical source page to its destination. | | 7200 | * Copy the given MI physical source page to its destination. |
7184 | * | | 7201 | * |
7185 | * We avoid stomping on the cache as above (with same `XXX' note). | | 7202 | * We avoid stomping on the cache as above (with same `XXX' note). |
7186 | * We must first flush any write-back cache for the source page. | | 7203 | * We must first flush any write-back cache for the source page. |
7187 | * We go ahead and stomp on the kernel's virtual cache for the | | 7204 | * We go ahead and stomp on the kernel's virtual cache for the |
7188 | * source page, since the cache can read memory MUCH faster than | | 7205 | * source page, since the cache can read memory MUCH faster than |
7189 | * the processor. | | 7206 | * the processor. |
7190 | */ | | 7207 | */ |
7191 | void | | 7208 | void |
7192 | pmap_copy_page4m(paddr_t src, paddr_t dst) | | 7209 | pmap_copy_page4m(paddr_t src, paddr_t dst) |
7193 | { | | 7210 | { |
7194 | struct vm_page *pg; | | 7211 | struct vm_page *pg; |
7195 | void *sva, *dva; | | 7212 | void *sva, *dva; |
7196 | int spte, dpte; | | 7213 | int spte, dpte; |
7197 | | | 7214 | |
| | | 7215 | kpreempt_disable(); |
7198 | if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { | | 7216 | if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { |
7199 | if (CACHEINFO.c_vactype == VAC_WRITEBACK) | | 7217 | if (CACHEINFO.c_vactype == VAC_WRITEBACK) |
7200 | pv_flushcache4m(pg); | | 7218 | pv_flushcache4m(pg); |
7201 | } | | 7219 | } |
7202 | | | 7220 | |
7203 | spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX | | | 7221 | spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX | |
7204 | (src >> SRMMU_PPNPASHIFT); | | 7222 | (src >> SRMMU_PPNPASHIFT); |
7205 | | | 7223 | |
7206 | if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) { | | 7224 | if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) { |
7207 | /* similar `might not be necessary' comment applies */ | | 7225 | /* similar `might not be necessary' comment applies */ |
7208 | if (CACHEINFO.c_vactype != VAC_NONE) | | 7226 | if (CACHEINFO.c_vactype != VAC_NONE) |
7209 | pv_flushcache4m(pg); | | 7227 | pv_flushcache4m(pg); |
7210 | else | | 7228 | else |
| @@ -7215,60 +7233,64 @@ pmap_copy_page4m(paddr_t src, paddr_t ds | | | @@ -7215,60 +7233,64 @@ pmap_copy_page4m(paddr_t src, paddr_t ds |
7215 | if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) | | 7233 | if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) |
7216 | dpte |= SRMMU_PG_C; | | 7234 | dpte |= SRMMU_PG_C; |
7217 | | | 7235 | |
7218 | sva = cpuinfo.vpage[0]; | | 7236 | sva = cpuinfo.vpage[0]; |
7219 | dva = cpuinfo.vpage[1]; | | 7237 | dva = cpuinfo.vpage[1]; |
7220 | setpgt4m(cpuinfo.vpage_pte[0], spte); | | 7238 | setpgt4m(cpuinfo.vpage_pte[0], spte); |
7221 | setpgt4m(cpuinfo.vpage_pte[1], dpte); | | 7239 | setpgt4m(cpuinfo.vpage_pte[1], dpte); |
7222 | qcopy(sva, dva, NBPG); /* loads cache, so we must ... */ | | 7240 | qcopy(sva, dva, NBPG); /* loads cache, so we must ... */ |
7223 | cpuinfo.sp_vcache_flush_page((vaddr_t)sva, getcontext4m()); | | 7241 | cpuinfo.sp_vcache_flush_page((vaddr_t)sva, getcontext4m()); |
7224 | sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); | | 7242 | sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); |
7225 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); | | 7243 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); |
7226 | sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); | | 7244 | sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); |
7227 | setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); | | 7245 | setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); |
| | | 7246 | kpreempt_enable(); |
7228 | } | | 7247 | } |
7229 | | | 7248 | |
7230 | /* | | 7249 | /* |
7231 | * Viking/MXCC specific version of pmap_copy_page | | 7250 | * Viking/MXCC specific version of pmap_copy_page |
7232 | */ | | 7251 | */ |
7233 | void | | 7252 | void |
7234 | pmap_copy_page_viking_mxcc(paddr_t src, paddr_t dst) | | 7253 | pmap_copy_page_viking_mxcc(paddr_t src, paddr_t dst) |
7235 | { | | 7254 | { |
7236 | u_int offset; | | 7255 | u_int offset; |
7237 | uint64_t v1 = (uint64_t)src; | | 7256 | uint64_t v1 = (uint64_t)src; |
7238 | uint64_t v2 = (uint64_t)dst; | | 7257 | uint64_t v2 = (uint64_t)dst; |
7239 | | | 7258 | |
| | | 7259 | kpreempt_disable(); |
7240 | /* Enable cache-coherency */ | | 7260 | /* Enable cache-coherency */ |
7241 | v1 |= MXCC_STREAM_C; | | 7261 | v1 |= MXCC_STREAM_C; |
7242 | v2 |= MXCC_STREAM_C; | | 7262 | v2 |= MXCC_STREAM_C; |
7243 | | | 7263 | |
7244 | /* Copy through stream data register */ | | 7264 | /* Copy through stream data register */ |
7245 | for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { | | 7265 | for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { |
7246 | stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset); | | 7266 | stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset); |
7247 | stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset); | | 7267 | stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset); |
7248 | } | | 7268 | } |
| | | 7269 | kpreempt_enable(); |
7249 | } | | 7270 | } |
7250 | | | 7271 | |
7251 | /* | | 7272 | /* |
7252 | * HyperSPARC/RT625 specific version of pmap_copy_page | | 7273 | * HyperSPARC/RT625 specific version of pmap_copy_page |
7253 | */ | | 7274 | */ |
7254 | void | | 7275 | void |
7255 | pmap_copy_page_hypersparc(paddr_t src, paddr_t dst) | | 7276 | pmap_copy_page_hypersparc(paddr_t src, paddr_t dst) |
7256 | { | | 7277 | { |
7257 | struct vm_page *pg; | | 7278 | struct vm_page *pg; |
7258 | void *sva, *dva; | | 7279 | void *sva, *dva; |
7259 | int spte, dpte; | | 7280 | int spte, dpte; |
7260 | int offset; | | 7281 | int offset; |
7261 | | | 7282 | |
| | | 7283 | kpreempt_disable(); |
7262 | /* | | 7284 | /* |
7263 | * We still have to map the pages, since ASI_BLOCKCOPY | | 7285 | * We still have to map the pages, since ASI_BLOCKCOPY |
7264 | * takes virtual addresses. This also means we have to | | 7286 | * takes virtual addresses. This also means we have to |
7265 | * consider cache aliasing; therefore we still need | | 7287 | * consider cache aliasing; therefore we still need |
7266 | * to flush the cache here. All we gain is the speed-up | | 7288 | * to flush the cache here. All we gain is the speed-up |
7267 | * in copy loop itself.. | | 7289 | * in copy loop itself.. |
7268 | */ | | 7290 | */ |
7269 | | | 7291 | |
7270 | if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { | | 7292 | if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { |
7271 | if (CACHEINFO.c_vactype == VAC_WRITEBACK) | | 7293 | if (CACHEINFO.c_vactype == VAC_WRITEBACK) |
7272 | pv_flushcache4m(pg); | | 7294 | pv_flushcache4m(pg); |
7273 | } | | 7295 | } |
7274 | | | 7296 | |
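
The page zero and copy helpers share one pattern: map the physical page at a per-CPU scratch virtual address (cpuinfo.vpage[0] and vpage[1]), run the bulk operation, then flush the TLB entry and invalidate the scratch PTE. Everything from the first setpgt4m() to the final sp_tlb_flush() refers to the current CPU's cpuinfo, so the whole sequence has to execute without migration; that is what the new kpreempt_disable()/kpreempt_enable() bracket guarantees. Distilled into a standalone sketch, with scratch_* and zero_phys_page() as illustrative placeholders:

#include <string.h>

#define PAGE_SZ 4096

/* Per-CPU scratch window, stubbed with a static buffer (illustrative). */
static char scratch_page[PAGE_SZ];
static void scratch_map(unsigned long pa) { (void)pa; /* ~ setpgt4m(vpage_pte[0], pte) */ }
static void scratch_unmap(void) { /* ~ sp_tlb_flush() + setpgt4m(..., SRMMU_TEINVALID) */ }
static void preempt_block(void)   {}    /* ~ kpreempt_disable() */
static void preempt_unblock(void) {}    /* ~ kpreempt_enable()  */

/* Zero a physical page through the current CPU's scratch window. */
static void
zero_phys_page(unsigned long pa)
{
	preempt_block();        /* pin: vpage[] belongs to *this* CPU    */
	scratch_map(pa);        /* 1. enter the temporary kernel mapping */
	memset(scratch_page, 0, PAGE_SZ);        /* 2. bulk operation    */
	scratch_unmap();        /* 3. demap before the scratch VA can be
	                         *    reused on this CPU                 */
	preempt_unblock();
}
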
| @@ -7287,26 +7309,27 @@ pmap_copy_page_hypersparc(paddr_t src, p | | | @@ -7287,26 +7309,27 @@ pmap_copy_page_hypersparc(paddr_t src, p |
7287 | sva = cpuinfo.vpage[0]; | | 7309 | sva = cpuinfo.vpage[0]; |
7288 | dva = cpuinfo.vpage[1]; | | 7310 | dva = cpuinfo.vpage[1]; |
7289 | setpgt4m(cpuinfo.vpage_pte[0], spte); | | 7311 | setpgt4m(cpuinfo.vpage_pte[0], spte); |
7290 | setpgt4m(cpuinfo.vpage_pte[1], dpte); | | 7312 | setpgt4m(cpuinfo.vpage_pte[1], dpte); |
7291 | | | 7313 | |
7292 | for (offset = 0; offset < NBPG; offset += 32) { | | 7314 | for (offset = 0; offset < NBPG; offset += 32) { |
7293 | sta((char *)dva + offset, ASI_BLOCKCOPY, (char *)sva + offset); | | 7315 | sta((char *)dva + offset, ASI_BLOCKCOPY, (char *)sva + offset); |
7294 | } | | 7316 | } |
7295 | | | 7317 | |
7296 | sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); | | 7318 | sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); |
7297 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); | | 7319 | setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); |
7298 | sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); | | 7320 | sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); |
7299 | setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); | | 7321 | setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); |
| | | 7322 | kpreempt_enable(); |
7300 | } | | 7323 | } |
7301 | #endif /* SUN4M || SUN4D */ | | 7324 | #endif /* SUN4M || SUN4D */ |
7302 | | | 7325 | |
7303 | /* | | 7326 | /* |
7304 | * Turn off cache for a given (va, number of pages). | | 7327 | * Turn off cache for a given (va, number of pages). |
7305 | * | | 7328 | * |
7306 | * We just assert PG_NC for each PTE; the addresses must reside | | 7329 | * We just assert PG_NC for each PTE; the addresses must reside |
7307 | * in locked kernel space. A cache flush is also done. | | 7330 | * in locked kernel space. A cache flush is also done. |
7308 | */ | | 7331 | */ |
7309 | void | | 7332 | void |
7310 | kvm_uncache(char *va, int npages) | | 7333 | kvm_uncache(char *va, int npages) |
7311 | { | | 7334 | { |
7312 | struct vm_page *pg; | | 7335 | struct vm_page *pg; |
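
The Viking/MXCC variants need the same pinning for a different reason: the MXCC stream data, source, and destination registers belong to the current CPU module's cache controller. pmap_zero_page_viking_mxcc() loads zeros into the stream data register and then stores it block by block; if the thread migrated mid-loop, the remaining stores would issue on an MXCC whose data register was never loaded. A stub sketch of that shape; stream_* is a placeholder, and BLK_SZ assumes MXCC_STREAM_BLKSZ matches the 32 bytes loaded into the data register above:

#define PAGE_SZ 4096
#define BLK_SZ  32      /* assumed equal to MXCC_STREAM_BLKSZ */

static void preempt_block(void)   {}    /* ~ kpreempt_disable() */
static void preempt_unblock(void) {}    /* ~ kpreempt_enable()  */
static void stream_load_zero(void) { /* fill this module's stream data reg */ }
static void stream_store(unsigned long dst) { (void)dst; /* one block write */ }

static void
mxcc_zero_page(unsigned long pa)
{
	unsigned long off;

	preempt_block();        /* stream registers are per-module state */
	stream_load_zero();
	for (off = 0; off < PAGE_SZ; off += BLK_SZ)
		stream_store(pa + off);
	preempt_unblock();
}
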