Tue Feb 15 09:56:33 2011 UTC
sprinkle some kpreempt_{dis,en}able() calls at various strategic points
that we will need when we get to actually enabling kernel preemption.


(mrg)
diff -r1.112 -r1.113 src/sys/arch/sparc/sparc/intr.c
diff -r1.340 -r1.341 src/sys/arch/sparc/sparc/pmap.c
diff -r1.24 -r1.25 src/sys/arch/sparc/sparc/timer_sun4m.c
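
For reference, the idiom added throughout these files is a kpreempt_disable()/kpreempt_enable()
bracket around code that touches per-CPU state (curcpu(), cpuinfo, the per-CPU vpage mappings),
so the running LWP cannot be migrated to another CPU mid-access once preemption is enabled.
A minimal sketch of the pattern, mirroring the cpu_intr_p() change below (illustration only,
not part of the commit):

/* Sketch: read a per-CPU field without being preempted mid-access. */
bool
cpu_intr_p(void)
{
	int idepth;

	kpreempt_disable();		/* pin this LWP to the current CPU */
	idepth = curcpu()->ci_idepth;	/* per-CPU data is now stable */
	kpreempt_enable();		/* allow preemption again */

	return idepth != 0;
}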

cvs diff -r1.112 -r1.113 src/sys/arch/sparc/sparc/intr.c

--- src/sys/arch/sparc/sparc/intr.c 2011/02/15 09:05:14 1.112
+++ src/sys/arch/sparc/sparc/intr.c 2011/02/15 09:56:32 1.113
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: intr.c,v 1.112 2011/02/15 09:05:14 mrg Exp $ */ 1/* $NetBSD: intr.c,v 1.113 2011/02/15 09:56:32 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * 6 *
7 * This software was developed by the Computer Systems Engineering group 7 * This software was developed by the Computer Systems Engineering group
8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 8 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9 * contributed to Berkeley. 9 * contributed to Berkeley.
10 * 10 *
11 * All advertising materials mentioning features or use of this software 11 * All advertising materials mentioning features or use of this software
12 * must display the following acknowledgement: 12 * must display the following acknowledgement:
13 * This product includes software developed by the University of 13 * This product includes software developed by the University of
14 * California, Lawrence Berkeley Laboratory. 14 * California, Lawrence Berkeley Laboratory.
@@ -31,27 +31,27 @@ @@ -31,27 +31,27 @@
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE. 38 * SUCH DAMAGE.
39 * 39 *
40 * @(#)intr.c 8.3 (Berkeley) 11/11/93 40 * @(#)intr.c 8.3 (Berkeley) 11/11/93
41 */ 41 */
42 42
43#include <sys/cdefs.h> 43#include <sys/cdefs.h>
44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.112 2011/02/15 09:05:14 mrg Exp $"); 44__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.113 2011/02/15 09:56:32 mrg Exp $");
45 45
46#include "opt_multiprocessor.h" 46#include "opt_multiprocessor.h"
47#include "opt_sparc_arch.h" 47#include "opt_sparc_arch.h"
48 48
49#include <sys/param.h> 49#include <sys/param.h>
50#include <sys/systm.h> 50#include <sys/systm.h>
51#include <sys/kernel.h> 51#include <sys/kernel.h>
52#include <sys/malloc.h> 52#include <sys/malloc.h>
53#include <sys/cpu.h> 53#include <sys/cpu.h>
54#include <sys/intr.h> 54#include <sys/intr.h>
55#include <sys/simplelock.h> 55#include <sys/simplelock.h>
56 56
57#include <uvm/uvm_extern.h> 57#include <uvm/uvm_extern.h>
@@ -343,43 +343,47 @@ nmi_soft(struct trapframe *tf) @@ -343,43 +343,47 @@ nmi_soft(struct trapframe *tf)
343#if defined(MULTIPROCESSOR) 343#if defined(MULTIPROCESSOR)
344/* 344/*
345 * Respond to an xcall() request from another CPU. 345 * Respond to an xcall() request from another CPU.
346 * 346 *
347 * This is also called directly from xcall() if we notice an 347 * This is also called directly from xcall() if we notice an
348 * incoming message while we're waiting to grab the xpmsg_lock. 348 * incoming message while we're waiting to grab the xpmsg_lock.
349 * We pass the address of xcallintr() itself to indicate that 349 * We pass the address of xcallintr() itself to indicate that
350 * this is not a real interrupt. 350 * this is not a real interrupt.
351 */ 351 */
352void 352void
353xcallintr(void *v) 353xcallintr(void *v)
354{ 354{
355 355
 356 kpreempt_disable();
 357
356 /* Tally */ 358 /* Tally */
357 if (v != xcallintr) 359 if (v != xcallintr)
358 cpuinfo.ci_sintrcnt[13].ev_count++; 360 cpuinfo.ci_sintrcnt[13].ev_count++;
359 361
360 /* notyet - cpuinfo.msg.received = 1; */ 362 /* notyet - cpuinfo.msg.received = 1; */
361 switch (cpuinfo.msg.tag) { 363 switch (cpuinfo.msg.tag) {
362 case XPMSG_FUNC: 364 case XPMSG_FUNC:
363 { 365 {
364 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func; 366 volatile struct xpmsg_func *p = &cpuinfo.msg.u.xpmsg_func;
365 367
366 if (p->func) 368 if (p->func)
367 (*p->func)(p->arg0, p->arg1, p->arg2); 369 (*p->func)(p->arg0, p->arg1, p->arg2);
368 break; 370 break;
369 } 371 }
370 } 372 }
371 cpuinfo.msg.tag = 0; 373 cpuinfo.msg.tag = 0;
372 cpuinfo.msg.complete = 1; 374 cpuinfo.msg.complete = 1;
 375
 376 kpreempt_enable();
373} 377}
374#endif /* MULTIPROCESSOR */ 378#endif /* MULTIPROCESSOR */
375#endif /* SUN4M || SUN4D */ 379#endif /* SUN4M || SUN4D */
376 380
377 381
378#ifdef MSIIEP 382#ifdef MSIIEP
379/* 383/*
380 * It's easier to make this separate so that not to further obscure 384 * It's easier to make this separate so that not to further obscure
381 * SUN4M case with more ifdefs. There's no common functionality 385 * SUN4M case with more ifdefs. There's no common functionality
382 * anyway. 386 * anyway.
383 */ 387 */
384 388
385#include <sparc/sparc/msiiepreg.h> 389#include <sparc/sparc/msiiepreg.h>
@@ -833,16 +837,21 @@ intr_biglock_wrapper(void *vp) @@ -833,16 +837,21 @@ intr_biglock_wrapper(void *vp)
833 KERNEL_LOCK(1, NULL); 837 KERNEL_LOCK(1, NULL);
834 838
835 ret = (*ih->ih_realfun)(ih->ih_realarg); 839 ret = (*ih->ih_realfun)(ih->ih_realarg);
836 840
837 KERNEL_UNLOCK_ONE(NULL); 841 KERNEL_UNLOCK_ONE(NULL);
838 842
839 return ret; 843 return ret;
840} 844}
841#endif /* MULTIPROCESSOR */ 845#endif /* MULTIPROCESSOR */
842 846
843bool 847bool
844cpu_intr_p(void) 848cpu_intr_p(void)
845{ 849{
 850 int idepth;
 851
 852 kpreempt_disable();
 853 idepth = curcpu()->ci_idepth;
 854 kpreempt_enable();
846 855
847 return curcpu()->ci_idepth != 0; 856 return idepth != 0;
848} 857}

cvs diff -r1.340 -r1.341 src/sys/arch/sparc/sparc/pmap.c

--- src/sys/arch/sparc/sparc/pmap.c 2010/04/26 09:26:25 1.340
+++ src/sys/arch/sparc/sparc/pmap.c 2011/02/15 09:56:32 1.341
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: pmap.c,v 1.340 2010/04/26 09:26:25 martin Exp $ */ 1/* $NetBSD: pmap.c,v 1.341 2011/02/15 09:56:32 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1996 4 * Copyright (c) 1996
5 * The President and Fellows of Harvard College. All rights reserved. 5 * The President and Fellows of Harvard College. All rights reserved.
6 * Copyright (c) 1992, 1993 6 * Copyright (c) 1992, 1993
7 * The Regents of the University of California. All rights reserved. 7 * The Regents of the University of California. All rights reserved.
8 * 8 *
9 * This software was developed by the Computer Systems Engineering group 9 * This software was developed by the Computer Systems Engineering group
10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 10 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11 * contributed to Berkeley. 11 * contributed to Berkeley.
12 * 12 *
13 * All advertising materials mentioning features or use of this software 13 * All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement: 14 * must display the following acknowledgement:
@@ -46,27 +46,27 @@ @@ -46,27 +46,27 @@
46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * SUCH DAMAGE. 48 * SUCH DAMAGE.
49 * 49 *
50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94 50 * @(#)pmap.c 8.4 (Berkeley) 2/5/94
51 * 51 *
52 */ 52 */
53 53
54/* 54/*
55 * SPARC physical map management code. 55 * SPARC physical map management code.
56 */ 56 */
57 57
58#include <sys/cdefs.h> 58#include <sys/cdefs.h>
59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.340 2010/04/26 09:26:25 martin Exp $"); 59__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.341 2011/02/15 09:56:32 mrg Exp $");
60 60
61#include "opt_ddb.h" 61#include "opt_ddb.h"
62#include "opt_kgdb.h" 62#include "opt_kgdb.h"
63#include "opt_sparc_arch.h" 63#include "opt_sparc_arch.h"
64 64
65#include <sys/param.h> 65#include <sys/param.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/device.h> 67#include <sys/device.h>
68#include <sys/proc.h> 68#include <sys/proc.h>
69#include <sys/queue.h> 69#include <sys/queue.h>
70#include <sys/pool.h> 70#include <sys/pool.h>
71#include <sys/exec.h> 71#include <sys/exec.h>
72#include <sys/core.h> 72#include <sys/core.h>
@@ -808,40 +808,44 @@ updatepte4m(vaddr_t va, int *pte, int bi @@ -808,40 +808,44 @@ updatepte4m(vaddr_t va, int *pte, int bi
808 swapval = (oldval & ~bic) | bis; 808 swapval = (oldval & ~bic) | bis;
809 swap(vpte, swapval); 809 swap(vpte, swapval);
810 810
811 if (__predict_true(can_lock)) 811 if (__predict_true(can_lock))
812 mutex_spin_exit(&demap_lock); 812 mutex_spin_exit(&demap_lock);
813 813
814 return (oldval); 814 return (oldval);
815} 815}
816 816
817inline void 817inline void
818setpgt4m(int *ptep, int pte) 818setpgt4m(int *ptep, int pte)
819{ 819{
820 820
 821 kpreempt_disable();
821 swap(ptep, pte); 822 swap(ptep, pte);
 823 kpreempt_enable();
822} 824}
823 825
824inline void 826inline void
825setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx, 827setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx,
826 u_int cpuset) 828 u_int cpuset)
827{ 829{
828 830
829#if defined(MULTIPROCESSOR) 831#if defined(MULTIPROCESSOR)
830 updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset); 832 updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset);
831#else 833#else
 834 kpreempt_disable();
832 if (__predict_true(pageflush)) 835 if (__predict_true(pageflush))
833 tlb_flush_page(va, ctx, 0); 836 tlb_flush_page(va, ctx, 0);
834 setpgt4m(ptep, pte); 837 setpgt4m(ptep, pte);
 838 kpreempt_enable();
835#endif /* MULTIPROCESSOR */ 839#endif /* MULTIPROCESSOR */
836} 840}
837 841
838/* Set the page table entry for va to pte. */ 842/* Set the page table entry for va to pte. */
839void 843void
840setpte4m(vaddr_t va, int pte) 844setpte4m(vaddr_t va, int pte)
841{ 845{
842 struct pmap *pm; 846 struct pmap *pm;
843 struct regmap *rp; 847 struct regmap *rp;
844 struct segmap *sp; 848 struct segmap *sp;
845 849
846#ifdef DEBUG 850#ifdef DEBUG
847 if (getcontext4m() != 0) 851 if (getcontext4m() != 0)
@@ -6237,43 +6241,43 @@ pmap_enk4m(struct pmap *pm, vaddr_t va,  @@ -6237,43 +6241,43 @@ pmap_enk4m(struct pmap *pm, vaddr_t va,
6237 struct regmap *rp; 6241 struct regmap *rp;
6238 struct segmap *sp; 6242 struct segmap *sp;
6239 int error = 0; 6243 int error = 0;
6240 6244
6241#ifdef DEBUG 6245#ifdef DEBUG
6242 if (va < KERNBASE) 6246 if (va < KERNBASE)
6243 panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va); 6247 panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
6244#endif 6248#endif
6245 vr = VA_VREG(va); 6249 vr = VA_VREG(va);
6246 vs = VA_VSEG(va); 6250 vs = VA_VSEG(va);
6247 rp = &pm->pm_regmap[vr]; 6251 rp = &pm->pm_regmap[vr];
6248 sp = &rp->rg_segmap[vs]; 6252 sp = &rp->rg_segmap[vs];
6249 6253
 6254 kpreempt_disable();
6250 s = splvm(); /* XXX way too conservative */ 6255 s = splvm(); /* XXX way too conservative */
6251 PMAP_LOCK(); 6256 PMAP_LOCK();
6252 6257
6253 if (rp->rg_seg_ptps == NULL) /* enter new region */ 6258 if (rp->rg_seg_ptps == NULL) /* enter new region */
6254 panic("pmap_enk4m: missing kernel region table for va 0x%lx",va); 6259 panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
6255 6260
6256 tpte = sp->sg_pte[VA_SUN4M_VPG(va)]; 6261 tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
6257 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { 6262 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
6258 6263
6259 /* old mapping exists, and is of the same pa type */ 6264 /* old mapping exists, and is of the same pa type */
6260 6265
6261 if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) { 6266 if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
6262 /* just changing protection and/or wiring */ 6267 /* just changing protection and/or wiring */
6263 pmap_changeprot4m(pm, va, prot, flags); 6268 pmap_changeprot4m(pm, va, prot, flags);
6264 PMAP_UNLOCK(); 6269 error = 0;
6265 splx(s); 6270 goto out;
6266 return (0); 
6267 } 6271 }
6268 6272
6269 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { 6273 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
6270 struct vm_page *opg; 6274 struct vm_page *opg;
6271#ifdef DEBUG 6275#ifdef DEBUG
6272printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, " 6276printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
6273 "oldpte 0x%x\n", va, pteproto, tpte); 6277 "oldpte 0x%x\n", va, pteproto, tpte);
6274#endif 6278#endif
6275 /* 6279 /*
6276 * Switcheroo: changing pa for this va. 6280 * Switcheroo: changing pa for this va.
6277 * If old pa was managed, remove from pvlist. 6281 * If old pa was managed, remove from pvlist.
6278 * If old page was cached, flush cache. 6282 * If old page was cached, flush cache.
6279 */ 6283 */
@@ -6299,26 +6303,27 @@ printf("pmap_enk4m: changing existing va @@ -6299,26 +6303,27 @@ printf("pmap_enk4m: changing existing va
6299 * If the new mapping is for a managed PA, enter into pvlist. 6303 * If the new mapping is for a managed PA, enter into pvlist.
6300 */ 6304 */
6301 if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) { 6305 if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) {
6302 if ((flags & PMAP_CANFAIL) != 0) 6306 if ((flags & PMAP_CANFAIL) != 0)
6303 goto out; 6307 goto out;
6304 panic("pmap_enter: cannot allocate PV entry"); 6308 panic("pmap_enter: cannot allocate PV entry");
6305 } 6309 }
6306 6310
6307 setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto); 6311 setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
6308 pm->pm_stats.resident_count++; 6312 pm->pm_stats.resident_count++;
6309out: 6313out:
6310 PMAP_UNLOCK(); 6314 PMAP_UNLOCK();
6311 splx(s); 6315 splx(s);
 6316 kpreempt_enable();
6312 return (error); 6317 return (error);
6313} 6318}
6314 6319
6315/* enter new (or change existing) user mapping */ 6320/* enter new (or change existing) user mapping */
6316int 6321int
6317pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags, 6322pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
6318 struct vm_page *pg, int pteproto) 6323 struct vm_page *pg, int pteproto)
6319{ 6324{
6320 int vr, vs, *pte, tpte, s; 6325 int vr, vs, *pte, tpte, s;
6321 int error = 0; 6326 int error = 0;
6322 struct regmap *rp; 6327 struct regmap *rp;
6323 struct segmap *sp; 6328 struct segmap *sp;
6324 bool owired; 6329 bool owired;
@@ -6428,29 +6433,28 @@ pmap_enu4m(struct pmap *pm, vaddr_t va,  @@ -6428,29 +6433,28 @@ pmap_enu4m(struct pmap *pm, vaddr_t va,
6428 /* 6433 /*
6429 * Might be a change: fetch old pte 6434 * Might be a change: fetch old pte
6430 */ 6435 */
6431 tpte = pte[VA_SUN4M_VPG(va)]; 6436 tpte = pte[VA_SUN4M_VPG(va)];
6432 6437
6433 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) { 6438 if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
6434 6439
6435 /* old mapping exists, and is of the same pa type */ 6440 /* old mapping exists, and is of the same pa type */
6436 if ((tpte & SRMMU_PPNMASK) == 6441 if ((tpte & SRMMU_PPNMASK) ==
6437 (pteproto & SRMMU_PPNMASK)) { 6442 (pteproto & SRMMU_PPNMASK)) {
6438 /* just changing prot and/or wiring */ 6443 /* just changing prot and/or wiring */
6439 /* caller should call this directly: */ 6444 /* caller should call this directly: */
6440 pmap_changeprot4m(pm, va, prot, flags); 6445 pmap_changeprot4m(pm, va, prot, flags);
6441 PMAP_UNLOCK(); 6446 error = 0;
6442 splx(s); 6447 goto out;
6443 return (0); 
6444 } 6448 }
6445 /* 6449 /*
6446 * Switcheroo: changing pa for this va. 6450 * Switcheroo: changing pa for this va.
6447 * If old pa was managed, remove from pvlist. 6451 * If old pa was managed, remove from pvlist.
6448 * If old page was cached, flush cache. 6452 * If old page was cached, flush cache.
6449 */ 6453 */
6450#ifdef DEBUG 6454#ifdef DEBUG
6451 if (pmapdebug & PDB_SWITCHMAP) 6455 if (pmapdebug & PDB_SWITCHMAP)
6452 printf("%s[%d]: pmap_enu: changing existing " 6456 printf("%s[%d]: pmap_enu: changing existing "
6453 "va 0x%x: pte 0x%x=>0x%x\n", 6457 "va 0x%x: pte 0x%x=>0x%x\n",
6454 curproc->p_comm, curproc->p_pid, 6458 curproc->p_comm, curproc->p_pid,
6455 (int)va, tpte, pteproto); 6459 (int)va, tpte, pteproto);
6456#endif 6460#endif
@@ -6541,26 +6545,27 @@ void @@ -6541,26 +6545,27 @@ void
6541pmap_kremove4m(vaddr_t va, vsize_t len) 6545pmap_kremove4m(vaddr_t va, vsize_t len)
6542{ 6546{
6543 struct pmap *pm = pmap_kernel(); 6547 struct pmap *pm = pmap_kernel();
6544 struct regmap *rp; 6548 struct regmap *rp;
6545 struct segmap *sp; 6549 struct segmap *sp;
6546 vaddr_t endva, nva; 6550 vaddr_t endva, nva;
6547 int vr, vs; 6551 int vr, vs;
6548 int tpte, perpage, npg, s; 6552 int tpte, perpage, npg, s;
6549 6553
6550 /* 6554 /*
6551 * The kernel pmap doesn't need to be locked, but the demap lock 6555 * The kernel pmap doesn't need to be locked, but the demap lock
6552 * in updatepte() requires interrupt protection. 6556 * in updatepte() requires interrupt protection.
6553 */ 6557 */
 6558 kpreempt_disable();
6554 s = splvm(); 6559 s = splvm();
6555 6560
6556 endva = va + len; 6561 endva = va + len;
6557 for (; va < endva; va = nva) { 6562 for (; va < endva; va = nva) {
6558 /* do one virtual segment at a time */ 6563 /* do one virtual segment at a time */
6559 vr = VA_VREG(va); 6564 vr = VA_VREG(va);
6560 vs = VA_VSEG(va); 6565 vs = VA_VSEG(va);
6561 nva = VSTOVA(vr, vs + 1); 6566 nva = VSTOVA(vr, vs + 1);
6562 if (nva == 0 || nva > endva) { 6567 if (nva == 0 || nva > endva) {
6563 nva = endva; 6568 nva = endva;
6564 } 6569 }
6565 6570
6566 rp = &pm->pm_regmap[vr]; 6571 rp = &pm->pm_regmap[vr];
@@ -6586,116 +6591,122 @@ pmap_kremove4m(vaddr_t va, vsize_t len) @@ -6586,116 +6591,122 @@ pmap_kremove4m(vaddr_t va, vsize_t len)
6586 if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) 6591 if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
6587 continue; 6592 continue;
6588 6593
6589 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) { 6594 if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
6590 /* if cacheable, flush page as needed */ 6595 /* if cacheable, flush page as needed */
6591 if (perpage && (tpte & SRMMU_PG_C)) 6596 if (perpage && (tpte & SRMMU_PG_C))
6592 cache_flush_page(va, 0); 6597 cache_flush_page(va, 0);
6593 } 6598 }
6594 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], 6599 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
6595 SRMMU_TEINVALID, 1, 0, CPUSET_ALL); 6600 SRMMU_TEINVALID, 1, 0, CPUSET_ALL);
6596 } 6601 }
6597 } 6602 }
6598 splx(s); 6603 splx(s);
 6604 kpreempt_enable();
6599} 6605}
6600 6606
6601/* 6607/*
6602 * Change protection on a range of kernel addresses. 6608 * Change protection on a range of kernel addresses.
6603 */ 6609 */
6604void 6610void
6605pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot) 6611pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot)
6606{ 6612{
6607 struct pmap *pm = pmap_kernel(); 6613 struct pmap *pm = pmap_kernel();
6608 int pte, newprot, s; 6614 int pte, newprot, s;
6609 struct regmap *rp; 6615 struct regmap *rp;
6610 struct segmap *sp; 6616 struct segmap *sp;
6611 6617
6612 size = roundup(size,NBPG); 6618 size = roundup(size,NBPG);
6613 newprot = pte_kprot4m(prot); 6619 newprot = pte_kprot4m(prot);
6614 6620
6615 /* 6621 /*
6616 * The kernel pmap doesn't need to be locked, but the demap lock 6622 * The kernel pmap doesn't need to be locked, but the demap lock
6617 * in updatepte() requires interrupt protection. 6623 * in updatepte() requires interrupt protection.
6618 */ 6624 */
 6625 kpreempt_disable();
6619 s = splvm(); 6626 s = splvm();
6620 6627
6621 while (size > 0) { 6628 while (size > 0) {
6622 rp = &pm->pm_regmap[VA_VREG(va)]; 6629 rp = &pm->pm_regmap[VA_VREG(va)];
6623 sp = &rp->rg_segmap[VA_VSEG(va)]; 6630 sp = &rp->rg_segmap[VA_VSEG(va)];
6624 pte = sp->sg_pte[VA_SUN4M_VPG(va)]; 6631 pte = sp->sg_pte[VA_SUN4M_VPG(va)];
6625 6632
6626 /* 6633 /*
6627 * Flush cache if page has been referenced to 6634 * Flush cache if page has been referenced to
6628 * avoid stale protection bits in the cache tags. 6635 * avoid stale protection bits in the cache tags.
6629 */ 6636 */
6630 if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) == 6637 if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
6631 (SRMMU_PG_C|PG_SUN4M_OBMEM)) 6638 (SRMMU_PG_C|PG_SUN4M_OBMEM))
6632 cache_flush_page(va, 0); 6639 cache_flush_page(va, 0);
6633 6640
6634 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], 6641 setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
6635 (pte & ~SRMMU_PROT_MASK) | newprot, 6642 (pte & ~SRMMU_PROT_MASK) | newprot,
6636 1, pm->pm_ctxnum, PMAP_CPUSET(pm)); 6643 1, pm->pm_ctxnum, PMAP_CPUSET(pm));
6637 6644
6638 va += NBPG; 6645 va += NBPG;
6639 size -= NBPG; 6646 size -= NBPG;
6640 } 6647 }
6641 splx(s); 6648 splx(s);
 6649 kpreempt_enable();
6642} 6650}
6643#endif /* SUN4M || SUN4D */ 6651#endif /* SUN4M || SUN4D */
6644 6652
6645/* 6653/*
6646 * Clear the wiring attribute for a map/virtual-address pair. 6654 * Clear the wiring attribute for a map/virtual-address pair.
6647 */ 6655 */
6648/* ARGSUSED */ 6656/* ARGSUSED */
6649void 6657void
6650pmap_unwire(struct pmap *pm, vaddr_t va) 6658pmap_unwire(struct pmap *pm, vaddr_t va)
6651{ 6659{
6652 int vr, vs, *ptep; 6660 int vr, vs, *ptep;
6653 struct regmap *rp; 6661 struct regmap *rp;
6654 struct segmap *sp; 6662 struct segmap *sp;
6655 bool owired; 6663 bool owired;
6656 6664
 6665 kpreempt_disable();
6657 vr = VA_VREG(va); 6666 vr = VA_VREG(va);
6658 vs = VA_VSEG(va); 6667 vs = VA_VSEG(va);
6659 rp = &pm->pm_regmap[vr]; 6668 rp = &pm->pm_regmap[vr];
6660 sp = &rp->rg_segmap[vs]; 6669 sp = &rp->rg_segmap[vs];
6661 6670
6662 owired = false; 6671 owired = false;
6663 if (CPU_HAS_SUNMMU) { 6672 if (CPU_HAS_SUNMMU) {
6664 ptep = &sp->sg_pte[VA_VPG(va)]; 6673 ptep = &sp->sg_pte[VA_VPG(va)];
6665 owired = *ptep & PG_WIRED; 6674 owired = *ptep & PG_WIRED;
6666 *ptep &= ~PG_WIRED; 6675 *ptep &= ~PG_WIRED;
6667 } 6676 }
6668 if (CPU_HAS_SRMMU) { 6677 if (CPU_HAS_SRMMU) {
6669 owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va)); 6678 owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va));
6670 sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va)); 6679 sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
6671 } 6680 }
6672 if (!owired) { 6681 if (!owired) {
6673 pmap_stats.ps_useless_changewire++; 6682 pmap_stats.ps_useless_changewire++;
6674 return; 6683 return;
 6684 kpreempt_enable();
6675 } 6685 }
6676 6686
6677 pm->pm_stats.wired_count--; 6687 pm->pm_stats.wired_count--;
6678#if defined(SUN4) || defined(SUN4C) 6688#if defined(SUN4) || defined(SUN4C)
6679 if (CPU_HAS_SUNMMU && --sp->sg_nwired <= 0) { 6689 if (CPU_HAS_SUNMMU && --sp->sg_nwired <= 0) {
6680#ifdef DIAGNOSTIC 6690#ifdef DIAGNOSTIC
6681 if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0) 6691 if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
6682 panic("pmap_unwire: pm %p, va %lx: nleft=%d, nwired=%d", 6692 panic("pmap_unwire: pm %p, va %lx: nleft=%d, nwired=%d",
6683 pm, va, sp->sg_npte, sp->sg_nwired); 6693 pm, va, sp->sg_npte, sp->sg_nwired);
6684#endif 6694#endif
6685 if (sp->sg_pmeg != seginval) 6695 if (sp->sg_pmeg != seginval)
6686 mmu_pmeg_unlock(sp->sg_pmeg); 6696 mmu_pmeg_unlock(sp->sg_pmeg);
6687 } 6697 }
6688#endif /* SUN4 || SUN4C */ 6698#endif /* SUN4 || SUN4C */
 6699 kpreempt_enable();
6689} 6700}
6690 6701
6691/* 6702/*
6692 * Extract the physical page address associated 6703 * Extract the physical page address associated
6693 * with the given map/virtual_address pair. 6704 * with the given map/virtual_address pair.
6694 * GRR, the vm code knows; we should not have to do this! 6705 * GRR, the vm code knows; we should not have to do this!
6695 */ 6706 */
6696 6707
6697#if defined(SUN4) || defined(SUN4C) 6708#if defined(SUN4) || defined(SUN4C)
6698bool 6709bool
6699pmap_extract4_4c(struct pmap *pm, vaddr_t va, paddr_t *pap) 6710pmap_extract4_4c(struct pmap *pm, vaddr_t va, paddr_t *pap)
6700{ 6711{
6701 int vr, vs; 6712 int vr, vs;
@@ -7078,26 +7089,27 @@ pmap_copy_page4_4c(paddr_t src, paddr_t  @@ -7078,26 +7089,27 @@ pmap_copy_page4_4c(paddr_t src, paddr_t
7078/* 7089/*
7079 * Fill the given MI physical page with zero bytes. 7090 * Fill the given MI physical page with zero bytes.
7080 * 7091 *
7081 * We avoid stomping on the cache. 7092 * We avoid stomping on the cache.
7082 * XXX might be faster to use destination's context and allow cache to fill? 7093 * XXX might be faster to use destination's context and allow cache to fill?
7083 */ 7094 */
7084void 7095void
7085pmap_zero_page4m(paddr_t pa) 7096pmap_zero_page4m(paddr_t pa)
7086{ 7097{
7087 struct vm_page *pg; 7098 struct vm_page *pg;
7088 void *va; 7099 void *va;
7089 int pte; 7100 int pte;
7090 7101
 7102 kpreempt_disable();
7091 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 7103 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
7092 /* 7104 /*
7093 * The following VAC flush might not be necessary since the 7105 * The following VAC flush might not be necessary since the
7094 * page is being cleared because it is about to be allocated, 7106 * page is being cleared because it is about to be allocated,
7095 * i.e., is in use by no one. 7107 * i.e., is in use by no one.
7096 * In the case of a physical cache, a flush (or just an 7108 * In the case of a physical cache, a flush (or just an
7097 * invalidate, if possible) is usually necessary when using 7109 * invalidate, if possible) is usually necessary when using
7098 * uncached access to clear it. 7110 * uncached access to clear it.
7099 */ 7111 */
7100 if (CACHEINFO.c_vactype != VAC_NONE) 7112 if (CACHEINFO.c_vactype != VAC_NONE)
7101 pv_flushcache4m(pg); 7113 pv_flushcache4m(pg);
7102 else 7114 else
7103 pcache_flush_page(pa, 1); 7115 pcache_flush_page(pa, 1);
@@ -7105,106 +7117,112 @@ pmap_zero_page4m(paddr_t pa) @@ -7105,106 +7117,112 @@ pmap_zero_page4m(paddr_t pa)
7105 pte = SRMMU_TEPTE | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); 7117 pte = SRMMU_TEPTE | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT);
7106 if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) 7118 if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
7107 pte |= SRMMU_PG_C; 7119 pte |= SRMMU_PG_C;
7108 7120
7109 va = cpuinfo.vpage[0]; 7121 va = cpuinfo.vpage[0];
7110 setpgt4m(cpuinfo.vpage_pte[0], pte); 7122 setpgt4m(cpuinfo.vpage_pte[0], pte);
7111 qzero(va, NBPG); 7123 qzero(va, NBPG);
7112 /* 7124 /*
7113 * Remove temporary mapping (which is kernel-only, so the 7125 * Remove temporary mapping (which is kernel-only, so the
7114 * context used for TLB flushing does not matter) 7126 * context used for TLB flushing does not matter)
7115 */ 7127 */
7116 sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); 7128 sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
7117 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); 7129 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
 7130 kpreempt_enable();
7118} 7131}
7119 7132
7120/* 7133/*
7121 * Viking/MXCC specific version of pmap_zero_page 7134 * Viking/MXCC specific version of pmap_zero_page
7122 */ 7135 */
7123void 7136void
7124pmap_zero_page_viking_mxcc(paddr_t pa) 7137pmap_zero_page_viking_mxcc(paddr_t pa)
7125{ 7138{
7126 u_int offset; 7139 u_int offset;
7127 u_int stream_data_addr = MXCC_STREAM_DATA; 7140 u_int stream_data_addr = MXCC_STREAM_DATA;
7128 uint64_t v = (uint64_t)pa; 7141 uint64_t v = (uint64_t)pa;
7129 7142
 7143 kpreempt_disable();
7130 /* Load MXCC stream data register with 0 (bottom 32 bytes only) */ 7144 /* Load MXCC stream data register with 0 (bottom 32 bytes only) */
7131 stda(stream_data_addr+0, ASI_CONTROL, 0); 7145 stda(stream_data_addr+0, ASI_CONTROL, 0);
7132 stda(stream_data_addr+8, ASI_CONTROL, 0); 7146 stda(stream_data_addr+8, ASI_CONTROL, 0);
7133 stda(stream_data_addr+16, ASI_CONTROL, 0); 7147 stda(stream_data_addr+16, ASI_CONTROL, 0);
7134 stda(stream_data_addr+24, ASI_CONTROL, 0); 7148 stda(stream_data_addr+24, ASI_CONTROL, 0);
7135 7149
7136 /* Then write the stream data register to each block in the page */ 7150 /* Then write the stream data register to each block in the page */
7137 v |= MXCC_STREAM_C; 7151 v |= MXCC_STREAM_C;
7138 for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { 7152 for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
7139 stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset); 7153 stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset);
7140 } 7154 }
 7155 kpreempt_enable();
7141} 7156}
7142 7157
7143/* 7158/*
7144 * HyperSPARC/RT625 specific version of pmap_zero_page 7159 * HyperSPARC/RT625 specific version of pmap_zero_page
7145 */ 7160 */
7146void 7161void
7147pmap_zero_page_hypersparc(paddr_t pa) 7162pmap_zero_page_hypersparc(paddr_t pa)
7148{ 7163{
7149 struct vm_page *pg; 7164 struct vm_page *pg;
7150 void *va; 7165 void *va;
7151 int pte; 7166 int pte;
7152 int offset; 7167 int offset;
7153 7168
 7169 kpreempt_disable();
7154 /* 7170 /*
7155 * We still have to map the page, since ASI_BLOCKFILL 7171 * We still have to map the page, since ASI_BLOCKFILL
7156 * takes virtual addresses. This also means we have to 7172 * takes virtual addresses. This also means we have to
7157 * consider cache aliasing; therefore we still need 7173 * consider cache aliasing; therefore we still need
7158 * to flush the cache here. All we gain is the speed-up 7174 * to flush the cache here. All we gain is the speed-up
7159 * in zero-fill loop itself.. 7175 * in zero-fill loop itself..
7160 */ 7176 */
7161 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 7177 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
7162 /* 7178 /*
7163 * The following might not be necessary since the page 7179 * The following might not be necessary since the page
7164 * is being cleared because it is about to be allocated, 7180 * is being cleared because it is about to be allocated,
7165 * i.e., is in use by no one. 7181 * i.e., is in use by no one.
7166 */ 7182 */
7167 if (CACHEINFO.c_vactype != VAC_NONE) 7183 if (CACHEINFO.c_vactype != VAC_NONE)
7168 pv_flushcache4m(pg); 7184 pv_flushcache4m(pg);
7169 } 7185 }
7170 pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT); 7186 pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT);
7171 7187
7172 va = cpuinfo.vpage[0]; 7188 va = cpuinfo.vpage[0];
7173 setpgt4m(cpuinfo.vpage_pte[0], pte); 7189 setpgt4m(cpuinfo.vpage_pte[0], pte);
7174 for (offset = 0; offset < NBPG; offset += 32) { 7190 for (offset = 0; offset < NBPG; offset += 32) {
7175 sta((char *)va + offset, ASI_BLOCKFILL, 0); 7191 sta((char *)va + offset, ASI_BLOCKFILL, 0);
7176 } 7192 }
7177 /* Remove temporary mapping */ 7193 /* Remove temporary mapping */
7178 sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3); 7194 sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
7179 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); 7195 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
 7196 kpreempt_enable();
7180} 7197}
7181 7198
7182/* 7199/*
7183 * Copy the given MI physical source page to its destination. 7200 * Copy the given MI physical source page to its destination.
7184 * 7201 *
7185 * We avoid stomping on the cache as above (with same `XXX' note). 7202 * We avoid stomping on the cache as above (with same `XXX' note).
7186 * We must first flush any write-back cache for the source page. 7203 * We must first flush any write-back cache for the source page.
7187 * We go ahead and stomp on the kernel's virtual cache for the 7204 * We go ahead and stomp on the kernel's virtual cache for the
7188 * source page, since the cache can read memory MUCH faster than 7205 * source page, since the cache can read memory MUCH faster than
7189 * the processor. 7206 * the processor.
7190 */ 7207 */
7191void 7208void
7192pmap_copy_page4m(paddr_t src, paddr_t dst) 7209pmap_copy_page4m(paddr_t src, paddr_t dst)
7193{ 7210{
7194 struct vm_page *pg; 7211 struct vm_page *pg;
7195 void *sva, *dva; 7212 void *sva, *dva;
7196 int spte, dpte; 7213 int spte, dpte;
7197 7214
 7215 kpreempt_disable();
7198 if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { 7216 if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
7199 if (CACHEINFO.c_vactype == VAC_WRITEBACK) 7217 if (CACHEINFO.c_vactype == VAC_WRITEBACK)
7200 pv_flushcache4m(pg); 7218 pv_flushcache4m(pg);
7201 } 7219 }
7202 7220
7203 spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX | 7221 spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
7204 (src >> SRMMU_PPNPASHIFT); 7222 (src >> SRMMU_PPNPASHIFT);
7205 7223
7206 if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) { 7224 if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) {
7207 /* similar `might not be necessary' comment applies */ 7225 /* similar `might not be necessary' comment applies */
7208 if (CACHEINFO.c_vactype != VAC_NONE) 7226 if (CACHEINFO.c_vactype != VAC_NONE)
7209 pv_flushcache4m(pg); 7227 pv_flushcache4m(pg);
7210 else 7228 else
@@ -7215,60 +7233,64 @@ pmap_copy_page4m(paddr_t src, paddr_t ds @@ -7215,60 +7233,64 @@ pmap_copy_page4m(paddr_t src, paddr_t ds
7215 if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY) 7233 if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
7216 dpte |= SRMMU_PG_C; 7234 dpte |= SRMMU_PG_C;
7217 7235
7218 sva = cpuinfo.vpage[0]; 7236 sva = cpuinfo.vpage[0];
7219 dva = cpuinfo.vpage[1]; 7237 dva = cpuinfo.vpage[1];
7220 setpgt4m(cpuinfo.vpage_pte[0], spte); 7238 setpgt4m(cpuinfo.vpage_pte[0], spte);
7221 setpgt4m(cpuinfo.vpage_pte[1], dpte); 7239 setpgt4m(cpuinfo.vpage_pte[1], dpte);
7222 qcopy(sva, dva, NBPG); /* loads cache, so we must ... */ 7240 qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
7223 cpuinfo.sp_vcache_flush_page((vaddr_t)sva, getcontext4m()); 7241 cpuinfo.sp_vcache_flush_page((vaddr_t)sva, getcontext4m());
7224 sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); 7242 sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3);
7225 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); 7243 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
7226 sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); 7244 sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
7227 setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); 7245 setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
 7246 kpreempt_enable();
7228} 7247}
7229 7248
7230/* 7249/*
7231 * Viking/MXCC specific version of pmap_copy_page 7250 * Viking/MXCC specific version of pmap_copy_page
7232 */ 7251 */
7233void 7252void
7234pmap_copy_page_viking_mxcc(paddr_t src, paddr_t dst) 7253pmap_copy_page_viking_mxcc(paddr_t src, paddr_t dst)
7235{ 7254{
7236 u_int offset; 7255 u_int offset;
7237 uint64_t v1 = (uint64_t)src; 7256 uint64_t v1 = (uint64_t)src;
7238 uint64_t v2 = (uint64_t)dst; 7257 uint64_t v2 = (uint64_t)dst;
7239 7258
 7259 kpreempt_disable();
7240 /* Enable cache-coherency */ 7260 /* Enable cache-coherency */
7241 v1 |= MXCC_STREAM_C; 7261 v1 |= MXCC_STREAM_C;
7242 v2 |= MXCC_STREAM_C; 7262 v2 |= MXCC_STREAM_C;
7243 7263
7244 /* Copy through stream data register */ 7264 /* Copy through stream data register */
7245 for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) { 7265 for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
7246 stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset); 7266 stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset);
7247 stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset); 7267 stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset);
7248 } 7268 }
 7269 kpreempt_enable();
7249} 7270}
7250 7271
7251/* 7272/*
7252 * HyperSPARC/RT625 specific version of pmap_copy_page 7273 * HyperSPARC/RT625 specific version of pmap_copy_page
7253 */ 7274 */
7254void 7275void
7255pmap_copy_page_hypersparc(paddr_t src, paddr_t dst) 7276pmap_copy_page_hypersparc(paddr_t src, paddr_t dst)
7256{ 7277{
7257 struct vm_page *pg; 7278 struct vm_page *pg;
7258 void *sva, *dva; 7279 void *sva, *dva;
7259 int spte, dpte; 7280 int spte, dpte;
7260 int offset; 7281 int offset;
7261 7282
 7283 kpreempt_disable();
7262 /* 7284 /*
7263 * We still have to map the pages, since ASI_BLOCKCOPY 7285 * We still have to map the pages, since ASI_BLOCKCOPY
7264 * takes virtual addresses. This also means we have to 7286 * takes virtual addresses. This also means we have to
7265 * consider cache aliasing; therefore we still need 7287 * consider cache aliasing; therefore we still need
7266 * to flush the cache here. All we gain is the speed-up 7288 * to flush the cache here. All we gain is the speed-up
7267 * in copy loop itself.. 7289 * in copy loop itself..
7268 */ 7290 */
7269 7291
7270 if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) { 7292 if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
7271 if (CACHEINFO.c_vactype == VAC_WRITEBACK) 7293 if (CACHEINFO.c_vactype == VAC_WRITEBACK)
7272 pv_flushcache4m(pg); 7294 pv_flushcache4m(pg);
7273 } 7295 }
7274 7296
@@ -7287,26 +7309,27 @@ pmap_copy_page_hypersparc(paddr_t src, p @@ -7287,26 +7309,27 @@ pmap_copy_page_hypersparc(paddr_t src, p
7287 sva = cpuinfo.vpage[0]; 7309 sva = cpuinfo.vpage[0];
7288 dva = cpuinfo.vpage[1]; 7310 dva = cpuinfo.vpage[1];
7289 setpgt4m(cpuinfo.vpage_pte[0], spte); 7311 setpgt4m(cpuinfo.vpage_pte[0], spte);
7290 setpgt4m(cpuinfo.vpage_pte[1], dpte); 7312 setpgt4m(cpuinfo.vpage_pte[1], dpte);
7291 7313
7292 for (offset = 0; offset < NBPG; offset += 32) { 7314 for (offset = 0; offset < NBPG; offset += 32) {
7293 sta((char *)dva + offset, ASI_BLOCKCOPY, (char *)sva + offset); 7315 sta((char *)dva + offset, ASI_BLOCKCOPY, (char *)sva + offset);
7294 } 7316 }
7295 7317
7296 sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3); 7318 sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3);
7297 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID); 7319 setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
7298 sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3); 7320 sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
7299 setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID); 7321 setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
 7322 kpreempt_enable();
7300} 7323}
7301#endif /* SUN4M || SUN4D */ 7324#endif /* SUN4M || SUN4D */
7302 7325
7303/* 7326/*
7304 * Turn off cache for a given (va, number of pages). 7327 * Turn off cache for a given (va, number of pages).
7305 * 7328 *
7306 * We just assert PG_NC for each PTE; the addresses must reside 7329 * We just assert PG_NC for each PTE; the addresses must reside
7307 * in locked kernel space. A cache flush is also done. 7330 * in locked kernel space. A cache flush is also done.
7308 */ 7331 */
7309void 7332void
7310kvm_uncache(char *va, int npages) 7333kvm_uncache(char *va, int npages)
7311{ 7334{
7312 struct vm_page *pg; 7335 struct vm_page *pg;

cvs diff -r1.24 -r1.25 src/sys/arch/sparc/sparc/timer_sun4m.c

--- src/sys/arch/sparc/sparc/timer_sun4m.c 2011/02/14 10:21:05 1.24
+++ src/sys/arch/sparc/sparc/timer_sun4m.c 2011/02/15 09:56:32 1.25
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: timer_sun4m.c,v 1.24 2011/02/14 10:21:05 mrg Exp $ */ 1/* $NetBSD: timer_sun4m.c,v 1.25 2011/02/15 09:56:32 mrg Exp $ */
2 2
3/* 3/*
4 * Copyright (c) 1992, 1993 4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved. 5 * The Regents of the University of California. All rights reserved.
6 * Copyright (c) 1994 Gordon W. Ross 6 * Copyright (c) 1994 Gordon W. Ross
7 * Copyright (c) 1993 Adam Glass 7 * Copyright (c) 1993 Adam Glass
8 * Copyright (c) 1996 Paul Kranenburg 8 * Copyright (c) 1996 Paul Kranenburg
9 * Copyright (c) 1996 9 * Copyright (c) 1996
10 * The President and Fellows of Harvard College. All rights reserved. 10 * The President and Fellows of Harvard College. All rights reserved.
11 * 11 *
12 * This software was developed by the Computer Systems Engineering group 12 * This software was developed by the Computer Systems Engineering group
13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and 13 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
14 * contributed to Berkeley. 14 * contributed to Berkeley.
@@ -48,27 +48,27 @@ @@ -48,27 +48,27 @@
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51 * SUCH DAMAGE. 51 * SUCH DAMAGE.
52 * 52 *
53 * @(#)clock.c 8.1 (Berkeley) 6/11/93 53 * @(#)clock.c 8.1 (Berkeley) 6/11/93
54 */ 54 */
55 55
56/* 56/*
57 * Sun4m timer support. 57 * Sun4m timer support.
58 */ 58 */
59 59
60#include <sys/cdefs.h> 60#include <sys/cdefs.h>
61__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.24 2011/02/14 10:21:05 mrg Exp $"); 61__KERNEL_RCSID(0, "$NetBSD: timer_sun4m.c,v 1.25 2011/02/15 09:56:32 mrg Exp $");
62 62
63#include <sys/param.h> 63#include <sys/param.h>
64#include <sys/kernel.h> 64#include <sys/kernel.h>
65#include <sys/device.h> 65#include <sys/device.h>
66#include <sys/systm.h> 66#include <sys/systm.h>
67#include <sys/cpu.h> 67#include <sys/cpu.h>
68 68
69#include <machine/autoconf.h> 69#include <machine/autoconf.h>
70#include <machine/bus.h> 70#include <machine/bus.h>
71 71
72#include <sparc/sparc/vaddrs.h> 72#include <sparc/sparc/vaddrs.h>
73#include <sparc/sparc/cpuvar.h> 73#include <sparc/sparc/cpuvar.h>
74#include <sparc/sparc/timerreg.h> 74#include <sparc/sparc/timerreg.h>
@@ -90,82 +90,88 @@ timer_init_4m(void) @@ -90,82 +90,88 @@ timer_init_4m(void)
90 int n; 90 int n;
91 91
92 timerreg4m->t_limit = tmr_ustolim4m(tick); 92 timerreg4m->t_limit = tmr_ustolim4m(tick);
93 for (CPU_INFO_FOREACH(n, cpi)) { 93 for (CPU_INFO_FOREACH(n, cpi)) {
94 cpi->counterreg_4m->t_limit = tmr_ustolim4m(statint); 94 cpi->counterreg_4m->t_limit = tmr_ustolim4m(statint);
95 } 95 }
96 icr_si_bic(SINTR_T); 96 icr_si_bic(SINTR_T);
97} 97}
98 98
99void 99void
100schedintr_4m(void *v) 100schedintr_4m(void *v)
101{ 101{
102 102
 103 kpreempt_disable();
103#ifdef MULTIPROCESSOR 104#ifdef MULTIPROCESSOR
104 /* 105 /*
105 * We call hardclock() here so that we make sure it is called on 106 * We call hardclock() here so that we make sure it is called on
106 * all CPUs. This function ends up being called on sun4m systems 107 * all CPUs. This function ends up being called on sun4m systems
107 * every tick. 108 * every tick.
108 */ 109 */
109 if (!CPU_IS_PRIMARY(curcpu())) 110 if (!CPU_IS_PRIMARY(curcpu()))
110 hardclock(v); 111 hardclock(v);
111 112
112 /* 113 /*
113 * The factor 8 is only valid for stathz==100. 114 * The factor 8 is only valid for stathz==100.
114 * See also clock.c 115 * See also clock.c
115 */ 116 */
116 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0) 117 if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 && schedhz != 0)
117#endif 118#endif
118 schedclock(curlwp); 119 schedclock(curlwp);
 120 kpreempt_enable();
119} 121}
120 122
121 123
122/* 124/*
123 * Level 10 (clock) interrupts from system counter. 125 * Level 10 (clock) interrupts from system counter.
124 */ 126 */
125int 127int
126clockintr_4m(void *cap) 128clockintr_4m(void *cap)
127{ 129{
128 130
129 KASSERT(CPU_IS_PRIMARY(curcpu())); 131 KASSERT(CPU_IS_PRIMARY(curcpu()));
130 /* 132 /*
131 * XXX this needs to be fixed in a more general way 133 * XXX this needs to be fixed in a more general way
132 * problem is that the kernel enables interrupts and THEN 134 * problem is that the kernel enables interrupts and THEN
133 * sets up clocks. In between there's an opportunity to catch 135 * sets up clocks. In between there's an opportunity to catch
134 * a timer interrupt - if we call hardclock() at that point we'll 136 * a timer interrupt - if we call hardclock() at that point we'll
135 * panic 137 * panic
136 * so for now just bail when cold 138 * so for now just bail when cold
137 * 139 *
138 * For MP, we defer calling hardclock() to the schedintr so 140 * For MP, we defer calling hardclock() to the schedintr so
139 * that we call it on all cpus. 141 * that we call it on all cpus.
140 */ 142 */
 143 kpreempt_disable();
141 if (cold) 144 if (cold)
142 return 0; 145 return 0;
143 /* read the limit register to clear the interrupt */ 146 /* read the limit register to clear the interrupt */
144 *((volatile int *)&timerreg4m->t_limit); 147 *((volatile int *)&timerreg4m->t_limit);
145 tickle_tc(); 148 tickle_tc();
146 hardclock((struct clockframe *)cap); 149 hardclock((struct clockframe *)cap);
 150 kpreempt_enable();
147 return (1); 151 return (1);
148} 152}
149 153
150/* 154/*
151 * Level 14 (stat clock) interrupts from processor counter. 155 * Level 14 (stat clock) interrupts from processor counter.
152 */ 156 */
153int 157int
154statintr_4m(void *cap) 158statintr_4m(void *cap)
155{ 159{
156 struct clockframe *frame = cap; 160 struct clockframe *frame = cap;
157 u_long newint; 161 u_long newint;
158 162
 163 kpreempt_disable();
 164
159 /* read the limit register to clear the interrupt */ 165 /* read the limit register to clear the interrupt */
160 *((volatile int *)&counterreg4m->t_limit); 166 *((volatile int *)&counterreg4m->t_limit);
161 167
162 statclock(frame); 168 statclock(frame);
163 169
164 /* 170 /*
165 * Compute new randomized interval. 171 * Compute new randomized interval.
166 */ 172 */
167 newint = new_interval(); 173 newint = new_interval();
168 174
169 /* 175 /*
170 * Use the `non-resetting' limit register, so we don't 176 * Use the `non-resetting' limit register, so we don't
171 * loose the counter ticks that happened since this 177 * loose the counter ticks that happened since this
@@ -184,26 +190,27 @@ statintr_4m(void *cap) @@ -184,26 +190,27 @@ statintr_4m(void *cap)
184 /* No need to schedule a soft interrupt */ 190 /* No need to schedule a soft interrupt */
185 spllowerschedclock(); 191 spllowerschedclock();
186 schedintr_4m(cap); 192 schedintr_4m(cap);
187 } else { 193 } else {
188 /* 194 /*
189 * We're interrupting a thread that may have the 195 * We're interrupting a thread that may have the
190 * scheduler lock; run schedintr_4m() on this CPU later. 196 * scheduler lock; run schedintr_4m() on this CPU later.
191 */ 197 */
192 raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */ 198 raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */
193 } 199 }
194#if !defined(MULTIPROCESSOR) 200#if !defined(MULTIPROCESSOR)
195 } 201 }
196#endif 202#endif
 203 kpreempt_enable();
197 204
198 return (1); 205 return (1);
199} 206}
200 207
201void 208void
202timerattach_obio_4m(struct device *parent, struct device *self, void *aux) 209timerattach_obio_4m(struct device *parent, struct device *self, void *aux)
203{ 210{
204 union obio_attach_args *uoba = aux; 211 union obio_attach_args *uoba = aux;
205 struct sbus_attach_args *sa = &uoba->uoba_sbus; 212 struct sbus_attach_args *sa = &uoba->uoba_sbus;
206 struct cpu_info *cpi; 213 struct cpu_info *cpi;
207 bus_space_handle_t bh; 214 bus_space_handle_t bh;
208 int i, n; 215 int i, n;
209 216