Mon Jun 6 02:49:39 2011 UTC
convert dcache_flush_page() into a function pointer.  introduce
sp_dcache_flush_page() so that the SMP version can still flush the
local CPU's D$ directly.

XXX: probably best to have usI/II and usIII (and nop) versions of this
XXX: and avoid the need for sp_dcache_flush_page at all.


(mrg)
diff -r1.7 -r1.8 src/sys/arch/sparc64/sparc64/cache.c
diff -r1.21 -r1.22 src/sys/arch/sparc64/sparc64/cache.h
diff -r1.41 -r1.42 src/sys/arch/sparc64/sparc64/ipifuncs.c
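
The change is boot-time dispatch: dcache_flush_page is now a function
pointer that cache_setup_funcs() aims at the implementation for the
detected CPU -- usI/II, usIII, a nop for sun4us/sun4v, or the
IPI-broadcasting MP wrapper.  A minimal standalone sketch of that
pattern follows; the names are hypothetical, not the kernel's own.

/* Boot-time function-pointer dispatch, reduced to a toy (hypothetical names). */
#include <stdio.h>

typedef unsigned long paddr_t;

static void flush_us(paddr_t pa)    { printf("usI/II flush of %#lx\n", pa); }
static void flush_usiii(paddr_t pa) { printf("usIII flush of %#lx\n", pa); }
static void flush_nop(paddr_t pa)   { (void)pa; /* nothing to flush */ }

/* Default to the usI/II version; repointed exactly once during boot. */
static void (*flush_page)(paddr_t) = flush_us;

enum cpu_kind { CPU_US12, CPU_USIII, CPU_SUN4V };

static void
setup_funcs(enum cpu_kind kind)
{
	if (kind == CPU_SUN4V)
		flush_page = flush_nop;		/* no D$ flushing needed */
	else if (kind == CPU_USIII)
		flush_page = flush_usiii;
	/* otherwise keep the usI/II default */
}

int
main(void)
{
	setup_funcs(CPU_USIII);
	flush_page(0x2000);	/* every caller indirects through the pointer */
	return 0;
}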

cvs diff -r1.7 -r1.8 src/sys/arch/sparc64/sparc64/cache.c

--- src/sys/arch/sparc64/sparc64/cache.c 2011/06/06 01:16:48 1.7
+++ src/sys/arch/sparc64/sparc64/cache.c 2011/06/06 02:49:39 1.8
@@ -1,4 +1,4 @@
-/*	$NetBSD: cache.c,v 1.7 2011/06/06 01:16:48 mrg Exp $	*/
+/*	$NetBSD: cache.c,v 1.8 2011/06/06 02:49:39 mrg Exp $	*/
 
 /*
  * Copyright (c) 2011 Matthew R. Green
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.7 2011/06/06 01:16:48 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.8 2011/06/06 02:49:39 mrg Exp $");
 
 #include "opt_multiprocessor.h"
 
@@ -58,48 +58,49 @@
 	sp_blast_dcache(dcache_size, dcache_line_size);
 }
 
-#if 0
 static void
 sp_dcache_flush_page_cpuset(paddr_t pa, sparc64_cpuset_t cs)
 {
 
 	dcache_flush_page(pa);
 }
 
 void (*dcache_flush_page)(paddr_t) = dcache_flush_page_us;
 void (*dcache_flush_page_cpuset)(paddr_t, sparc64_cpuset_t) =
 	sp_dcache_flush_page_cpuset;
-#endif
 void (*blast_dcache)(void) = blast_dcache_real;
 void (*blast_icache)(void) = blast_icache_us;
 
+#ifdef MULTIPROCESSOR
+void (*sp_dcache_flush_page)(paddr_t) = dcache_flush_page_us;
+#endif
+
 void
 cache_setup_funcs(void)
 {
 
 	if (CPU_ISSUN4US || CPU_ISSUN4V) {
-#if 0
 		dcache_flush_page = (void (*)(paddr_t)) cache_nop;
+#ifdef MULTIPROCESSOR
+		/* XXXMRG shouldn't be necessary -- only caller is nop'ed out */
+		sp_dcache_flush_page = (void (*)(paddr_t)) cache_nop;
 #endif
 		blast_dcache = cache_nop;
 		blast_icache = cache_nop;
 	} else {
 		if (CPU_IS_USIII_UP()) {
-#if 0
 			dcache_flush_page = dcache_flush_page_usiii;
+#ifdef MULTIPROCESSOR
+			sp_dcache_flush_page = dcache_flush_page_usiii;
 #endif
 			blast_icache = blast_icache_usiii;
-printf("set usIII dcache/icache funcs\n");
 		}
 #ifdef MULTIPROCESSOR
 		if (sparc_ncpus > 1 && (boothowto & RB_MD1) == 0) {
-printf("set MP dcache funcs\n");
-#if 0
 			dcache_flush_page = smp_dcache_flush_page_allcpu;
 			dcache_flush_page_cpuset = smp_dcache_flush_page_cpuset;
-#endif
 			blast_dcache = smp_blast_dcache;
 		}
 #endif
 	}
 }
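
The point of sp_dcache_flush_page: on a MULTIPROCESSOR kernel,
cache_setup_funcs() aims the generic dcache_flush_page pointer at
smp_dcache_flush_page_allcpu, so the SMP code needs a separate handle
on the raw local-CPU flush; calling back through the generic pointer
would re-enter the IPI broadcast.  A sketch of the hazard, with
hypothetical names:

/* Shows the self-recursion that a separate sp_ pointer avoids (hypothetical names). */
#include <stdio.h>

typedef unsigned long paddr_t;

static void smp_flush_allcpu(paddr_t);

static void
local_flush(paddr_t pa)
{
	printf("flush %#lx on this cpu only\n", pa);
}

/* Generic entry point: on MP kernels this is aimed at the broadcast version. */
static void (*flush_page)(paddr_t) = smp_flush_allcpu;
/* The raw single-processor flush stays reachable under its own pointer. */
static void (*sp_flush_page)(paddr_t) = local_flush;

static void
smp_flush_allcpu(paddr_t pa)
{
	/* ... IPI the other CPUs to flush pa from their D$ ... */

	sp_flush_page(pa);	/* right: local flush via the raw pointer */
	/* flush_page(pa) here would re-enter smp_flush_allcpu forever */
}

int
main(void)
{
	flush_page(0x2000);
	return 0;
}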

cvs diff -r1.21 -r1.22 src/sys/arch/sparc64/sparc64/cache.h

--- src/sys/arch/sparc64/sparc64/cache.h 2011/06/06 01:16:48 1.21
+++ src/sys/arch/sparc64/sparc64/cache.h 2011/06/06 02:49:39 1.22
@@ -1,4 +1,4 @@
-/*	$NetBSD: cache.h,v 1.21 2011/06/06 01:16:48 mrg Exp $	*/
+/*	$NetBSD: cache.h,v 1.22 2011/06/06 02:49:39 mrg Exp $	*/
 
 /*
  * Copyright (c) 2011 Matthew R. Green
@@ -101,18 +101,7 @@
 void	cache_flush_phys_us(paddr_t, psize_t, int);
 void	cache_flush_phys_usiii(paddr_t, psize_t, int);
 
 static __inline__ void
-dcache_flush_page(paddr_t pa)
-{
-	if (CPU_ISSUN4US || CPU_ISSUN4V)
-		return;
-	if (CPU_IS_USIII_UP())
-		dcache_flush_page_usiii(pa);
-	else
-		dcache_flush_page_us(pa);
-}
-
-static __inline__ void
 cache_flush_phys(paddr_t pa, psize_t size, int ecache)
 {
 	if (CPU_IS_USIII_UP() || CPU_IS_SPARC64_V_UP())
@@ -146,25 +135,25 @@
 	sp_tlb_flush_all_us();
 }
 
-#if 0
 extern void (*dcache_flush_page)(paddr_t);
 extern void (*dcache_flush_page_cpuset)(paddr_t, sparc64_cpuset_t);
-#endif
 extern void (*blast_dcache)(void);
 extern void (*blast_icache)(void);
 
 void cache_setup_funcs(void);
 
 #ifdef MULTIPROCESSOR
+extern void (*sp_dcache_flush_page)(paddr_t);
+
 void smp_tlb_flush_pte(vaddr_t, struct pmap *);
 void smp_dcache_flush_page_cpuset(paddr_t pa, sparc64_cpuset_t);
+void smp_dcache_flush_page_allcpu(paddr_t pa);
 void smp_blast_dcache(void);
-#define	tlb_flush_pte(va,pm )	smp_tlb_flush_pte(va, pm)
+#define	tlb_flush_pte(va,pm)	smp_tlb_flush_pte(va, pm)
 #define	dcache_flush_page_all(pa) smp_dcache_flush_page_cpuset(pa, cpus_active)
 #define	dcache_flush_page_cpuset(pa,cs) smp_dcache_flush_page_cpuset(pa, cs)
 #else
 #define	tlb_flush_pte(va,pm)	sp_tlb_flush_pte(va, (pm)->pm_ctx[0])
 #define	dcache_flush_page_all(pa) dcache_flush_page(pa)
 #define	dcache_flush_page_cpuset(pa,cs) dcache_flush_page(pa)
-
 #endif

cvs diff -r1.41 -r1.42 src/sys/arch/sparc64/sparc64/ipifuncs.c

--- src/sys/arch/sparc64/sparc64/ipifuncs.c 2011/06/06 01:16:48 1.41
+++ src/sys/arch/sparc64/sparc64/ipifuncs.c 2011/06/06 02:49:39 1.42
@@ -1,4 +1,4 @@
-/*	$NetBSD: ipifuncs.c,v 1.41 2011/06/06 01:16:48 mrg Exp $	*/
+/*	$NetBSD: ipifuncs.c,v 1.42 2011/06/06 02:49:39 mrg Exp $	*/
 
 /*-
  * Copyright (c) 2004 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.41 2011/06/06 01:16:48 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.42 2011/06/06 02:49:39 mrg Exp $");
 
 #include "opt_ddb.h"
 
@@ -422,38 +422,30 @@
 {
 	ipifunc_t func;
 
-	if (CPU_ISSUN4US || CPU_ISSUN4V)
-		return;
-
 	if (CPU_IS_USIII_UP())
 		func = sparc64_ipi_dcache_flush_page_usiii;
 	else
 		func = sparc64_ipi_dcache_flush_page_us;
 
 	sparc64_multicast_ipi(activecpus, func, pa, dcache_line_size);
-	dcache_flush_page(pa);
+	sp_dcache_flush_page(pa);
 }
 
-#if 0
 void
 smp_dcache_flush_page_allcpu(paddr_t pa)
 {
 
 	smp_dcache_flush_page_cpuset(pa, cpus_active);
 }
-#endif
 
 /*
  * Flush the D$ on all CPUs.
  */
 void
 smp_blast_dcache(void)
 {
 
-	if (CPU_ISSUN4US || CPU_ISSUN4V)
-		return;
-
 	sparc64_multicast_ipi(cpus_active, sparc64_ipi_blast_dcache,
 	    dcache_size, dcache_line_size);
 	sp_blast_dcache(dcache_size, dcache_line_size);
 }
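
After this change smp_dcache_flush_page_cpuset() above has the usual
broadcast-then-local shape: pick the IPI handler for the CPU type,
multicast it to the other CPUs in the set, then flush the local D$
through the new sp_dcache_flush_page pointer.  A standalone sketch of
that shape, with IPI delivery stubbed out and hypothetical names:

/* Broadcast-then-local D$ flush, reduced to a toy (hypothetical names). */
#include <stdio.h>

typedef unsigned long paddr_t;
typedef unsigned long cpuset_t;

static const int self = 0;		/* pretend we are cpu0 */

static void
send_ipi(int cpu, paddr_t pa)
{
	printf("IPI to cpu%d: flush %#lx\n", cpu, pa);
}

static void
local_flush(paddr_t pa)
{
	printf("cpu%d: local flush of %#lx\n", self, pa);
}

static void (*sp_flush_page)(paddr_t) = local_flush;

static void
smp_flush_page_cpuset(paddr_t pa, cpuset_t cs)
{
	int cpu;

	cs &= ~(1UL << self);		/* never IPI ourselves */
	for (cpu = 0; cpu < 64; cpu++)
		if (cs & (1UL << cpu))
			send_ipi(cpu, pa);
	sp_flush_page(pa);		/* then flush the local D$ directly */
}

int
main(void)
{
	smp_flush_page_cpuset(0x2000, 0x7);	/* cpus 0..2 in the set */
	return 0;
}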