Wed Jun 8 17:47:48 2011 UTC ()
Make GDIUM build again after the matt-nb5-mips64 merge. Untested, as I don't
have this hardware, but I'll use this as a base for Lemote Fulong support.


(bouyer)
diff -r1.1 -r1.2 src/sys/arch/evbmips/conf/files.gdium
diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_dma.c
diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_intr.c
diff -r1.13 -r1.14 src/sys/arch/evbmips/gdium/machdep.c
diff -r1.46 -r1.47 src/sys/arch/mips/mips/cache.c
diff -r1.3 -r1.4 src/sys/arch/mips/mips/cache_ls2.c

cvs diff -r1.1 -r1.2 src/sys/arch/evbmips/conf/files.gdium (expand / switch to unified diff)

--- src/sys/arch/evbmips/conf/files.gdium 2009/08/06 00:50:25 1.1
+++ src/sys/arch/evbmips/conf/files.gdium 2011/06/08 17:47:48 1.2
@@ -1,31 +1,30 @@ @@ -1,31 +1,30 @@
1# $NetBSD: files.gdium,v 1.1 2009/08/06 00:50:25 matt Exp $ 1# $NetBSD: files.gdium,v 1.2 2011/06/08 17:47:48 bouyer Exp $
2 2
3file arch/evbmips/gdium/gdium_bus_io.c 3file arch/evbmips/gdium/gdium_bus_io.c
4file arch/evbmips/gdium/gdium_bus_mem.c 4file arch/evbmips/gdium/gdium_bus_mem.c
5file arch/evbmips/gdium/gdium_dma.c 5file arch/evbmips/gdium/gdium_dma.c
6file arch/evbmips/gdium/gdium_genfb.c wsdisplay 6file arch/evbmips/gdium/gdium_genfb.c wsdisplay
7file arch/evbmips/gdium/gdium_intr.c 7file arch/evbmips/gdium/gdium_intr.c
8 8
9file arch/evbmips/gdium/autoconf.c 9file arch/evbmips/gdium/autoconf.c
10file arch/evbmips/gdium/machdep.c 10file arch/evbmips/gdium/machdep.c
11 11
12file arch/mips/mips/bus_dma.c 12file arch/mips/mips/bus_dma.c
13file arch/evbmips/evbmips/disksubr.c 13file arch/evbmips/evbmips/disksubr.c
14file arch/evbmips/evbmips/interrupt.c 14file arch/evbmips/evbmips/interrupt.c
15 15
16file arch/mips/mips/mips3_clock.c 16file arch/mips/mips/mips3_clock.c
17file arch/mips/mips/mips3_clockintr.c 17file arch/mips/mips/mips3_clockintr.c
18file arch/mips/mips/softintr.c 
19 18
20# The autoconfiguration root. 19# The autoconfiguration root.
21device mainbus { [addr = -1] } 20device mainbus { [addr = -1] }
22attach mainbus at root 21attach mainbus at root
23file arch/evbmips/gdium/mainbus.c mainbus 22file arch/evbmips/gdium/mainbus.c mainbus
24 23
25device cpu 24device cpu
26attach cpu at mainbus 25attach cpu at mainbus
27file arch/evbmips/evbmips/cpu.c cpu 26file arch/evbmips/evbmips/cpu.c cpu
28 27
29# Machine-independent I2O drivers. 28# Machine-independent I2O drivers.
30include "dev/i2o/files.i2o" 29include "dev/i2o/files.i2o"
31 30

cvs diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_dma.c (expand / switch to unified diff)

--- src/sys/arch/evbmips/gdium/gdium_dma.c 2009/08/06 16:37:01 1.2
+++ src/sys/arch/evbmips/gdium/gdium_dma.c 2011/06/08 17:47:48 1.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: gdium_dma.c,v 1.2 2009/08/06 16:37:01 matt Exp $ */ 1/* $NetBSD: gdium_dma.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -24,51 +24,40 @@ @@ -24,51 +24,40 @@
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Platform-specific DMA support for the Gdium Liberty 1000. 33 * Platform-specific DMA support for the Gdium Liberty 1000.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: gdium_dma.c,v 1.2 2009/08/06 16:37:01 matt Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: gdium_dma.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $");
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40 40
41#define _MIPS_BUS_DMA_PRIVATE 41#define _MIPS_BUS_DMA_PRIVATE
42#include <machine/bus.h> 42#include <machine/bus.h>
43 43
44#include <evbmips/gdium/gdiumreg.h> 44#include <evbmips/gdium/gdiumreg.h>
45#include <evbmips/gdium/gdiumvar.h> 45#include <evbmips/gdium/gdiumvar.h>
46 46
47void 47void
48gdium_dma_init(struct gdium_config *gc) 48gdium_dma_init(struct gdium_config *gc)
49{ 49{
50 bus_dma_tag_t t; 50 bus_dma_tag_t t;
51 51
52 /* 52 /*
53 * Initialize the DMA tag used for PCI DMA. 53 * Initialize the DMA tag used for PCI DMA.
54 */ 54 */
55 t = &gc->gc_pci_dmat; 55 t = &gc->gc_pci_dmat;
56 t->_cookie = gc; 56 t->_cookie = gc;
57 t->_wbase = GDIUM_DMA_PCI_PCIBASE; 57 t->_wbase = GDIUM_DMA_PCI_PCIBASE;
58 t->_physbase = GDIUM_DMA_PCI_PHYSBASE; 58 t->_bounce_alloc_lo = GDIUM_DMA_PCI_PHYSBASE;
59 t->_wsize = GDIUM_DMA_PCI_SIZE; 59 t->_bounce_alloc_hi = GDIUM_DMA_PCI_PHYSBASE + GDIUM_DMA_PCI_SIZE;
60 t->_dmamap_create = _bus_dmamap_create; 60 t->_dmamap_ops = mips_bus_dmamap_ops;
61 t->_dmamap_destroy = _bus_dmamap_destroy; 61 t->_dmamem_ops = mips_bus_dmamem_ops;
62 t->_dmamap_load = _bus_dmamap_load; 62 t->_dmatag_ops = mips_bus_dmatag_ops;
63 t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf; 
64 t->_dmamap_load_uio = _bus_dmamap_load_uio; 
65 t->_dmamap_load_raw = _bus_dmamap_load_raw; 
66 t->_dmamap_unload = _bus_dmamap_unload; 
67 t->_dmamap_sync = _bus_dmamap_sync; 
68 
69 t->_dmamem_alloc = _bus_dmamem_alloc; 
70 t->_dmamem_free = _bus_dmamem_free; 
71 t->_dmamem_map = _bus_dmamem_map; 
72 t->_dmamem_unmap = _bus_dmamem_unmap; 
73 t->_dmamem_mmap = _bus_dmamem_mmap; 
74} 63}

cvs diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_intr.c (expand / switch to unified diff)

--- src/sys/arch/evbmips/gdium/gdium_intr.c 2009/08/07 01:27:14 1.2
+++ src/sys/arch/evbmips/gdium/gdium_intr.c 2011/06/08 17:47:48 1.3
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: gdium_intr.c,v 1.2 2009/08/07 01:27:14 matt Exp $ */ 1/* $NetBSD: gdium_intr.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -27,27 +27,30 @@ @@ -27,27 +27,30 @@
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Platform-specific interrupt support for the Algorithmics P-6032. 33 * Platform-specific interrupt support for the Algorithmics P-6032.
34 * 34 *
35 * The Algorithmics P-6032's interrupts are wired to GPIO pins 35 * The Algorithmics P-6032's interrupts are wired to GPIO pins
36 * on the BONITO system controller. 36 * on the BONITO system controller.
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: gdium_intr.c,v 1.2 2009/08/07 01:27:14 matt Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: gdium_intr.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $");
 41
 42#define __INTR_PRIVATE
 43
41 44
42#include "opt_ddb.h" 45#include "opt_ddb.h"
43 46
44#include <sys/param.h> 47#include <sys/param.h>
45#include <sys/queue.h> 48#include <sys/queue.h>
46#include <sys/malloc.h> 49#include <sys/malloc.h>
47#include <sys/systm.h> 50#include <sys/systm.h>
48#include <sys/device.h> 51#include <sys/device.h>
49#include <sys/kernel.h> 52#include <sys/kernel.h>
50#include <sys/cpu.h> 53#include <sys/cpu.h>
51 54
52#include <machine/bus.h> 55#include <machine/bus.h>
53#include <machine/intr.h> 56#include <machine/intr.h>
@@ -119,98 +122,78 @@ struct gdium_intrhead { @@ -119,98 +122,78 @@ struct gdium_intrhead {
119 int intr_refcnt; 122 int intr_refcnt;
120}; 123};
121struct gdium_intrhead gdium_intrtab[__arraycount(gdium_irqmap)]; 124struct gdium_intrhead gdium_intrtab[__arraycount(gdium_irqmap)];
122 125
123#define NINTRS 2 /* MIPS INT0 - INT1 */ 126#define NINTRS 2 /* MIPS INT0 - INT1 */
124 127
125struct gdium_cpuintr { 128struct gdium_cpuintr {
126 LIST_HEAD(, evbmips_intrhand) cintr_list; 129 LIST_HEAD(, evbmips_intrhand) cintr_list;
127 struct evcnt cintr_count; 130 struct evcnt cintr_count;
128 int cintr_refcnt; 131 int cintr_refcnt;
129}; 132};
130 133
131struct gdium_cpuintr gdium_cpuintrs[NINTRS]; 134struct gdium_cpuintr gdium_cpuintrs[NINTRS];
132const char *gdium_cpuintrnames[NINTRS] = { 135const char * const gdium_cpuintrnames[NINTRS] = {
133 "int 0 (pci)", 136 "int 0 (pci)",
134 "int 1 (errors)", 137 "int 1 (errors)",
135}; 138};
136 139
137/* 140/*
138 * This is a mask of bits to clear in the SR when we go to a 141 * This is a mask of bits to clear in the SR when we go to a
139 * given hardware interrupt priority level. 142 * given hardware interrupt priority level.
140 */ 143 */
141const uint32_t ipl_sr_bits[_IPL_N] = { 144static const struct ipl_sr_map gdium_ipl_sr_map = {
142 [IPL_NONE] = 0, 145 .sr_bits = {
143 [IPL_SOFTCLOCK] = 146 [IPL_NONE] = 0,
144 MIPS_SOFT_INT_MASK_0, 147 [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
145#if IPL_SOFTCLOCK != IPL_SOFTBIO 148 [IPL_SOFTNET] = MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1,
146 [IPL_SOFTBIO] = 
147 MIPS_SOFT_INT_MASK_0, 
148#endif 
149 [IPL_SOFTNET] = 
150 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1, 
151#if IPL_SOFTNET != IPL_SOFTSERIAL 
152 [IPL_SOFTSERIAL] = 
153 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1, 
154#endif 
155 [IPL_VM] = 149 [IPL_VM] =
156 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 | 150 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
157 MIPS_INT_MASK_0 | 151 MIPS_INT_MASK_0 |
158 MIPS_INT_MASK_1 | 152 MIPS_INT_MASK_1 |
159 MIPS_INT_MASK_2 | 153 MIPS_INT_MASK_2 |
160 MIPS_INT_MASK_3 | 154 MIPS_INT_MASK_3 |
161 MIPS_INT_MASK_4, 155 MIPS_INT_MASK_4,
162 [IPL_SCHED] = 156 [IPL_SCHED] =
163 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 | 157 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
164 MIPS_INT_MASK_0 | 158 MIPS_INT_MASK_0 |
165 MIPS_INT_MASK_1 | 159 MIPS_INT_MASK_1 |
166 MIPS_INT_MASK_2 | 160 MIPS_INT_MASK_2 |
167 MIPS_INT_MASK_3 | 161 MIPS_INT_MASK_3 |
168 MIPS_INT_MASK_4 | 162 MIPS_INT_MASK_4 |
169 MIPS_INT_MASK_5, 163 MIPS_INT_MASK_5,
 164 [IPL_DDB] = MIPS_INT_MASK,
 165 [IPL_HIGH] = MIPS_INT_MASK,
 166 },
170}; 167};
171 168
172/* 169int gdium_pci_intr_map(const struct pci_attach_args *, pci_intr_handle_t *);
173 * This is a mask of bits to clear in the SR when we go to a 
174 * given software interrupt priority level. 
175 * Hardware ipls are port/board specific. 
176 */ 
177const uint32_t mips_ipl_si_to_sr[] = { 
178 [IPL_SOFTCLOCK-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0, 
179#if IPL_SOFTCLOCK != IPL_SOFTBIO 
180 [IPL_SOFTBIO-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0, 
181#endif 
182 [IPL_SOFTNET-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_1, 
183#if IPL_SOFTNET != IPL_SOFTSERIAL 
184 [IPL_SOFTSERIAL-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_1, 
185#endif 
186}; 
187 
188int gdium_pci_intr_map(struct pci_attach_args *, pci_intr_handle_t *); 
189const char *gdium_pci_intr_string(void *, pci_intr_handle_t); 170const char *gdium_pci_intr_string(void *, pci_intr_handle_t);
190const struct evcnt *gdium_pci_intr_evcnt(void *, pci_intr_handle_t); 171const struct evcnt *gdium_pci_intr_evcnt(void *, pci_intr_handle_t);
191void *gdium_pci_intr_establish(void *, pci_intr_handle_t, int, 172void *gdium_pci_intr_establish(void *, pci_intr_handle_t, int,
192 int (*)(void *), void *); 173 int (*)(void *), void *);
193void gdium_pci_intr_disestablish(void *, void *); 174void gdium_pci_intr_disestablish(void *, void *);
194void gdium_pci_conf_interrupt(void *, int, int, int, int, int *); 175void gdium_pci_conf_interrupt(void *, int, int, int, int, int *);
195 176
196void 177void
197evbmips_intr_init(void) 178evbmips_intr_init(void)
198{ 179{
199 struct gdium_config *gc = &gdium_configuration; 180 struct gdium_config * const gc = &gdium_configuration;
200 struct bonito_config *bc = &gc->gc_bonito; 181 struct bonito_config *bc = &gc->gc_bonito;
201 const struct gdium_irqmap *irqmap; 182 const struct gdium_irqmap *irqmap;
202 uint32_t intbit; 183 uint32_t intbit;
203 int i; 184 size_t i;
 185
 186 ipl_sr_map = gdium_ipl_sr_map;
204 187
205 for (i = 0; i < NINTRS; i++) { 188 for (i = 0; i < NINTRS; i++) {
206 LIST_INIT(&gdium_cpuintrs[i].cintr_list); 189 LIST_INIT(&gdium_cpuintrs[i].cintr_list);
207 evcnt_attach_dynamic(&gdium_cpuintrs[i].cintr_count, 190 evcnt_attach_dynamic(&gdium_cpuintrs[i].cintr_count,
208 EVCNT_TYPE_INTR, NULL, "mips", gdium_cpuintrnames[i]); 191 EVCNT_TYPE_INTR, NULL, "mips", gdium_cpuintrnames[i]);
209 } 192 }
210 //evcnt_attach_static(&mips_int5_evcnt); 193 //evcnt_attach_static(&mips_int5_evcnt);
211 194
212 for (i = 0; i < __arraycount(gdium_irqmap); i++) { 195 for (i = 0; i < __arraycount(gdium_irqmap); i++) {
213 irqmap = &gdium_irqmap[i]; 196 irqmap = &gdium_irqmap[i];
214 intbit = 1 << irqmap->irqidx; 197 intbit = 1 << irqmap->irqidx;
215 198
216 evcnt_attach_dynamic(&gdium_intrtab[i].intr_count, 199 evcnt_attach_dynamic(&gdium_intrtab[i].intr_count,
@@ -306,64 +289,59 @@ evbmips_intr_disestablish(void *cookie) @@ -306,64 +289,59 @@ evbmips_intr_disestablish(void *cookie)
306 /* 289 /*
307 * Now, disable it, if there is nothing remaining on the 290 * Now, disable it, if there is nothing remaining on the
308 * list. 291 * list.
309 */ 292 */
310 if (gdium_intrtab[ih->ih_irq].intr_refcnt-- == 1) 293 if (gdium_intrtab[ih->ih_irq].intr_refcnt-- == 1)
311 REGVAL(BONITO_INTENCLR) = (1 << ih->ih_irq); 294 REGVAL(BONITO_INTENCLR) = (1 << ih->ih_irq);
312 295
313 splx(s); 296 splx(s);
314 297
315 free(ih, M_DEVBUF); 298 free(ih, M_DEVBUF);
316} 299}
317 300
318void 301void
319evbmips_iointr(uint32_t status, uint32_t cause, uint32_t pc, 302evbmips_iointr(int ipl, vaddr_t pc, uint32_t ipending)
320 uint32_t ipending) 
321{ 303{
322 const struct gdium_irqmap *irqmap; 304 const struct gdium_irqmap *irqmap;
323 struct evbmips_intrhand *ih; 305 struct evbmips_intrhand *ih;
324 int level; 306 int level;
325 uint32_t isr; 307 uint32_t isr;
326 308
327 /* 309 /*
328 * Read the interrupt pending registers, mask them with the 310 * Read the interrupt pending registers, mask them with the
329 * ones we have enabled, and service them in order of decreasing 311 * ones we have enabled, and service them in order of decreasing
330 * priority. 312 * priority.
331 */ 313 */
332 isr = REGVAL(BONITO_INTISR) & REGVAL(BONITO_INTEN); 314 isr = REGVAL(BONITO_INTISR) & REGVAL(BONITO_INTEN);
333 for (level = 1; level >= 0; level--) { 315 for (level = 1; level >= 0; level--) {
334 if ((ipending & (MIPS_INT_MASK_4 << level)) == 0) 316 if ((ipending & (MIPS_INT_MASK_4 << level)) == 0)
335 continue; 317 continue;
336 gdium_cpuintrs[level].cintr_count.ev_count++; 318 gdium_cpuintrs[level].cintr_count.ev_count++;
337 LIST_FOREACH (ih, &gdium_cpuintrs[level].cintr_list, ih_q) { 319 LIST_FOREACH (ih, &gdium_cpuintrs[level].cintr_list, ih_q) {
338 irqmap = &gdium_irqmap[ih->ih_irq]; 320 irqmap = &gdium_irqmap[ih->ih_irq];
339 if (isr & (1 << ih->ih_irq)) { 321 if (isr & (1 << ih->ih_irq)) {
340 gdium_intrtab[ih->ih_irq].intr_count.ev_count++; 322 gdium_intrtab[ih->ih_irq].intr_count.ev_count++;
341 (*ih->ih_func)(ih->ih_arg); 323 (*ih->ih_func)(ih->ih_arg);
342 } 324 }
343 } 325 }
344 cause &= ~(MIPS_INT_MASK_0 << level); 
345 } 326 }
346 
347 /* Re-enable anything that we have processed. */ 
348 _splset(MIPS_SR_INT_IE | ((status & ~cause) & MIPS_HARD_INT_MASK)); 
349} 327}
350 328
351/***************************************************************************** 329/*****************************************************************************
352 * PCI interrupt support 330 * PCI interrupt support
353 *****************************************************************************/ 331 *****************************************************************************/
354 332
355int 333int
356gdium_pci_intr_map(struct pci_attach_args *pa, 334gdium_pci_intr_map(const struct pci_attach_args *pa,
357 pci_intr_handle_t *ihp) 335 pci_intr_handle_t *ihp)
358{ 336{
359 static const int8_t pciirqmap[5/*device*/] = { 337 static const int8_t pciirqmap[5/*device*/] = {
360 GDIUM_IRQ_PCI_INTC, /* 13: PCI 802.11 */ 338 GDIUM_IRQ_PCI_INTC, /* 13: PCI 802.11 */
361 GDIUM_IRQ_PCI_INTA, /* 14: SM501 */ 339 GDIUM_IRQ_PCI_INTA, /* 14: SM501 */
362 GDIUM_IRQ_PCI_INTB, /* 15: NEC USB (2 func) */ 340 GDIUM_IRQ_PCI_INTB, /* 15: NEC USB (2 func) */
363 GDIUM_IRQ_PCI_INTD, /* 16: Ethernet */ 341 GDIUM_IRQ_PCI_INTD, /* 16: Ethernet */
364 GDIUM_IRQ_PCI_INTC, /* 17: NEC USB (2 func) */ 342 GDIUM_IRQ_PCI_INTC, /* 17: NEC USB (2 func) */
365 }; 343 };
366 pcitag_t bustag = pa->pa_intrtag; 344 pcitag_t bustag = pa->pa_intrtag;
367 int buspin = pa->pa_intrpin; 345 int buspin = pa->pa_intrpin;
368 pci_chipset_tag_t pc = pa->pa_pc; 346 pci_chipset_tag_t pc = pa->pa_pc;
369 int device; 347 int device;

cvs diff -r1.13 -r1.14 src/sys/arch/evbmips/gdium/machdep.c (expand / switch to unified diff)

--- src/sys/arch/evbmips/gdium/machdep.c 2011/02/20 07:48:34 1.13
+++ src/sys/arch/evbmips/gdium/machdep.c 2011/06/08 17:47:48 1.14
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: machdep.c,v 1.13 2011/02/20 07:48:34 matt Exp $ */ 1/* $NetBSD: machdep.c,v 1.14 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright 2001, 2002 Wasabi Systems, Inc. 4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -64,27 +64,27 @@ @@ -64,27 +64,27 @@
64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 * SUCH DAMAGE. 70 * SUCH DAMAGE.
71 * 71 *
72 * @(#)machdep.c 8.3 (Berkeley) 1/12/94 72 * @(#)machdep.c 8.3 (Berkeley) 1/12/94
73 * from: Utah Hdr: machdep.c 1.63 91/04/24 73 * from: Utah Hdr: machdep.c 1.63 91/04/24
74 */ 74 */
75 75
76#include <sys/cdefs.h> 76#include <sys/cdefs.h>
77__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.13 2011/02/20 07:48:34 matt Exp $"); 77__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.14 2011/06/08 17:47:48 bouyer Exp $");
78 78
79#include "opt_ddb.h" 79#include "opt_ddb.h"
80#include "opt_execfmt.h" 80#include "opt_execfmt.h"
81#include "opt_modular.h" 81#include "opt_modular.h"
82 82
83#include <sys/param.h> 83#include <sys/param.h>
84#include <sys/systm.h> 84#include <sys/systm.h>
85#include <sys/kernel.h> 85#include <sys/kernel.h>
86#include <sys/buf.h> 86#include <sys/buf.h>
87#include <sys/reboot.h> 87#include <sys/reboot.h>
88#include <sys/mount.h> 88#include <sys/mount.h>
89#include <sys/kcore.h> 89#include <sys/kcore.h>
90#include <sys/boot_flag.h> 90#include <sys/boot_flag.h>
@@ -96,49 +96,48 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v  @@ -96,49 +96,48 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v
96 96
97#include <dev/cons.h> 97#include <dev/cons.h>
98 98
99#include "ksyms.h" 99#include "ksyms.h"
100 100
101#if NKSYMS || defined(DDB) || defined(MODULAR) 101#if NKSYMS || defined(DDB) || defined(MODULAR)
102#include <machine/db_machdep.h> 102#include <machine/db_machdep.h>
103#include <ddb/db_extern.h> 103#include <ddb/db_extern.h>
104#endif 104#endif
105 105
106#include <machine/cpu.h> 106#include <machine/cpu.h>
107#include <machine/psl.h> 107#include <machine/psl.h>
108 108
 109#include <mips/locore.h>
 110
109#include <mips/bonito/bonitoreg.h> 111#include <mips/bonito/bonitoreg.h>
110#include <evbmips/gdium/gdiumvar.h> 112#include <evbmips/gdium/gdiumvar.h>
111 113
112#include "com.h" 114#include "com.h"
113#if NCOM > 0 115#if NCOM > 0
114#include <dev/ic/comreg.h> 116#include <dev/ic/comreg.h>
115#include <dev/ic/comvar.h> 117#include <dev/ic/comvar.h>
116 118
117int comcnrate = 38400; /* XXX should be config option */ 119int comcnrate = 38400; /* XXX should be config option */
118#endif /* NCOM > 0 */ 120#endif /* NCOM > 0 */
119 121
120struct gdium_config gdium_configuration = { 122struct gdium_config gdium_configuration = {
121 .gc_bonito = { 123 .gc_bonito = {
122 .bc_adbase = 11, /* magic */ 124 .bc_adbase = 11, /* magic */
123 }, 125 },
124}; 126};
125 127
126/* For sysctl_hw. */ 128/* For sysctl_hw. */
127extern char cpu_model[]; 129extern char cpu_model[];
128 130
129/* Our exported CPU info; we can have only one. */  
130struct cpu_info cpu_info_store; 
131 
132/* Maps for VM objects. */ 131/* Maps for VM objects. */
133struct vm_map *phys_map = NULL; 132struct vm_map *phys_map = NULL;
134 133
135int netboot; /* Are we netbooting? */ 134int netboot; /* Are we netbooting? */
136 135
137phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; 136phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
138int mem_cluster_cnt; 137int mem_cluster_cnt;
139 138
140void configure(void); 139void configure(void);
141void mach_init(int, char **, char **, void *); 140void mach_init(int, char **, char **, void *);
142 141
143/* 142/*
144 * For some reason, PMON doesn't assign a real address to the Ralink's BAR. 143 * For some reason, PMON doesn't assign a real address to the Ralink's BAR.
@@ -190,52 +189,49 @@ gdium_pci_attach_hook(device_t parent, d @@ -190,52 +189,49 @@ gdium_pci_attach_hook(device_t parent, d
190 } 189 }
191 } 190 }
192#endif 191#endif
193} 192}
194 193
195/* 194/*
196 * Do all the stuff that locore normally does before calling main(). 195 * Do all the stuff that locore normally does before calling main().
197 */ 196 */
198void 197void
199mach_init(int argc, char **argv, char **envp, void *callvec) 198mach_init(int argc, char **argv, char **envp, void *callvec)
200{ 199{
201 struct gdium_config *gc = &gdium_configuration; 200 struct gdium_config *gc = &gdium_configuration;
202 void *kernend; 201 void *kernend;
203 u_long first, last; 
204 struct pcb *pcb0; 
205 vaddr_t v; 
206#ifdef NOTYET 202#ifdef NOTYET
207 char *cp; 203 char *cp;
208 int howto; 204 int howto;
209#endif 205#endif
210 int i; 206 int i;
211 psize_t memsize; 207 psize_t memsize;
212 208
213 extern char edata[], end[]; 209 extern char edata[], end[];
214 210
215 /* 211 /*
216 * Clear the BSS segment. 212 * Clear the BSS segment.
217 */ 213 */
218 kernend = (void *)mips_round_page(end); 214 kernend = (void *)mips_round_page(end);
219 memset(edata, 0, (char *)kernend - edata); 215 memset(edata, 0, (char *)kernend - edata);
220 216
221 /* 217 /*
222 * Set up the exception vectors and CPU-specific function 218 * Set up the exception vectors and CPU-specific function
223 * vectors early on. We need the wbflush() vector set up 219 * vectors early on. We need the wbflush() vector set up
224 * before comcnattach() is called (or at least before the 220 * before comcnattach() is called (or at least before the
225 * first printf() after that is called). 221 * first printf() after that is called).
226 * Also clears the I+D caches. 222 * Also clears the I+D caches.
227 */ 223 */
228 mips_vector_init(NULL, bool); 224 mips_vector_init(NULL, false);
229 225
230 /* set the VM page size */ 226 /* set the VM page size */
231 uvm_setpagesize(); 227 uvm_setpagesize();
232 228
233 memsize = 256*1024*1024; 229 memsize = 256*1024*1024;
234 physmem = btoc(memsize); 230 physmem = btoc(memsize);
235 231
236 bonito_pci_init(&gc->gc_pc, &gc->gc_bonito); 232 bonito_pci_init(&gc->gc_pc, &gc->gc_bonito);
237 /* 233 /*
238 * Override the null bonito_pci_attach_hook with our own to we can 234 * Override the null bonito_pci_attach_hook with our own to we can
239 * fix the ralink (device 13). 235 * fix the ralink (device 13).
240 */ 236 */
241 gc->gc_pc.pc_attach_hook = gdium_pci_attach_hook; 237 gc->gc_pc.pc_attach_hook = gdium_pci_attach_hook;
@@ -251,41 +247,41 @@ mach_init(int argc, char **argv, char ** @@ -251,41 +247,41 @@ mach_init(int argc, char **argv, char **
251 pci_conf_write(&gc->gc_pc, pci_make_tag(&gc->gc_pc, 0, 0, 0), 18, 0); 247 pci_conf_write(&gc->gc_pc, pci_make_tag(&gc->gc_pc, 0, 0, 0), 18, 0);
252 248
253 /* 249 /*
254 * Get the timer from PMON. 250 * Get the timer from PMON.
255 */ 251 */
256 for (i = 0; envp[i] != NULL; i++) { 252 for (i = 0; envp[i] != NULL; i++) {
257 if (!strncmp(envp[i], "cpuclock=", 9)) { 253 if (!strncmp(envp[i], "cpuclock=", 9)) {
258 curcpu()->ci_cpu_freq = 254 curcpu()->ci_cpu_freq =
259 strtoul(&envp[i][9], NULL, 10); 255 strtoul(&envp[i][9], NULL, 10);
260 break; 256 break;
261 } 257 }
262 } 258 }
263  259
264 if (mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT) 260 if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
265 curcpu()->ci_cpu_freq /= 2; 261 curcpu()->ci_cpu_freq /= 2;
266 262
267 /* Compute the number of ticks for hz. */ 263 /* Compute the number of ticks for hz. */
268 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz; 264 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
269 265
270 /* Compute the delay divisor. */ 266 /* Compute the delay divisor. */
271 curcpu()->ci_divisor_delay = 267 curcpu()->ci_divisor_delay =
272 ((curcpu()->ci_cpu_freq + 500000) / 1000000); 268 ((curcpu()->ci_cpu_freq + 500000) / 1000000);
273 269
274 /* 270 /*
275 * Get correct cpu frequency if the CPU runs at twice the 271 * Get correct cpu frequency if the CPU runs at twice the
276 * external/cp0-count frequency. 272 * external/cp0-count frequency.
277 */ 273 */
278 if (mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT) 274 if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
279 curcpu()->ci_cpu_freq *= 2; 275 curcpu()->ci_cpu_freq *= 2;
280 276
281#ifdef DEBUG 277#ifdef DEBUG
282 printf("Timer calibration: %lu cycles/sec\n", 278 printf("Timer calibration: %lu cycles/sec\n",
283 curcpu()->ci_cpu_freq); 279 curcpu()->ci_cpu_freq);
284#endif 280#endif
285 281
286#if NCOM > 0 282#if NCOM > 0
287 /* 283 /*
288 * Delay to allow firmware putchars to complete. 284 * Delay to allow firmware putchars to complete.
289 * FIFO depth * character time. 285 * FIFO depth * character time.
290 * character time = (1000000 / (defaultrate / 10)) 286 * character time = (1000000 / (defaultrate / 10))
291 */ 287 */
@@ -320,116 +316,87 @@ mach_init(int argc, char **argv, char ** @@ -320,116 +316,87 @@ mach_init(int argc, char **argv, char **
320 howto = 0; 316 howto = 0;
321 BOOT_FLAG(*cp, howto); 317 BOOT_FLAG(*cp, howto);
322 if (! howto) 318 if (! howto)
323 printf("bootflag '%c' not recognised\n", *cp); 319 printf("bootflag '%c' not recognised\n", *cp);
324 else 320 else
325 boothowto |= howto; 321 boothowto |= howto;
326 } 322 }
327 } 323 }
328#endif 324#endif
329 325
330 /* 326 /*
331 * Load the rest of the available pages into the VM system. 327 * Load the rest of the available pages into the VM system.
332 */ 328 */
333 first = round_page(MIPS_KSEG0_TO_PHYS(kernend)); 329 mips_page_physload(MIPS_KSEG0_START, (vaddr_t)kernend,
334 last = mem_clusters[0].start + mem_clusters[0].size; 330 mem_clusters, mem_cluster_cnt, NULL, 0);
335 uvm_page_physload(atop(first), atop(last), atop(first), atop(last), 
336 VM_FREELIST_DEFAULT); 
337 331
338 /* 332 /*
339 * Initialize error message buffer (at end of core). 333 * Initialize error message buffer (at end of core).
340 */ 334 */
341 mips_init_msgbuf(); 335 mips_init_msgbuf();
342 336
343 pmap_bootstrap(); 337 pmap_bootstrap();
344 338
345 /* 339 /*
346 * Allocate uarea page for lwp0 and set it. 340 * Allocate uarea page for lwp0 and set it.
347 */ 341 */
348 v = uvm_pageboot_alloc(USPACE);  342 mips_init_lwp0_uarea();
349 uvm_lwp_setuarea(&lwp0, v); 
350 
351 pcb0 = lwp_getpcb(&lwp0); 
352 pcb0->pcb_context[11] = MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */ 
353 
354 lwp0.l_md.md_regs = (struct frame *)(v + USPACE) - 1; 
355 343
356 /* 344 /*
357 * Initialize debuggers, and break into them, if appropriate. 345 * Initialize debuggers, and break into them, if appropriate.
358 */ 346 */
359#if defined(DDB) 347#if defined(DDB)
360 if (boothowto & RB_KDB) 348 if (boothowto & RB_KDB)
361 Debugger(); 349 Debugger();
362#endif 350#endif
363} 351}
364 352
365void 353void
366consinit(void) 354consinit(void)
367{ 355{
368 356
369 /* 357 /*
370 * Everything related to console initialization is done 358 * Everything related to console initialization is done
371 * in mach_init(). 359 * in mach_init().
372 */ 360 */
373} 361}
374 362
375/* 363/*
376 * Allocate memory for variable-sized tables, 364 * Allocate memory for variable-sized tables,
377 */ 365 */
378void 366void
379cpu_startup(void) 367cpu_startup(void)
380{ 368{
381 vaddr_t minaddr, maxaddr; 
382 char pbuf[9]; 
383 
384 /* 369 /*
385 * Good {morning,afternoon,evening,night}. 370 * Do the common startup items.
386 */ 371 */
387 printf("%s%s", copyright, version); 372 cpu_startup_common();
388 format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); 
389 printf("total memory = %s\n", pbuf); 
390 373
391 /* 374 /*
392 * Virtual memory is bootstrapped -- notify the bus spaces 375 * Virtual memory is bootstrapped -- notify the bus spaces
393 * that memory allocation is now safe. 376 * that memory allocation is now safe.
394 */ 377 */
395 gdium_configuration.gc_mallocsafe = 1; 378 gdium_configuration.gc_mallocsafe = 1;
396 379
397 minaddr = 0; 
398 /* 
399 * Allocate a submap for physio. 
400 */ 
401 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 
402 VM_PHYS_SIZE, 0, FALSE, NULL); 
403 
404 /* 
405 * (No need to allocate an mbuf cluster submap. Mbuf clusters 
406 * are allocated via the pool allocator, and we use KSEG to 
407 * map those pages.) 
408 */ 
409 
410 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 
411 printf("avail memory = %s\n", pbuf); 
412} 380}
413 381
414int waittime = -1; 382int waittime = -1;
415 383
416void 384void
417cpu_reboot(int howto, char *bootstr) 385cpu_reboot(int howto, char *bootstr)
418{ 386{
419 387
420 /* Take a snapshot before clobbering any registers. */ 388 /* Take a snapshot before clobbering any registers. */
421 if (curproc) 389 savectx(curpcb);
422 savectx(curpcb); 
423 390
424 if (cold) { 391 if (cold) {
425 howto |= RB_HALT; 392 howto |= RB_HALT;
426 goto haltsys; 393 goto haltsys;
427 } 394 }
428 395
429 /* If "always halt" was specified as a boot flag, obey. */ 396 /* If "always halt" was specified as a boot flag, obey. */
430 if (boothowto & RB_HALT) 397 if (boothowto & RB_HALT)
431 howto |= RB_HALT; 398 howto |= RB_HALT;
432 399
433 boothowto = howto; 400 boothowto = howto;
434 if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) { 401 if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
435 waittime = 0; 402 waittime = 0;

cvs diff -r1.46 -r1.47 src/sys/arch/mips/mips/cache.c (expand / switch to unified diff)

--- src/sys/arch/mips/mips/cache.c 2011/03/15 07:39:22 1.46
+++ src/sys/arch/mips/mips/cache.c 2011/06/08 17:47:48 1.47
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cache.c,v 1.46 2011/03/15 07:39:22 matt Exp $ */ 1/* $NetBSD: cache.c,v 1.47 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright 2001, 2002 Wasabi Systems, Inc. 4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
@@ -58,27 +58,27 @@ @@ -58,27 +58,27 @@
58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF 58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR 59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE 60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE 61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 */ 68 */
69 69
70#include <sys/cdefs.h> 70#include <sys/cdefs.h>
71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.46 2011/03/15 07:39:22 matt Exp $"); 71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.47 2011/06/08 17:47:48 bouyer Exp $");
72 72
73#include "opt_cputype.h" 73#include "opt_cputype.h"
74#include "opt_mips_cache.h" 74#include "opt_mips_cache.h"
75 75
76#include <sys/param.h> 76#include <sys/param.h>
77 77
78#include <uvm/uvm_extern.h> 78#include <uvm/uvm_extern.h>
79 79
80#include <mips/cache.h> 80#include <mips/cache.h>
81#include <mips/locore.h> 81#include <mips/locore.h>
82 82
83#ifdef MIPS1 83#ifdef MIPS1
84#include <mips/cache_r3k.h> 84#include <mips/cache_r3k.h>
@@ -576,60 +576,60 @@ primary_cache_is_2way: @@ -576,60 +576,60 @@ primary_cache_is_2way:
576 r10k_pdcache_wbinv_all; 576 r10k_pdcache_wbinv_all;
577 mco->mco_pdcache_wbinv_range = 577 mco->mco_pdcache_wbinv_range =
578 r10k_pdcache_wbinv_range; 578 r10k_pdcache_wbinv_range;
579 mco->mco_pdcache_wbinv_range_index = 579 mco->mco_pdcache_wbinv_range_index =
580 r10k_pdcache_wbinv_range_index; 580 r10k_pdcache_wbinv_range_index;
581 mco->mco_pdcache_inv_range = 581 mco->mco_pdcache_inv_range =
582 r10k_pdcache_inv_range; 582 r10k_pdcache_inv_range;
583 mco->mco_pdcache_wb_range = 583 mco->mco_pdcache_wb_range =
584 r10k_pdcache_wb_range; 584 r10k_pdcache_wb_range;
585 break; 585 break;
586#endif /* ENABLE_MIPS4_CACHE_R10K */ 586#endif /* ENABLE_MIPS4_CACHE_R10K */
587#ifdef MIPS3_LOONGSON2 587#ifdef MIPS3_LOONGSON2
588 case MIPS_LOONGSON2: 588 case MIPS_LOONGSON2:
589 mips_picache_ways = 4; 589 mci->mci_picache_ways = 4;
590 mips_pdcache_ways = 4; 590 mci->mci_pdcache_ways = 4;
591 591
592 mips3_get_cache_config(csizebase); 592 mips3_get_cache_config(csizebase);
593 593
594 mips_sdcache_line_size = 32; /* don't trust config reg */ 594 mci->mci_sdcache_line_size = 32; /* don't trust config reg */
595 595
596 if (mips_picache_size / mips_picache_ways > PAGE_SIZE || 596 if (mci->mci_picache_size / mci->mci_picache_ways > PAGE_SIZE ||
597 mips_pdcache_size / mips_pdcache_ways > PAGE_SIZE) 597 mci->mci_pdcache_size / mci->mci_pdcache_ways > PAGE_SIZE)
598 mips_cache_virtual_alias = 1; 598 mci->mci_cache_virtual_alias = 1;
599 599
600 mips_cache_ops.mco_icache_sync_all = 600 mco->mco_icache_sync_all =
601 ls2_icache_sync_all; 601 ls2_icache_sync_all;
602 mips_cache_ops.mco_icache_sync_range = 602 mco->mco_icache_sync_range =
603 ls2_icache_sync_range; 603 ls2_icache_sync_range;
604 mips_cache_ops.mco_icache_sync_range_index = 604 mco->mco_icache_sync_range_index =
605 ls2_icache_sync_range_index; 605 ls2_icache_sync_range_index;
606 606
607 mips_cache_ops.mco_pdcache_wbinv_all = 607 mco->mco_pdcache_wbinv_all =
608 ls2_pdcache_wbinv_all; 608 ls2_pdcache_wbinv_all;
609 mips_cache_ops.mco_pdcache_wbinv_range = 609 mco->mco_pdcache_wbinv_range =
610 ls2_pdcache_wbinv_range; 610 ls2_pdcache_wbinv_range;
611 mips_cache_ops.mco_pdcache_wbinv_range_index = 611 mco->mco_pdcache_wbinv_range_index =
612 ls2_pdcache_wbinv_range_index; 612 ls2_pdcache_wbinv_range_index;
613 mips_cache_ops.mco_pdcache_inv_range = 613 mco->mco_pdcache_inv_range =
614 ls2_pdcache_inv_range; 614 ls2_pdcache_inv_range;
615 mips_cache_ops.mco_pdcache_wb_range = 615 mco->mco_pdcache_wb_range =
616 ls2_pdcache_wb_range; 616 ls2_pdcache_wb_range;
617 617
618 /* 618 /*
619 * For current version chips, [the] operating system is 619 * For current version chips, [the] operating system is
620 * obliged to eliminate the potential for virtual aliasing. 620 * obliged to eliminate the potential for virtual aliasing.
621 */ 621 */
622 uvmexp.ncolors = mips_pdcache_ways; 622 uvmexp.ncolors = mci->mci_pdcache_ways;
623 break; 623 break;
624#endif 624#endif
625#endif /* MIPS3 || MIPS4 */ 625#endif /* MIPS3 || MIPS4 */
626 default: 626 default:
627 panic("can't handle primary cache on impl 0x%x", 627 panic("can't handle primary cache on impl 0x%x",
628 MIPS_PRID_IMPL(cpu_id)); 628 MIPS_PRID_IMPL(cpu_id));
629 } 629 }
630 630
631 /* 631 /*
632 * Compute the "way mask" for each cache. 632 * Compute the "way mask" for each cache.
633 */ 633 */
634 if (mci->mci_picache_size) { 634 if (mci->mci_picache_size) {
635 KASSERT(mci->mci_picache_ways != 0); 635 KASSERT(mci->mci_picache_ways != 0);
@@ -758,39 +758,39 @@ primary_cache_is_2way: @@ -758,39 +758,39 @@ primary_cache_is_2way:
758 r10k_sdcache_wbinv_all; 758 r10k_sdcache_wbinv_all;
759 mco->mco_sdcache_wbinv_range = 759 mco->mco_sdcache_wbinv_range =
760 r10k_sdcache_wbinv_range; 760 r10k_sdcache_wbinv_range;
761 mco->mco_sdcache_wbinv_range_index = 761 mco->mco_sdcache_wbinv_range_index =
762 r10k_sdcache_wbinv_range_index; 762 r10k_sdcache_wbinv_range_index;
763 mco->mco_sdcache_inv_range = 763 mco->mco_sdcache_inv_range =
764 r10k_sdcache_inv_range; 764 r10k_sdcache_inv_range;
765 mco->mco_sdcache_wb_range = 765 mco->mco_sdcache_wb_range =
766 r10k_sdcache_wb_range; 766 r10k_sdcache_wb_range;
767 break; 767 break;
768#endif /* ENABLE_MIPS4_CACHE_R10K */ 768#endif /* ENABLE_MIPS4_CACHE_R10K */
769#ifdef MIPS3_LOONGSON2 769#ifdef MIPS3_LOONGSON2
770 case MIPS_LOONGSON2: 770 case MIPS_LOONGSON2:
771 mips_sdcache_ways = 4; 771 mci->mci_sdcache_ways = 4;
772 mips_sdcache_size = 512*1024; 772 mci->mci_sdcache_size = 512*1024;
773 mips_scache_unified = 1; 773 mci->mci_scache_unified = 1;
774 774
775 mips_cache_ops.mco_sdcache_wbinv_all = 775 mco->mco_sdcache_wbinv_all =
776 ls2_sdcache_wbinv_all; 776 ls2_sdcache_wbinv_all;
777 mips_cache_ops.mco_sdcache_wbinv_range = 777 mco->mco_sdcache_wbinv_range =
778 ls2_sdcache_wbinv_range; 778 ls2_sdcache_wbinv_range;
779 mips_cache_ops.mco_sdcache_wbinv_range_index = 779 mco->mco_sdcache_wbinv_range_index =
780 ls2_sdcache_wbinv_range_index; 780 ls2_sdcache_wbinv_range_index;
781 mips_cache_ops.mco_sdcache_inv_range = 781 mco->mco_sdcache_inv_range =
782 ls2_sdcache_inv_range; 782 ls2_sdcache_inv_range;
783 mips_cache_ops.mco_sdcache_wb_range = 783 mco->mco_sdcache_wb_range =
784 ls2_sdcache_wb_range; 784 ls2_sdcache_wb_range;
785 785
786 /* 786 /*
787 * The secondary cache is physically indexed and tagged 787 * The secondary cache is physically indexed and tagged
788 */ 788 */
789 break; 789 break;
790#endif 790#endif
791#endif /* MIPS3 || MIPS4 */ 791#endif /* MIPS3 || MIPS4 */
792 792
793 default: 793 default:
794 panic("can't handle secondary cache on impl 0x%x", 794 panic("can't handle secondary cache on impl 0x%x",
795 MIPS_PRID_IMPL(cpu_id)); 795 MIPS_PRID_IMPL(cpu_id));
796 } 796 }

cvs diff -r1.3 -r1.4 src/sys/arch/mips/mips/cache_ls2.c (expand / switch to unified diff)

--- src/sys/arch/mips/mips/cache_ls2.c 2009/08/11 00:34:29 1.3
+++ src/sys/arch/mips/mips/cache_ls2.c 2011/06/08 17:47:48 1.4
@@ -1,14 +1,14 @@ @@ -1,14 +1,14 @@
1/* $NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $ */ 1/* $NetBSD: cache_ls2.c,v 1.4 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas <matt@3am-software.com>. 8 * by Matt Thomas <matt@3am-software.com>.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
@@ -20,111 +20,114 @@ @@ -20,111 +20,114 @@
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.4 2011/06/08 17:47:48 bouyer Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36 36
37#include <mips/cache.h> 37#include <mips/cache.h>
38#include <mips/cache_ls2.h> 38#include <mips/cache_ls2.h>
39#include <mips/locore.h> 39#include <mips/locore.h>
40 40
41/* 41/*
42 * Cache operations for Loongson2-style caches: 42 * Cache operations for Loongson2-style caches:
43 * 43 *
44 * - 4-way set-associative 32b/l 44 * - 4-way set-associative 32b/l
45 * - Write-back 45 * - Write-back
46 * - Primary is virtually indexed, physically tagged 46 * - Primary is virtually indexed, physically tagged
47 * - Seconadry is physically indexed, physically tagged 47 * - Seconadry is physically indexed, physically tagged
48 */ 48 */
49 49
50#define round_line(x) (((x) + 31) & ~31) 50#define round_line(x) (((x) + 31) & ~31)
51#define trunc_line(x) ((x) & ~31) 51#define trunc_line(x) ((x) & ~31)
52 52
53__asm(".set mips3"); 53__asm(".set mips3");
54 54
55void 55void
56ls2_icache_sync_range(vaddr_t va, vsize_t size) 56ls2_icache_sync_range(vaddr_t va, vsize_t size)
57{ 57{
 58 struct mips_cache_info * const mci = &mips_cache_info;
58 const vaddr_t eva = round_line(va + size); 59 const vaddr_t eva = round_line(va + size);
59 60
60 va = trunc_line(va); 61 va = trunc_line(va);
61 62
62 if (va + mips_picache_size <= eva) { 63 if (va + mci->mci_picache_size <= eva) {
63 ls2_icache_sync_all(); 64 ls2_icache_sync_all();
64 return; 65 return;
65 } 66 }
66 67
67 for (; va + 8 * 32 <= eva; va += 8 * 32) { 68 for (; va + 8 * 32 <= eva; va += 8 * 32) {
68 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV); 69 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
69 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV); 70 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
70 } 71 }
71 72
72 for (; va < eva; va += 32) { 73 for (; va < eva; va += 32) {
73 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV); 74 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
74 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV); 75 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
75 } 76 }
76 77
77 __asm volatile("sync"); 78 __asm volatile("sync");
78} 79}
79 80
80void 81void
81ls2_icache_sync_range_index(vaddr_t va, vsize_t size) 82ls2_icache_sync_range_index(vaddr_t va, vsize_t size)
82{ 83{
83 vaddr_t eva; 84 vaddr_t eva;
 85 struct mips_cache_info * const mci = &mips_cache_info;
84 86
85 /* 87 /*
86 * Since we're doing Index ops, we expect to not be able 88 * Since we're doing Index ops, we expect to not be able
87 * to access the address we've been given. So, get the 89 * to access the address we've been given. So, get the
88 * bits that determine the cache index, and make a KSEG0 90 * bits that determine the cache index, and make a KSEG0
89 * address out of them. 91 * address out of them.
90 */ 92 */
91 93
92 va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask); 94 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);
93 eva = round_line(va + size); 95 eva = round_line(va + size);
94 va = trunc_line(va); 96 va = trunc_line(va);
95 97
96 if (va + mips_picache_way_size < eva) { 98 if (va + mci->mci_picache_way_size < eva) {
97 va = MIPS_PHYS_TO_KSEG0(0); 99 va = MIPS_PHYS_TO_KSEG0(0);
98 eva = mips_picache_way_size; 100 eva = mci->mci_picache_way_size;
99 } 101 }
100 102
101 for (; va + 8 * 32 <= eva; va += 8 * 32) { 103 for (; va + 8 * 32 <= eva; va += 8 * 32) {
102 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 104 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
103 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV); 105 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
104 } 106 }
105 107
106 for (; va < eva; va += 32) { 108 for (; va < eva; va += 32) {
107 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 109 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
108 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV); 110 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
109 } 111 }
110 112
111 __asm volatile("sync"); 113 __asm volatile("sync");
112} 114}
113 115
114void 116void
115ls2_icache_sync_all(void) 117ls2_icache_sync_all(void)
116{ 118{
117 ls2_icache_sync_range_index(0, mips_picache_way_size); 119 struct mips_cache_info * const mci = &mips_cache_info;
 120 ls2_icache_sync_range_index(0, mci->mci_picache_way_size);
118} 121}
119 122
120void 123void
121ls2_pdcache_inv_range(vaddr_t va, vsize_t size) 124ls2_pdcache_inv_range(vaddr_t va, vsize_t size)
122{ 125{
123 const vaddr_t eva = round_line(va + size); 126 const vaddr_t eva = round_line(va + size);
124 127
125 va = trunc_line(va); 128 va = trunc_line(va);
126 129
127 for (; va + 8 * 32 <= eva; va += 8 * 32) { 130 for (; va + 8 * 32 <= eva; va += 8 * 32) {
128 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV); 131 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
129 } 132 }
130 133
@@ -156,58 +159,60 @@ ls2_pdcache_wbinv_range(vaddr_t va, vsiz @@ -156,58 +159,60 @@ ls2_pdcache_wbinv_range(vaddr_t va, vsiz
156void 159void
157ls2_pdcache_wb_range(vaddr_t va, vsize_t size) 160ls2_pdcache_wb_range(vaddr_t va, vsize_t size)
158{ 161{
159 /* 162 /*
160 * Alas, can't writeback without invalidating... 163 * Alas, can't writeback without invalidating...
161 */ 164 */
162 ls2_pdcache_wbinv_range(va, size); 165 ls2_pdcache_wbinv_range(va, size);
163} 166}
164 167
165void 168void
166ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size) 169ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
167{ 170{
168 vaddr_t eva; 171 vaddr_t eva;
 172 struct mips_cache_info * const mci = &mips_cache_info;
169 173
170 /* 174 /*
171 * Since we're doing Index ops, we expect to not be able 175 * Since we're doing Index ops, we expect to not be able
172 * to access the address we've been given. So, get the 176 * to access the address we've been given. So, get the
173 * bits that determine the cache index, and make a KSEG0 177 * bits that determine the cache index, and make a KSEG0
174 * address out of them. 178 * address out of them.
175 */ 179 */
176 va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask); 180 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);
177 181
178 eva = round_line(va + size); 182 eva = round_line(va + size);
179 va = trunc_line(va); 183 va = trunc_line(va);
180 184
181 if (va + mips_pdcache_way_size > eva) { 185 if (va + mci->mci_pdcache_way_size > eva) {
182 va = MIPS_PHYS_TO_KSEG0(0); 186 va = MIPS_PHYS_TO_KSEG0(0);
183 eva = mips_pdcache_way_size; 187 eva = mci->mci_pdcache_way_size;
184 } 188 }
185 189
186 for (; va + 8 * 32 <= eva; va += 8 * 32) { 190 for (; va + 8 * 32 <= eva; va += 8 * 32) {
187 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 191 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
188 } 192 }
189 193
190 for (; va < eva; va += 32) { 194 for (; va < eva; va += 32) {
191 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 195 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
192 } 196 }
193 197
194 __asm volatile("sync"); 198 __asm volatile("sync");
195} 199}
196 200
197void 201void
198ls2_pdcache_wbinv_all(void) 202ls2_pdcache_wbinv_all(void)
199{ 203{
200 ls2_pdcache_wbinv_range_index(0, mips_pdcache_way_size); 204 struct mips_cache_info * const mci = &mips_cache_info;
 205 ls2_pdcache_wbinv_range_index(0, mci->mci_pdcache_way_size);
201} 206}
202 207
203/* 208/*
204 * Cache operations for secondary caches: 209 * Cache operations for secondary caches:
205 * 210 *
206 * - Direct-mapped 211 * - Direct-mapped
207 * - Write-back 212 * - Write-back
208 * - Physically indexed, physically tagged 213 * - Physically indexed, physically tagged
209 * 214 *
210 */ 215 */
211 216
212void 217void
213ls2_sdcache_inv_range(vaddr_t va, vsize_t size) 218ls2_sdcache_inv_range(vaddr_t va, vsize_t size)
@@ -252,48 +257,50 @@ ls2_sdcache_wbinv_range(vaddr_t va, vsiz @@ -252,48 +257,50 @@ ls2_sdcache_wbinv_range(vaddr_t va, vsiz
252void 257void
253ls2_sdcache_wb_range(vaddr_t va, vsize_t size) 258ls2_sdcache_wb_range(vaddr_t va, vsize_t size)
254{ 259{
255 /* 260 /*
256 * Alas, can't writeback without invalidating... 261 * Alas, can't writeback without invalidating...
257 */ 262 */
258 ls2_sdcache_wbinv_range(va, size); 263 ls2_sdcache_wbinv_range(va, size);
259} 264}
260 265
261void 266void
262ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size) 267ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
263{ 268{
264 vaddr_t eva; 269 vaddr_t eva;
 270 struct mips_cache_info * const mci = &mips_cache_info;
265 271
266 /* 272 /*
267 * Since we're doing Index ops, we expect to not be able 273 * Since we're doing Index ops, we expect to not be able
268 * to access the address we've been given. So, get the 274 * to access the address we've been given. So, get the
269 * bits that determine the cache index, and make a KSEG0 275 * bits that determine the cache index, and make a KSEG0
270 * address out of them. 276 * address out of them.
271 */ 277 */
272 va = MIPS_PHYS_TO_KSEG0(va & mips_sdcache_way_mask); 278 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);
273 279
274 eva = round_line(va + size); 280 eva = round_line(va + size);
275 va = trunc_line(va); 281 va = trunc_line(va);
276 282
277 if (va + mips_sdcache_way_size > eva) { 283 if (va + mci->mci_sdcache_way_size > eva) {
278 va = MIPS_PHYS_TO_KSEG0(0); 284 va = MIPS_PHYS_TO_KSEG0(0);
279 eva = va + mips_sdcache_way_size; 285 eva = va + mci->mci_sdcache_way_size;
280 } 286 }
281 287
282 for (; va + 8 * 32 <= eva; va += 8 * 32) { 288 for (; va + 8 * 32 <= eva; va += 8 * 32) {
283 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 289 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
284 cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV); 290 cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
285 } 291 }
286 292
287 for (; va < eva; va += 32) { 293 for (; va < eva; va += 32) {
288 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 294 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
289 cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV); 295 cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
290 } 296 }
291 297
292 __asm volatile("sync"); 298 __asm volatile("sync");
293} 299}
294 300
295void 301void
296ls2_sdcache_wbinv_all(void) 302ls2_sdcache_wbinv_all(void)
297{ 303{
298 ls2_sdcache_wbinv_range_index(0, mips_sdcache_way_size); 304 struct mips_cache_info * const mci = &mips_cache_info;
 305 ls2_sdcache_wbinv_range_index(0, mci->mci_sdcache_way_size);
299} 306}