Wed Jun 8 17:47:48 2011 UTC ()
Make GDIUM build again after matt-nb5-mips64 merge. untested as I don't have
this hardware, but I'll use this as a base for Lemote Fulong support.


(bouyer)
diff -r1.1 -r1.2 src/sys/arch/evbmips/conf/files.gdium
diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_dma.c
diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_intr.c
diff -r1.13 -r1.14 src/sys/arch/evbmips/gdium/machdep.c
diff -r1.46 -r1.47 src/sys/arch/mips/mips/cache.c
diff -r1.3 -r1.4 src/sys/arch/mips/mips/cache_ls2.c

cvs diff -r1.1 -r1.2 src/sys/arch/evbmips/conf/files.gdium (switch to unified diff)

--- src/sys/arch/evbmips/conf/files.gdium 2009/08/06 00:50:25 1.1
+++ src/sys/arch/evbmips/conf/files.gdium 2011/06/08 17:47:48 1.2
@@ -1,48 +1,47 @@ @@ -1,48 +1,47 @@
1# $NetBSD: files.gdium,v 1.1 2009/08/06 00:50:25 matt Exp $ 1# $NetBSD: files.gdium,v 1.2 2011/06/08 17:47:48 bouyer Exp $
2 2
3file arch/evbmips/gdium/gdium_bus_io.c 3file arch/evbmips/gdium/gdium_bus_io.c
4file arch/evbmips/gdium/gdium_bus_mem.c 4file arch/evbmips/gdium/gdium_bus_mem.c
5file arch/evbmips/gdium/gdium_dma.c 5file arch/evbmips/gdium/gdium_dma.c
6file arch/evbmips/gdium/gdium_genfb.c wsdisplay 6file arch/evbmips/gdium/gdium_genfb.c wsdisplay
7file arch/evbmips/gdium/gdium_intr.c 7file arch/evbmips/gdium/gdium_intr.c
8 8
9file arch/evbmips/gdium/autoconf.c 9file arch/evbmips/gdium/autoconf.c
10file arch/evbmips/gdium/machdep.c 10file arch/evbmips/gdium/machdep.c
11 11
12file arch/mips/mips/bus_dma.c 12file arch/mips/mips/bus_dma.c
13file arch/evbmips/evbmips/disksubr.c 13file arch/evbmips/evbmips/disksubr.c
14file arch/evbmips/evbmips/interrupt.c 14file arch/evbmips/evbmips/interrupt.c
15 15
16file arch/mips/mips/mips3_clock.c 16file arch/mips/mips/mips3_clock.c
17file arch/mips/mips/mips3_clockintr.c 17file arch/mips/mips/mips3_clockintr.c
18file arch/mips/mips/softintr.c 
19 18
20# The autoconfiguration root. 19# The autoconfiguration root.
21device mainbus { [addr = -1] } 20device mainbus { [addr = -1] }
22attach mainbus at root 21attach mainbus at root
23file arch/evbmips/gdium/mainbus.c mainbus 22file arch/evbmips/gdium/mainbus.c mainbus
24 23
25device cpu 24device cpu
26attach cpu at mainbus 25attach cpu at mainbus
27file arch/evbmips/evbmips/cpu.c cpu 26file arch/evbmips/evbmips/cpu.c cpu
28 27
29# Machine-independent I2O drivers. 28# Machine-independent I2O drivers.
30include "dev/i2o/files.i2o" 29include "dev/i2o/files.i2o"
31 30
32# Machine-independent SCSI drivers 31# Machine-independent SCSI drivers
33include "dev/scsipi/files.scsipi" 32include "dev/scsipi/files.scsipi"
34 33
35# Machine-independent USB device support 34# Machine-independent USB device support
36include "dev/usb/files.usb" 35include "dev/usb/files.usb"
37include "dev/ata/files.ata" # to make umass happy 36include "dev/ata/files.ata" # to make umass happy
38 37
39# Memory Disk 38# Memory Disk
40file dev/md_root.c memory_disk_hooks 39file dev/md_root.c memory_disk_hooks
41 40
42# 41#
43# PCI bus support. 42# PCI bus support.
44# 43#
45include "dev/pci/files.pci" 44include "dev/pci/files.pci"
46include "arch/mips/conf/files.bonito" 45include "arch/mips/conf/files.bonito"
47 46
48file arch/evbmips/gdium/bonito_mainbus.c bonito_mainbus 47file arch/evbmips/gdium/bonito_mainbus.c bonito_mainbus

cvs diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_dma.c (switch to unified diff)

--- src/sys/arch/evbmips/gdium/gdium_dma.c 2009/08/06 16:37:01 1.2
+++ src/sys/arch/evbmips/gdium/gdium_dma.c 2011/06/08 17:47:48 1.3
@@ -1,74 +1,63 @@ @@ -1,74 +1,63 @@
1/* $NetBSD: gdium_dma.c,v 1.2 2009/08/06 16:37:01 matt Exp $ */ 1/* $NetBSD: gdium_dma.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Platform-specific DMA support for the Gdium Liberty 1000. 33 * Platform-specific DMA support for the Gdium Liberty 1000.
34 */ 34 */
35 35
36#include <sys/cdefs.h> 36#include <sys/cdefs.h>
37__KERNEL_RCSID(0, "$NetBSD: gdium_dma.c,v 1.2 2009/08/06 16:37:01 matt Exp $"); 37__KERNEL_RCSID(0, "$NetBSD: gdium_dma.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $");
38 38
39#include <sys/param.h> 39#include <sys/param.h>
40 40
41#define _MIPS_BUS_DMA_PRIVATE 41#define _MIPS_BUS_DMA_PRIVATE
42#include <machine/bus.h> 42#include <machine/bus.h>
43 43
44#include <evbmips/gdium/gdiumreg.h> 44#include <evbmips/gdium/gdiumreg.h>
45#include <evbmips/gdium/gdiumvar.h> 45#include <evbmips/gdium/gdiumvar.h>
46 46
47void 47void
48gdium_dma_init(struct gdium_config *gc) 48gdium_dma_init(struct gdium_config *gc)
49{ 49{
50 bus_dma_tag_t t; 50 bus_dma_tag_t t;
51 51
52 /* 52 /*
53 * Initialize the DMA tag used for PCI DMA. 53 * Initialize the DMA tag used for PCI DMA.
54 */ 54 */
55 t = &gc->gc_pci_dmat; 55 t = &gc->gc_pci_dmat;
56 t->_cookie = gc; 56 t->_cookie = gc;
57 t->_wbase = GDIUM_DMA_PCI_PCIBASE; 57 t->_wbase = GDIUM_DMA_PCI_PCIBASE;
58 t->_physbase = GDIUM_DMA_PCI_PHYSBASE; 58 t->_bounce_alloc_lo = GDIUM_DMA_PCI_PHYSBASE;
59 t->_wsize = GDIUM_DMA_PCI_SIZE; 59 t->_bounce_alloc_hi = GDIUM_DMA_PCI_PHYSBASE + GDIUM_DMA_PCI_SIZE;
60 t->_dmamap_create = _bus_dmamap_create; 60 t->_dmamap_ops = mips_bus_dmamap_ops;
61 t->_dmamap_destroy = _bus_dmamap_destroy; 61 t->_dmamem_ops = mips_bus_dmamem_ops;
62 t->_dmamap_load = _bus_dmamap_load; 62 t->_dmatag_ops = mips_bus_dmatag_ops;
63 t->_dmamap_load_mbuf = _bus_dmamap_load_mbuf; 
64 t->_dmamap_load_uio = _bus_dmamap_load_uio; 
65 t->_dmamap_load_raw = _bus_dmamap_load_raw; 
66 t->_dmamap_unload = _bus_dmamap_unload; 
67 t->_dmamap_sync = _bus_dmamap_sync; 
68 
69 t->_dmamem_alloc = _bus_dmamem_alloc; 
70 t->_dmamem_free = _bus_dmamem_free; 
71 t->_dmamem_map = _bus_dmamem_map; 
72 t->_dmamem_unmap = _bus_dmamem_unmap; 
73 t->_dmamem_mmap = _bus_dmamem_mmap; 
74} 63}

cvs diff -r1.2 -r1.3 src/sys/arch/evbmips/gdium/gdium_intr.c (switch to unified diff)

--- src/sys/arch/evbmips/gdium/gdium_intr.c 2009/08/07 01:27:14 1.2
+++ src/sys/arch/evbmips/gdium/gdium_intr.c 2011/06/08 17:47:48 1.3
@@ -1,438 +1,416 @@ @@ -1,438 +1,416 @@
1/* $NetBSD: gdium_intr.c,v 1.2 2009/08/07 01:27:14 matt Exp $ */ 1/* $NetBSD: gdium_intr.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe. 8 * by Jason R. Thorpe.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32/* 32/*
33 * Platform-specific interrupt support for the Algorithmics P-6032. 33 * Platform-specific interrupt support for the Algorithmics P-6032.
34 * 34 *
35 * The Algorithmics P-6032's interrupts are wired to GPIO pins 35 * The Algorithmics P-6032's interrupts are wired to GPIO pins
36 * on the BONITO system controller. 36 * on the BONITO system controller.
37 */ 37 */
38 38
39#include <sys/cdefs.h> 39#include <sys/cdefs.h>
40__KERNEL_RCSID(0, "$NetBSD: gdium_intr.c,v 1.2 2009/08/07 01:27:14 matt Exp $"); 40__KERNEL_RCSID(0, "$NetBSD: gdium_intr.c,v 1.3 2011/06/08 17:47:48 bouyer Exp $");
 41
 42#define __INTR_PRIVATE
 43
41 44
42#include "opt_ddb.h" 45#include "opt_ddb.h"
43 46
44#include <sys/param.h> 47#include <sys/param.h>
45#include <sys/queue.h> 48#include <sys/queue.h>
46#include <sys/malloc.h> 49#include <sys/malloc.h>
47#include <sys/systm.h> 50#include <sys/systm.h>
48#include <sys/device.h> 51#include <sys/device.h>
49#include <sys/kernel.h> 52#include <sys/kernel.h>
50#include <sys/cpu.h> 53#include <sys/cpu.h>
51 54
52#include <machine/bus.h> 55#include <machine/bus.h>
53#include <machine/intr.h> 56#include <machine/intr.h>
54 57
55#include <mips/locore.h> 58#include <mips/locore.h>
56 59
57#include <mips/bonito/bonitoreg.h> 60#include <mips/bonito/bonitoreg.h>
58#include <evbmips/gdium/gdiumvar.h> 61#include <evbmips/gdium/gdiumvar.h>
59 62
60#include <dev/pci/pcireg.h> 63#include <dev/pci/pcireg.h>
61#include <dev/pci/pcivar.h> 64#include <dev/pci/pcivar.h>
62 65
63/* 66/*
64 * The GDIUM interrupts are wired up in the following way: 67 * The GDIUM interrupts are wired up in the following way:
65 * 68 *
66 * GPIN0 ISA_NMI (in) 69 * GPIN0 ISA_NMI (in)
67 * GPIN1 ISA_INTR (in) 70 * GPIN1 ISA_INTR (in)
68 * GPIN2 ETH_INT~ (in) 71 * GPIN2 ETH_INT~ (in)
69 * GPIN3 BONIDE_INT (in) 72 * GPIN3 BONIDE_INT (in)
70 * 73 *
71 * PCI_INTA  74 * PCI_INTA
72 * GPIN4 ISA IRQ3 (in, also on piix4) 75 * GPIN4 ISA IRQ3 (in, also on piix4)
73 * GPIN5 ISA IRQ4 (in, also on piix4) 76 * GPIN5 ISA IRQ4 (in, also on piix4)
74 * 77 *
75 * GPIO0 PIRQ A~ (in) 78 * GPIO0 PIRQ A~ (in)
76 * GPIO1 PIRQ B~ (in) 79 * GPIO1 PIRQ B~ (in)
77 * GPIO2 PIRQ C~ (in) 80 * GPIO2 PIRQ C~ (in)
78 * GPIO3 PIRQ D~ (in) 81 * GPIO3 PIRQ D~ (in)
79 */ 82 */
80 83
81struct gdium_irqmap { 84struct gdium_irqmap {
82 const char *name; 85 const char *name;
83 uint8_t irqidx; 86 uint8_t irqidx;
84 uint8_t flags; 87 uint8_t flags;
85}; 88};
86 89
87#define IRQ_F_INVERT 0x80 /* invert polarity */ 90#define IRQ_F_INVERT 0x80 /* invert polarity */
88#define IRQ_F_EDGE 0x40 /* edge trigger */ 91#define IRQ_F_EDGE 0x40 /* edge trigger */
89#define IRQ_F_INT0 0x00 /* INT0 */ 92#define IRQ_F_INT0 0x00 /* INT0 */
90#define IRQ_F_INT1 0x01 /* INT1 */ 93#define IRQ_F_INT1 0x01 /* INT1 */
91#define IRQ_F_INT2 0x02 /* INT2 */ 94#define IRQ_F_INT2 0x02 /* INT2 */
92#define IRQ_F_INT3 0x03 /* INT3 */ 95#define IRQ_F_INT3 0x03 /* INT3 */
93#define IRQ_F_INTMASK 0x07 /* INT mask */ 96#define IRQ_F_INTMASK 0x07 /* INT mask */
94 97
95const struct gdium_irqmap gdium_irqmap[] = { 98const struct gdium_irqmap gdium_irqmap[] = {
96 { "gpio0", GDIUM_IRQ_GPIO0, IRQ_F_INT0 }, 99 { "gpio0", GDIUM_IRQ_GPIO0, IRQ_F_INT0 },
97 { "gpio1", GDIUM_IRQ_GPIO1, IRQ_F_INT0 }, 100 { "gpio1", GDIUM_IRQ_GPIO1, IRQ_F_INT0 },
98 { "gpio2", GDIUM_IRQ_GPIO2, IRQ_F_INT0 }, 101 { "gpio2", GDIUM_IRQ_GPIO2, IRQ_F_INT0 },
99 { "gpio3", GDIUM_IRQ_GPIO3, IRQ_F_INT0 }, 102 { "gpio3", GDIUM_IRQ_GPIO3, IRQ_F_INT0 },
100 103
101 { "pci inta", GDIUM_IRQ_PCI_INTA, IRQ_F_INT0 }, 104 { "pci inta", GDIUM_IRQ_PCI_INTA, IRQ_F_INT0 },
102 { "pci intb", GDIUM_IRQ_PCI_INTB, IRQ_F_INT0 }, 105 { "pci intb", GDIUM_IRQ_PCI_INTB, IRQ_F_INT0 },
103 { "pci intc", GDIUM_IRQ_PCI_INTC, IRQ_F_INT0 }, 106 { "pci intc", GDIUM_IRQ_PCI_INTC, IRQ_F_INT0 },
104 { "pci intd", GDIUM_IRQ_PCI_INTD, IRQ_F_INT0 }, 107 { "pci intd", GDIUM_IRQ_PCI_INTD, IRQ_F_INT0 },
105  108
106 { "pci perr", GDIUM_IRQ_PCI_PERR, IRQ_F_EDGE|IRQ_F_INT1 }, 109 { "pci perr", GDIUM_IRQ_PCI_PERR, IRQ_F_EDGE|IRQ_F_INT1 },
107 { "pci serr", GDIUM_IRQ_PCI_SERR, IRQ_F_EDGE|IRQ_F_INT1 }, 110 { "pci serr", GDIUM_IRQ_PCI_SERR, IRQ_F_EDGE|IRQ_F_INT1 },
108 111
109 { "denali", GDIUM_IRQ_DENALI, IRQ_F_INT1 }, 112 { "denali", GDIUM_IRQ_DENALI, IRQ_F_INT1 },
110 113
111 { "mips int0", GDIUM_IRQ_INT0, IRQ_F_INT0 }, 114 { "mips int0", GDIUM_IRQ_INT0, IRQ_F_INT0 },
112 { "mips int1", GDIUM_IRQ_INT1, IRQ_F_INT1 }, 115 { "mips int1", GDIUM_IRQ_INT1, IRQ_F_INT1 },
113 { "mips int2", GDIUM_IRQ_INT2, IRQ_F_INT2 }, 116 { "mips int2", GDIUM_IRQ_INT2, IRQ_F_INT2 },
114 { "mips int3", GDIUM_IRQ_INT3, IRQ_F_INT3 }, 117 { "mips int3", GDIUM_IRQ_INT3, IRQ_F_INT3 },
115}; 118};
116 119
117struct gdium_intrhead { 120struct gdium_intrhead {
118 struct evcnt intr_count; 121 struct evcnt intr_count;
119 int intr_refcnt; 122 int intr_refcnt;
120}; 123};
121struct gdium_intrhead gdium_intrtab[__arraycount(gdium_irqmap)]; 124struct gdium_intrhead gdium_intrtab[__arraycount(gdium_irqmap)];
122 125
123#define NINTRS 2 /* MIPS INT0 - INT1 */ 126#define NINTRS 2 /* MIPS INT0 - INT1 */
124 127
125struct gdium_cpuintr { 128struct gdium_cpuintr {
126 LIST_HEAD(, evbmips_intrhand) cintr_list; 129 LIST_HEAD(, evbmips_intrhand) cintr_list;
127 struct evcnt cintr_count; 130 struct evcnt cintr_count;
128 int cintr_refcnt; 131 int cintr_refcnt;
129}; 132};
130 133
131struct gdium_cpuintr gdium_cpuintrs[NINTRS]; 134struct gdium_cpuintr gdium_cpuintrs[NINTRS];
132const char *gdium_cpuintrnames[NINTRS] = { 135const char * const gdium_cpuintrnames[NINTRS] = {
133 "int 0 (pci)", 136 "int 0 (pci)",
134 "int 1 (errors)", 137 "int 1 (errors)",
135}; 138};
136 139
137/* 140/*
138 * This is a mask of bits to clear in the SR when we go to a 141 * This is a mask of bits to clear in the SR when we go to a
139 * given hardware interrupt priority level. 142 * given hardware interrupt priority level.
140 */ 143 */
141const uint32_t ipl_sr_bits[_IPL_N] = { 144static const struct ipl_sr_map gdium_ipl_sr_map = {
142 [IPL_NONE] = 0, 145 .sr_bits = {
143 [IPL_SOFTCLOCK] = 146 [IPL_NONE] = 0,
144 MIPS_SOFT_INT_MASK_0, 147 [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
145#if IPL_SOFTCLOCK != IPL_SOFTBIO 148 [IPL_SOFTNET] = MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1,
146 [IPL_SOFTBIO] = 
147 MIPS_SOFT_INT_MASK_0, 
148#endif 
149 [IPL_SOFTNET] = 
150 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1, 
151#if IPL_SOFTNET != IPL_SOFTSERIAL 
152 [IPL_SOFTSERIAL] = 
153 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1, 
154#endif 
155 [IPL_VM] = 149 [IPL_VM] =
156 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 | 150 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
157 MIPS_INT_MASK_0 | 151 MIPS_INT_MASK_0 |
158 MIPS_INT_MASK_1 | 152 MIPS_INT_MASK_1 |
159 MIPS_INT_MASK_2 | 153 MIPS_INT_MASK_2 |
160 MIPS_INT_MASK_3 | 154 MIPS_INT_MASK_3 |
161 MIPS_INT_MASK_4, 155 MIPS_INT_MASK_4,
162 [IPL_SCHED] = 156 [IPL_SCHED] =
163 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 | 157 MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
164 MIPS_INT_MASK_0 | 158 MIPS_INT_MASK_0 |
165 MIPS_INT_MASK_1 | 159 MIPS_INT_MASK_1 |
166 MIPS_INT_MASK_2 | 160 MIPS_INT_MASK_2 |
167 MIPS_INT_MASK_3 | 161 MIPS_INT_MASK_3 |
168 MIPS_INT_MASK_4 | 162 MIPS_INT_MASK_4 |
169 MIPS_INT_MASK_5, 163 MIPS_INT_MASK_5,
 164 [IPL_DDB] = MIPS_INT_MASK,
 165 [IPL_HIGH] = MIPS_INT_MASK,
 166 },
170}; 167};
171 168
172/* 169int gdium_pci_intr_map(const struct pci_attach_args *, pci_intr_handle_t *);
173 * This is a mask of bits to clear in the SR when we go to a 
174 * given software interrupt priority level. 
175 * Hardware ipls are port/board specific. 
176 */ 
177const uint32_t mips_ipl_si_to_sr[] = { 
178 [IPL_SOFTCLOCK-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0, 
179#if IPL_SOFTCLOCK != IPL_SOFTBIO 
180 [IPL_SOFTBIO-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0, 
181#endif 
182 [IPL_SOFTNET-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_1, 
183#if IPL_SOFTNET != IPL_SOFTSERIAL 
184 [IPL_SOFTSERIAL-IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_1, 
185#endif 
186}; 
187 
188int gdium_pci_intr_map(struct pci_attach_args *, pci_intr_handle_t *); 
189const char *gdium_pci_intr_string(void *, pci_intr_handle_t); 170const char *gdium_pci_intr_string(void *, pci_intr_handle_t);
190const struct evcnt *gdium_pci_intr_evcnt(void *, pci_intr_handle_t); 171const struct evcnt *gdium_pci_intr_evcnt(void *, pci_intr_handle_t);
191void *gdium_pci_intr_establish(void *, pci_intr_handle_t, int, 172void *gdium_pci_intr_establish(void *, pci_intr_handle_t, int,
192 int (*)(void *), void *); 173 int (*)(void *), void *);
193void gdium_pci_intr_disestablish(void *, void *); 174void gdium_pci_intr_disestablish(void *, void *);
194void gdium_pci_conf_interrupt(void *, int, int, int, int, int *); 175void gdium_pci_conf_interrupt(void *, int, int, int, int, int *);
195 176
196void 177void
197evbmips_intr_init(void) 178evbmips_intr_init(void)
198{ 179{
199 struct gdium_config *gc = &gdium_configuration; 180 struct gdium_config * const gc = &gdium_configuration;
200 struct bonito_config *bc = &gc->gc_bonito; 181 struct bonito_config *bc = &gc->gc_bonito;
201 const struct gdium_irqmap *irqmap; 182 const struct gdium_irqmap *irqmap;
202 uint32_t intbit; 183 uint32_t intbit;
203 int i; 184 size_t i;
 185
 186 ipl_sr_map = gdium_ipl_sr_map;
204 187
205 for (i = 0; i < NINTRS; i++) { 188 for (i = 0; i < NINTRS; i++) {
206 LIST_INIT(&gdium_cpuintrs[i].cintr_list); 189 LIST_INIT(&gdium_cpuintrs[i].cintr_list);
207 evcnt_attach_dynamic(&gdium_cpuintrs[i].cintr_count, 190 evcnt_attach_dynamic(&gdium_cpuintrs[i].cintr_count,
208 EVCNT_TYPE_INTR, NULL, "mips", gdium_cpuintrnames[i]); 191 EVCNT_TYPE_INTR, NULL, "mips", gdium_cpuintrnames[i]);
209 } 192 }
210 //evcnt_attach_static(&mips_int5_evcnt); 193 //evcnt_attach_static(&mips_int5_evcnt);
211 194
212 for (i = 0; i < __arraycount(gdium_irqmap); i++) { 195 for (i = 0; i < __arraycount(gdium_irqmap); i++) {
213 irqmap = &gdium_irqmap[i]; 196 irqmap = &gdium_irqmap[i];
214 intbit = 1 << irqmap->irqidx; 197 intbit = 1 << irqmap->irqidx;
215 198
216 evcnt_attach_dynamic(&gdium_intrtab[i].intr_count, 199 evcnt_attach_dynamic(&gdium_intrtab[i].intr_count,
217 EVCNT_TYPE_INTR, NULL, "bonito", irqmap->name); 200 EVCNT_TYPE_INTR, NULL, "bonito", irqmap->name);
218 201
219 if (irqmap->irqidx < 4) 202 if (irqmap->irqidx < 4)
220 bc->bc_gpioIE |= intbit; 203 bc->bc_gpioIE |= intbit;
221 if (irqmap->flags & IRQ_F_INVERT) 204 if (irqmap->flags & IRQ_F_INVERT)
222 bc->bc_intPol |= intbit; 205 bc->bc_intPol |= intbit;
223 if (irqmap->flags & IRQ_F_EDGE) 206 if (irqmap->flags & IRQ_F_EDGE)
224 bc->bc_intEdge |= intbit; 207 bc->bc_intEdge |= intbit;
225 if ((irqmap->flags & IRQ_F_INTMASK) == IRQ_F_INT1) 208 if ((irqmap->flags & IRQ_F_INTMASK) == IRQ_F_INT1)
226 bc->bc_intSteer |= intbit; 209 bc->bc_intSteer |= intbit;
227 210
228 REGVAL(BONITO_INTENCLR) = intbit; 211 REGVAL(BONITO_INTENCLR) = intbit;
229 } 212 }
230 213
231 REGVAL(BONITO_GPIOIE) = bc->bc_gpioIE; 214 REGVAL(BONITO_GPIOIE) = bc->bc_gpioIE;
232 REGVAL(BONITO_INTEDGE) = bc->bc_intEdge; 215 REGVAL(BONITO_INTEDGE) = bc->bc_intEdge;
233 REGVAL(BONITO_INTSTEER) = bc->bc_intSteer; 216 REGVAL(BONITO_INTSTEER) = bc->bc_intSteer;
234 REGVAL(BONITO_INTPOL) = bc->bc_intPol; 217 REGVAL(BONITO_INTPOL) = bc->bc_intPol;
235 218
236 gc->gc_pc.pc_intr_v = NULL; 219 gc->gc_pc.pc_intr_v = NULL;
237 gc->gc_pc.pc_intr_map = gdium_pci_intr_map; 220 gc->gc_pc.pc_intr_map = gdium_pci_intr_map;
238 gc->gc_pc.pc_intr_string = gdium_pci_intr_string; 221 gc->gc_pc.pc_intr_string = gdium_pci_intr_string;
239 gc->gc_pc.pc_intr_evcnt = gdium_pci_intr_evcnt; 222 gc->gc_pc.pc_intr_evcnt = gdium_pci_intr_evcnt;
240 gc->gc_pc.pc_intr_establish = gdium_pci_intr_establish; 223 gc->gc_pc.pc_intr_establish = gdium_pci_intr_establish;
241 gc->gc_pc.pc_intr_disestablish = gdium_pci_intr_disestablish; 224 gc->gc_pc.pc_intr_disestablish = gdium_pci_intr_disestablish;
242 gc->gc_pc.pc_conf_interrupt = gdium_pci_conf_interrupt; 225 gc->gc_pc.pc_conf_interrupt = gdium_pci_conf_interrupt;
243 226
244 /* We let the PCI-ISA bridge code handle this. */ 227 /* We let the PCI-ISA bridge code handle this. */
245 gc->gc_pc.pc_pciide_compat_intr_establish = NULL; 228 gc->gc_pc.pc_pciide_compat_intr_establish = NULL;
246} 229}
247 230
248void * 231void *
249evbmips_intr_establish(int irq, int (*func)(void *), void *arg) 232evbmips_intr_establish(int irq, int (*func)(void *), void *arg)
250{ 233{
251 const struct gdium_irqmap *irqmap; 234 const struct gdium_irqmap *irqmap;
252 struct evbmips_intrhand *ih; 235 struct evbmips_intrhand *ih;
253 int level; 236 int level;
254 int s; 237 int s;
255 238
256 irqmap = &gdium_irqmap[irq]; 239 irqmap = &gdium_irqmap[irq];
257 KASSERT(irq < __arraycount(gdium_irqmap)); 240 KASSERT(irq < __arraycount(gdium_irqmap));
258 241
259 KASSERT(irq == irqmap->irqidx); 242 KASSERT(irq == irqmap->irqidx);
260 243
261 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT|M_ZERO); 244 ih = malloc(sizeof(*ih), M_DEVBUF, M_NOWAIT|M_ZERO);
262 if (ih == NULL) 245 if (ih == NULL)
263 return NULL; 246 return NULL;
264 247
265 ih->ih_func = func; 248 ih->ih_func = func;
266 ih->ih_arg = arg; 249 ih->ih_arg = arg;
267 ih->ih_irq = irq; 250 ih->ih_irq = irq;
268 251
269 s = splhigh(); 252 s = splhigh();
270 253
271 /* 254 /*
272 * First, link it into the tables. 255 * First, link it into the tables.
273 */ 256 */
274 level = (irqmap->flags & IRQ_F_INT1) != 0; 257 level = (irqmap->flags & IRQ_F_INT1) != 0;
275 LIST_INSERT_HEAD(&gdium_cpuintrs[level].cintr_list, ih, ih_q); 258 LIST_INSERT_HEAD(&gdium_cpuintrs[level].cintr_list, ih, ih_q);
276 gdium_cpuintrs[level].cintr_refcnt++; 259 gdium_cpuintrs[level].cintr_refcnt++;
277 260
278 /* 261 /*
279 * Now enable it. 262 * Now enable it.
280 */ 263 */
281 if (gdium_intrtab[ih->ih_irq].intr_refcnt++ == 0) 264 if (gdium_intrtab[ih->ih_irq].intr_refcnt++ == 0)
282 REGVAL(BONITO_INTENSET) = (1 << ih->ih_irq); 265 REGVAL(BONITO_INTENSET) = (1 << ih->ih_irq);
283 266
284 splx(s); 267 splx(s);
285 268
286 return (ih); 269 return (ih);
287} 270}
288 271
289void 272void
290evbmips_intr_disestablish(void *cookie) 273evbmips_intr_disestablish(void *cookie)
291{ 274{
292 const struct gdium_irqmap *irqmap; 275 const struct gdium_irqmap *irqmap;
293 struct evbmips_intrhand *ih = cookie; 276 struct evbmips_intrhand *ih = cookie;
294 int s; 277 int s;
295 278
296 irqmap = &gdium_irqmap[ih->ih_irq]; 279 irqmap = &gdium_irqmap[ih->ih_irq];
297 280
298 s = splhigh(); 281 s = splhigh();
299 282
300 /* 283 /*
301 * First, remove it from the table. 284 * First, remove it from the table.
302 */ 285 */
303 LIST_REMOVE(ih, ih_q); 286 LIST_REMOVE(ih, ih_q);
304 gdium_cpuintrs[(irqmap->flags & IRQ_F_INT1) != 0].cintr_refcnt--; 287 gdium_cpuintrs[(irqmap->flags & IRQ_F_INT1) != 0].cintr_refcnt--;
305 288
306 /* 289 /*
307 * Now, disable it, if there is nothing remaining on the 290 * Now, disable it, if there is nothing remaining on the
308 * list. 291 * list.
309 */ 292 */
310 if (gdium_intrtab[ih->ih_irq].intr_refcnt-- == 1) 293 if (gdium_intrtab[ih->ih_irq].intr_refcnt-- == 1)
311 REGVAL(BONITO_INTENCLR) = (1 << ih->ih_irq); 294 REGVAL(BONITO_INTENCLR) = (1 << ih->ih_irq);
312 295
313 splx(s); 296 splx(s);
314 297
315 free(ih, M_DEVBUF); 298 free(ih, M_DEVBUF);
316} 299}
317 300
318void 301void
319evbmips_iointr(uint32_t status, uint32_t cause, uint32_t pc, 302evbmips_iointr(int ipl, vaddr_t pc, uint32_t ipending)
320 uint32_t ipending) 
321{ 303{
322 const struct gdium_irqmap *irqmap; 304 const struct gdium_irqmap *irqmap;
323 struct evbmips_intrhand *ih; 305 struct evbmips_intrhand *ih;
324 int level; 306 int level;
325 uint32_t isr; 307 uint32_t isr;
326 308
327 /* 309 /*
328 * Read the interrupt pending registers, mask them with the 310 * Read the interrupt pending registers, mask them with the
329 * ones we have enabled, and service them in order of decreasing 311 * ones we have enabled, and service them in order of decreasing
330 * priority. 312 * priority.
331 */ 313 */
332 isr = REGVAL(BONITO_INTISR) & REGVAL(BONITO_INTEN); 314 isr = REGVAL(BONITO_INTISR) & REGVAL(BONITO_INTEN);
333 for (level = 1; level >= 0; level--) { 315 for (level = 1; level >= 0; level--) {
334 if ((ipending & (MIPS_INT_MASK_4 << level)) == 0) 316 if ((ipending & (MIPS_INT_MASK_4 << level)) == 0)
335 continue; 317 continue;
336 gdium_cpuintrs[level].cintr_count.ev_count++; 318 gdium_cpuintrs[level].cintr_count.ev_count++;
337 LIST_FOREACH (ih, &gdium_cpuintrs[level].cintr_list, ih_q) { 319 LIST_FOREACH (ih, &gdium_cpuintrs[level].cintr_list, ih_q) {
338 irqmap = &gdium_irqmap[ih->ih_irq]; 320 irqmap = &gdium_irqmap[ih->ih_irq];
339 if (isr & (1 << ih->ih_irq)) { 321 if (isr & (1 << ih->ih_irq)) {
340 gdium_intrtab[ih->ih_irq].intr_count.ev_count++; 322 gdium_intrtab[ih->ih_irq].intr_count.ev_count++;
341 (*ih->ih_func)(ih->ih_arg); 323 (*ih->ih_func)(ih->ih_arg);
342 } 324 }
343 } 325 }
344 cause &= ~(MIPS_INT_MASK_0 << level); 
345 } 326 }
346 
347 /* Re-enable anything that we have processed. */ 
348 _splset(MIPS_SR_INT_IE | ((status & ~cause) & MIPS_HARD_INT_MASK)); 
349} 327}
350 328
351/***************************************************************************** 329/*****************************************************************************
352 * PCI interrupt support 330 * PCI interrupt support
353 *****************************************************************************/ 331 *****************************************************************************/
354 332
355int 333int
356gdium_pci_intr_map(struct pci_attach_args *pa, 334gdium_pci_intr_map(const struct pci_attach_args *pa,
357 pci_intr_handle_t *ihp) 335 pci_intr_handle_t *ihp)
358{ 336{
359 static const int8_t pciirqmap[5/*device*/] = { 337 static const int8_t pciirqmap[5/*device*/] = {
360 GDIUM_IRQ_PCI_INTC, /* 13: PCI 802.11 */ 338 GDIUM_IRQ_PCI_INTC, /* 13: PCI 802.11 */
361 GDIUM_IRQ_PCI_INTA, /* 14: SM501 */ 339 GDIUM_IRQ_PCI_INTA, /* 14: SM501 */
362 GDIUM_IRQ_PCI_INTB, /* 15: NEC USB (2 func) */ 340 GDIUM_IRQ_PCI_INTB, /* 15: NEC USB (2 func) */
363 GDIUM_IRQ_PCI_INTD, /* 16: Ethernet */ 341 GDIUM_IRQ_PCI_INTD, /* 16: Ethernet */
364 GDIUM_IRQ_PCI_INTC, /* 17: NEC USB (2 func) */ 342 GDIUM_IRQ_PCI_INTC, /* 17: NEC USB (2 func) */
365 }; 343 };
366 pcitag_t bustag = pa->pa_intrtag; 344 pcitag_t bustag = pa->pa_intrtag;
367 int buspin = pa->pa_intrpin; 345 int buspin = pa->pa_intrpin;
368 pci_chipset_tag_t pc = pa->pa_pc; 346 pci_chipset_tag_t pc = pa->pa_pc;
369 int device; 347 int device;
370 348
371 if (buspin == 0) { 349 if (buspin == 0) {
372 /* No IRQ used. */ 350 /* No IRQ used. */
373 return (1); 351 return (1);
374 } 352 }
375 353
376 if (buspin > 4) { 354 if (buspin > 4) {
377 printf("gdium_pci_intr_map: bad interrupt pin %d\n", 355 printf("gdium_pci_intr_map: bad interrupt pin %d\n",
378 buspin); 356 buspin);
379 return (1); 357 return (1);
380 } 358 }
381 359
382 pci_decompose_tag(pc, bustag, NULL, &device, NULL); 360 pci_decompose_tag(pc, bustag, NULL, &device, NULL);
383 if (device < 13 || device > 17) { 361 if (device < 13 || device > 17) {
384 printf("gdium_pci_intr_map: bad device %d\n", 362 printf("gdium_pci_intr_map: bad device %d\n",
385 device); 363 device);
386 return (1); 364 return (1);
387 } 365 }
388 366
389 *ihp = pciirqmap[device - 13]; 367 *ihp = pciirqmap[device - 13];
390 return (0); 368 return (0);
391} 369}
392 370
393const char * 371const char *
394gdium_pci_intr_string(void *v, pci_intr_handle_t ih) 372gdium_pci_intr_string(void *v, pci_intr_handle_t ih)
395{ 373{
396 374
397 if (ih >= __arraycount(gdium_irqmap)) 375 if (ih >= __arraycount(gdium_irqmap))
398 panic("gdium_intr_string: bogus IRQ %ld", ih); 376 panic("gdium_intr_string: bogus IRQ %ld", ih);
399 377
400 return gdium_irqmap[ih].name; 378 return gdium_irqmap[ih].name;
401} 379}
402 380
403const struct evcnt * 381const struct evcnt *
404gdium_pci_intr_evcnt(void *v, pci_intr_handle_t ih) 382gdium_pci_intr_evcnt(void *v, pci_intr_handle_t ih)
405{ 383{
406 384
407 return &gdium_intrtab[ih].intr_count; 385 return &gdium_intrtab[ih].intr_count;
408} 386}
409 387
410void * 388void *
411gdium_pci_intr_establish(void *v, pci_intr_handle_t ih, int level, 389gdium_pci_intr_establish(void *v, pci_intr_handle_t ih, int level,
412 int (*func)(void *), void *arg) 390 int (*func)(void *), void *arg)
413{ 391{
414 392
415 if (ih >= __arraycount(gdium_irqmap)) 393 if (ih >= __arraycount(gdium_irqmap))
416 panic("gdium_pci_intr_establish: bogus IRQ %ld", ih); 394 panic("gdium_pci_intr_establish: bogus IRQ %ld", ih);
417 395
418 return evbmips_intr_establish(ih, func, arg); 396 return evbmips_intr_establish(ih, func, arg);
419} 397}
420 398
421void 399void
422gdium_pci_intr_disestablish(void *v, void *cookie) 400gdium_pci_intr_disestablish(void *v, void *cookie)
423{ 401{
424 402
425 return (evbmips_intr_disestablish(cookie)); 403 return (evbmips_intr_disestablish(cookie));
426} 404}
427 405
428void 406void
429gdium_pci_conf_interrupt(void *v, int bus, int dev, int pin, int swiz, 407gdium_pci_conf_interrupt(void *v, int bus, int dev, int pin, int swiz,
430 int *iline) 408 int *iline)
431{ 409{
432 410
433 /* 411 /*
434 * We actually don't need to do anything; everything is handled 412 * We actually don't need to do anything; everything is handled
435 * in pci_intr_map(). 413 * in pci_intr_map().
436 */ 414 */
437 *iline = 0; 415 *iline = 0;
438} 416}

cvs diff -r1.13 -r1.14 src/sys/arch/evbmips/gdium/machdep.c (switch to unified diff)

--- src/sys/arch/evbmips/gdium/machdep.c 2011/02/20 07:48:34 1.13
+++ src/sys/arch/evbmips/gdium/machdep.c 2011/06/08 17:47:48 1.14
@@ -1,484 +1,451 @@ @@ -1,484 +1,451 @@
1/* $NetBSD: machdep.c,v 1.13 2011/02/20 07:48:34 matt Exp $ */ 1/* $NetBSD: machdep.c,v 1.14 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright 2001, 2002 Wasabi Systems, Inc. 4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Copyright (c) 1988 University of Utah. 39 * Copyright (c) 1988 University of Utah.
40 * Copyright (c) 1992, 1993 40 * Copyright (c) 1992, 1993
41 * The Regents of the University of California. All rights reserved. 41 * The Regents of the University of California. All rights reserved.
42 * 42 *
43 * This code is derived from software contributed to Berkeley by 43 * This code is derived from software contributed to Berkeley by
44 * the Systems Programming Group of the University of Utah Computer 44 * the Systems Programming Group of the University of Utah Computer
45 * Science Department, The Mach Operating System project at 45 * Science Department, The Mach Operating System project at
46 * Carnegie-Mellon University and Ralph Campbell. 46 * Carnegie-Mellon University and Ralph Campbell.
47 * 47 *
48 * Redistribution and use in source and binary forms, with or without 48 * Redistribution and use in source and binary forms, with or without
49 * modification, are permitted provided that the following conditions 49 * modification, are permitted provided that the following conditions
50 * are met: 50 * are met:
51 * 1. Redistributions of source code must retain the above copyright 51 * 1. Redistributions of source code must retain the above copyright
52 * notice, this list of conditions and the following disclaimer. 52 * notice, this list of conditions and the following disclaimer.
53 * 2. Redistributions in binary form must reproduce the above copyright 53 * 2. Redistributions in binary form must reproduce the above copyright
54 * notice, this list of conditions and the following disclaimer in the 54 * notice, this list of conditions and the following disclaimer in the
55 * documentation and/or other materials provided with the distribution. 55 * documentation and/or other materials provided with the distribution.
56 * 3. Neither the name of the University nor the names of its contributors 56 * 3. Neither the name of the University nor the names of its contributors
57 * may be used to endorse or promote products derived from this software 57 * may be used to endorse or promote products derived from this software
58 * without specific prior written permission. 58 * without specific prior written permission.
59 * 59 *
60 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 60 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
61 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
62 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
63 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 63 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
70 * SUCH DAMAGE. 70 * SUCH DAMAGE.
71 * 71 *
72 * @(#)machdep.c 8.3 (Berkeley) 1/12/94 72 * @(#)machdep.c 8.3 (Berkeley) 1/12/94
73 * from: Utah Hdr: machdep.c 1.63 91/04/24 73 * from: Utah Hdr: machdep.c 1.63 91/04/24
74 */ 74 */
75 75
76#include <sys/cdefs.h> 76#include <sys/cdefs.h>
77__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.13 2011/02/20 07:48:34 matt Exp $"); 77__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.14 2011/06/08 17:47:48 bouyer Exp $");
78 78
79#include "opt_ddb.h" 79#include "opt_ddb.h"
80#include "opt_execfmt.h" 80#include "opt_execfmt.h"
81#include "opt_modular.h" 81#include "opt_modular.h"
82 82
83#include <sys/param.h> 83#include <sys/param.h>
84#include <sys/systm.h> 84#include <sys/systm.h>
85#include <sys/kernel.h> 85#include <sys/kernel.h>
86#include <sys/buf.h> 86#include <sys/buf.h>
87#include <sys/reboot.h> 87#include <sys/reboot.h>
88#include <sys/mount.h> 88#include <sys/mount.h>
89#include <sys/kcore.h> 89#include <sys/kcore.h>
90#include <sys/boot_flag.h> 90#include <sys/boot_flag.h>
91#include <sys/termios.h> 91#include <sys/termios.h>
92#include <sys/ksyms.h> 92#include <sys/ksyms.h>
93#include <sys/device.h> 93#include <sys/device.h>
94 94
95#include <uvm/uvm_extern.h> 95#include <uvm/uvm_extern.h>
96 96
97#include <dev/cons.h> 97#include <dev/cons.h>
98 98
99#include "ksyms.h" 99#include "ksyms.h"
100 100
101#if NKSYMS || defined(DDB) || defined(MODULAR) 101#if NKSYMS || defined(DDB) || defined(MODULAR)
102#include <machine/db_machdep.h> 102#include <machine/db_machdep.h>
103#include <ddb/db_extern.h> 103#include <ddb/db_extern.h>
104#endif 104#endif
105 105
106#include <machine/cpu.h> 106#include <machine/cpu.h>
107#include <machine/psl.h> 107#include <machine/psl.h>
108 108
 109#include <mips/locore.h>
 110
109#include <mips/bonito/bonitoreg.h> 111#include <mips/bonito/bonitoreg.h>
110#include <evbmips/gdium/gdiumvar.h> 112#include <evbmips/gdium/gdiumvar.h>
111 113
112#include "com.h" 114#include "com.h"
113#if NCOM > 0 115#if NCOM > 0
114#include <dev/ic/comreg.h> 116#include <dev/ic/comreg.h>
115#include <dev/ic/comvar.h> 117#include <dev/ic/comvar.h>
116 118
117int comcnrate = 38400; /* XXX should be config option */ 119int comcnrate = 38400; /* XXX should be config option */
118#endif /* NCOM > 0 */ 120#endif /* NCOM > 0 */
119 121
120struct gdium_config gdium_configuration = { 122struct gdium_config gdium_configuration = {
121 .gc_bonito = { 123 .gc_bonito = {
122 .bc_adbase = 11, /* magic */ 124 .bc_adbase = 11, /* magic */
123 }, 125 },
124}; 126};
125 127
126/* For sysctl_hw. */ 128/* For sysctl_hw. */
127extern char cpu_model[]; 129extern char cpu_model[];
128 130
129/* Our exported CPU info; we can have only one. */  
130struct cpu_info cpu_info_store; 
131 
132/* Maps for VM objects. */ 131/* Maps for VM objects. */
133struct vm_map *phys_map = NULL; 132struct vm_map *phys_map = NULL;
134 133
135int netboot; /* Are we netbooting? */ 134int netboot; /* Are we netbooting? */
136 135
137phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX]; 136phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
138int mem_cluster_cnt; 137int mem_cluster_cnt;
139 138
140void configure(void); 139void configure(void);
141void mach_init(int, char **, char **, void *); 140void mach_init(int, char **, char **, void *);
142 141
143/* 142/*
144 * For some reason, PMON doesn't assign a real address to the Ralink's BAR. 143 * For some reason, PMON doesn't assign a real address to the Ralink's BAR.
145 * So we have to do it. 144 * So we have to do it.
146 */ 145 */
147static void 146static void
148gdium_pci_attach_hook(device_t parent, device_t self, 147gdium_pci_attach_hook(device_t parent, device_t self,
149 struct pcibus_attach_args *pba) 148 struct pcibus_attach_args *pba)
150{ 149{
151 const pcitag_t high_dev = pci_make_tag(pba->pba_pc, 0, 17, 1); 150 const pcitag_t high_dev = pci_make_tag(pba->pba_pc, 0, 17, 1);
152 const pcitag_t ralink_dev = pci_make_tag(pba->pba_pc, 0, 13, 0); 151 const pcitag_t ralink_dev = pci_make_tag(pba->pba_pc, 0, 13, 0);
153 bus_size_t high_size, ralink_size; 152 bus_size_t high_size, ralink_size;
154 pcireg_t v; 153 pcireg_t v;
155 154
156 /* 155 /*
157 * Get the highest PCI addr used from the last PCI dev. 156 * Get the highest PCI addr used from the last PCI dev.
158 */ 157 */
159 v = pci_conf_read(pba->pba_pc, high_dev, PCI_MAPREG_START); 158 v = pci_conf_read(pba->pba_pc, high_dev, PCI_MAPREG_START);
160 v &= PCI_MAPREG_MEM_ADDR_MASK; 159 v &= PCI_MAPREG_MEM_ADDR_MASK;
161 160
162 /* 161 /*
163 * Get the sizes of the map registers. 162 * Get the sizes of the map registers.
164 */ 163 */
165 pci_mapreg_info(pba->pba_pc, high_dev, PCI_MAPREG_START, 164 pci_mapreg_info(pba->pba_pc, high_dev, PCI_MAPREG_START,
166 PCI_MAPREG_MEM_TYPE_32BIT, NULL, &high_size, NULL); 165 PCI_MAPREG_MEM_TYPE_32BIT, NULL, &high_size, NULL);
167 pci_mapreg_info(pba->pba_pc, ralink_dev, PCI_MAPREG_START, 166 pci_mapreg_info(pba->pba_pc, ralink_dev, PCI_MAPREG_START,
168 PCI_MAPREG_MEM_TYPE_32BIT, NULL, &ralink_size, NULL); 167 PCI_MAPREG_MEM_TYPE_32BIT, NULL, &ralink_size, NULL);
169 168
170 /* 169 /*
171 * Position the ralink register space after the last device. 170 * Position the ralink register space after the last device.
172 */ 171 */
173 v = (v + high_size + ralink_size - 1) & ~(ralink_size - 1); 172 v = (v + high_size + ralink_size - 1) & ~(ralink_size - 1);
174 173
175 /* 174 /*
176 * Set the mapreg. 175 * Set the mapreg.
177 */ 176 */
178 pci_conf_write(pba->pba_pc, ralink_dev, PCI_MAPREG_START, v); 177 pci_conf_write(pba->pba_pc, ralink_dev, PCI_MAPREG_START, v);
179 178
180#if 0 179#if 0
181 /* 180 /*
182 * Why does linux do this? 181 * Why does linux do this?
183 */ 182 */
184 for (int dev = 15; dev <= 17; dev +=2) { 183 for (int dev = 15; dev <= 17; dev +=2) {
185 for (int func = 0; func <= 1; func++) { 184 for (int func = 0; func <= 1; func++) {
186 pcitag_t usb_dev = pci_make_tag(pba->pba_pc, 0, dev, func); 185 pcitag_t usb_dev = pci_make_tag(pba->pba_pc, 0, dev, func);
187 v = pci_conf_read(pba->pba_pc, usb_dev, 0xe0); 186 v = pci_conf_read(pba->pba_pc, usb_dev, 0xe0);
188 v |= 3; 187 v |= 3;
189 pci_conf_write(pba->pba_pc, usb_dev, 0xe0, v); 188 pci_conf_write(pba->pba_pc, usb_dev, 0xe0, v);
190 } 189 }
191 } 190 }
192#endif 191#endif
193} 192}
194 193
195/* 194/*
196 * Do all the stuff that locore normally does before calling main(). 195 * Do all the stuff that locore normally does before calling main().
197 */ 196 */
198void 197void
199mach_init(int argc, char **argv, char **envp, void *callvec) 198mach_init(int argc, char **argv, char **envp, void *callvec)
200{ 199{
201 struct gdium_config *gc = &gdium_configuration; 200 struct gdium_config *gc = &gdium_configuration;
202 void *kernend; 201 void *kernend;
203 u_long first, last; 
204 struct pcb *pcb0; 
205 vaddr_t v; 
206#ifdef NOTYET 202#ifdef NOTYET
207 char *cp; 203 char *cp;
208 int howto; 204 int howto;
209#endif 205#endif
210 int i; 206 int i;
211 psize_t memsize; 207 psize_t memsize;
212 208
213 extern char edata[], end[]; 209 extern char edata[], end[];
214 210
215 /* 211 /*
216 * Clear the BSS segment. 212 * Clear the BSS segment.
217 */ 213 */
218 kernend = (void *)mips_round_page(end); 214 kernend = (void *)mips_round_page(end);
219 memset(edata, 0, (char *)kernend - edata); 215 memset(edata, 0, (char *)kernend - edata);
220 216
221 /* 217 /*
222 * Set up the exception vectors and CPU-specific function 218 * Set up the exception vectors and CPU-specific function
223 * vectors early on. We need the wbflush() vector set up 219 * vectors early on. We need the wbflush() vector set up
224 * before comcnattach() is called (or at least before the 220 * before comcnattach() is called (or at least before the
225 * first printf() after that is called). 221 * first printf() after that is called).
226 * Also clears the I+D caches. 222 * Also clears the I+D caches.
227 */ 223 */
228 mips_vector_init(NULL, bool); 224 mips_vector_init(NULL, false);
229 225
230 /* set the VM page size */ 226 /* set the VM page size */
231 uvm_setpagesize(); 227 uvm_setpagesize();
232 228
233 memsize = 256*1024*1024; 229 memsize = 256*1024*1024;
234 physmem = btoc(memsize); 230 physmem = btoc(memsize);
235 231
236 bonito_pci_init(&gc->gc_pc, &gc->gc_bonito); 232 bonito_pci_init(&gc->gc_pc, &gc->gc_bonito);
237 /* 233 /*
238 * Override the null bonito_pci_attach_hook with our own to we can 234 * Override the null bonito_pci_attach_hook with our own to we can
239 * fix the ralink (device 13). 235 * fix the ralink (device 13).
240 */ 236 */
241 gc->gc_pc.pc_attach_hook = gdium_pci_attach_hook; 237 gc->gc_pc.pc_attach_hook = gdium_pci_attach_hook;
242 gdium_bus_io_init(&gc->gc_iot, gc); 238 gdium_bus_io_init(&gc->gc_iot, gc);
243 gdium_bus_mem_init(&gc->gc_memt, gc); 239 gdium_bus_mem_init(&gc->gc_memt, gc);
244 gdium_dma_init(gc); 240 gdium_dma_init(gc);
245 gdium_cnattach(gc); 241 gdium_cnattach(gc);
246 242
247 /* 243 /*
248 * Disable the 2nd PCI window since we don't need it. 244 * Disable the 2nd PCI window since we don't need it.
249 */ 245 */
250 mips3_sd((uint64_t *)MIPS_PHYS_TO_KSEG1(BONITO_REGBASE + 0x158), 0xe); 246 mips3_sd((uint64_t *)MIPS_PHYS_TO_KSEG1(BONITO_REGBASE + 0x158), 0xe);
251 pci_conf_write(&gc->gc_pc, pci_make_tag(&gc->gc_pc, 0, 0, 0), 18, 0); 247 pci_conf_write(&gc->gc_pc, pci_make_tag(&gc->gc_pc, 0, 0, 0), 18, 0);
252 248
253 /* 249 /*
254 * Get the timer from PMON. 250 * Get the timer from PMON.
255 */ 251 */
256 for (i = 0; envp[i] != NULL; i++) { 252 for (i = 0; envp[i] != NULL; i++) {
257 if (!strncmp(envp[i], "cpuclock=", 9)) { 253 if (!strncmp(envp[i], "cpuclock=", 9)) {
258 curcpu()->ci_cpu_freq = 254 curcpu()->ci_cpu_freq =
259 strtoul(&envp[i][9], NULL, 10); 255 strtoul(&envp[i][9], NULL, 10);
260 break; 256 break;
261 } 257 }
262 } 258 }
263  259
264 if (mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT) 260 if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
265 curcpu()->ci_cpu_freq /= 2; 261 curcpu()->ci_cpu_freq /= 2;
266 262
267 /* Compute the number of ticks for hz. */ 263 /* Compute the number of ticks for hz. */
268 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz; 264 curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
269 265
270 /* Compute the delay divisor. */ 266 /* Compute the delay divisor. */
271 curcpu()->ci_divisor_delay = 267 curcpu()->ci_divisor_delay =
272 ((curcpu()->ci_cpu_freq + 500000) / 1000000); 268 ((curcpu()->ci_cpu_freq + 500000) / 1000000);
273 269
274 /* 270 /*
275 * Get correct cpu frequency if the CPU runs at twice the 271 * Get correct cpu frequency if the CPU runs at twice the
276 * external/cp0-count frequency. 272 * external/cp0-count frequency.
277 */ 273 */
278 if (mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT) 274 if (mips_options.mips_cpu_flags & CPU_MIPS_DOUBLE_COUNT)
279 curcpu()->ci_cpu_freq *= 2; 275 curcpu()->ci_cpu_freq *= 2;
280 276
281#ifdef DEBUG 277#ifdef DEBUG
282 printf("Timer calibration: %lu cycles/sec\n", 278 printf("Timer calibration: %lu cycles/sec\n",
283 curcpu()->ci_cpu_freq); 279 curcpu()->ci_cpu_freq);
284#endif 280#endif
285 281
286#if NCOM > 0 282#if NCOM > 0
287 /* 283 /*
288 * Delay to allow firmware putchars to complete. 284 * Delay to allow firmware putchars to complete.
289 * FIFO depth * character time. 285 * FIFO depth * character time.
290 * character time = (1000000 / (defaultrate / 10)) 286 * character time = (1000000 / (defaultrate / 10))
291 */ 287 */
292 delay(160000000 / comcnrate); 288 delay(160000000 / comcnrate);
293 if (comcnattach(&gc->gc_iot, MALTA_UART0ADR, comcnrate, 289 if (comcnattach(&gc->gc_iot, MALTA_UART0ADR, comcnrate,
294 COM_FREQ, COM_TYPE_NORMAL, 290 COM_FREQ, COM_TYPE_NORMAL,
295 (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8) != 0) 291 (TTYDEF_CFLAG & ~(CSIZE | PARENB)) | CS8) != 0)
296 panic("malta: unable to initialize serial console"); 292 panic("malta: unable to initialize serial console");
297#endif /* NCOM > 0 */ 293#endif /* NCOM > 0 */
298 294
299 mem_clusters[0].start = 0; 295 mem_clusters[0].start = 0;
300 mem_clusters[0].size = ctob(physmem); 296 mem_clusters[0].size = ctob(physmem);
301 mem_cluster_cnt = 1; 297 mem_cluster_cnt = 1;
302 298
303 strcpy(cpu_model, "Gdium Liberty 1000"); 299 strcpy(cpu_model, "Gdium Liberty 1000");
304 300
305 /* 301 /*
306 * XXX: check argv[0] - do something if "gdb"??? 302 * XXX: check argv[0] - do something if "gdb"???
307 */ 303 */
308 304
309 /* 305 /*
310 * Look at arguments passed to us and compute boothowto. 306 * Look at arguments passed to us and compute boothowto.
311 */ 307 */
312 boothowto = RB_AUTOBOOT; 308 boothowto = RB_AUTOBOOT;
313#ifdef NOTYET 309#ifdef NOTYET
314 for (i = 1; i < argc; i++) { 310 for (i = 1; i < argc; i++) {
315 for (cp = argv[i]; *cp; cp++) { 311 for (cp = argv[i]; *cp; cp++) {
316 /* Ignore superfluous '-', if there is one */ 312 /* Ignore superfluous '-', if there is one */
317 if (*cp == '-') 313 if (*cp == '-')
318 continue; 314 continue;
319 315
320 howto = 0; 316 howto = 0;
321 BOOT_FLAG(*cp, howto); 317 BOOT_FLAG(*cp, howto);
322 if (! howto) 318 if (! howto)
323 printf("bootflag '%c' not recognised\n", *cp); 319 printf("bootflag '%c' not recognised\n", *cp);
324 else 320 else
325 boothowto |= howto; 321 boothowto |= howto;
326 } 322 }
327 } 323 }
328#endif 324#endif
329 325
330 /* 326 /*
331 * Load the rest of the available pages into the VM system. 327 * Load the rest of the available pages into the VM system.
332 */ 328 */
333 first = round_page(MIPS_KSEG0_TO_PHYS(kernend)); 329 mips_page_physload(MIPS_KSEG0_START, (vaddr_t)kernend,
334 last = mem_clusters[0].start + mem_clusters[0].size; 330 mem_clusters, mem_cluster_cnt, NULL, 0);
335 uvm_page_physload(atop(first), atop(last), atop(first), atop(last), 
336 VM_FREELIST_DEFAULT); 
337 331
338 /* 332 /*
339 * Initialize error message buffer (at end of core). 333 * Initialize error message buffer (at end of core).
340 */ 334 */
341 mips_init_msgbuf(); 335 mips_init_msgbuf();
342 336
343 pmap_bootstrap(); 337 pmap_bootstrap();
344 338
345 /* 339 /*
346 * Allocate uarea page for lwp0 and set it. 340 * Allocate uarea page for lwp0 and set it.
347 */ 341 */
348 v = uvm_pageboot_alloc(USPACE);  342 mips_init_lwp0_uarea();
349 uvm_lwp_setuarea(&lwp0, v); 
350 
351 pcb0 = lwp_getpcb(&lwp0); 
352 pcb0->pcb_context[11] = MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */ 
353 
354 lwp0.l_md.md_regs = (struct frame *)(v + USPACE) - 1; 
355 343
356 /* 344 /*
357 * Initialize debuggers, and break into them, if appropriate. 345 * Initialize debuggers, and break into them, if appropriate.
358 */ 346 */
359#if defined(DDB) 347#if defined(DDB)
360 if (boothowto & RB_KDB) 348 if (boothowto & RB_KDB)
361 Debugger(); 349 Debugger();
362#endif 350#endif
363} 351}
364 352
365void 353void
366consinit(void) 354consinit(void)
367{ 355{
368 356
369 /* 357 /*
370 * Everything related to console initialization is done 358 * Everything related to console initialization is done
371 * in mach_init(). 359 * in mach_init().
372 */ 360 */
373} 361}
374 362
375/* 363/*
376 * Allocate memory for variable-sized tables, 364 * Allocate memory for variable-sized tables,
377 */ 365 */
378void 366void
379cpu_startup(void) 367cpu_startup(void)
380{ 368{
381 vaddr_t minaddr, maxaddr; 
382 char pbuf[9]; 
383 
384 /* 369 /*
385 * Good {morning,afternoon,evening,night}. 370 * Do the common startup items.
386 */ 371 */
387 printf("%s%s", copyright, version); 372 cpu_startup_common();
388 format_bytes(pbuf, sizeof(pbuf), ctob(physmem)); 
389 printf("total memory = %s\n", pbuf); 
390 373
391 /* 374 /*
392 * Virtual memory is bootstrapped -- notify the bus spaces 375 * Virtual memory is bootstrapped -- notify the bus spaces
393 * that memory allocation is now safe. 376 * that memory allocation is now safe.
394 */ 377 */
395 gdium_configuration.gc_mallocsafe = 1; 378 gdium_configuration.gc_mallocsafe = 1;
396 379
397 minaddr = 0; 
398 /* 
399 * Allocate a submap for physio. 
400 */ 
401 phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 
402 VM_PHYS_SIZE, 0, FALSE, NULL); 
403 
404 /* 
405 * (No need to allocate an mbuf cluster submap. Mbuf clusters 
406 * are allocated via the pool allocator, and we use KSEG to 
407 * map those pages.) 
408 */ 
409 
410 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free)); 
411 printf("avail memory = %s\n", pbuf); 
412} 380}
413 381
414int waittime = -1; 382int waittime = -1;
415 383
416void 384void
417cpu_reboot(int howto, char *bootstr) 385cpu_reboot(int howto, char *bootstr)
418{ 386{
419 387
420 /* Take a snapshot before clobbering any registers. */ 388 /* Take a snapshot before clobbering any registers. */
421 if (curproc) 389 savectx(curpcb);
422 savectx(curpcb); 
423 390
424 if (cold) { 391 if (cold) {
425 howto |= RB_HALT; 392 howto |= RB_HALT;
426 goto haltsys; 393 goto haltsys;
427 } 394 }
428 395
429 /* If "always halt" was specified as a boot flag, obey. */ 396 /* If "always halt" was specified as a boot flag, obey. */
430 if (boothowto & RB_HALT) 397 if (boothowto & RB_HALT)
431 howto |= RB_HALT; 398 howto |= RB_HALT;
432 399
433 boothowto = howto; 400 boothowto = howto;
434 if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) { 401 if ((howto & RB_NOSYNC) == 0 && (waittime < 0)) {
435 waittime = 0; 402 waittime = 0;
436 vfs_shutdown(); 403 vfs_shutdown();
437 404
438 /* 405 /*
439 * If we've been adjusting the clock, the todr 406 * If we've been adjusting the clock, the todr
440 * will be out of synch; adjust it now. 407 * will be out of synch; adjust it now.
441 */ 408 */
442 resettodr(); 409 resettodr();
443 } 410 }
444 411
445 splhigh(); 412 splhigh();
446 413
447 if (howto & RB_DUMP) 414 if (howto & RB_DUMP)
448 dumpsys(); 415 dumpsys();
449 416
450haltsys: 417haltsys:
451 doshutdownhooks(); 418 doshutdownhooks();
452 419
453 pmf_system_shutdown(boothowto); 420 pmf_system_shutdown(boothowto);
454 421
455 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) { 422 if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
456 /* 423 /*
457 * Turning on GPIO1 as output will cause a powerdown. 424 * Turning on GPIO1 as output will cause a powerdown.
458 */ 425 */
459 REGVAL(BONITO_GPIODATA) |= 2; 426 REGVAL(BONITO_GPIODATA) |= 2;
460 REGVAL(BONITO_GPIOIE) &= ~2; 427 REGVAL(BONITO_GPIOIE) &= ~2;
461 } 428 }
462 429
463 if (howto & RB_HALT) { 430 if (howto & RB_HALT) {
464 printf("\n"); 431 printf("\n");
465 printf("The operating system has halted.\n"); 432 printf("The operating system has halted.\n");
466 printf("Please press any key to reboot.\n\n"); 433 printf("Please press any key to reboot.\n\n");
467 cnpollc(1); /* For proper keyboard command handling */ 434 cnpollc(1); /* For proper keyboard command handling */
468 cngetc(); 435 cngetc();
469 cnpollc(0); 436 cnpollc(0);
470 } 437 }
471 438
472 printf("%s\n\n", ((howto & RB_HALT) != 0) ? "halted." : "rebooting..."); 439 printf("%s\n\n", ((howto & RB_HALT) != 0) ? "halted." : "rebooting...");
473 440
474 /* 441 /*
475 * Turning off GPIO2 as output will cause a reset. 442 * Turning off GPIO2 as output will cause a reset.
476 */ 443 */
477 REGVAL(BONITO_GPIODATA) &= ~4; 444 REGVAL(BONITO_GPIODATA) &= ~4;
478 REGVAL(BONITO_GPIOIE) &= ~4; 445 REGVAL(BONITO_GPIOIE) &= ~4;
479 446
480 __asm__ __volatile__ ( 447 __asm__ __volatile__ (
481 "\t.long 0x3c02bfc0\n" 448 "\t.long 0x3c02bfc0\n"
482 "\t.long 0x00400008\n" 449 "\t.long 0x00400008\n"
483 ::: "v0"); 450 ::: "v0");
484} 451}

cvs diff -r1.46 -r1.47 src/sys/arch/mips/mips/cache.c (switch to unified diff)

--- src/sys/arch/mips/mips/cache.c 2011/03/15 07:39:22 1.46
+++ src/sys/arch/mips/mips/cache.c 2011/06/08 17:47:48 1.47
@@ -1,1225 +1,1225 @@ @@ -1,1225 +1,1225 @@
1/* $NetBSD: cache.c,v 1.46 2011/03/15 07:39:22 matt Exp $ */ 1/* $NetBSD: cache.c,v 1.47 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/* 3/*
4 * Copyright 2001, 2002 Wasabi Systems, Inc. 4 * Copyright 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc. 7 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
8 * 8 *
9 * Redistribution and use in source and binary forms, with or without 9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions 10 * modification, are permitted provided that the following conditions
11 * are met: 11 * are met:
12 * 1. Redistributions of source code must retain the above copyright 12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer. 13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright 14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the 15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution. 16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software 17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement: 18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by 19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc. 20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior 22 * or promote products derived from this software without specific prior
23 * written permission. 23 * written permission.
24 * 24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38/* 38/*
39 * Copyright 2000, 2001 39 * Copyright 2000, 2001
40 * Broadcom Corporation. All rights reserved. 40 * Broadcom Corporation. All rights reserved.
41 * 41 *
42 * This software is furnished under license and may be used and copied only 42 * This software is furnished under license and may be used and copied only
43 * in accordance with the following terms and conditions. Subject to these 43 * in accordance with the following terms and conditions. Subject to these
44 * conditions, you may download, copy, install, use, modify and distribute 44 * conditions, you may download, copy, install, use, modify and distribute
45 * modified or unmodified copies of this software in source and/or binary 45 * modified or unmodified copies of this software in source and/or binary
46 * form. No title or ownership is transferred hereby. 46 * form. No title or ownership is transferred hereby.
47 * 47 *
48 * 1) Any source code used, modified or distributed must reproduce and 48 * 1) Any source code used, modified or distributed must reproduce and
49 * retain this copyright notice and list of conditions as they appear in 49 * retain this copyright notice and list of conditions as they appear in
50 * the source file. 50 * the source file.
51 * 51 *
52 * 2) No right is granted to use any trade name, trademark, or logo of 52 * 2) No right is granted to use any trade name, trademark, or logo of
53 * Broadcom Corporation. The "Broadcom Corporation" name may not be 53 * Broadcom Corporation. The "Broadcom Corporation" name may not be
54 * used to endorse or promote products derived from this software 54 * used to endorse or promote products derived from this software
55 * without the prior written permission of Broadcom Corporation. 55 * without the prior written permission of Broadcom Corporation.
56 * 56 *
57 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED 57 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF 58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR 59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE 60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE 61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 */ 68 */
69 69
70#include <sys/cdefs.h> 70#include <sys/cdefs.h>
71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.46 2011/03/15 07:39:22 matt Exp $"); 71__KERNEL_RCSID(0, "$NetBSD: cache.c,v 1.47 2011/06/08 17:47:48 bouyer Exp $");
72 72
73#include "opt_cputype.h" 73#include "opt_cputype.h"
74#include "opt_mips_cache.h" 74#include "opt_mips_cache.h"
75 75
76#include <sys/param.h> 76#include <sys/param.h>
77 77
78#include <uvm/uvm_extern.h> 78#include <uvm/uvm_extern.h>
79 79
80#include <mips/cache.h> 80#include <mips/cache.h>
81#include <mips/locore.h> 81#include <mips/locore.h>
82 82
83#ifdef MIPS1 83#ifdef MIPS1
84#include <mips/cache_r3k.h> 84#include <mips/cache_r3k.h>
85#endif 85#endif
86 86
87#ifdef MIPS3_PLUS 87#ifdef MIPS3_PLUS
88#include <mips/cache_r4k.h> 88#include <mips/cache_r4k.h>
89#include <mips/cache_r5k.h> 89#include <mips/cache_r5k.h>
90#ifdef ENABLE_MIPS4_CACHE_R10K 90#ifdef ENABLE_MIPS4_CACHE_R10K
91#include <mips/cache_r10k.h> 91#include <mips/cache_r10k.h>
92#endif 92#endif
93#ifdef MIPS3_LOONGSON2 93#ifdef MIPS3_LOONGSON2
94#include <mips/cache_ls2.h> 94#include <mips/cache_ls2.h>
95#endif 95#endif
96#endif 96#endif
97 97
98#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 98#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
99#include <mips/mipsNN.h> /* MIPS32/MIPS64 registers */ 99#include <mips/mipsNN.h> /* MIPS32/MIPS64 registers */
100#include <mips/cache_mipsNN.h> 100#include <mips/cache_mipsNN.h>
101#endif 101#endif
102 102
103struct mips_cache_info mips_cache_info; 103struct mips_cache_info mips_cache_info;
104struct mips_cache_ops mips_cache_ops; 104struct mips_cache_ops mips_cache_ops;
105 105
106#ifdef MIPS1 106#ifdef MIPS1
107#ifdef ENABLE_MIPS_TX3900 107#ifdef ENABLE_MIPS_TX3900
108#include <mips/cache_tx39.h> 108#include <mips/cache_tx39.h>
109void tx3900_get_cache_config(void); 109void tx3900_get_cache_config(void);
110void tx3920_get_cache_config(void); 110void tx3920_get_cache_config(void);
111void tx39_cache_config_write_through(void); 111void tx39_cache_config_write_through(void);
112#endif /* ENABLE_MIPS_TX3900 */ 112#endif /* ENABLE_MIPS_TX3900 */
113#endif /* MIPS1 */ 113#endif /* MIPS1 */
114 114
115#if defined(MIPS3) || defined(MIPS4) 115#if defined(MIPS3) || defined(MIPS4)
116void mips3_get_cache_config(int); 116void mips3_get_cache_config(int);
117#ifdef ENABLE_MIPS4_CACHE_R10K 117#ifdef ENABLE_MIPS4_CACHE_R10K
118void mips4_get_cache_config(int); 118void mips4_get_cache_config(int);
119#endif /* ENABLE_MIPS4_CACHE_R10K */ 119#endif /* ENABLE_MIPS4_CACHE_R10K */
120#endif /* MIPS3 || MIPS4 */ 120#endif /* MIPS3 || MIPS4 */
121 121
122#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4) 122#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4)
123static void mips_config_cache_prehistoric(void); 123static void mips_config_cache_prehistoric(void);
124static void mips_config_cache_emips(void); 124static void mips_config_cache_emips(void);
125#endif 125#endif
126#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 126#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
127static void mips_config_cache_modern(void); 127static void mips_config_cache_modern(void);
128#endif 128#endif
129 129
130#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4) 130#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4)
131/* no-cache definition */ 131/* no-cache definition */
132static void no_cache_op(void); 132static void no_cache_op(void);
133static void no_cache_op_range(vaddr_t va, vsize_t size); 133static void no_cache_op_range(vaddr_t va, vsize_t size);
134 134
135/* no-cache implementation */ 135/* no-cache implementation */
136static void no_cache_op(void) {} 136static void no_cache_op(void) {}
137static void no_cache_op_range(vaddr_t va, vsize_t size) {} 137static void no_cache_op_range(vaddr_t va, vsize_t size) {}
138#endif 138#endif
139 139
140/* 140/*
141 * mips_dcache_compute_align: 141 * mips_dcache_compute_align:
142 * 142 *
143 * Compute the D-cache alignment values. 143 * Compute the D-cache alignment values.
144 */ 144 */
145void 145void
146mips_dcache_compute_align(void) 146mips_dcache_compute_align(void)
147{ 147{
148 struct mips_cache_info * const mci = &mips_cache_info; 148 struct mips_cache_info * const mci = &mips_cache_info;
149 u_int align; 149 u_int align;
150 150
151 align = mci->mci_pdcache_line_size; 151 align = mci->mci_pdcache_line_size;
152 152
153 if (mci->mci_sdcache_line_size > align) 153 if (mci->mci_sdcache_line_size > align)
154 align = mci->mci_sdcache_line_size; 154 align = mci->mci_sdcache_line_size;
155 155
156 if (mci->mci_tcache_line_size > align) 156 if (mci->mci_tcache_line_size > align)
157 align = mci->mci_tcache_line_size; 157 align = mci->mci_tcache_line_size;
158 158
159 mci->mci_dcache_align = align; 159 mci->mci_dcache_align = align;
160 mci->mci_dcache_align_mask = align - 1; 160 mci->mci_dcache_align_mask = align - 1;
161} 161}
162 162
163/* 163/*
164 * mips_config_cache: 164 * mips_config_cache:
165 * 165 *
166 * Configure the cache for the system. 166 * Configure the cache for the system.
167 * 167 *
168 * XXX DOES NOT HANDLE SPLIT SECONDARY CACHES. 168 * XXX DOES NOT HANDLE SPLIT SECONDARY CACHES.
169 */ 169 */
170void 170void
171mips_config_cache(void) 171mips_config_cache(void)
172{ 172{
173#ifdef DIAGNOSTIC 173#ifdef DIAGNOSTIC
174 struct mips_cache_info * const mci = &mips_cache_info; 174 struct mips_cache_info * const mci = &mips_cache_info;
175 struct mips_cache_ops * const mco = &mips_cache_ops; 175 struct mips_cache_ops * const mco = &mips_cache_ops;
176#endif 176#endif
177 const mips_prid_t cpu_id = mips_options.mips_cpu_id; 177 const mips_prid_t cpu_id = mips_options.mips_cpu_id;
178  178
179#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4) 179#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4)
180 if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC) 180 if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
181 mips_config_cache_prehistoric(); 181 mips_config_cache_prehistoric();
182 else if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_MICROSOFT) 182 else if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_MICROSOFT)
183 mips_config_cache_emips(); 183 mips_config_cache_emips();
184#endif 184#endif
185#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 185#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
186 if (MIPS_PRID_CID(cpu_id) != MIPS_PRID_CID_PREHISTORIC) 186 if (MIPS_PRID_CID(cpu_id) != MIPS_PRID_CID_PREHISTORIC)
187 mips_config_cache_modern(); 187 mips_config_cache_modern();
188#endif 188#endif
189 189
190#ifdef DIAGNOSTIC 190#ifdef DIAGNOSTIC
191 /* Check that all cache ops are set up. */ 191 /* Check that all cache ops are set up. */
192 if (mci->mci_picache_size || 1) { /* XXX- must have primary Icache */ 192 if (mci->mci_picache_size || 1) { /* XXX- must have primary Icache */
193 if (!mco->mco_icache_sync_all) 193 if (!mco->mco_icache_sync_all)
194 panic("no icache_sync_all cache op"); 194 panic("no icache_sync_all cache op");
195 if (!mco->mco_icache_sync_range) 195 if (!mco->mco_icache_sync_range)
196 panic("no icache_sync_range cache op"); 196 panic("no icache_sync_range cache op");
197 if (!mco->mco_icache_sync_range_index) 197 if (!mco->mco_icache_sync_range_index)
198 panic("no icache_sync_range_index cache op"); 198 panic("no icache_sync_range_index cache op");
199 } 199 }
200 if (mci->mci_pdcache_size || 1) { /* XXX- must have primary Dcache */ 200 if (mci->mci_pdcache_size || 1) { /* XXX- must have primary Dcache */
201 if (!mco->mco_pdcache_wbinv_all) 201 if (!mco->mco_pdcache_wbinv_all)
202 panic("no pdcache_wbinv_all"); 202 panic("no pdcache_wbinv_all");
203 if (!mco->mco_pdcache_wbinv_range) 203 if (!mco->mco_pdcache_wbinv_range)
204 panic("no pdcache_wbinv_range"); 204 panic("no pdcache_wbinv_range");
205 if (!mco->mco_pdcache_wbinv_range_index) 205 if (!mco->mco_pdcache_wbinv_range_index)
206 panic("no pdcache_wbinv_range_index"); 206 panic("no pdcache_wbinv_range_index");
207 if (!mco->mco_pdcache_inv_range) 207 if (!mco->mco_pdcache_inv_range)
208 panic("no pdcache_inv_range"); 208 panic("no pdcache_inv_range");
209 if (!mco->mco_pdcache_wb_range) 209 if (!mco->mco_pdcache_wb_range)
210 panic("no pdcache_wb_range"); 210 panic("no pdcache_wb_range");
211 } 211 }
212 if (mci->mci_sdcache_size) { 212 if (mci->mci_sdcache_size) {
213 if (!mco->mco_sdcache_wbinv_all) 213 if (!mco->mco_sdcache_wbinv_all)
214 panic("no sdcache_wbinv_all"); 214 panic("no sdcache_wbinv_all");
215 if (!mco->mco_sdcache_wbinv_range) 215 if (!mco->mco_sdcache_wbinv_range)
216 panic("no sdcache_wbinv_range"); 216 panic("no sdcache_wbinv_range");
217 if (!mco->mco_sdcache_wbinv_range_index) 217 if (!mco->mco_sdcache_wbinv_range_index)
218 panic("no sdcache_wbinv_range_index"); 218 panic("no sdcache_wbinv_range_index");
219 if (!mco->mco_sdcache_inv_range) 219 if (!mco->mco_sdcache_inv_range)
220 panic("no sdcache_inv_range"); 220 panic("no sdcache_inv_range");
221 if (!mco->mco_sdcache_wb_range) 221 if (!mco->mco_sdcache_wb_range)
222 panic("no sdcache_wb_range"); 222 panic("no sdcache_wb_range");
223 } 223 }
224#endif /* DIAGNOSTIC */ 224#endif /* DIAGNOSTIC */
225} 225}
226 226
227#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4) 227#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4)
228/* 228/*
229 * XXX DOES NOT HANDLE SPLIT SECONDARY CACHES. 229 * XXX DOES NOT HANDLE SPLIT SECONDARY CACHES.
230 */ 230 */
231void 231void
232mips_config_cache_prehistoric(void) 232mips_config_cache_prehistoric(void)
233{ 233{
234 struct mips_cache_info * const mci = &mips_cache_info; 234 struct mips_cache_info * const mci = &mips_cache_info;
235 struct mips_cache_ops * const mco = &mips_cache_ops; 235 struct mips_cache_ops * const mco = &mips_cache_ops;
236 const mips_prid_t cpu_id = mips_options.mips_cpu_id; 236 const mips_prid_t cpu_id = mips_options.mips_cpu_id;
237#if defined(MIPS3) || defined(MIPS4) 237#if defined(MIPS3) || defined(MIPS4)
238 int csizebase = MIPS3_CONFIG_C_DEFBASE; 238 int csizebase = MIPS3_CONFIG_C_DEFBASE;
239#endif 239#endif
240 240
241 KASSERT(PAGE_SIZE != 0); 241 KASSERT(PAGE_SIZE != 0);
242 242
243 /* 243 /*
244 * Configure primary caches. 244 * Configure primary caches.
245 */ 245 */
246 switch (MIPS_PRID_IMPL(cpu_id)) { 246 switch (MIPS_PRID_IMPL(cpu_id)) {
247#ifdef MIPS1 247#ifdef MIPS1
248 case MIPS_R2000: 248 case MIPS_R2000:
249 case MIPS_R3000: 249 case MIPS_R3000:
250 mci->mci_picache_size = r3k_picache_size(); 250 mci->mci_picache_size = r3k_picache_size();
251 mci->mci_pdcache_size = r3k_pdcache_size(); 251 mci->mci_pdcache_size = r3k_pdcache_size();
252 252
253 mci->mci_picache_line_size = 4; 253 mci->mci_picache_line_size = 4;
254 mci->mci_pdcache_line_size = 4; 254 mci->mci_pdcache_line_size = 4;
255 255
256 mci->mci_picache_ways = 1; 256 mci->mci_picache_ways = 1;
257 mci->mci_pdcache_ways = 1; 257 mci->mci_pdcache_ways = 1;
258 258
259 mci->mci_pdcache_write_through = true; 259 mci->mci_pdcache_write_through = true;
260 260
261 mco->mco_icache_sync_all = 261 mco->mco_icache_sync_all =
262 r3k_icache_sync_all; 262 r3k_icache_sync_all;
263 mco->mco_icache_sync_range = 263 mco->mco_icache_sync_range =
264 r3k_icache_sync_range; 264 r3k_icache_sync_range;
265 mco->mco_icache_sync_range_index = 265 mco->mco_icache_sync_range_index =
266 mco->mco_icache_sync_range; 266 mco->mco_icache_sync_range;
267 267
268 mco->mco_pdcache_wbinv_all = 268 mco->mco_pdcache_wbinv_all =
269 r3k_pdcache_wbinv_all; 269 r3k_pdcache_wbinv_all;
270 mco->mco_pdcache_wbinv_range = 270 mco->mco_pdcache_wbinv_range =
271 r3k_pdcache_inv_range; 271 r3k_pdcache_inv_range;
272 mco->mco_pdcache_wbinv_range_index = 272 mco->mco_pdcache_wbinv_range_index =
273 mco->mco_pdcache_wbinv_range; 273 mco->mco_pdcache_wbinv_range;
274 mco->mco_pdcache_inv_range = 274 mco->mco_pdcache_inv_range =
275 r3k_pdcache_inv_range; 275 r3k_pdcache_inv_range;
276 mco->mco_pdcache_wb_range = 276 mco->mco_pdcache_wb_range =
277 r3k_pdcache_wb_range; 277 r3k_pdcache_wb_range;
278 278
279 uvmexp.ncolors = atop(mci->mci_pdcache_size); 279 uvmexp.ncolors = atop(mci->mci_pdcache_size);
280 break; 280 break;
281 281
282#ifdef ENABLE_MIPS_TX3900 282#ifdef ENABLE_MIPS_TX3900
283 case MIPS_TX3900: 283 case MIPS_TX3900:
284 switch (MIPS_PRID_REV_MAJ(cpu_id)) { 284 switch (MIPS_PRID_REV_MAJ(cpu_id)) {
285 case 1: /* TX3912 */ 285 case 1: /* TX3912 */
286 mci->mci_picache_ways = 1; 286 mci->mci_picache_ways = 1;
287 mci->mci_picache_line_size = 16; 287 mci->mci_picache_line_size = 16;
288 mci->mci_pdcache_line_size = 4; 288 mci->mci_pdcache_line_size = 4;
289 289
290 tx3900_get_cache_config(); 290 tx3900_get_cache_config();
291 291
292 mci->mci_pdcache_write_through = true; 292 mci->mci_pdcache_write_through = true;
293 293
294 mco->mco_icache_sync_all = 294 mco->mco_icache_sync_all =
295 tx3900_icache_sync_all_16; 295 tx3900_icache_sync_all_16;
296 mco->mco_icache_sync_range = 296 mco->mco_icache_sync_range =
297 tx3900_icache_sync_range_16; 297 tx3900_icache_sync_range_16;
298 mco->mco_icache_sync_range_index = 298 mco->mco_icache_sync_range_index =
299 tx3900_icache_sync_range_16; 299 tx3900_icache_sync_range_16;
300 300
301 mco->mco_pdcache_wbinv_all = 301 mco->mco_pdcache_wbinv_all =
302 tx3900_pdcache_wbinv_all_4; 302 tx3900_pdcache_wbinv_all_4;
303 mco->mco_pdcache_wbinv_range = 303 mco->mco_pdcache_wbinv_range =
304 tx3900_pdcache_inv_range_4; 304 tx3900_pdcache_inv_range_4;
305 mco->mco_pdcache_wbinv_range_index = 305 mco->mco_pdcache_wbinv_range_index =
306 tx3900_pdcache_inv_range_4; 306 tx3900_pdcache_inv_range_4;
307 mco->mco_pdcache_inv_range = 307 mco->mco_pdcache_inv_range =
308 tx3900_pdcache_inv_range_4; 308 tx3900_pdcache_inv_range_4;
309 mco->mco_pdcache_wb_range = 309 mco->mco_pdcache_wb_range =
310 tx3900_pdcache_wb_range_4; 310 tx3900_pdcache_wb_range_4;
311 break; 311 break;
312 312
313 case 3: /* TX3922 */ 313 case 3: /* TX3922 */
314 mci->mci_picache_ways = 2; 314 mci->mci_picache_ways = 2;
315 mci->mci_picache_line_size = 16; 315 mci->mci_picache_line_size = 16;
316 mci->mci_pdcache_line_size = 16; 316 mci->mci_pdcache_line_size = 16;
317 317
318 tx3920_get_cache_config(); 318 tx3920_get_cache_config();
319 319
320 mco->mco_icache_sync_all = 320 mco->mco_icache_sync_all =
321 mci->mci_pdcache_write_through ? 321 mci->mci_pdcache_write_through ?
322 tx3900_icache_sync_all_16 : 322 tx3900_icache_sync_all_16 :
323 tx3920_icache_sync_all_16wb; 323 tx3920_icache_sync_all_16wb;
324 mco->mco_icache_sync_range = 324 mco->mco_icache_sync_range =
325 mci->mci_pdcache_write_through ? 325 mci->mci_pdcache_write_through ?
326 tx3920_icache_sync_range_16wt : 326 tx3920_icache_sync_range_16wt :
327 tx3920_icache_sync_range_16wb; 327 tx3920_icache_sync_range_16wb;
328 mco->mco_icache_sync_range_index = 328 mco->mco_icache_sync_range_index =
329 mco->mco_icache_sync_range; 329 mco->mco_icache_sync_range;
330 330
331 mco->mco_pdcache_wbinv_all = 331 mco->mco_pdcache_wbinv_all =
332 mci->mci_pdcache_write_through ? 332 mci->mci_pdcache_write_through ?
333 tx3920_pdcache_wbinv_all_16wt : 333 tx3920_pdcache_wbinv_all_16wt :
334 tx3920_pdcache_wbinv_all_16wb; 334 tx3920_pdcache_wbinv_all_16wb;
335 mco->mco_pdcache_wbinv_range = 335 mco->mco_pdcache_wbinv_range =
336 mci->mci_pdcache_write_through ? 336 mci->mci_pdcache_write_through ?
337 tx3920_pdcache_inv_range_16 : 337 tx3920_pdcache_inv_range_16 :
338 tx3920_pdcache_wbinv_range_16wb; 338 tx3920_pdcache_wbinv_range_16wb;
339 mco->mco_pdcache_wbinv_range_index = 339 mco->mco_pdcache_wbinv_range_index =
340 mco->mco_pdcache_wbinv_range; 340 mco->mco_pdcache_wbinv_range;
341 mco->mco_pdcache_inv_range = 341 mco->mco_pdcache_inv_range =
342 tx3920_pdcache_inv_range_16; 342 tx3920_pdcache_inv_range_16;
343 mco->mco_pdcache_wb_range = 343 mco->mco_pdcache_wb_range =
344 mci->mci_pdcache_write_through ? 344 mci->mci_pdcache_write_through ?
345 tx3920_pdcache_wb_range_16wt : 345 tx3920_pdcache_wb_range_16wt :
346 tx3920_pdcache_wb_range_16wb; 346 tx3920_pdcache_wb_range_16wb;
347 break; 347 break;
348 348
349 default: 349 default:
350 panic("mips_config_cache: unsupported TX3900"); 350 panic("mips_config_cache: unsupported TX3900");
351 } 351 }
352 352
353 mci->mci_pdcache_ways = 2; 353 mci->mci_pdcache_ways = 2;
354 tx3900_get_cache_config(); 354 tx3900_get_cache_config();
355 /* change to write-through mode */ 355 /* change to write-through mode */
356 tx39_cache_config_write_through(); 356 tx39_cache_config_write_through();
357 357
358 uvmexp.ncolors = atop(mci->mci_pdcache_size) / mci->mci_pdcache_ways; 358 uvmexp.ncolors = atop(mci->mci_pdcache_size) / mci->mci_pdcache_ways;
359 break; 359 break;
360#endif /* ENABLE_MIPS_TX3900 */ 360#endif /* ENABLE_MIPS_TX3900 */
361#endif /* MIPS1 */ 361#endif /* MIPS1 */
362 362
363#if defined(MIPS3) || defined(MIPS4) 363#if defined(MIPS3) || defined(MIPS4)
364 case MIPS_R4100: 364 case MIPS_R4100:
365 if ((mips3_cp0_config_read() & MIPS3_CONFIG_CS) != 0) 365 if ((mips3_cp0_config_read() & MIPS3_CONFIG_CS) != 0)
366 csizebase = MIPS3_CONFIG_C_4100BASE; 366 csizebase = MIPS3_CONFIG_C_4100BASE;
367 367
368 /* 368 /*
369 * R4100 (NEC VR series) revision number means: 369 * R4100 (NEC VR series) revision number means:
370 * 370 *
371 * MIPS_PRID_REV_MAJ MIPS_PRID_REV_MIN 371 * MIPS_PRID_REV_MAJ MIPS_PRID_REV_MIN
372 * VR4102 4 ? 372 * VR4102 4 ?
373 * VR4111 5 ? 373 * VR4111 5 ?
374 * VR4181 5 ? 374 * VR4181 5 ?
375 * VR4121 6 ? 375 * VR4121 6 ?
376 * VR4122 7 0 or 1 376 * VR4122 7 0 or 1
377 * VR4181A 7 3 < 377 * VR4181A 7 3 <
378 * VR4131 8 ? 378 * VR4131 8 ?
379 */ 379 */
380 /* Vr4131 has R4600 style 2-way set-associative cache */ 380 /* Vr4131 has R4600 style 2-way set-associative cache */
381 if (MIPS_PRID_REV_MAJ(cpu_id) == 8) 381 if (MIPS_PRID_REV_MAJ(cpu_id) == 8)
382 goto primary_cache_is_2way; 382 goto primary_cache_is_2way;
383 /* FALLTHROUGH */ 383 /* FALLTHROUGH */
384 384
385 case MIPS_R4000: 385 case MIPS_R4000:
386 case MIPS_R4300: 386 case MIPS_R4300:
387 mci->mci_picache_ways = 1; 387 mci->mci_picache_ways = 1;
388 mci->mci_pdcache_ways = 1; 388 mci->mci_pdcache_ways = 1;
389 mci->mci_sdcache_ways = 1; 389 mci->mci_sdcache_ways = 1;
390 390
391 mips3_get_cache_config(csizebase); 391 mips3_get_cache_config(csizebase);
392 392
393 if (mci->mci_picache_size > PAGE_SIZE || 393 if (mci->mci_picache_size > PAGE_SIZE ||
394 mci->mci_pdcache_size > PAGE_SIZE) 394 mci->mci_pdcache_size > PAGE_SIZE)
395 /* no VCE support if there is no L2 cache */ 395 /* no VCE support if there is no L2 cache */
396 mci->mci_cache_virtual_alias = true; 396 mci->mci_cache_virtual_alias = true;
397 397
398 switch (mci->mci_picache_line_size) { 398 switch (mci->mci_picache_line_size) {
399 case 16: 399 case 16:
400 mco->mco_icache_sync_all = 400 mco->mco_icache_sync_all =
401 r4k_icache_sync_all_16; 401 r4k_icache_sync_all_16;
402 mco->mco_icache_sync_range = 402 mco->mco_icache_sync_range =
403 r4k_icache_sync_range_16; 403 r4k_icache_sync_range_16;
404 mco->mco_icache_sync_range_index = 404 mco->mco_icache_sync_range_index =
405 r4k_icache_sync_range_index_16; 405 r4k_icache_sync_range_index_16;
406 break; 406 break;
407 407
408 case 32: 408 case 32:
409 mco->mco_icache_sync_all = 409 mco->mco_icache_sync_all =
410 r4k_icache_sync_all_32; 410 r4k_icache_sync_all_32;
411 mco->mco_icache_sync_range = 411 mco->mco_icache_sync_range =
412 r4k_icache_sync_range_32; 412 r4k_icache_sync_range_32;
413 mco->mco_icache_sync_range_index = 413 mco->mco_icache_sync_range_index =
414 r4k_icache_sync_range_index_32; 414 r4k_icache_sync_range_index_32;
415 break; 415 break;
416 416
417 default: 417 default:
418 panic("r4k picache line size %d", 418 panic("r4k picache line size %d",
419 mci->mci_picache_line_size); 419 mci->mci_picache_line_size);
420 } 420 }
421 421
422 switch (mci->mci_pdcache_line_size) { 422 switch (mci->mci_pdcache_line_size) {
423 case 16: 423 case 16:
424 mco->mco_pdcache_wbinv_all = 424 mco->mco_pdcache_wbinv_all =
425 r4k_pdcache_wbinv_all_16; 425 r4k_pdcache_wbinv_all_16;
426 mco->mco_pdcache_wbinv_range = 426 mco->mco_pdcache_wbinv_range =
427 r4k_pdcache_wbinv_range_16; 427 r4k_pdcache_wbinv_range_16;
428 mco->mco_pdcache_wbinv_range_index = 428 mco->mco_pdcache_wbinv_range_index =
429 r4k_pdcache_wbinv_range_index_16; 429 r4k_pdcache_wbinv_range_index_16;
430 mco->mco_pdcache_inv_range = 430 mco->mco_pdcache_inv_range =
431 r4k_pdcache_inv_range_16; 431 r4k_pdcache_inv_range_16;
432 mco->mco_pdcache_wb_range = 432 mco->mco_pdcache_wb_range =
433 r4k_pdcache_wb_range_16; 433 r4k_pdcache_wb_range_16;
434 break; 434 break;
435 435
436 case 32: 436 case 32:
437 mco->mco_pdcache_wbinv_all = 437 mco->mco_pdcache_wbinv_all =
438 r4k_pdcache_wbinv_all_32; 438 r4k_pdcache_wbinv_all_32;
439 mco->mco_pdcache_wbinv_range = 439 mco->mco_pdcache_wbinv_range =
440 r4k_pdcache_wbinv_range_32; 440 r4k_pdcache_wbinv_range_32;
441 mco->mco_pdcache_wbinv_range_index = 441 mco->mco_pdcache_wbinv_range_index =
442 r4k_pdcache_wbinv_range_index_32; 442 r4k_pdcache_wbinv_range_index_32;
443 mco->mco_pdcache_inv_range = 443 mco->mco_pdcache_inv_range =
444 r4k_pdcache_inv_range_32; 444 r4k_pdcache_inv_range_32;
445 mco->mco_pdcache_wb_range = 445 mco->mco_pdcache_wb_range =
446 r4k_pdcache_wb_range_32; 446 r4k_pdcache_wb_range_32;
447 break; 447 break;
448 448
449 default: 449 default:
450 panic("r4k pdcache line size %d", 450 panic("r4k pdcache line size %d",
451 mci->mci_pdcache_line_size); 451 mci->mci_pdcache_line_size);
452 } 452 }
453 453
454 /* Virtually-indexed cache; no use for colors. */ 454 /* Virtually-indexed cache; no use for colors. */
455 break; 455 break;
456 456
457 case MIPS_R4600: 457 case MIPS_R4600:
458#ifdef ENABLE_MIPS_R4700 458#ifdef ENABLE_MIPS_R4700
459 case MIPS_R4700: 459 case MIPS_R4700:
460#endif 460#endif
461#ifndef ENABLE_MIPS_R3NKK 461#ifndef ENABLE_MIPS_R3NKK
462 case MIPS_R5000: 462 case MIPS_R5000:
463#endif 463#endif
464 case MIPS_RM5200: 464 case MIPS_RM5200:
465primary_cache_is_2way: 465primary_cache_is_2way:
466 mci->mci_picache_ways = 2; 466 mci->mci_picache_ways = 2;
467 mci->mci_pdcache_ways = 2; 467 mci->mci_pdcache_ways = 2;
468 468
469 mips3_get_cache_config(csizebase); 469 mips3_get_cache_config(csizebase);
470 470
471 if ((mci->mci_picache_size / mci->mci_picache_ways) > PAGE_SIZE || 471 if ((mci->mci_picache_size / mci->mci_picache_ways) > PAGE_SIZE ||
472 (mci->mci_pdcache_size / mci->mci_pdcache_ways) > PAGE_SIZE) 472 (mci->mci_pdcache_size / mci->mci_pdcache_ways) > PAGE_SIZE)
473 mci->mci_cache_virtual_alias = true; 473 mci->mci_cache_virtual_alias = true;
474 474
475 switch (mci->mci_picache_line_size) { 475 switch (mci->mci_picache_line_size) {
476 case 32: 476 case 32:
477 mco->mco_icache_sync_all = 477 mco->mco_icache_sync_all =
478 r5k_icache_sync_all_32; 478 r5k_icache_sync_all_32;
479 mco->mco_icache_sync_range = 479 mco->mco_icache_sync_range =
480 r5k_icache_sync_range_32; 480 r5k_icache_sync_range_32;
481 mco->mco_icache_sync_range_index = 481 mco->mco_icache_sync_range_index =
482 r5k_icache_sync_range_index_32; 482 r5k_icache_sync_range_index_32;
483 break; 483 break;
484 484
485 default: 485 default:
486 panic("r5k picache line size %d", 486 panic("r5k picache line size %d",
487 mci->mci_picache_line_size); 487 mci->mci_picache_line_size);
488 } 488 }
489 489
490 switch (mci->mci_pdcache_line_size) { 490 switch (mci->mci_pdcache_line_size) {
491 case 16: 491 case 16:
492 mco->mco_pdcache_wbinv_all = 492 mco->mco_pdcache_wbinv_all =
493 r5k_pdcache_wbinv_all_16; 493 r5k_pdcache_wbinv_all_16;
494 mco->mco_pdcache_wbinv_range = 494 mco->mco_pdcache_wbinv_range =
495 r5k_pdcache_wbinv_range_16; 495 r5k_pdcache_wbinv_range_16;
496 mco->mco_pdcache_wbinv_range_index = 496 mco->mco_pdcache_wbinv_range_index =
497 r5k_pdcache_wbinv_range_index_16; 497 r5k_pdcache_wbinv_range_index_16;
498 mco->mco_pdcache_inv_range = 498 mco->mco_pdcache_inv_range =
499 r5k_pdcache_inv_range_16; 499 r5k_pdcache_inv_range_16;
500 mco->mco_pdcache_wb_range = 500 mco->mco_pdcache_wb_range =
501 r5k_pdcache_wb_range_16; 501 r5k_pdcache_wb_range_16;
502 break; 502 break;
503 503
504 case 32: 504 case 32:
505 mco->mco_pdcache_wbinv_all = 505 mco->mco_pdcache_wbinv_all =
506 r5k_pdcache_wbinv_all_32; 506 r5k_pdcache_wbinv_all_32;
507 mco->mco_pdcache_wbinv_range = 507 mco->mco_pdcache_wbinv_range =
508 r5k_pdcache_wbinv_range_32; 508 r5k_pdcache_wbinv_range_32;
509 mco->mco_pdcache_wbinv_range_index = 509 mco->mco_pdcache_wbinv_range_index =
510 r5k_pdcache_wbinv_range_index_32; 510 r5k_pdcache_wbinv_range_index_32;
511 mco->mco_pdcache_inv_range = 511 mco->mco_pdcache_inv_range =
512 r5k_pdcache_inv_range_32; 512 r5k_pdcache_inv_range_32;
513 mco->mco_pdcache_wb_range = 513 mco->mco_pdcache_wb_range =
514 r5k_pdcache_wb_range_32; 514 r5k_pdcache_wb_range_32;
515 break; 515 break;
516 516
517 default: 517 default:
518 panic("r5k pdcache line size %d", 518 panic("r5k pdcache line size %d",
519 mci->mci_pdcache_line_size); 519 mci->mci_pdcache_line_size);
520 } 520 }
521 521
522 /* 522 /*
523 * Deal with R4600 chip bugs. 523 * Deal with R4600 chip bugs.
524 */ 524 */
525 if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 && 525 if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 &&
526 MIPS_PRID_REV_MAJ(cpu_id) == 1) { 526 MIPS_PRID_REV_MAJ(cpu_id) == 1) {
527 KASSERT(mci->mci_pdcache_line_size == 32); 527 KASSERT(mci->mci_pdcache_line_size == 32);
528 mco->mco_pdcache_wbinv_range = 528 mco->mco_pdcache_wbinv_range =
529 r4600v1_pdcache_wbinv_range_32; 529 r4600v1_pdcache_wbinv_range_32;
530 mco->mco_pdcache_inv_range = 530 mco->mco_pdcache_inv_range =
531 r4600v1_pdcache_inv_range_32; 531 r4600v1_pdcache_inv_range_32;
532 mco->mco_pdcache_wb_range = 532 mco->mco_pdcache_wb_range =
533 r4600v1_pdcache_wb_range_32; 533 r4600v1_pdcache_wb_range_32;
534 } else if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 && 534 } else if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4600 &&
535 MIPS_PRID_REV_MAJ(cpu_id) == 2) { 535 MIPS_PRID_REV_MAJ(cpu_id) == 2) {
536 KASSERT(mci->mci_pdcache_line_size == 32); 536 KASSERT(mci->mci_pdcache_line_size == 32);
537 mco->mco_pdcache_wbinv_range = 537 mco->mco_pdcache_wbinv_range =
538 r4600v2_pdcache_wbinv_range_32; 538 r4600v2_pdcache_wbinv_range_32;
539 mco->mco_pdcache_inv_range = 539 mco->mco_pdcache_inv_range =
540 r4600v2_pdcache_inv_range_32; 540 r4600v2_pdcache_inv_range_32;
541 mco->mco_pdcache_wb_range = 541 mco->mco_pdcache_wb_range =
542 r4600v2_pdcache_wb_range_32; 542 r4600v2_pdcache_wb_range_32;
543 } 543 }
544 544
545 /* 545 /*
546 * Deal with VR4131 chip bugs. 546 * Deal with VR4131 chip bugs.
547 */ 547 */
548 if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100 && 548 if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100 &&
549 MIPS_PRID_REV_MAJ(cpu_id) == 8) { 549 MIPS_PRID_REV_MAJ(cpu_id) == 8) {
550 KASSERT(mci->mci_pdcache_line_size == 16); 550 KASSERT(mci->mci_pdcache_line_size == 16);
551 mco->mco_pdcache_wbinv_range = 551 mco->mco_pdcache_wbinv_range =
552 vr4131v1_pdcache_wbinv_range_16; 552 vr4131v1_pdcache_wbinv_range_16;
553 } 553 }
554 554
555 /* Virtually-indexed cache; no use for colors. */ 555 /* Virtually-indexed cache; no use for colors. */
556 break; 556 break;
557#ifdef ENABLE_MIPS4_CACHE_R10K 557#ifdef ENABLE_MIPS4_CACHE_R10K
558 case MIPS_R10000: 558 case MIPS_R10000:
559 case MIPS_R12000: 559 case MIPS_R12000:
560 case MIPS_R14000: 560 case MIPS_R14000:
561 mci->mci_picache_ways = 2; 561 mci->mci_picache_ways = 2;
562 mci->mci_pdcache_ways = 2; 562 mci->mci_pdcache_ways = 2;
563 mci->mci_sdcache_ways = 2; 563 mci->mci_sdcache_ways = 2;
564 564
565 mips4_get_cache_config(csizebase); 565 mips4_get_cache_config(csizebase);
566 566
567 /* VCE is handled by hardware */ 567 /* VCE is handled by hardware */
568 568
569 mco->mco_icache_sync_all = 569 mco->mco_icache_sync_all =
570 r10k_icache_sync_all; 570 r10k_icache_sync_all;
571 mco->mco_icache_sync_range = 571 mco->mco_icache_sync_range =
572 r10k_icache_sync_range; 572 r10k_icache_sync_range;
573 mco->mco_icache_sync_range_index = 573 mco->mco_icache_sync_range_index =
574 r10k_icache_sync_range_index; 574 r10k_icache_sync_range_index;
575 mco->mco_pdcache_wbinv_all = 575 mco->mco_pdcache_wbinv_all =
576 r10k_pdcache_wbinv_all; 576 r10k_pdcache_wbinv_all;
577 mco->mco_pdcache_wbinv_range = 577 mco->mco_pdcache_wbinv_range =
578 r10k_pdcache_wbinv_range; 578 r10k_pdcache_wbinv_range;
579 mco->mco_pdcache_wbinv_range_index = 579 mco->mco_pdcache_wbinv_range_index =
580 r10k_pdcache_wbinv_range_index; 580 r10k_pdcache_wbinv_range_index;
581 mco->mco_pdcache_inv_range = 581 mco->mco_pdcache_inv_range =
582 r10k_pdcache_inv_range; 582 r10k_pdcache_inv_range;
583 mco->mco_pdcache_wb_range = 583 mco->mco_pdcache_wb_range =
584 r10k_pdcache_wb_range; 584 r10k_pdcache_wb_range;
585 break; 585 break;
586#endif /* ENABLE_MIPS4_CACHE_R10K */ 586#endif /* ENABLE_MIPS4_CACHE_R10K */
587#ifdef MIPS3_LOONGSON2 587#ifdef MIPS3_LOONGSON2
588 case MIPS_LOONGSON2: 588 case MIPS_LOONGSON2:
589 mips_picache_ways = 4; 589 mci->mci_picache_ways = 4;
590 mips_pdcache_ways = 4; 590 mci->mci_pdcache_ways = 4;
591 591
592 mips3_get_cache_config(csizebase); 592 mips3_get_cache_config(csizebase);
593 593
594 mips_sdcache_line_size = 32; /* don't trust config reg */ 594 mci->mci_sdcache_line_size = 32; /* don't trust config reg */
595 595
596 if (mips_picache_size / mips_picache_ways > PAGE_SIZE || 596 if (mci->mci_picache_size / mci->mci_picache_ways > PAGE_SIZE ||
597 mips_pdcache_size / mips_pdcache_ways > PAGE_SIZE) 597 mci->mci_pdcache_size / mci->mci_pdcache_ways > PAGE_SIZE)
598 mips_cache_virtual_alias = 1; 598 mci->mci_cache_virtual_alias = 1;
599 599
600 mips_cache_ops.mco_icache_sync_all = 600 mco->mco_icache_sync_all =
601 ls2_icache_sync_all; 601 ls2_icache_sync_all;
602 mips_cache_ops.mco_icache_sync_range = 602 mco->mco_icache_sync_range =
603 ls2_icache_sync_range; 603 ls2_icache_sync_range;
604 mips_cache_ops.mco_icache_sync_range_index = 604 mco->mco_icache_sync_range_index =
605 ls2_icache_sync_range_index; 605 ls2_icache_sync_range_index;
606 606
607 mips_cache_ops.mco_pdcache_wbinv_all = 607 mco->mco_pdcache_wbinv_all =
608 ls2_pdcache_wbinv_all; 608 ls2_pdcache_wbinv_all;
609 mips_cache_ops.mco_pdcache_wbinv_range = 609 mco->mco_pdcache_wbinv_range =
610 ls2_pdcache_wbinv_range; 610 ls2_pdcache_wbinv_range;
611 mips_cache_ops.mco_pdcache_wbinv_range_index = 611 mco->mco_pdcache_wbinv_range_index =
612 ls2_pdcache_wbinv_range_index; 612 ls2_pdcache_wbinv_range_index;
613 mips_cache_ops.mco_pdcache_inv_range = 613 mco->mco_pdcache_inv_range =
614 ls2_pdcache_inv_range; 614 ls2_pdcache_inv_range;
615 mips_cache_ops.mco_pdcache_wb_range = 615 mco->mco_pdcache_wb_range =
616 ls2_pdcache_wb_range; 616 ls2_pdcache_wb_range;
617 617
618 /* 618 /*
619 * For current version chips, [the] operating system is 619 * For current version chips, [the] operating system is
620 * obliged to eliminate the potential for virtual aliasing. 620 * obliged to eliminate the potential for virtual aliasing.
621 */ 621 */
622 uvmexp.ncolors = mips_pdcache_ways; 622 uvmexp.ncolors = mci->mci_pdcache_ways;
623 break; 623 break;
624#endif 624#endif
625#endif /* MIPS3 || MIPS4 */ 625#endif /* MIPS3 || MIPS4 */
626 default: 626 default:
627 panic("can't handle primary cache on impl 0x%x", 627 panic("can't handle primary cache on impl 0x%x",
628 MIPS_PRID_IMPL(cpu_id)); 628 MIPS_PRID_IMPL(cpu_id));
629 } 629 }
630 630
631 /* 631 /*
632 * Compute the "way mask" for each cache. 632 * Compute the "way mask" for each cache.
633 */ 633 */
634 if (mci->mci_picache_size) { 634 if (mci->mci_picache_size) {
635 KASSERT(mci->mci_picache_ways != 0); 635 KASSERT(mci->mci_picache_ways != 0);
636 mci->mci_picache_way_size = (mci->mci_picache_size / mci->mci_picache_ways); 636 mci->mci_picache_way_size = (mci->mci_picache_size / mci->mci_picache_ways);
637 mci->mci_picache_way_mask = mci->mci_picache_way_size - 1; 637 mci->mci_picache_way_mask = mci->mci_picache_way_size - 1;
638 } 638 }
639 if (mci->mci_pdcache_size) { 639 if (mci->mci_pdcache_size) {
640 KASSERT(mci->mci_pdcache_ways != 0); 640 KASSERT(mci->mci_pdcache_ways != 0);
641 mci->mci_pdcache_way_size = (mci->mci_pdcache_size / mci->mci_pdcache_ways); 641 mci->mci_pdcache_way_size = (mci->mci_pdcache_size / mci->mci_pdcache_ways);
642 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1; 642 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1;
643 } 643 }
644 644
645 mips_dcache_compute_align(); 645 mips_dcache_compute_align();
646 646
647 if (mci->mci_sdcache_line_size == 0) 647 if (mci->mci_sdcache_line_size == 0)
648 return; 648 return;
649 649
650 /* 650 /*
651 * Configure the secondary cache. 651 * Configure the secondary cache.
652 */ 652 */
653 switch (MIPS_PRID_IMPL(cpu_id)) { 653 switch (MIPS_PRID_IMPL(cpu_id)) {
654#if defined(MIPS3) || defined(MIPS4) 654#if defined(MIPS3) || defined(MIPS4)
655 case MIPS_R4000: 655 case MIPS_R4000:
656 /* 656 /*
657 * R4000/R4400 detects virtual alias by VCE as if 657 * R4000/R4400 detects virtual alias by VCE as if
658 * its primary cache size were 32KB, because it always 658 * its primary cache size were 32KB, because it always
659 * compares 3 bits of vaddr[14:12] which causes 659 * compares 3 bits of vaddr[14:12] which causes
660 * primary cache miss and PIdx[2:0] in the secondary 660 * primary cache miss and PIdx[2:0] in the secondary
661 * cache tag regardless of its primary cache size. 661 * cache tag regardless of its primary cache size.
662 * i.e. VCE could happen even if there is no actual 662 * i.e. VCE could happen even if there is no actual
663 * virtual alias on its 8KB or 16KB primary cache 663 * virtual alias on its 8KB or 16KB primary cache
664 * which has only 1 or 2 bit valid PIdx in 4KB page. 664 * which has only 1 or 2 bit valid PIdx in 4KB page.
665 * Actual primary cache size is ignored wrt VCE 665 * Actual primary cache size is ignored wrt VCE
666 * and virtual aliases are resolved by the VCE hander, 666 * and virtual aliases are resolved by the VCE hander,
667 * but it's still worth to avoid unnecessary VCE by 667 * but it's still worth to avoid unnecessary VCE by
668 * setting alias mask and prefer mask to 32K, though 668 * setting alias mask and prefer mask to 32K, though
669 * some other possible aliases (maybe caused by KSEG0 669 * some other possible aliases (maybe caused by KSEG0
670 * accesses which can't be managed by PMAP_PREFER(9)) 670 * accesses which can't be managed by PMAP_PREFER(9))
671 * will still be resolved by the VCED/VCEI handler. 671 * will still be resolved by the VCED/VCEI handler.
672 */ 672 */
673 mci->mci_cache_alias_mask = 673 mci->mci_cache_alias_mask =
674 (MIPS3_MAX_PCACHE_SIZE - 1) & ~PAGE_MASK; /* va[14:12] */ 674 (MIPS3_MAX_PCACHE_SIZE - 1) & ~PAGE_MASK; /* va[14:12] */
675 mci->mci_cache_prefer_mask = MIPS3_MAX_PCACHE_SIZE - 1; 675 mci->mci_cache_prefer_mask = MIPS3_MAX_PCACHE_SIZE - 1;
676 676
677 mci->mci_cache_virtual_alias = 0; 677 mci->mci_cache_virtual_alias = 0;
678 /* FALLTHROUGH */ 678 /* FALLTHROUGH */
679 case MIPS_R4600: 679 case MIPS_R4600:
680#ifdef ENABLE_MIPS_R4700 680#ifdef ENABLE_MIPS_R4700
681 case MIPS_R4700: 681 case MIPS_R4700:
682#endif 682#endif
683 switch (mci->mci_sdcache_ways) { 683 switch (mci->mci_sdcache_ways) {
684 case 1: 684 case 1:
685 switch (mci->mci_sdcache_line_size) { 685 switch (mci->mci_sdcache_line_size) {
686 case 32: 686 case 32:
687 mco->mco_sdcache_wbinv_all = 687 mco->mco_sdcache_wbinv_all =
688 r4k_sdcache_wbinv_all_32; 688 r4k_sdcache_wbinv_all_32;
689 mco->mco_sdcache_wbinv_range = 689 mco->mco_sdcache_wbinv_range =
690 r4k_sdcache_wbinv_range_32; 690 r4k_sdcache_wbinv_range_32;
691 mco->mco_sdcache_wbinv_range_index = 691 mco->mco_sdcache_wbinv_range_index =
692 r4k_sdcache_wbinv_range_index_32; 692 r4k_sdcache_wbinv_range_index_32;
693 mco->mco_sdcache_inv_range = 693 mco->mco_sdcache_inv_range =
694 r4k_sdcache_inv_range_32; 694 r4k_sdcache_inv_range_32;
695 mco->mco_sdcache_wb_range = 695 mco->mco_sdcache_wb_range =
696 r4k_sdcache_wb_range_32; 696 r4k_sdcache_wb_range_32;
697 break; 697 break;
698 698
699 case 16: 699 case 16:
700 case 64: 700 case 64:
701 mco->mco_sdcache_wbinv_all = 701 mco->mco_sdcache_wbinv_all =
702 r4k_sdcache_wbinv_all_generic; 702 r4k_sdcache_wbinv_all_generic;
703 mco->mco_sdcache_wbinv_range = 703 mco->mco_sdcache_wbinv_range =
704 r4k_sdcache_wbinv_range_generic; 704 r4k_sdcache_wbinv_range_generic;
705 mco->mco_sdcache_wbinv_range_index = 705 mco->mco_sdcache_wbinv_range_index =
706 r4k_sdcache_wbinv_range_index_generic; 706 r4k_sdcache_wbinv_range_index_generic;
707 mco->mco_sdcache_inv_range = 707 mco->mco_sdcache_inv_range =
708 r4k_sdcache_inv_range_generic; 708 r4k_sdcache_inv_range_generic;
709 mco->mco_sdcache_wb_range = 709 mco->mco_sdcache_wb_range =
710 r4k_sdcache_wb_range_generic; 710 r4k_sdcache_wb_range_generic;
711 break; 711 break;
712 712
713 case 128: 713 case 128:
714 mco->mco_sdcache_wbinv_all = 714 mco->mco_sdcache_wbinv_all =
715 r4k_sdcache_wbinv_all_128; 715 r4k_sdcache_wbinv_all_128;
716 mco->mco_sdcache_wbinv_range = 716 mco->mco_sdcache_wbinv_range =
717 r4k_sdcache_wbinv_range_128; 717 r4k_sdcache_wbinv_range_128;
718 mco->mco_sdcache_wbinv_range_index = 718 mco->mco_sdcache_wbinv_range_index =
719 r4k_sdcache_wbinv_range_index_128; 719 r4k_sdcache_wbinv_range_index_128;
720 mco->mco_sdcache_inv_range = 720 mco->mco_sdcache_inv_range =
721 r4k_sdcache_inv_range_128; 721 r4k_sdcache_inv_range_128;
722 mco->mco_sdcache_wb_range = 722 mco->mco_sdcache_wb_range =
723 r4k_sdcache_wb_range_128; 723 r4k_sdcache_wb_range_128;
724 break; 724 break;
725 725
726 default: 726 default:
727 panic("r4k sdcache %d way line size %d", 727 panic("r4k sdcache %d way line size %d",
728 mci->mci_sdcache_ways, mci->mci_sdcache_line_size); 728 mci->mci_sdcache_ways, mci->mci_sdcache_line_size);
729 } 729 }
730 break; 730 break;
731 731
732 default: 732 default:
733 panic("r4k sdcache %d way line size %d", 733 panic("r4k sdcache %d way line size %d",
734 mci->mci_sdcache_ways, mci->mci_sdcache_line_size); 734 mci->mci_sdcache_ways, mci->mci_sdcache_line_size);
735 } 735 }
736 break; 736 break;
737#ifndef ENABLE_MIPS_R3NKK 737#ifndef ENABLE_MIPS_R3NKK
738 case MIPS_R5000: 738 case MIPS_R5000:
739#endif 739#endif
740 case MIPS_RM5200: 740 case MIPS_RM5200:
741 mci->mci_sdcache_write_through = true; 741 mci->mci_sdcache_write_through = true;
742 mco->mco_sdcache_wbinv_all = 742 mco->mco_sdcache_wbinv_all =
743 r5k_sdcache_wbinv_all; 743 r5k_sdcache_wbinv_all;
744 mco->mco_sdcache_wbinv_range = 744 mco->mco_sdcache_wbinv_range =
745 r5k_sdcache_wbinv_range; 745 r5k_sdcache_wbinv_range;
746 mco->mco_sdcache_wbinv_range_index = 746 mco->mco_sdcache_wbinv_range_index =
747 r5k_sdcache_wbinv_range_index; 747 r5k_sdcache_wbinv_range_index;
748 mco->mco_sdcache_inv_range = 748 mco->mco_sdcache_inv_range =
749 r5k_sdcache_wbinv_range; 749 r5k_sdcache_wbinv_range;
750 mco->mco_sdcache_wb_range = 750 mco->mco_sdcache_wb_range =
751 r5k_sdcache_wb_range; 751 r5k_sdcache_wb_range;
752 break; 752 break;
753#ifdef ENABLE_MIPS4_CACHE_R10K 753#ifdef ENABLE_MIPS4_CACHE_R10K
754 case MIPS_R10000: 754 case MIPS_R10000:
755 case MIPS_R12000: 755 case MIPS_R12000:
756 case MIPS_R14000: 756 case MIPS_R14000:
757 mco->mco_sdcache_wbinv_all = 757 mco->mco_sdcache_wbinv_all =
758 r10k_sdcache_wbinv_all; 758 r10k_sdcache_wbinv_all;
759 mco->mco_sdcache_wbinv_range = 759 mco->mco_sdcache_wbinv_range =
760 r10k_sdcache_wbinv_range; 760 r10k_sdcache_wbinv_range;
761 mco->mco_sdcache_wbinv_range_index = 761 mco->mco_sdcache_wbinv_range_index =
762 r10k_sdcache_wbinv_range_index; 762 r10k_sdcache_wbinv_range_index;
763 mco->mco_sdcache_inv_range = 763 mco->mco_sdcache_inv_range =
764 r10k_sdcache_inv_range; 764 r10k_sdcache_inv_range;
765 mco->mco_sdcache_wb_range = 765 mco->mco_sdcache_wb_range =
766 r10k_sdcache_wb_range; 766 r10k_sdcache_wb_range;
767 break; 767 break;
768#endif /* ENABLE_MIPS4_CACHE_R10K */ 768#endif /* ENABLE_MIPS4_CACHE_R10K */
769#ifdef MIPS3_LOONGSON2 769#ifdef MIPS3_LOONGSON2
770 case MIPS_LOONGSON2: 770 case MIPS_LOONGSON2:
771 mips_sdcache_ways = 4; 771 mci->mci_sdcache_ways = 4;
772 mips_sdcache_size = 512*1024; 772 mci->mci_sdcache_size = 512*1024;
773 mips_scache_unified = 1; 773 mci->mci_scache_unified = 1;
774 774
775 mips_cache_ops.mco_sdcache_wbinv_all = 775 mco->mco_sdcache_wbinv_all =
776 ls2_sdcache_wbinv_all; 776 ls2_sdcache_wbinv_all;
777 mips_cache_ops.mco_sdcache_wbinv_range = 777 mco->mco_sdcache_wbinv_range =
778 ls2_sdcache_wbinv_range; 778 ls2_sdcache_wbinv_range;
779 mips_cache_ops.mco_sdcache_wbinv_range_index = 779 mco->mco_sdcache_wbinv_range_index =
780 ls2_sdcache_wbinv_range_index; 780 ls2_sdcache_wbinv_range_index;
781 mips_cache_ops.mco_sdcache_inv_range = 781 mco->mco_sdcache_inv_range =
782 ls2_sdcache_inv_range; 782 ls2_sdcache_inv_range;
783 mips_cache_ops.mco_sdcache_wb_range = 783 mco->mco_sdcache_wb_range =
784 ls2_sdcache_wb_range; 784 ls2_sdcache_wb_range;
785 785
786 /* 786 /*
787 * The secondary cache is physically indexed and tagged 787 * The secondary cache is physically indexed and tagged
788 */ 788 */
789 break; 789 break;
790#endif 790#endif
791#endif /* MIPS3 || MIPS4 */ 791#endif /* MIPS3 || MIPS4 */
792 792
793 default: 793 default:
794 panic("can't handle secondary cache on impl 0x%x", 794 panic("can't handle secondary cache on impl 0x%x",
795 MIPS_PRID_IMPL(cpu_id)); 795 MIPS_PRID_IMPL(cpu_id));
796 } 796 }
797 797
798 /* 798 /*
799 * Compute the "way mask" for each secondary cache. 799 * Compute the "way mask" for each secondary cache.
800 */ 800 */
801 if (mci->mci_sdcache_size) { 801 if (mci->mci_sdcache_size) {
802 KASSERT(mci->mci_sdcache_ways != 0); 802 KASSERT(mci->mci_sdcache_ways != 0);
803 mci->mci_sdcache_way_size = (mci->mci_sdcache_size / mci->mci_sdcache_ways); 803 mci->mci_sdcache_way_size = (mci->mci_sdcache_size / mci->mci_sdcache_ways);
804 mci->mci_sdcache_way_mask = mci->mci_sdcache_way_size - 1; 804 mci->mci_sdcache_way_mask = mci->mci_sdcache_way_size - 1;
805 } 805 }
806 806
807 mips_dcache_compute_align(); 807 mips_dcache_compute_align();
808} 808}
809 809
810#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4) 810#if defined(MIPS1) || defined(MIPS3) || defined(MIPS4)
811void 811void
812mips_config_cache_emips(void) 812mips_config_cache_emips(void)
813{ 813{
814 struct mips_cache_info * const mci = &mips_cache_info; 814 struct mips_cache_info * const mci = &mips_cache_info;
815 struct mips_cache_ops * const mco = &mips_cache_ops; 815 struct mips_cache_ops * const mco = &mips_cache_ops;
816 const mips_prid_t cpu_id = mips_options.mips_cpu_id; 816 const mips_prid_t cpu_id = mips_options.mips_cpu_id;
817 KASSERT(PAGE_SIZE != 0); 817 KASSERT(PAGE_SIZE != 0);
818 818
819 /* 819 /*
820 * Configure primary caches. 820 * Configure primary caches.
821 */ 821 */
822 switch (MIPS_PRID_IMPL(cpu_id)) { 822 switch (MIPS_PRID_IMPL(cpu_id)) {
823 case MIPS_eMIPS: 823 case MIPS_eMIPS:
824 mci->mci_picache_size = 0; 824 mci->mci_picache_size = 0;
825 mci->mci_pdcache_size = 0; 825 mci->mci_pdcache_size = 0;
826 826
827 mci->mci_picache_line_size = 4; 827 mci->mci_picache_line_size = 4;
828 mci->mci_pdcache_line_size = 4; 828 mci->mci_pdcache_line_size = 4;
829 829
830 mci->mci_picache_ways = 1; 830 mci->mci_picache_ways = 1;
831 mci->mci_pdcache_ways = 1; 831 mci->mci_pdcache_ways = 1;
832 832
833 mci->mci_pdcache_write_through = 1; 833 mci->mci_pdcache_write_through = 1;
834 834
835 mco->mco_icache_sync_all = no_cache_op; 835 mco->mco_icache_sync_all = no_cache_op;
836 mco->mco_icache_sync_range = no_cache_op_range; 836 mco->mco_icache_sync_range = no_cache_op_range;
837 mco->mco_icache_sync_range_index = mco->mco_icache_sync_range; 837 mco->mco_icache_sync_range_index = mco->mco_icache_sync_range;
838 838
839 mco->mco_pdcache_wbinv_all = no_cache_op; 839 mco->mco_pdcache_wbinv_all = no_cache_op;
840 mco->mco_pdcache_wbinv_range = no_cache_op_range; 840 mco->mco_pdcache_wbinv_range = no_cache_op_range;
841 mco->mco_pdcache_wbinv_range_index = 841 mco->mco_pdcache_wbinv_range_index =
842 mco->mco_pdcache_wbinv_range; 842 mco->mco_pdcache_wbinv_range;
843 mco->mco_pdcache_inv_range = no_cache_op_range; 843 mco->mco_pdcache_inv_range = no_cache_op_range;
844 mco->mco_pdcache_wb_range = no_cache_op_range; 844 mco->mco_pdcache_wb_range = no_cache_op_range;
845 845
846 uvmexp.ncolors = 1; 846 uvmexp.ncolors = 1;
847 break; 847 break;
848 848
849 default: 849 default:
850 panic("%s: unsupported eMIPS", __func__); 850 panic("%s: unsupported eMIPS", __func__);
851 } 851 }
852} 852}
853#endif 853#endif
854 854
855#ifdef MIPS1 855#ifdef MIPS1
856#ifdef ENABLE_MIPS_TX3900 856#ifdef ENABLE_MIPS_TX3900
857/* 857/*
858 * tx3900_get_cache_config: 858 * tx3900_get_cache_config:
859 * 859 *
860 * Fetch cache size information for the TX3900. 860 * Fetch cache size information for the TX3900.
861 */ 861 */
862void 862void
863tx3900_get_cache_config(void) 863tx3900_get_cache_config(void)
864{ 864{
865 struct mips_cache_info * const mci = &mips_cache_info; 865 struct mips_cache_info * const mci = &mips_cache_info;
866 uint32_t config; 866 uint32_t config;
867 867
868 config = tx3900_cp0_config_read(); 868 config = tx3900_cp0_config_read();
869 869
870 mci->mci_picache_size = R3900_C_SIZE_MIN << 870 mci->mci_picache_size = R3900_C_SIZE_MIN <<
871 ((config & R3900_CONFIG_ICS_MASK) >> R3900_CONFIG_ICS_SHIFT); 871 ((config & R3900_CONFIG_ICS_MASK) >> R3900_CONFIG_ICS_SHIFT);
872 872
873 mci->mci_pdcache_size = R3900_C_SIZE_MIN << 873 mci->mci_pdcache_size = R3900_C_SIZE_MIN <<
874 ((config & R3900_CONFIG_DCS_MASK) >> R3900_CONFIG_DCS_SHIFT); 874 ((config & R3900_CONFIG_DCS_MASK) >> R3900_CONFIG_DCS_SHIFT);
875} 875}
876 876
877/* 877/*
878 * tx3920_get_cache_config: 878 * tx3920_get_cache_config:
879 * 879 *
880 * Fetch cache size information for the TX3920. 880 * Fetch cache size information for the TX3920.
881 */ 881 */
882void 882void
883tx3920_get_cache_config(void) 883tx3920_get_cache_config(void)
884{ 884{
885 struct mips_cache_info * const mci = &mips_cache_info; 885 struct mips_cache_info * const mci = &mips_cache_info;
886 886
887 /* Size is the same as TX3900. */ 887 /* Size is the same as TX3900. */
888 tx3900_get_cache_config(); 888 tx3900_get_cache_config();
889 889
890 /* Now determine write-through/write-back mode. */ 890 /* Now determine write-through/write-back mode. */
891 if ((tx3900_cp0_config_read() & R3900_CONFIG_WBON) == 0) 891 if ((tx3900_cp0_config_read() & R3900_CONFIG_WBON) == 0)
892 mci->mci_pdcache_write_through = true; 892 mci->mci_pdcache_write_through = true;
893} 893}
894 894
895/* 895/*
896 * tx39_cache_config_write_through: 896 * tx39_cache_config_write_through:
897 * 897 *
898 * TX3922 write-through D-cache mode. 898 * TX3922 write-through D-cache mode.
899 * for TX3912, no meaning. (no write-back mode) 899 * for TX3912, no meaning. (no write-back mode)
900 */ 900 */
901void 901void
902tx39_cache_config_write_through(void) 902tx39_cache_config_write_through(void)
903{ 903{
904 u_int32_t r; 904 u_int32_t r;
905 905
906 mips_dcache_wbinv_all(); 906 mips_dcache_wbinv_all();
907 907
908 __asm volatile("mfc0 %0, $3" : "=r"(r)); 908 __asm volatile("mfc0 %0, $3" : "=r"(r));
909 r &= 0xffffdfff; 909 r &= 0xffffdfff;
910 __asm volatile("mtc0 %0, $3" : : "r"(r)); 910 __asm volatile("mtc0 %0, $3" : : "r"(r));
911} 911}
912 912
913#endif /* ENABLE_MIPS_TX3900 */ 913#endif /* ENABLE_MIPS_TX3900 */
914#endif /* MIPS1 */ 914#endif /* MIPS1 */
915 915
916#if defined(MIPS3) || defined(MIPS4) 916#if defined(MIPS3) || defined(MIPS4)
917/* 917/*
918 * mips3_get_cache_config: 918 * mips3_get_cache_config:
919 * 919 *
920 * Fetch the cache config information for a MIPS-3 or MIPS-4 920 * Fetch the cache config information for a MIPS-3 or MIPS-4
921 * processor (virtually-indexed cache). 921 * processor (virtually-indexed cache).
922 * 922 *
923 * NOTE: Fetching the size of the secondary cache is something 923 * NOTE: Fetching the size of the secondary cache is something
924 * that platform specific code has to do. We'd appreciate it 924 * that platform specific code has to do. We'd appreciate it
925 * if they initialized the size before now. 925 * if they initialized the size before now.
926 * 926 *
927 * ALSO NOTE: The number of ways in the cache must already be 927 * ALSO NOTE: The number of ways in the cache must already be
928 * initialized. 928 * initialized.
929 */ 929 */
930void 930void
931mips3_get_cache_config(int csizebase) 931mips3_get_cache_config(int csizebase)
932{ 932{
933 struct mips_cache_info * const mci = &mips_cache_info; 933 struct mips_cache_info * const mci = &mips_cache_info;
934 const mips_prid_t cpu_id = mips_options.mips_cpu_id; 934 const mips_prid_t cpu_id = mips_options.mips_cpu_id;
935 bool has_sdcache_enable = false; 935 bool has_sdcache_enable = false;
936 uint32_t config = mips3_cp0_config_read(); 936 uint32_t config = mips3_cp0_config_read();
937 937
938 mci->mci_picache_size = MIPS3_CONFIG_CACHE_SIZE(config, 938 mci->mci_picache_size = MIPS3_CONFIG_CACHE_SIZE(config,
939 MIPS3_CONFIG_IC_MASK, csizebase, MIPS3_CONFIG_IC_SHIFT); 939 MIPS3_CONFIG_IC_MASK, csizebase, MIPS3_CONFIG_IC_SHIFT);
940 mci->mci_picache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config, 940 mci->mci_picache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config,
941 MIPS3_CONFIG_IB); 941 MIPS3_CONFIG_IB);
942 942
943 mci->mci_pdcache_size = MIPS3_CONFIG_CACHE_SIZE(config, 943 mci->mci_pdcache_size = MIPS3_CONFIG_CACHE_SIZE(config,
944 MIPS3_CONFIG_DC_MASK, csizebase, MIPS3_CONFIG_DC_SHIFT); 944 MIPS3_CONFIG_DC_MASK, csizebase, MIPS3_CONFIG_DC_SHIFT);
945 mci->mci_pdcache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config, 945 mci->mci_pdcache_line_size = MIPS3_CONFIG_CACHE_L1_LSIZE(config,
946 MIPS3_CONFIG_DB); 946 MIPS3_CONFIG_DB);
947 947
948 mci->mci_cache_alias_mask = 948 mci->mci_cache_alias_mask =
949 ((mci->mci_pdcache_size / mci->mci_pdcache_ways) - 1) & ~PAGE_MASK; 949 ((mci->mci_pdcache_size / mci->mci_pdcache_ways) - 1) & ~PAGE_MASK;
950 mci->mci_cache_prefer_mask = 950 mci->mci_cache_prefer_mask =
951 max(mci->mci_pdcache_size, mci->mci_picache_size) - 1; 951 max(mci->mci_pdcache_size, mci->mci_picache_size) - 1;
952 uvmexp.ncolors = (mci->mci_cache_alias_mask >> PAGE_SHIFT) + 1; 952 uvmexp.ncolors = (mci->mci_cache_alias_mask >> PAGE_SHIFT) + 1;
953 953
954 switch(MIPS_PRID_IMPL(cpu_id)) { 954 switch(MIPS_PRID_IMPL(cpu_id)) {
955#ifndef ENABLE_MIPS_R3NKK 955#ifndef ENABLE_MIPS_R3NKK
956 case MIPS_R5000: 956 case MIPS_R5000:
957#endif 957#endif
958 case MIPS_RM5200: 958 case MIPS_RM5200:
959 has_sdcache_enable = true; 959 has_sdcache_enable = true;
960 break; 960 break;
961 } 961 }
962 962
963 /*  963 /*
964 * If CPU has a software-enabled L2 cache, check both if it's 964 * If CPU has a software-enabled L2 cache, check both if it's
965 * present and if it's enabled before making assumptions the 965 * present and if it's enabled before making assumptions the
966 * L2 is usable. If the L2 is disabled, we treat it the same 966 * L2 is usable. If the L2 is disabled, we treat it the same
967 * as if there were no L2 cache. 967 * as if there were no L2 cache.
968 */ 968 */
969 if ((config & MIPS3_CONFIG_SC) == 0) { 969 if ((config & MIPS3_CONFIG_SC) == 0) {
970 if (has_sdcache_enable == 0 || 970 if (has_sdcache_enable == 0 ||
971 (has_sdcache_enable && (config & MIPS3_CONFIG_SE))) { 971 (has_sdcache_enable && (config & MIPS3_CONFIG_SE))) {
972 mci->mci_sdcache_line_size =  972 mci->mci_sdcache_line_size =
973 MIPS3_CONFIG_CACHE_L2_LSIZE(config); 973 MIPS3_CONFIG_CACHE_L2_LSIZE(config);
974 if ((config & MIPS3_CONFIG_SS) == 0) 974 if ((config & MIPS3_CONFIG_SS) == 0)
975 mci->mci_scache_unified = true; 975 mci->mci_scache_unified = true;
976 } else { 976 } else {
977#ifdef CACHE_DEBUG 977#ifdef CACHE_DEBUG
978 printf("External cache detected, but is disabled -- WILL NOT ENABLE!\n"); 978 printf("External cache detected, but is disabled -- WILL NOT ENABLE!\n");
979#endif /* CACHE_DEBUG */ 979#endif /* CACHE_DEBUG */
980 } 980 }
981 } 981 }
982} 982}
983 983
984#ifdef ENABLE_MIPS4_CACHE_R10K 984#ifdef ENABLE_MIPS4_CACHE_R10K
985void 985void
986mips4_get_cache_config(int csizebase) 986mips4_get_cache_config(int csizebase)
987{ 987{
988 struct mips_cache_info * const mci = &mips_cache_info; 988 struct mips_cache_info * const mci = &mips_cache_info;
989 uint32_t config = mips3_cp0_config_read(); 989 uint32_t config = mips3_cp0_config_read();
990 990
991 mci->mci_picache_size = MIPS4_CONFIG_CACHE_SIZE(config, 991 mci->mci_picache_size = MIPS4_CONFIG_CACHE_SIZE(config,
992 MIPS4_CONFIG_IC_MASK, csizebase, MIPS4_CONFIG_IC_SHIFT); 992 MIPS4_CONFIG_IC_MASK, csizebase, MIPS4_CONFIG_IC_SHIFT);
993 mci->mci_picache_line_size = 64; /* 64 Byte */ 993 mci->mci_picache_line_size = 64; /* 64 Byte */
994 994
995 mci->mci_pdcache_size = MIPS4_CONFIG_CACHE_SIZE(config, 995 mci->mci_pdcache_size = MIPS4_CONFIG_CACHE_SIZE(config,
996 MIPS4_CONFIG_DC_MASK, csizebase, MIPS4_CONFIG_DC_SHIFT); 996 MIPS4_CONFIG_DC_MASK, csizebase, MIPS4_CONFIG_DC_SHIFT);
997 mci->mci_pdcache_line_size = 32; /* 32 Byte */ 997 mci->mci_pdcache_line_size = 32; /* 32 Byte */
998 998
999 mci->mci_cache_alias_mask = 999 mci->mci_cache_alias_mask =
1000 ((mci->mci_pdcache_size / mci->mci_pdcache_ways) - 1) & ~PAGE_MASK; 1000 ((mci->mci_pdcache_size / mci->mci_pdcache_ways) - 1) & ~PAGE_MASK;
1001 mci->mci_cache_prefer_mask = 1001 mci->mci_cache_prefer_mask =
1002 max(mci->mci_pdcache_size, mci->mci_picache_size) - 1; 1002 max(mci->mci_pdcache_size, mci->mci_picache_size) - 1;
1003} 1003}
1004#endif /* ENABLE_MIPS4_CACHE_R10K */ 1004#endif /* ENABLE_MIPS4_CACHE_R10K */
1005#endif /* MIPS3 || MIPS4 */ 1005#endif /* MIPS3 || MIPS4 */
1006#endif /* MIPS1 || MIPS3 || MIPS4 */ 1006#endif /* MIPS1 || MIPS3 || MIPS4 */
1007 1007
1008#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 1008#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
1009 1009
1010static void cache_noop(void) __unused; 1010static void cache_noop(void) __unused;
1011static void cache_noop(void) {} 1011static void cache_noop(void) {}
1012 1012
1013static void 1013static void
1014mips_config_cache_modern(void) 1014mips_config_cache_modern(void)
1015{ 1015{
1016 struct mips_cache_info * const mci = &mips_cache_info; 1016 struct mips_cache_info * const mci = &mips_cache_info;
1017 struct mips_cache_ops * const mco = &mips_cache_ops; 1017 struct mips_cache_ops * const mco = &mips_cache_ops;
1018 /* MIPS32/MIPS64, use coprocessor 0 config registers */ 1018 /* MIPS32/MIPS64, use coprocessor 0 config registers */
1019 uint32_t cfg, cfg1; 1019 uint32_t cfg, cfg1;
1020 1020
1021 cfg = mips3_cp0_config_read(); 1021 cfg = mips3_cp0_config_read();
1022 cfg1 = mipsNN_cp0_config1_read(); 1022 cfg1 = mipsNN_cp0_config1_read();
1023 1023
1024#ifdef MIPS_DISABLE_L1_CACHE 1024#ifdef MIPS_DISABLE_L1_CACHE
1025 cfg1 &= ~MIPSNN_CFG1_IL_MASK; 1025 cfg1 &= ~MIPSNN_CFG1_IL_MASK;
1026 cfg1 &= ~MIPSNN_CFG1_DL_MASK; 1026 cfg1 &= ~MIPSNN_CFG1_DL_MASK;
1027 mipsNN_cp0_config1_write(cfg1); 1027 mipsNN_cp0_config1_write(cfg1);
1028#endif 1028#endif
1029 1029
1030 /* figure out Dcache params. */ 1030 /* figure out Dcache params. */
1031 switch (MIPSNN_GET(CFG1_DL, cfg1)) { 1031 switch (MIPSNN_GET(CFG1_DL, cfg1)) {
1032 case MIPSNN_CFG1_DL_NONE: 1032 case MIPSNN_CFG1_DL_NONE:
1033 mci->mci_pdcache_line_size = mci->mci_pdcache_way_size = 1033 mci->mci_pdcache_line_size = mci->mci_pdcache_way_size =
1034 mci->mci_pdcache_ways = 0; 1034 mci->mci_pdcache_ways = 0;
1035 break; 1035 break;
1036 case MIPSNN_CFG1_DL_RSVD: 1036 case MIPSNN_CFG1_DL_RSVD:
1037 panic("reserved MIPS32/64 Dcache line size"); 1037 panic("reserved MIPS32/64 Dcache line size");
1038 break; 1038 break;
1039 default: 1039 default:
1040 if (MIPSNN_GET(CFG1_DS, cfg1) == MIPSNN_CFG1_DS_RSVD) 1040 if (MIPSNN_GET(CFG1_DS, cfg1) == MIPSNN_CFG1_DS_RSVD)
1041 panic("reserved MIPS32/64 Dcache sets per way"); 1041 panic("reserved MIPS32/64 Dcache sets per way");
1042 mci->mci_pdcache_line_size = MIPSNN_CFG1_DL(cfg1); 1042 mci->mci_pdcache_line_size = MIPSNN_CFG1_DL(cfg1);
1043 mci->mci_pdcache_way_size = 1043 mci->mci_pdcache_way_size =
1044 mci->mci_pdcache_line_size * MIPSNN_CFG1_DS(cfg1); 1044 mci->mci_pdcache_line_size * MIPSNN_CFG1_DS(cfg1);
1045 mci->mci_pdcache_ways = MIPSNN_CFG1_DA(cfg1) + 1; 1045 mci->mci_pdcache_ways = MIPSNN_CFG1_DA(cfg1) + 1;
1046 1046
1047 /* 1047 /*
1048 * Compute the total size and "way mask" for the 1048 * Compute the total size and "way mask" for the
1049 * primary Dcache. 1049 * primary Dcache.
1050 */ 1050 */
1051 mci->mci_pdcache_size = 1051 mci->mci_pdcache_size =
1052 mci->mci_pdcache_way_size * mci->mci_pdcache_ways; 1052 mci->mci_pdcache_way_size * mci->mci_pdcache_ways;
1053 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1; 1053 mci->mci_pdcache_way_mask = mci->mci_pdcache_way_size - 1;
1054 uvmexp.ncolors = atop(mci->mci_pdcache_size) / mci->mci_pdcache_ways; 1054 uvmexp.ncolors = atop(mci->mci_pdcache_size) / mci->mci_pdcache_ways;
1055 break; 1055 break;
1056 } 1056 }
1057 1057
1058 /* figure out Icache params. */ 1058 /* figure out Icache params. */
1059 switch (MIPSNN_GET(CFG1_IL, cfg1)) { 1059 switch (MIPSNN_GET(CFG1_IL, cfg1)) {
1060 case MIPSNN_CFG1_IL_NONE: 1060 case MIPSNN_CFG1_IL_NONE:
1061 mci->mci_picache_line_size = mci->mci_picache_way_size = 1061 mci->mci_picache_line_size = mci->mci_picache_way_size =
1062 mci->mci_picache_ways = 0; 1062 mci->mci_picache_ways = 0;
1063 break; 1063 break;
1064 case MIPSNN_CFG1_IL_RSVD: 1064 case MIPSNN_CFG1_IL_RSVD:
1065 panic("reserved MIPS32/64 Icache line size"); 1065 panic("reserved MIPS32/64 Icache line size");
1066 break; 1066 break;
1067 default: 1067 default:
1068 if (MIPSNN_GET(CFG1_IS, cfg1) == MIPSNN_CFG1_IS_RSVD) 1068 if (MIPSNN_GET(CFG1_IS, cfg1) == MIPSNN_CFG1_IS_RSVD)
1069 panic("reserved MIPS32/64 Icache sets per way"); 1069 panic("reserved MIPS32/64 Icache sets per way");
1070 mci->mci_picache_line_size = MIPSNN_CFG1_IL(cfg1); 1070 mci->mci_picache_line_size = MIPSNN_CFG1_IL(cfg1);
1071 mci->mci_picache_way_size = 1071 mci->mci_picache_way_size =
1072 mci->mci_picache_line_size * MIPSNN_CFG1_IS(cfg1); 1072 mci->mci_picache_line_size * MIPSNN_CFG1_IS(cfg1);
1073 mci->mci_picache_ways = MIPSNN_CFG1_IA(cfg1) + 1; 1073 mci->mci_picache_ways = MIPSNN_CFG1_IA(cfg1) + 1;
1074 1074
1075 /* 1075 /*
1076 * Compute the total size and "way mask" for the 1076 * Compute the total size and "way mask" for the
1077 * primary Icache. 1077 * primary Icache.
1078 */ 1078 */
1079 mci->mci_picache_size = 1079 mci->mci_picache_size =
1080 mci->mci_picache_way_size * mci->mci_picache_ways; 1080 mci->mci_picache_way_size * mci->mci_picache_ways;
1081 mci->mci_picache_way_mask = mci->mci_picache_way_size - 1; 1081 mci->mci_picache_way_mask = mci->mci_picache_way_size - 1;
1082 break; 1082 break;
1083 } 1083 }
1084 1084
1085#define CACHE_DEBUG 1085#define CACHE_DEBUG
1086#ifdef CACHE_DEBUG 1086#ifdef CACHE_DEBUG
1087 printf("MIPS32/64 params: cpu arch: %d\n", mips_options.mips_cpu_arch); 1087 printf("MIPS32/64 params: cpu arch: %d\n", mips_options.mips_cpu_arch);
1088 printf("MIPS32/64 params: TLB entries: %d\n", mips_options.mips_num_tlb_entries); 1088 printf("MIPS32/64 params: TLB entries: %d\n", mips_options.mips_num_tlb_entries);
1089 if (mci->mci_picache_line_size == 0) 1089 if (mci->mci_picache_line_size == 0)
1090 printf("MIPS32/64 params: no Icache\n"); 1090 printf("MIPS32/64 params: no Icache\n");
1091 else { 1091 else {
1092 printf("MIPS32/64 params: Icache: line = %d, total = %d, " 1092 printf("MIPS32/64 params: Icache: line = %d, total = %d, "
1093 "ways = %d\n", mci->mci_picache_line_size, 1093 "ways = %d\n", mci->mci_picache_line_size,
1094 mci->mci_picache_way_size * mci->mci_picache_ways, 1094 mci->mci_picache_way_size * mci->mci_picache_ways,
1095 mci->mci_picache_ways); 1095 mci->mci_picache_ways);
1096 printf("\t\t sets = %d\n", (mci->mci_picache_way_size * 1096 printf("\t\t sets = %d\n", (mci->mci_picache_way_size *
1097 mci->mci_picache_ways / mci->mci_picache_line_size) / 1097 mci->mci_picache_ways / mci->mci_picache_line_size) /
1098 mci->mci_picache_ways); 1098 mci->mci_picache_ways);
1099 } 1099 }
1100 if (mci->mci_pdcache_line_size == 0) 1100 if (mci->mci_pdcache_line_size == 0)
1101 printf("MIPS32/64 params: no Dcache\n"); 1101 printf("MIPS32/64 params: no Dcache\n");
1102 else { 1102 else {
1103 printf("MIPS32/64 params: Dcache: line = %d, total = %d, " 1103 printf("MIPS32/64 params: Dcache: line = %d, total = %d, "
1104 "ways = %d\n", mci->mci_pdcache_line_size, 1104 "ways = %d\n", mci->mci_pdcache_line_size,
1105 mci->mci_pdcache_way_size * mci->mci_pdcache_ways, 1105 mci->mci_pdcache_way_size * mci->mci_pdcache_ways,
1106 mci->mci_pdcache_ways); 1106 mci->mci_pdcache_ways);
1107 printf("\t\t sets = %d\n", (mci->mci_pdcache_way_size * 1107 printf("\t\t sets = %d\n", (mci->mci_pdcache_way_size *
1108 mci->mci_pdcache_ways / mci->mci_pdcache_line_size) / 1108 mci->mci_pdcache_ways / mci->mci_pdcache_line_size) /
1109 mci->mci_pdcache_ways); 1109 mci->mci_pdcache_ways);
1110 } 1110 }
1111#endif /* CACHE_DEBUG */ 1111#endif /* CACHE_DEBUG */
1112 1112
1113 switch (mci->mci_picache_line_size) { 1113 switch (mci->mci_picache_line_size) {
1114 case 16: 1114 case 16:
1115 mco->mco_icache_sync_all = mipsNN_icache_sync_all_16; 1115 mco->mco_icache_sync_all = mipsNN_icache_sync_all_16;
1116 mco->mco_icache_sync_range = 1116 mco->mco_icache_sync_range =
1117 mipsNN_icache_sync_range_16; 1117 mipsNN_icache_sync_range_16;
1118 mco->mco_icache_sync_range_index = 1118 mco->mco_icache_sync_range_index =
1119 mipsNN_icache_sync_range_index_16; 1119 mipsNN_icache_sync_range_index_16;
1120 break; 1120 break;
1121 case 32: 1121 case 32:
1122 mco->mco_icache_sync_all = mipsNN_icache_sync_all_32; 1122 mco->mco_icache_sync_all = mipsNN_icache_sync_all_32;
1123 mco->mco_icache_sync_range = 1123 mco->mco_icache_sync_range =
1124 mipsNN_icache_sync_range_32; 1124 mipsNN_icache_sync_range_32;
1125 mco->mco_icache_sync_range_index = 1125 mco->mco_icache_sync_range_index =
1126 mipsNN_icache_sync_range_index_32; 1126 mipsNN_icache_sync_range_index_32;
1127 break; 1127 break;
1128#ifdef MIPS_DISABLE_L1_CACHE 1128#ifdef MIPS_DISABLE_L1_CACHE
1129 case 0: 1129 case 0:
1130 mco->mco_icache_sync_all = cache_noop; 1130 mco->mco_icache_sync_all = cache_noop;
1131 mco->mco_icache_sync_range = 1131 mco->mco_icache_sync_range =
1132 (void (*)(vaddr_t, vsize_t))cache_noop; 1132 (void (*)(vaddr_t, vsize_t))cache_noop;
1133 mco->mco_icache_sync_range_index = 1133 mco->mco_icache_sync_range_index =
1134 (void (*)(vaddr_t, vsize_t))cache_noop; 1134 (void (*)(vaddr_t, vsize_t))cache_noop;
1135 break; 1135 break;
1136#endif 1136#endif
1137 default: 1137 default:
1138 panic("no Icache ops for %d byte lines", 1138 panic("no Icache ops for %d byte lines",
1139 mci->mci_picache_line_size); 1139 mci->mci_picache_line_size);
1140 } 1140 }
1141 1141
1142 switch (mci->mci_pdcache_line_size) { 1142 switch (mci->mci_pdcache_line_size) {
1143 case 16: 1143 case 16:
1144 mco->mco_pdcache_wbinv_all = 1144 mco->mco_pdcache_wbinv_all =
1145 mco->mco_intern_pdcache_wbinv_all = 1145 mco->mco_intern_pdcache_wbinv_all =
1146 mipsNN_pdcache_wbinv_all_16; 1146 mipsNN_pdcache_wbinv_all_16;
1147 mco->mco_pdcache_wbinv_range = 1147 mco->mco_pdcache_wbinv_range =
1148 mipsNN_pdcache_wbinv_range_16; 1148 mipsNN_pdcache_wbinv_range_16;
1149 mco->mco_pdcache_wbinv_range_index = 1149 mco->mco_pdcache_wbinv_range_index =
1150 mco->mco_intern_pdcache_wbinv_range_index = 1150 mco->mco_intern_pdcache_wbinv_range_index =
1151 mipsNN_pdcache_wbinv_range_index_16; 1151 mipsNN_pdcache_wbinv_range_index_16;
1152 mco->mco_pdcache_inv_range = 1152 mco->mco_pdcache_inv_range =
1153 mipsNN_pdcache_inv_range_16; 1153 mipsNN_pdcache_inv_range_16;
1154 mco->mco_pdcache_wb_range = 1154 mco->mco_pdcache_wb_range =
1155 mco->mco_intern_pdcache_wb_range = 1155 mco->mco_intern_pdcache_wb_range =
1156 mipsNN_pdcache_wb_range_16; 1156 mipsNN_pdcache_wb_range_16;
1157 break; 1157 break;
1158 case 32: 1158 case 32:
1159 mco->mco_pdcache_wbinv_all = 1159 mco->mco_pdcache_wbinv_all =
1160 mco->mco_intern_pdcache_wbinv_all = 1160 mco->mco_intern_pdcache_wbinv_all =
1161 mipsNN_pdcache_wbinv_all_32; 1161 mipsNN_pdcache_wbinv_all_32;
1162 mco->mco_pdcache_wbinv_range = 1162 mco->mco_pdcache_wbinv_range =
1163 mipsNN_pdcache_wbinv_range_32; 1163 mipsNN_pdcache_wbinv_range_32;
1164 mco->mco_pdcache_wbinv_range_index = 1164 mco->mco_pdcache_wbinv_range_index =
1165 mco->mco_intern_pdcache_wbinv_range_index = 1165 mco->mco_intern_pdcache_wbinv_range_index =
1166 mipsNN_pdcache_wbinv_range_index_32; 1166 mipsNN_pdcache_wbinv_range_index_32;
1167 mco->mco_pdcache_inv_range = 1167 mco->mco_pdcache_inv_range =
1168 mipsNN_pdcache_inv_range_32; 1168 mipsNN_pdcache_inv_range_32;
1169 mco->mco_pdcache_wb_range = 1169 mco->mco_pdcache_wb_range =
1170 mco->mco_intern_pdcache_wb_range = 1170 mco->mco_intern_pdcache_wb_range =
1171 mipsNN_pdcache_wb_range_32; 1171 mipsNN_pdcache_wb_range_32;
1172 break; 1172 break;
1173#ifdef MIPS_DISABLE_L1_CACHE 1173#ifdef MIPS_DISABLE_L1_CACHE
1174 case 0: 1174 case 0:
1175 mco->mco_pdcache_wbinv_all = cache_noop; 1175 mco->mco_pdcache_wbinv_all = cache_noop;
1176 mco->mco_intern_pdcache_wbinv_all = cache_noop; 1176 mco->mco_intern_pdcache_wbinv_all = cache_noop;
1177 mco->mco_pdcache_wbinv_range = 1177 mco->mco_pdcache_wbinv_range =
1178 (void (*)(vaddr_t, vsize_t))cache_noop; 1178 (void (*)(vaddr_t, vsize_t))cache_noop;
1179 mco->mco_pdcache_wbinv_range_index = 1179 mco->mco_pdcache_wbinv_range_index =
1180 (void (*)(vaddr_t, vsize_t))cache_noop; 1180 (void (*)(vaddr_t, vsize_t))cache_noop;
1181 mco->mco_intern_pdcache_wbinv_range_index = 1181 mco->mco_intern_pdcache_wbinv_range_index =
1182 (void (*)(vaddr_t, vsize_t))cache_noop; 1182 (void (*)(vaddr_t, vsize_t))cache_noop;
1183 mco->mco_pdcache_inv_range = 1183 mco->mco_pdcache_inv_range =
1184 (void (*)(vaddr_t, vsize_t))cache_noop; 1184 (void (*)(vaddr_t, vsize_t))cache_noop;
1185 mco->mco_pdcache_wb_range = 1185 mco->mco_pdcache_wb_range =
1186 (void (*)(vaddr_t, vsize_t))cache_noop; 1186 (void (*)(vaddr_t, vsize_t))cache_noop;
1187 mco->mco_intern_pdcache_wb_range = 1187 mco->mco_intern_pdcache_wb_range =
1188 (void (*)(vaddr_t, vsize_t))cache_noop; 1188 (void (*)(vaddr_t, vsize_t))cache_noop;
1189 break; 1189 break;
1190#endif 1190#endif
1191 default: 1191 default:
1192 panic("no Dcache ops for %d byte lines", 1192 panic("no Dcache ops for %d byte lines",
1193 mci->mci_pdcache_line_size); 1193 mci->mci_pdcache_line_size);
1194 } 1194 }
1195 1195
1196 mipsNN_cache_init(cfg, cfg1); 1196 mipsNN_cache_init(cfg, cfg1);
1197 1197
1198 if (mips_options.mips_cpu_flags & 1198 if (mips_options.mips_cpu_flags &
1199 (CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_I_D_CACHE_COHERENT)) { 1199 (CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_I_D_CACHE_COHERENT)) {
1200#ifdef CACHE_DEBUG 1200#ifdef CACHE_DEBUG
1201 printf(" Dcache is coherent\n"); 1201 printf(" Dcache is coherent\n");
1202#endif 1202#endif
1203 mco->mco_pdcache_wbinv_all = cache_noop; 1203 mco->mco_pdcache_wbinv_all = cache_noop;
1204 mco->mco_pdcache_wbinv_range = 1204 mco->mco_pdcache_wbinv_range =
1205 (void (*)(vaddr_t, vsize_t))cache_noop; 1205 (void (*)(vaddr_t, vsize_t))cache_noop;
1206 mco->mco_pdcache_wbinv_range_index = 1206 mco->mco_pdcache_wbinv_range_index =
1207 (void (*)(vaddr_t, vsize_t))cache_noop; 1207 (void (*)(vaddr_t, vsize_t))cache_noop;
1208 mco->mco_pdcache_inv_range = 1208 mco->mco_pdcache_inv_range =
1209 (void (*)(vaddr_t, vsize_t))cache_noop; 1209 (void (*)(vaddr_t, vsize_t))cache_noop;
1210 mco->mco_pdcache_wb_range = 1210 mco->mco_pdcache_wb_range =
1211 (void (*)(vaddr_t, vsize_t))cache_noop; 1211 (void (*)(vaddr_t, vsize_t))cache_noop;
1212 } 1212 }
1213 if (mips_options.mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT) { 1213 if (mips_options.mips_cpu_flags & CPU_MIPS_I_D_CACHE_COHERENT) {
1214#ifdef CACHE_DEBUG 1214#ifdef CACHE_DEBUG
1215 printf(" Icache is coherent against Dcache\n"); 1215 printf(" Icache is coherent against Dcache\n");
1216#endif 1216#endif
1217 mco->mco_intern_pdcache_wbinv_all = 1217 mco->mco_intern_pdcache_wbinv_all =
1218 cache_noop; 1218 cache_noop;
1219 mco->mco_intern_pdcache_wbinv_range_index = 1219 mco->mco_intern_pdcache_wbinv_range_index =
1220 (void (*)(vaddr_t, vsize_t))cache_noop; 1220 (void (*)(vaddr_t, vsize_t))cache_noop;
1221 mco->mco_intern_pdcache_wb_range = 1221 mco->mco_intern_pdcache_wb_range =
1222 (void (*)(vaddr_t, vsize_t))cache_noop; 1222 (void (*)(vaddr_t, vsize_t))cache_noop;
1223 } 1223 }
1224} 1224}
1225#endif /* MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2 > 0 */ 1225#endif /* MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2 > 0 */

cvs diff -r1.3 -r1.4 src/sys/arch/mips/mips/cache_ls2.c (switch to unified diff)

--- src/sys/arch/mips/mips/cache_ls2.c 2009/08/11 00:34:29 1.3
+++ src/sys/arch/mips/mips/cache_ls2.c 2011/06/08 17:47:48 1.4
@@ -1,299 +1,306 @@ @@ -1,299 +1,306 @@
1/* $NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $ */ 1/* $NetBSD: cache_ls2.c,v 1.4 2011/06/08 17:47:48 bouyer Exp $ */
2 2
3/*- 3/*-
4 * Copyright (c) 2009 The NetBSD Foundation, Inc. 4 * Copyright (c) 2009 The NetBSD Foundation, Inc.
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * This code is derived from software contributed to The NetBSD Foundation 7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas <matt@3am-software.com>. 8 * by Matt Thomas <matt@3am-software.com>.
9 * 9 *
10 * Redistribution and use in source and binary forms, with or without 10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions 11 * modification, are permitted provided that the following conditions
12 * are met: 12 * are met:
13 * 1. Redistributions of source code must retain the above copyright 13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer. 14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright 15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the 16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution. 17 * documentation and/or other materials provided with the distribution.
18 * 18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE. 29 * POSSIBILITY OF SUCH DAMAGE.
30 */ 30 */
31 31
32#include <sys/cdefs.h> 32#include <sys/cdefs.h>
33__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.3 2009/08/11 00:34:29 matt Exp $"); 33__KERNEL_RCSID(0, "$NetBSD: cache_ls2.c,v 1.4 2011/06/08 17:47:48 bouyer Exp $");
34 34
35#include <sys/param.h> 35#include <sys/param.h>
36 36
37#include <mips/cache.h> 37#include <mips/cache.h>
38#include <mips/cache_ls2.h> 38#include <mips/cache_ls2.h>
39#include <mips/locore.h> 39#include <mips/locore.h>
40 40
41/* 41/*
42 * Cache operations for Loongson2-style caches: 42 * Cache operations for Loongson2-style caches:
43 * 43 *
44 * - 4-way set-associative 32b/l 44 * - 4-way set-associative 32b/l
45 * - Write-back 45 * - Write-back
46 * - Primary is virtually indexed, physically tagged 46 * - Primary is virtually indexed, physically tagged
47 * - Secondary is physically indexed, physically tagged 47 * - Secondary is physically indexed, physically tagged
48 */ 48 */
49 49
50#define round_line(x) (((x) + 31) & ~31) 50#define round_line(x) (((x) + 31) & ~31)
51#define trunc_line(x) ((x) & ~31) 51#define trunc_line(x) ((x) & ~31)
52 52
53__asm(".set mips3"); 53__asm(".set mips3");
54 54
55void 55void
56ls2_icache_sync_range(vaddr_t va, vsize_t size) 56ls2_icache_sync_range(vaddr_t va, vsize_t size)
57{ 57{
 58 struct mips_cache_info * const mci = &mips_cache_info;
58 const vaddr_t eva = round_line(va + size); 59 const vaddr_t eva = round_line(va + size);
59 60
60 va = trunc_line(va); 61 va = trunc_line(va);
61 62
62 if (va + mips_picache_size <= eva) { 63 if (va + mci->mci_picache_size <= eva) {
63 ls2_icache_sync_all(); 64 ls2_icache_sync_all();
64 return; 65 return;
65 } 66 }
66 67
67 for (; va + 8 * 32 <= eva; va += 8 * 32) { 68 for (; va + 8 * 32 <= eva; va += 8 * 32) {
68 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV); 69 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
69 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV); 70 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
70 } 71 }
71 72
72 for (; va < eva; va += 32) { 73 for (; va < eva; va += 32) {
73 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV); 74 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
74 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV); 75 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
75 } 76 }
76 77
77 __asm volatile("sync"); 78 __asm volatile("sync");
78} 79}
79 80
80void 81void
81ls2_icache_sync_range_index(vaddr_t va, vsize_t size) 82ls2_icache_sync_range_index(vaddr_t va, vsize_t size)
82{ 83{
83 vaddr_t eva; 84 vaddr_t eva;
 85 struct mips_cache_info * const mci = &mips_cache_info;
84 86
85 /* 87 /*
86 * Since we're doing Index ops, we expect to not be able 88 * Since we're doing Index ops, we expect to not be able
87 * to access the address we've been given. So, get the 89 * to access the address we've been given. So, get the
88 * bits that determine the cache index, and make a KSEG0 90 * bits that determine the cache index, and make a KSEG0
89 * address out of them. 91 * address out of them.
90 */ 92 */
91 93
92 va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask); 94 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);
93 eva = round_line(va + size); 95 eva = round_line(va + size);
94 va = trunc_line(va); 96 va = trunc_line(va);
95 97
96 if (va + mips_picache_way_size < eva) { 98 if (va + mci->mci_picache_way_size < eva) {
97 va = MIPS_PHYS_TO_KSEG0(0); 99 va = MIPS_PHYS_TO_KSEG0(0);
98 eva = mips_picache_way_size; 100 eva = mci->mci_picache_way_size;
99 } 101 }
100 102
101 for (; va + 8 * 32 <= eva; va += 8 * 32) { 103 for (; va + 8 * 32 <= eva; va += 8 * 32) {
102 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 104 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
103 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV); 105 cache_op_ls2_8line(va, CACHEOP_LS2_I_INDEX_INV);
104 } 106 }
105 107
106 for (; va < eva; va += 32) { 108 for (; va < eva; va += 32) {
107 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 109 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
108 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV); 110 cache_op_ls2_line(va, CACHEOP_LS2_I_INDEX_INV);
109 } 111 }
110 112
111 __asm volatile("sync"); 113 __asm volatile("sync");
112} 114}
113 115
114void 116void
115ls2_icache_sync_all(void) 117ls2_icache_sync_all(void)
116{ 118{
117 ls2_icache_sync_range_index(0, mips_picache_way_size); 119 struct mips_cache_info * const mci = &mips_cache_info;
 120 ls2_icache_sync_range_index(0, mci->mci_picache_way_size);
118} 121}
119 122
120void 123void
121ls2_pdcache_inv_range(vaddr_t va, vsize_t size) 124ls2_pdcache_inv_range(vaddr_t va, vsize_t size)
122{ 125{
123 const vaddr_t eva = round_line(va + size); 126 const vaddr_t eva = round_line(va + size);
124 127
125 va = trunc_line(va); 128 va = trunc_line(va);
126 129
127 for (; va + 8 * 32 <= eva; va += 8 * 32) { 130 for (; va + 8 * 32 <= eva; va += 8 * 32) {
128 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV); 131 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
129 } 132 }
130 133
131 for (; va < eva; va += 32) { 134 for (; va < eva; va += 32) {
132 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV); 135 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
133 } 136 }
134 137
135 __asm volatile("sync"); 138 __asm volatile("sync");
136} 139}
137 140
138void 141void
139ls2_pdcache_wbinv_range(vaddr_t va, vsize_t size) 142ls2_pdcache_wbinv_range(vaddr_t va, vsize_t size)
140{ 143{
141 const vaddr_t eva = round_line(va + size); 144 const vaddr_t eva = round_line(va + size);
142 145
143 va = trunc_line(va); 146 va = trunc_line(va);
144 147
145 for (; va + 8 * 32 <= eva; va += 8 * 32) { 148 for (; va + 8 * 32 <= eva; va += 8 * 32) {
146 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV); 149 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
147 } 150 }
148 151
149 for (; va < eva; va += 32) { 152 for (; va < eva; va += 32) {
150 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV); 153 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
151 } 154 }
152 155
153 __asm volatile("sync"); 156 __asm volatile("sync");
154} 157}
155 158
156void 159void
157ls2_pdcache_wb_range(vaddr_t va, vsize_t size) 160ls2_pdcache_wb_range(vaddr_t va, vsize_t size)
158{ 161{
159 /* 162 /*
160 * Alas, can't writeback without invalidating... 163 * Alas, can't writeback without invalidating...
161 */ 164 */
162 ls2_pdcache_wbinv_range(va, size); 165 ls2_pdcache_wbinv_range(va, size);
163} 166}
164 167
165void 168void
166ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size) 169ls2_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
167{ 170{
168 vaddr_t eva; 171 vaddr_t eva;
 172 struct mips_cache_info * const mci = &mips_cache_info;
169 173
170 /* 174 /*
171 * Since we're doing Index ops, we expect to not be able 175 * Since we're doing Index ops, we expect to not be able
172 * to access the address we've been given. So, get the 176 * to access the address we've been given. So, get the
173 * bits that determine the cache index, and make a KSEG0 177 * bits that determine the cache index, and make a KSEG0
174 * address out of them. 178 * address out of them.
175 */ 179 */
176 va = MIPS_PHYS_TO_KSEG0(va & mips_pdcache_way_mask); 180 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);
177 181
178 eva = round_line(va + size); 182 eva = round_line(va + size);
179 va = trunc_line(va); 183 va = trunc_line(va);
180 184
181 if (va + mips_pdcache_way_size > eva) { 185 if (va + mci->mci_pdcache_way_size > eva) {
182 va = MIPS_PHYS_TO_KSEG0(0); 186 va = MIPS_PHYS_TO_KSEG0(0);
183 eva = mips_pdcache_way_size; 187 eva = mci->mci_pdcache_way_size;
184 } 188 }
185 189
186 for (; va + 8 * 32 <= eva; va += 8 * 32) { 190 for (; va + 8 * 32 <= eva; va += 8 * 32) {
187 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 191 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
188 } 192 }
189 193
190 for (; va < eva; va += 32) { 194 for (; va < eva; va += 32) {
191 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 195 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
192 } 196 }
193 197
194 __asm volatile("sync"); 198 __asm volatile("sync");
195} 199}
196 200
197void 201void
198ls2_pdcache_wbinv_all(void) 202ls2_pdcache_wbinv_all(void)
199{ 203{
200 ls2_pdcache_wbinv_range_index(0, mips_pdcache_way_size); 204 struct mips_cache_info * const mci = &mips_cache_info;
 205 ls2_pdcache_wbinv_range_index(0, mci->mci_pdcache_way_size);
201} 206}
202 207
203/* 208/*
204 * Cache operations for secondary caches: 209 * Cache operations for secondary caches:
205 * 210 *
206 * - Direct-mapped 211 * - Direct-mapped
207 * - Write-back 212 * - Write-back
208 * - Physically indexed, physically tagged 213 * - Physically indexed, physically tagged
209 * 214 *
210 */ 215 */
211 216
212void 217void
213ls2_sdcache_inv_range(vaddr_t va, vsize_t size) 218ls2_sdcache_inv_range(vaddr_t va, vsize_t size)
214{ 219{
215 const vaddr_t eva = round_line(va + size); 220 const vaddr_t eva = round_line(va + size);
216 221
217 va = trunc_line(va); 222 va = trunc_line(va);
218 223
219 for (; va + 8 * 32 <= eva; va += 8 * 32) { 224 for (; va + 8 * 32 <= eva; va += 8 * 32) {
220 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV); 225 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_INV);
221 cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_INV); 226 cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_INV);
222 } 227 }
223 228
224 for (; va < eva; va += 32) { 229 for (; va < eva; va += 32) {
225 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV); 230 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_INV);
226 cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_INV); 231 cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_INV);
227 } 232 }
228 233
229 __asm volatile("sync"); 234 __asm volatile("sync");
230} 235}
231 236
232void 237void
233ls2_sdcache_wbinv_range(vaddr_t va, vsize_t size) 238ls2_sdcache_wbinv_range(vaddr_t va, vsize_t size)
234{ 239{
235 const vaddr_t eva = round_line(va + size); 240 const vaddr_t eva = round_line(va + size);
236 241
237 va = trunc_line(va); 242 va = trunc_line(va);
238 243
239 for (; va + 8 * 32 <= eva; va += 8 * 32) { 244 for (; va + 8 * 32 <= eva; va += 8 * 32) {
240 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV); 245 cache_op_ls2_8line(va, CACHEOP_LS2_D_HIT_WB_INV);
241 cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_WB_INV); 246 cache_op_ls2_8line(va, CACHEOP_LS2_S_HIT_WB_INV);
242 } 247 }
243 248
244 for (; va < eva; va += 32) { 249 for (; va < eva; va += 32) {
245 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV); 250 cache_op_ls2_line(va, CACHEOP_LS2_D_HIT_WB_INV);
246 cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_WB_INV); 251 cache_op_ls2_line(va, CACHEOP_LS2_S_HIT_WB_INV);
247 } 252 }
248 253
249 __asm volatile("sync"); 254 __asm volatile("sync");
250} 255}
251 256
252void 257void
253ls2_sdcache_wb_range(vaddr_t va, vsize_t size) 258ls2_sdcache_wb_range(vaddr_t va, vsize_t size)
254{ 259{
255 /* 260 /*
256 * Alas, can't writeback without invalidating... 261 * Alas, can't writeback without invalidating...
257 */ 262 */
258 ls2_sdcache_wbinv_range(va, size); 263 ls2_sdcache_wbinv_range(va, size);
259} 264}
260 265
261void 266void
262ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size) 267ls2_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
263{ 268{
264 vaddr_t eva; 269 vaddr_t eva;
 270 struct mips_cache_info * const mci = &mips_cache_info;
265 271
266 /* 272 /*
267 * Since we're doing Index ops, we expect to not be able 273 * Since we're doing Index ops, we expect to not be able
268 * to access the address we've been given. So, get the 274 * to access the address we've been given. So, get the
269 * bits that determine the cache index, and make a KSEG0 275 * bits that determine the cache index, and make a KSEG0
270 * address out of them. 276 * address out of them.
271 */ 277 */
272 va = MIPS_PHYS_TO_KSEG0(va & mips_sdcache_way_mask); 278 va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);
273 279
274 eva = round_line(va + size); 280 eva = round_line(va + size);
275 va = trunc_line(va); 281 va = trunc_line(va);
276 282
277 if (va + mips_sdcache_way_size > eva) { 283 if (va + mci->mci_sdcache_way_size > eva) {
278 va = MIPS_PHYS_TO_KSEG0(0); 284 va = MIPS_PHYS_TO_KSEG0(0);
279 eva = va + mips_sdcache_way_size; 285 eva = va + mci->mci_sdcache_way_size;
280 } 286 }
281 287
282 for (; va + 8 * 32 <= eva; va += 8 * 32) { 288 for (; va + 8 * 32 <= eva; va += 8 * 32) {
283 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 289 cache_op_ls2_8line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
284 cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV); 290 cache_op_ls2_8line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
285 } 291 }
286 292
287 for (; va < eva; va += 32) { 293 for (; va < eva; va += 32) {
288 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV); 294 cache_op_ls2_line_4way(va, CACHEOP_LS2_D_INDEX_WB_INV);
289 cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV); 295 cache_op_ls2_line_4way(va, CACHEOP_LS2_S_INDEX_WB_INV);
290 } 296 }
291 297
292 __asm volatile("sync"); 298 __asm volatile("sync");
293} 299}
294 300
295void 301void
296ls2_sdcache_wbinv_all(void) 302ls2_sdcache_wbinv_all(void)
297{ 303{
298 ls2_sdcache_wbinv_range_index(0, mips_sdcache_way_size); 304 struct mips_cache_info * const mci = &mips_cache_info;
 305 ls2_sdcache_wbinv_range_index(0, mci->mci_sdcache_way_size);
299} 306}