| @@ -1,649 +1,650 @@ | | | @@ -1,649 +1,650 @@ |
1 | /* $NetBSD: cpu.h,v 1.103 2011/07/06 09:27:35 matt Exp $ */ | | 1 | /* $NetBSD: cpu.h,v 1.104 2011/07/31 15:36:28 matt Exp $ */ |
2 | | | 2 | |
3 | /*- | | 3 | /*- |
4 | * Copyright (c) 1992, 1993 | | 4 | * Copyright (c) 1992, 1993 |
5 | * The Regents of the University of California. All rights reserved. | | 5 | * The Regents of the University of California. All rights reserved. |
6 | * | | 6 | * |
7 | * This code is derived from software contributed to Berkeley by | | 7 | * This code is derived from software contributed to Berkeley by |
8 | * Ralph Campbell and Rick Macklem. | | 8 | * Ralph Campbell and Rick Macklem. |
9 | * | | 9 | * |
10 | * Redistribution and use in source and binary forms, with or without | | 10 | * Redistribution and use in source and binary forms, with or without |
11 | * modification, are permitted provided that the following conditions | | 11 | * modification, are permitted provided that the following conditions |
12 | * are met: | | 12 | * are met: |
13 | * 1. Redistributions of source code must retain the above copyright | | 13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | | 14 | * notice, this list of conditions and the following disclaimer. |
15 | * 2. Redistributions in binary form must reproduce the above copyright | | 15 | * 2. Redistributions in binary form must reproduce the above copyright |
16 | * notice, this list of conditions and the following disclaimer in the | | 16 | * notice, this list of conditions and the following disclaimer in the |
17 | * documentation and/or other materials provided with the distribution. | | 17 | * documentation and/or other materials provided with the distribution. |
18 | * 3. Neither the name of the University nor the names of its contributors | | 18 | * 3. Neither the name of the University nor the names of its contributors |
19 | * may be used to endorse or promote products derived from this software | | 19 | * may be used to endorse or promote products derived from this software |
20 | * without specific prior written permission. | | 20 | * without specific prior written permission. |
21 | * | | 21 | * |
22 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND | | 22 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND |
23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | | 23 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | | 24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | | 25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE |
26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | | 26 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | | 27 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
28 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | | 28 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
29 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | | 29 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | | 30 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
31 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | | 31 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
32 | * SUCH DAMAGE. | | 32 | * SUCH DAMAGE. |
33 | * | | 33 | * |
34 | * @(#)cpu.h 8.4 (Berkeley) 1/4/94 | | 34 | * @(#)cpu.h 8.4 (Berkeley) 1/4/94 |
35 | */ | | 35 | */ |
36 | | | 36 | |
37 | #ifndef _CPU_H_ | | 37 | #ifndef _CPU_H_ |
38 | #define _CPU_H_ | | 38 | #define _CPU_H_ |
39 | | | 39 | |
40 | #include <mips/cpuregs.h> | | 40 | #include <mips/cpuregs.h> |
41 | | | 41 | |
42 | /* | | 42 | /* |
43 | * Exported definitions unique to NetBSD/mips cpu support. | | 43 | * Exported definitions unique to NetBSD/mips cpu support. |
44 | */ | | 44 | */ |
45 | | | 45 | |
46 | #ifdef _KERNEL | | 46 | #ifdef _KERNEL |
47 | | | 47 | |
48 | #if defined(_KERNEL_OPT) | | 48 | #if defined(_KERNEL_OPT) |
49 | #include "opt_cputype.h" | | 49 | #include "opt_cputype.h" |
50 | #include "opt_lockdebug.h" | | 50 | #include "opt_lockdebug.h" |
51 | #include "opt_multiprocessor.h" | | 51 | #include "opt_multiprocessor.h" |
52 | #endif | | 52 | #endif |
53 | | | 53 | |
54 | #ifndef _LOCORE | | 54 | #ifndef _LOCORE |
55 | #include <sys/cpu_data.h> | | 55 | #include <sys/cpu_data.h> |
56 | #include <sys/device_if.h> | | 56 | #include <sys/device_if.h> |
57 | #include <sys/evcnt.h> | | 57 | #include <sys/evcnt.h> |
58 | | | 58 | |
59 | typedef struct cpu_watchpoint { | | 59 | typedef struct cpu_watchpoint { |
60 | register_t cw_addr; | | 60 | register_t cw_addr; |
61 | register_t cw_mask; | | 61 | register_t cw_mask; |
62 | uint32_t cw_asid; | | 62 | uint32_t cw_asid; |
63 | uint32_t cw_mode; | | 63 | uint32_t cw_mode; |
64 | } cpu_watchpoint_t; | | 64 | } cpu_watchpoint_t; |
65 | /* (abstract) mode bits */ | | 65 | /* (abstract) mode bits */ |
66 | #define CPUWATCH_WRITE __BIT(0) | | 66 | #define CPUWATCH_WRITE __BIT(0) |
67 | #define CPUWATCH_READ __BIT(1) | | 67 | #define CPUWATCH_READ __BIT(1) |
68 | #define CPUWATCH_EXEC __BIT(2) | | 68 | #define CPUWATCH_EXEC __BIT(2) |
69 | #define CPUWATCH_MASK __BIT(3) | | 69 | #define CPUWATCH_MASK __BIT(3) |
70 | #define CPUWATCH_ASID __BIT(4) | | 70 | #define CPUWATCH_ASID __BIT(4) |
71 | #define CPUWATCH_RWX (CPUWATCH_EXEC|CPUWATCH_READ|CPUWATCH_WRITE) | | 71 | #define CPUWATCH_RWX (CPUWATCH_EXEC|CPUWATCH_READ|CPUWATCH_WRITE) |
72 | | | 72 | |
73 | #define CPUWATCH_MAX 8 /* max possible number of watchpoints */ | | 73 | #define CPUWATCH_MAX 8 /* max possible number of watchpoints */ |
74 | | | 74 | |
75 | u_int cpuwatch_discover(void); | | 75 | u_int cpuwatch_discover(void); |
76 | void cpuwatch_free(cpu_watchpoint_t *); | | 76 | void cpuwatch_free(cpu_watchpoint_t *); |
77 | cpu_watchpoint_t *cpuwatch_alloc(void); | | 77 | cpu_watchpoint_t *cpuwatch_alloc(void); |
78 | void cpuwatch_set_all(void); | | 78 | void cpuwatch_set_all(void); |
79 | void cpuwatch_clr_all(void); | | 79 | void cpuwatch_clr_all(void); |
80 | void cpuwatch_set(cpu_watchpoint_t *); | | 80 | void cpuwatch_set(cpu_watchpoint_t *); |
81 | void cpuwatch_clr(cpu_watchpoint_t *); | | 81 | void cpuwatch_clr(cpu_watchpoint_t *); |
82 | | | 82 | |
83 | struct cpu_info { | | 83 | struct cpu_info { |
84 | struct cpu_data ci_data; /* MI per-cpu data */ | | 84 | struct cpu_data ci_data; /* MI per-cpu data */ |
85 | struct cpu_info *ci_next; /* Next CPU in list */ | | 85 | struct cpu_info *ci_next; /* Next CPU in list */ |
86 | struct cpu_softc *ci_softc; /* chip-dependent hook */ | | 86 | struct cpu_softc *ci_softc; /* chip-dependent hook */ |
87 | device_t ci_dev; /* owning device */ | | 87 | device_t ci_dev; /* owning device */ |
88 | cpuid_t ci_cpuid; /* Machine-level identifier */ | | 88 | cpuid_t ci_cpuid; /* Machine-level identifier */ |
89 | u_long ci_cctr_freq; /* cycle counter frequency */ | | 89 | u_long ci_cctr_freq; /* cycle counter frequency */ |
90 | u_long ci_cpu_freq; /* CPU frequency */ | | 90 | u_long ci_cpu_freq; /* CPU frequency */ |
91 | u_long ci_cycles_per_hz; /* CPU freq / hz */ | | 91 | u_long ci_cycles_per_hz; /* CPU freq / hz */ |
92 | u_long ci_divisor_delay; /* for delay/DELAY */ | | 92 | u_long ci_divisor_delay; /* for delay/DELAY */ |
93 | u_long ci_divisor_recip; /* unused, for obsolete microtime(9) */ | | 93 | u_long ci_divisor_recip; /* unused, for obsolete microtime(9) */ |
94 | struct lwp *ci_curlwp; /* currently running lwp */ | | 94 | struct lwp *ci_curlwp; /* currently running lwp */ |
95 | volatile int ci_want_resched; /* user preemption pending */ | | 95 | volatile int ci_want_resched; /* user preemption pending */ |
96 | int ci_mtx_count; /* negative count of held mutexes */ | | 96 | int ci_mtx_count; /* negative count of held mutexes */ |
97 | int ci_mtx_oldspl; /* saved SPL value */ | | 97 | int ci_mtx_oldspl; /* saved SPL value */ |
98 | int ci_idepth; /* hardware interrupt depth */ | | 98 | int ci_idepth; /* hardware interrupt depth */ |
99 | int ci_cpl; /* current [interrupt] priority level */ | | 99 | int ci_cpl; /* current [interrupt] priority level */ |
100 | uint32_t ci_next_cp0_clk_intr; /* for hard clock intr scheduling */ | | 100 | uint32_t ci_next_cp0_clk_intr; /* for hard clock intr scheduling */ |
101 | struct evcnt ci_ev_count_compare; /* hard clock intr counter */ | | 101 | struct evcnt ci_ev_count_compare; /* hard clock intr counter */ |
102 | struct evcnt ci_ev_count_compare_missed; /* hard clock miss counter */ | | 102 | struct evcnt ci_ev_count_compare_missed; /* hard clock miss counter */ |
103 | struct lwp *ci_softlwps[SOFTINT_COUNT]; | | 103 | struct lwp *ci_softlwps[SOFTINT_COUNT]; |
104 | volatile u_int ci_softints; | | 104 | volatile u_int ci_softints; |
105 | struct evcnt ci_ev_fpu_loads; /* fpu load counter */ | | 105 | struct evcnt ci_ev_fpu_loads; /* fpu load counter */ |
106 | struct evcnt ci_ev_fpu_saves; /* fpu save counter */ | | 106 | struct evcnt ci_ev_fpu_saves; /* fpu save counter */ |
107 | struct evcnt ci_ev_tlbmisses; | | 107 | struct evcnt ci_ev_tlbmisses; |
108 | | | 108 | |
109 | /* | | 109 | /* |
110 | * Per-cpu pmap information | | 110 | * Per-cpu pmap information |
111 | */ | | 111 | */ |
112 | int ci_tlb_slot; /* reserved tlb entry for cpu_info */ | | 112 | int ci_tlb_slot; /* reserved tlb entry for cpu_info */ |
113 | u_int ci_pmap_asid_cur; /* current ASID */ | | 113 | u_int ci_pmap_asid_cur; /* current ASID */ |
114 | struct pmap_tlb_info *ci_tlb_info; /* tlb information for this cpu */ | | 114 | struct pmap_tlb_info *ci_tlb_info; /* tlb information for this cpu */ |
115 | union segtab *ci_pmap_seg0tab; | | 115 | union segtab *ci_pmap_seg0tab; |
116 | #ifdef _LP64 | | 116 | #ifdef _LP64 |
117 | union segtab *ci_pmap_segtab; | | 117 | union segtab *ci_pmap_segtab; |
118 | #else | | 118 | #else |
119 | vaddr_t ci_pmap_srcbase; /* starting VA of ephemeral src space */ | | 119 | vaddr_t ci_pmap_srcbase; /* starting VA of ephemeral src space */ |
120 | vaddr_t ci_pmap_dstbase; /* starting VA of ephemeral dst space */ | | 120 | vaddr_t ci_pmap_dstbase; /* starting VA of ephemeral dst space */ |
121 | #endif | | 121 | #endif |
122 | | | 122 | |
123 | u_int ci_cpuwatch_count; /* number of watchpoints on this CPU */ | | 123 | u_int ci_cpuwatch_count; /* number of watchpoints on this CPU */ |
124 | cpu_watchpoint_t ci_cpuwatch_tab[CPUWATCH_MAX]; | | 124 | cpu_watchpoint_t ci_cpuwatch_tab[CPUWATCH_MAX]; |
125 | | | 125 | |
126 | #ifdef MULTIPROCESSOR | | 126 | #ifdef MULTIPROCESSOR |
127 | volatile u_long ci_flags; | | 127 | volatile u_long ci_flags; |
128 | volatile uint64_t ci_request_ipis; | | 128 | volatile uint64_t ci_request_ipis; |
129 | /* bitmask of IPIs requested */ | | 129 | /* bitmask of IPIs requested */ |
130 | /* use on chips where hw cannot pass tag */ | | 130 | /* use on chips where hw cannot pass tag */ |
131 | uint64_t ci_active_ipis; /* bitmask of IPIs being serviced */ | | 131 | uint64_t ci_active_ipis; /* bitmask of IPIs being serviced */ |
132 | uint32_t ci_ksp_tlb_slot; /* tlb entry for kernel stack */ | | 132 | uint32_t ci_ksp_tlb_slot; /* tlb entry for kernel stack */ |
133 | struct evcnt ci_evcnt_all_ipis; /* aggregated IPI counter */ | | 133 | struct evcnt ci_evcnt_all_ipis; /* aggregated IPI counter */ |
134 | struct evcnt ci_evcnt_per_ipi[NIPIS]; /* individual IPI counters*/ | | 134 | struct evcnt ci_evcnt_per_ipi[NIPIS]; /* individual IPI counters*/ |
135 | struct evcnt ci_evcnt_synci_activate_rqst; | | 135 | struct evcnt ci_evcnt_synci_activate_rqst; |
136 | struct evcnt ci_evcnt_synci_onproc_rqst; | | 136 | struct evcnt ci_evcnt_synci_onproc_rqst; |
137 | struct evcnt ci_evcnt_synci_deferred_rqst; | | 137 | struct evcnt ci_evcnt_synci_deferred_rqst; |
138 | struct evcnt ci_evcnt_synci_ipi_rqst; | | 138 | struct evcnt ci_evcnt_synci_ipi_rqst; |
139 | | | 139 | |
140 | #define CPUF_PRIMARY 0x01 /* CPU is primary CPU */ | | 140 | #define CPUF_PRIMARY 0x01 /* CPU is primary CPU */ |
141 | #define CPUF_PRESENT 0x02 /* CPU is present */ | | 141 | #define CPUF_PRESENT 0x02 /* CPU is present */ |
142 | #define CPUF_RUNNING 0x04 /* CPU is running */ | | 142 | #define CPUF_RUNNING 0x04 /* CPU is running */ |
143 | #define CPUF_PAUSED 0x08 /* CPU is paused */ | | 143 | #define CPUF_PAUSED 0x08 /* CPU is paused */ |
144 | #define CPUF_USERPMAP 0x20 /* CPU has a user pmap activated */ | | 144 | #define CPUF_USERPMAP 0x20 /* CPU has a user pmap activated */ |
145 | #endif | | 145 | #endif |
146 | | | 146 | |
147 | }; | | 147 | }; |
148 | | | 148 | |
149 | #define CPU_INFO_ITERATOR int | | 149 | #define CPU_INFO_ITERATOR int |
150 | #define CPU_INFO_FOREACH(cii, ci) \ | | 150 | #define CPU_INFO_FOREACH(cii, ci) \ |
151 | (void)(cii), ci = &cpu_info_store; ci != NULL; ci = ci->ci_next | | 151 | (void)(cii), ci = &cpu_info_store; ci != NULL; ci = ci->ci_next |
152 | | | 152 | |
153 | #endif /* !_LOCORE */ | | 153 | #endif /* !_LOCORE */ |
154 | #endif /* _KERNEL */ | | 154 | #endif /* _KERNEL */ |
155 | | | 155 | |
156 | /* | | 156 | /* |
157 | * CTL_MACHDEP definitions. | | 157 | * CTL_MACHDEP definitions. |
158 | */ | | 158 | */ |
159 | #define CPU_CONSDEV 1 /* dev_t: console terminal device */ | | 159 | #define CPU_CONSDEV 1 /* dev_t: console terminal device */ |
160 | #define CPU_BOOTED_KERNEL 2 /* string: booted kernel name */ | | 160 | #define CPU_BOOTED_KERNEL 2 /* string: booted kernel name */ |
161 | #define CPU_ROOT_DEVICE 3 /* string: root device name */ | | 161 | #define CPU_ROOT_DEVICE 3 /* string: root device name */ |
162 | #define CPU_LLSC 4 /* OS/CPU supports LL/SC instruction */ | | 162 | #define CPU_LLSC 4 /* OS/CPU supports LL/SC instruction */ |
163 | | | 163 | |
164 | /* | | 164 | /* |
165 | * Platform can override, but note this breaks userland compatibility | | 165 | * Platform can override, but note this breaks userland compatibility |
166 | * with other mips platforms. | | 166 | * with other mips platforms. |
167 | */ | | 167 | */ |
168 | #ifndef CPU_MAXID | | 168 | #ifndef CPU_MAXID |
169 | #define CPU_MAXID 5 /* number of valid machdep ids */ | | 169 | #define CPU_MAXID 5 /* number of valid machdep ids */ |
170 | #endif | | 170 | #endif |
171 | | | 171 | |
172 | #ifdef _KERNEL | | 172 | #ifdef _KERNEL |
173 | #if defined(_MODULAR) || defined(_LKM) || defined(_STANDALONE) | | 173 | #if defined(_MODULAR) || defined(_LKM) || defined(_STANDALONE) |
174 | /* Assume all CPU architectures are valid for LKM's and standlone progs */ | | 174 | /* Assume all CPU architectures are valid for LKM's and standlone progs */ |
175 | #define MIPS1 1 | | 175 | #define MIPS1 1 |
176 | #define MIPS3 1 | | 176 | #define MIPS3 1 |
177 | #define MIPS4 1 | | 177 | #define MIPS4 1 |
178 | #define MIPS32 1 | | 178 | #define MIPS32 1 |
179 | #define MIPS32R2 1 | | 179 | #define MIPS32R2 1 |
180 | #define MIPS64 1 | | 180 | #define MIPS64 1 |
181 | #define MIPS64R2 1 | | 181 | #define MIPS64R2 1 |
182 | #endif | | 182 | #endif |
183 | | | 183 | |
184 | #if (MIPS1 + MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) == 0 | | 184 | #if (MIPS1 + MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) == 0 |
185 | #error at least one of MIPS1, MIPS3, MIPS4, MIPS32, MIPS32R2, MIPS64, or MIPS64RR2 must be specified | | 185 | #error at least one of MIPS1, MIPS3, MIPS4, MIPS32, MIPS32R2, MIPS64, or MIPS64RR2 must be specified |
186 | #endif | | 186 | #endif |
187 | | | 187 | |
188 | /* Shortcut for MIPS3 or above defined */ | | 188 | /* Shortcut for MIPS3 or above defined */ |
189 | #if defined(MIPS3) || defined(MIPS4) \ | | 189 | #if defined(MIPS3) || defined(MIPS4) \ |
190 | || defined(MIPS32) || defined(MIPS32R2) \ | | 190 | || defined(MIPS32) || defined(MIPS32R2) \ |
191 | || defined(MIPS64) || defined(MIPS64R2) | | 191 | || defined(MIPS64) || defined(MIPS64R2) |
192 | | | 192 | |
193 | #define MIPS3_PLUS 1 | | 193 | #define MIPS3_PLUS 1 |
194 | #define __HAVE_CPU_COUNTER | | 194 | #define __HAVE_CPU_COUNTER |
195 | #else | | 195 | #else |
196 | #undef MIPS3_PLUS | | 196 | #undef MIPS3_PLUS |
197 | #endif | | 197 | #endif |
198 | | | 198 | |
199 | /* | | 199 | /* |
200 | * Macros to find the CPU architecture we're on at run-time, | | 200 | * Macros to find the CPU architecture we're on at run-time, |
201 | * or if possible, at compile-time. | | 201 | * or if possible, at compile-time. |
202 | */ | | 202 | */ |
203 | | | 203 | |
204 | #define CPU_ARCH_MIPSx 0 /* XXX unknown */ | | 204 | #define CPU_ARCH_MIPSx 0 /* XXX unknown */ |
205 | #define CPU_ARCH_MIPS1 (1 << 0) | | 205 | #define CPU_ARCH_MIPS1 (1 << 0) |
206 | #define CPU_ARCH_MIPS2 (1 << 1) | | 206 | #define CPU_ARCH_MIPS2 (1 << 1) |
207 | #define CPU_ARCH_MIPS3 (1 << 2) | | 207 | #define CPU_ARCH_MIPS3 (1 << 2) |
208 | #define CPU_ARCH_MIPS4 (1 << 3) | | 208 | #define CPU_ARCH_MIPS4 (1 << 3) |
209 | #define CPU_ARCH_MIPS5 (1 << 4) | | 209 | #define CPU_ARCH_MIPS5 (1 << 4) |
210 | #define CPU_ARCH_MIPS32 (1 << 5) | | 210 | #define CPU_ARCH_MIPS32 (1 << 5) |
211 | #define CPU_ARCH_MIPS64 (1 << 6) | | 211 | #define CPU_ARCH_MIPS64 (1 << 6) |
212 | #define CPU_ARCH_MIPS32R2 (1 << 7) | | 212 | #define CPU_ARCH_MIPS32R2 (1 << 7) |
213 | #define CPU_ARCH_MIPS64R2 (1 << 8) | | 213 | #define CPU_ARCH_MIPS64R2 (1 << 8) |
214 | | | 214 | |
215 | /* Note: must be kept in sync with -ffixed-?? Makefile.mips. */ | | 215 | /* Note: must be kept in sync with -ffixed-?? Makefile.mips. */ |
216 | #define MIPS_CURLWP $24 | | 216 | #define MIPS_CURLWP $24 |
217 | #define MIPS_CURLWP_QUOTED "$24" | | 217 | #define MIPS_CURLWP_QUOTED "$24" |
218 | #define MIPS_CURLWP_LABEL _L_T8 | | 218 | #define MIPS_CURLWP_LABEL _L_T8 |
219 | #define MIPS_CURLWP_REG _R_T8 | | 219 | #define MIPS_CURLWP_REG _R_T8 |
220 | #define TF_MIPS_CURLWP(x) TF_REG_T8(x) | | 220 | #define TF_MIPS_CURLWP(x) TF_REG_T8(x) |
221 | | | 221 | |
222 | #ifndef _LOCORE | | 222 | #ifndef _LOCORE |
223 | | | 223 | |
224 | extern struct cpu_info cpu_info_store; | | 224 | extern struct cpu_info cpu_info_store; |
225 | register struct lwp *mips_curlwp asm(MIPS_CURLWP_QUOTED); | | 225 | register struct lwp *mips_curlwp asm(MIPS_CURLWP_QUOTED); |
226 | | | 226 | |
227 | #define curlwp mips_curlwp | | 227 | #define curlwp mips_curlwp |
228 | #define curcpu() (curlwp->l_cpu) | | 228 | #define curcpu() (curlwp->l_cpu) |
229 | #define curpcb ((struct pcb *)lwp_getpcb(curlwp)) | | 229 | #define curpcb ((struct pcb *)lwp_getpcb(curlwp)) |
230 | #ifdef MULTIPROCESSOR | | 230 | #ifdef MULTIPROCESSOR |
231 | #define cpu_number() (curcpu()->ci_index) | | 231 | #define cpu_number() (curcpu()->ci_index) |
232 | #define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY) | | 232 | #define CPU_IS_PRIMARY(ci) ((ci)->ci_flags & CPUF_PRIMARY) |
233 | #else | | 233 | #else |
234 | #define cpu_number() (0) | | 234 | #define cpu_number() (0) |
235 | #define CPU_IS_PRIMARY(ci) (true) | | 235 | #define CPU_IS_PRIMARY(ci) (true) |
236 | #endif | | 236 | #endif |
237 | | | 237 | |
238 | /* XXX simonb | | 238 | /* XXX simonb |
239 | * Should the following be in a cpu_info type structure? | | 239 | * Should the following be in a cpu_info type structure? |
240 | * And how many of these are per-cpu vs. per-system? (Ie, | | 240 | * And how many of these are per-cpu vs. per-system? (Ie, |
241 | * we can assume that all cpus have the same mmu-type, but | | 241 | * we can assume that all cpus have the same mmu-type, but |
242 | * maybe not that all cpus run at the same clock speed. | | 242 | * maybe not that all cpus run at the same clock speed. |
243 | * Some SGI's apparently support R12k and R14k in the same | | 243 | * Some SGI's apparently support R12k and R14k in the same |
244 | * box.) | | 244 | * box.) |
245 | */ | | 245 | */ |
246 | struct mips_options { | | 246 | struct mips_options { |
247 | const struct pridtab *mips_cpu; | | 247 | const struct pridtab *mips_cpu; |
248 | | | 248 | |
249 | u_int mips_cpu_arch; | | 249 | u_int mips_cpu_arch; |
250 | u_int mips_cpu_mhz; /* CPU speed in MHz, estimated by mc_cpuspeed(). */ | | 250 | u_int mips_cpu_mhz; /* CPU speed in MHz, estimated by mc_cpuspeed(). */ |
251 | u_int mips_cpu_flags; | | 251 | u_int mips_cpu_flags; |
252 | u_int mips_num_tlb_entries; | | 252 | u_int mips_num_tlb_entries; |
253 | mips_prid_t mips_cpu_id; | | 253 | mips_prid_t mips_cpu_id; |
254 | mips_prid_t mips_fpu_id; | | 254 | mips_prid_t mips_fpu_id; |
255 | bool mips_has_r4k_mmu; | | 255 | bool mips_has_r4k_mmu; |
256 | bool mips_has_llsc; | | 256 | bool mips_has_llsc; |
257 | u_int mips3_pg_shift; | | 257 | u_int mips3_pg_shift; |
258 | u_int mips3_pg_cached; | | 258 | u_int mips3_pg_cached; |
259 | #ifdef MIPS3_PLUS | | 259 | #ifdef MIPS3_PLUS |
260 | #ifdef _LP64 | | 260 | #ifdef _LP64 |
261 | uint64_t mips3_xkphys_cached; | | 261 | uint64_t mips3_xkphys_cached; |
262 | #endif | | 262 | #endif |
263 | uint64_t mips3_tlb_vpn_mask; | | 263 | uint64_t mips3_tlb_vpn_mask; |
264 | uint64_t mips3_tlb_pfn_mask; | | 264 | uint64_t mips3_tlb_pfn_mask; |
265 | uint32_t mips3_tlb_pg_mask; | | 265 | uint32_t mips3_tlb_pg_mask; |
266 | #endif | | 266 | #endif |
267 | }; | | 267 | }; |
268 | extern struct mips_options mips_options; | | 268 | extern struct mips_options mips_options; |
269 | | | 269 | |
270 | #define CPU_MIPS_R4K_MMU 0x0001 | | 270 | #define CPU_MIPS_R4K_MMU 0x0001 |
271 | #define CPU_MIPS_NO_LLSC 0x0002 | | 271 | #define CPU_MIPS_NO_LLSC 0x0002 |
272 | #define CPU_MIPS_CAUSE_IV 0x0004 | | 272 | #define CPU_MIPS_CAUSE_IV 0x0004 |
273 | #define CPU_MIPS_HAVE_SPECIAL_CCA 0x0008 /* Defaults to '3' if not set. */ | | 273 | #define CPU_MIPS_HAVE_SPECIAL_CCA 0x0008 /* Defaults to '3' if not set. */ |
274 | #define CPU_MIPS_CACHED_CCA_MASK 0x0070 | | 274 | #define CPU_MIPS_CACHED_CCA_MASK 0x0070 |
275 | #define CPU_MIPS_CACHED_CCA_SHIFT 4 | | 275 | #define CPU_MIPS_CACHED_CCA_SHIFT 4 |
276 | #define CPU_MIPS_DOUBLE_COUNT 0x0080 /* 1 cp0 count == 2 clock cycles */ | | 276 | #define CPU_MIPS_DOUBLE_COUNT 0x0080 /* 1 cp0 count == 2 clock cycles */ |
277 | #define CPU_MIPS_USE_WAIT 0x0100 /* Use "wait"-based cpu_idle() */ | | 277 | #define CPU_MIPS_USE_WAIT 0x0100 /* Use "wait"-based cpu_idle() */ |
278 | #define CPU_MIPS_NO_WAIT 0x0200 /* Inverse of previous, for mips32/64 */ | | 278 | #define CPU_MIPS_NO_WAIT 0x0200 /* Inverse of previous, for mips32/64 */ |
279 | #define CPU_MIPS_D_CACHE_COHERENT 0x0400 /* D-cache is fully coherent */ | | 279 | #define CPU_MIPS_D_CACHE_COHERENT 0x0400 /* D-cache is fully coherent */ |
280 | #define CPU_MIPS_I_D_CACHE_COHERENT 0x0800 /* I-cache funcs don't need to flush the D-cache */ | | 280 | #define CPU_MIPS_I_D_CACHE_COHERENT 0x0800 /* I-cache funcs don't need to flush the D-cache */ |
281 | #define CPU_MIPS_NO_LLADDR 0x1000 | | 281 | #define CPU_MIPS_NO_LLADDR 0x1000 |
282 | #define CPU_MIPS_HAVE_MxCR 0x2000 /* have mfcr, mtcr insns */ | | 282 | #define CPU_MIPS_HAVE_MxCR 0x2000 /* have mfcr, mtcr insns */ |
| | | 283 | #define CPU_MIPS_LOONGSON2 0x4000 |
283 | #define MIPS_NOT_SUPP 0x8000 | | 284 | #define MIPS_NOT_SUPP 0x8000 |
284 | | | 285 | |
285 | #endif /* !_LOCORE */ | | 286 | #endif /* !_LOCORE */ |
286 | | | 287 | |
287 | #if ((MIPS1 + MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) == 1) || defined(_LOCORE) | | 288 | #if ((MIPS1 + MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) == 1) || defined(_LOCORE) |
288 | | | 289 | |
289 | #if defined(MIPS1) | | 290 | #if defined(MIPS1) |
290 | | | 291 | |
291 | # define CPUISMIPS3 0 | | 292 | # define CPUISMIPS3 0 |
292 | # define CPUIS64BITS 0 | | 293 | # define CPUIS64BITS 0 |
293 | # define CPUISMIPS32 0 | | 294 | # define CPUISMIPS32 0 |
294 | # define CPUISMIPS32R2 0 | | 295 | # define CPUISMIPS32R2 0 |
295 | # define CPUISMIPS64 0 | | 296 | # define CPUISMIPS64 0 |
296 | # define CPUISMIPS64R2 0 | | 297 | # define CPUISMIPS64R2 0 |
297 | # define CPUISMIPSNN 0 | | 298 | # define CPUISMIPSNN 0 |
298 | # define MIPS_HAS_R4K_MMU 0 | | 299 | # define MIPS_HAS_R4K_MMU 0 |
299 | # define MIPS_HAS_CLOCK 0 | | 300 | # define MIPS_HAS_CLOCK 0 |
300 | # define MIPS_HAS_LLSC 0 | | 301 | # define MIPS_HAS_LLSC 0 |
301 | # define MIPS_HAS_LLADDR 0 | | 302 | # define MIPS_HAS_LLADDR 0 |
302 | | | 303 | |
303 | #elif defined(MIPS3) || defined(MIPS4) | | 304 | #elif defined(MIPS3) || defined(MIPS4) |
304 | | | 305 | |
305 | # define CPUISMIPS3 1 | | 306 | # define CPUISMIPS3 1 |
306 | # define CPUIS64BITS 1 | | 307 | # define CPUIS64BITS 1 |
307 | # define CPUISMIPS32 0 | | 308 | # define CPUISMIPS32 0 |
308 | # define CPUISMIPS32R2 0 | | 309 | # define CPUISMIPS32R2 0 |
309 | # define CPUISMIPS64 0 | | 310 | # define CPUISMIPS64 0 |
310 | # define CPUISMIPS64R2 0 | | 311 | # define CPUISMIPS64R2 0 |
311 | # define CPUISMIPSNN 0 | | 312 | # define CPUISMIPSNN 0 |
312 | # define MIPS_HAS_R4K_MMU 1 | | 313 | # define MIPS_HAS_R4K_MMU 1 |
313 | # define MIPS_HAS_CLOCK 1 | | 314 | # define MIPS_HAS_CLOCK 1 |
314 | # if defined(_LOCORE) | | 315 | # if defined(_LOCORE) |
315 | # if !defined(MIPS3_4100) | | 316 | # if !defined(MIPS3_4100) |
316 | # define MIPS_HAS_LLSC 1 | | 317 | # define MIPS_HAS_LLSC 1 |
317 | # else | | 318 | # else |
318 | # define MIPS_HAS_LLSC 0 | | 319 | # define MIPS_HAS_LLSC 0 |
319 | # endif | | 320 | # endif |
320 | # else /* _LOCORE */ | | 321 | # else /* _LOCORE */ |
321 | # define MIPS_HAS_LLSC (mips_options.mips_has_llsc) | | 322 | # define MIPS_HAS_LLSC (mips_options.mips_has_llsc) |
322 | # endif /* _LOCORE */ | | 323 | # endif /* _LOCORE */ |
323 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) | | 324 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) |
324 | | | 325 | |
325 | #elif defined(MIPS32) | | 326 | #elif defined(MIPS32) |
326 | | | 327 | |
327 | # define CPUISMIPS3 1 | | 328 | # define CPUISMIPS3 1 |
328 | # define CPUIS64BITS 0 | | 329 | # define CPUIS64BITS 0 |
329 | # define CPUISMIPS32 1 | | 330 | # define CPUISMIPS32 1 |
330 | # define CPUISMIPS32R2 0 | | 331 | # define CPUISMIPS32R2 0 |
331 | # define CPUISMIPS64 0 | | 332 | # define CPUISMIPS64 0 |
332 | # define CPUISMIPS64R2 0 | | 333 | # define CPUISMIPS64R2 0 |
333 | # define CPUISMIPSNN 1 | | 334 | # define CPUISMIPSNN 1 |
334 | # define MIPS_HAS_R4K_MMU 1 | | 335 | # define MIPS_HAS_R4K_MMU 1 |
335 | # define MIPS_HAS_CLOCK 1 | | 336 | # define MIPS_HAS_CLOCK 1 |
336 | # define MIPS_HAS_LLSC 1 | | 337 | # define MIPS_HAS_LLSC 1 |
337 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) | | 338 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) |
338 | | | 339 | |
339 | #elif defined(MIPS32R2) | | 340 | #elif defined(MIPS32R2) |
340 | | | 341 | |
341 | # define CPUISMIPS3 1 | | 342 | # define CPUISMIPS3 1 |
342 | # define CPUIS64BITS 0 | | 343 | # define CPUIS64BITS 0 |
343 | # define CPUISMIPS32 0 | | 344 | # define CPUISMIPS32 0 |
344 | # define CPUISMIPS32R2 1 | | 345 | # define CPUISMIPS32R2 1 |
345 | # define CPUISMIPS64 0 | | 346 | # define CPUISMIPS64 0 |
346 | # define CPUISMIPS64R2 0 | | 347 | # define CPUISMIPS64R2 0 |
347 | # define CPUISMIPSNN 1 | | 348 | # define CPUISMIPSNN 1 |
348 | # define MIPS_HAS_R4K_MMU 1 | | 349 | # define MIPS_HAS_R4K_MMU 1 |
349 | # define MIPS_HAS_CLOCK 1 | | 350 | # define MIPS_HAS_CLOCK 1 |
350 | # define MIPS_HAS_LLSC 1 | | 351 | # define MIPS_HAS_LLSC 1 |
351 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) | | 352 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) |
352 | | | 353 | |
353 | #elif defined(MIPS64) | | 354 | #elif defined(MIPS64) |
354 | | | 355 | |
355 | # define CPUISMIPS3 1 | | 356 | # define CPUISMIPS3 1 |
356 | # define CPUIS64BITS 1 | | 357 | # define CPUIS64BITS 1 |
357 | # define CPUISMIPS32 0 | | 358 | # define CPUISMIPS32 0 |
358 | # define CPUISMIPS32R2 0 | | 359 | # define CPUISMIPS32R2 0 |
359 | # define CPUISMIPS64 1 | | 360 | # define CPUISMIPS64 1 |
360 | # define CPUISMIPS64R2 0 | | 361 | # define CPUISMIPS64R2 0 |
361 | # define CPUISMIPSNN 1 | | 362 | # define CPUISMIPSNN 1 |
362 | # define MIPS_HAS_R4K_MMU 1 | | 363 | # define MIPS_HAS_R4K_MMU 1 |
363 | # define MIPS_HAS_CLOCK 1 | | 364 | # define MIPS_HAS_CLOCK 1 |
364 | # define MIPS_HAS_LLSC 1 | | 365 | # define MIPS_HAS_LLSC 1 |
365 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) | | 366 | # define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0) |
366 | | | 367 | |
367 | #elif defined(MIPS64R2) | | 368 | #elif defined(MIPS64R2) |
368 | | | 369 | |
369 | # define CPUISMIPS3 1 | | 370 | # define CPUISMIPS3 1 |
370 | # define CPUIS64BITS 1 | | 371 | # define CPUIS64BITS 1 |
371 | # define CPUISMIPS32 0 | | 372 | # define CPUISMIPS32 0 |
372 | # define CPUISMIPS32R2 0 | | 373 | # define CPUISMIPS32R2 0 |
373 | # define CPUISMIPS64 0 | | 374 | # define CPUISMIPS64 0 |
374 | # define CPUISMIPS64R2 1 | | 375 | # define CPUISMIPS64R2 1 |
375 | # define CPUISMIPSNN 1 | | 376 | # define CPUISMIPSNN 1 |
376 | # define MIPS_HAS_R4K_MMU 1 | | 377 | # define MIPS_HAS_R4K_MMU 1 |
377 | # define MIPS_HAS_CLOCK 1 | | 378 | # define MIPS_HAS_CLOCK 1 |
378 | # define MIPS_HAS_LLSC 1 | | 379 | # define MIPS_HAS_LLSC 1 |
# define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0)

#endif

#else /* run-time test */

#ifndef _LOCORE

/*
 * Multiple CPU classes are configured into this kernel, so the feature
 * tests cannot be folded to compile-time constants; consult the values
 * probed at boot and cached in mips_options instead.
 */
#define MIPS_HAS_R4K_MMU (mips_options.mips_has_r4k_mmu)
#define MIPS_HAS_LLSC (mips_options.mips_has_llsc)
#define MIPS_HAS_LLADDR ((mips_options.mips_cpu_flags & CPU_MIPS_NO_LLADDR) == 0)

/* This test is ... rather bogus */
#define CPUISMIPS3 ((mips_options.mips_cpu_arch & \
 (CPU_ARCH_MIPS3 | CPU_ARCH_MIPS4 | CPU_ARCH_MIPS32 | CPU_ARCH_MIPS64)) != 0)

/* And these aren't much better while the previous test exists as is... */
#define CPUISMIPS4 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS4) != 0)
#define CPUISMIPS5 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS5) != 0)
#define CPUISMIPS32 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS32) != 0)
#define CPUISMIPS32R2 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS32R2) != 0)
#define CPUISMIPS64 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS64) != 0)
#define CPUISMIPS64R2 ((mips_options.mips_cpu_arch & CPU_ARCH_MIPS64R2) != 0)
/* True for any of the MIPS32/MIPS64 (r1 or r2) architecture levels. */
#define CPUISMIPSNN ((mips_options.mips_cpu_arch & (CPU_ARCH_MIPS32 | CPU_ARCH_MIPS32R2 | CPU_ARCH_MIPS64 | CPU_ARCH_MIPS64R2)) != 0)
/* True for the architecture levels with 64-bit capable CPUs. */
#define CPUIS64BITS ((mips_options.mips_cpu_arch & \
 (CPU_ARCH_MIPS3 | CPU_ARCH_MIPS4 | CPU_ARCH_MIPS64 | CPU_ARCH_MIPS64R2)) != 0)

/* CP0 count/compare clock exists from the MIPS3 architecture level on. */
#define MIPS_HAS_CLOCK (mips_options.mips_cpu_arch >= CPU_ARCH_MIPS3)

#else /* !_LOCORE */

/*
 * Assembly (_LOCORE) sources cannot read mips_options at assembly time,
 * so LL/SC is pinned to 0 here — presumably the conservative choice;
 * confirm against the locore users of this macro.
 */
#define MIPS_HAS_LLSC 0

#endif /* !_LOCORE */

#endif /* run-time test */
415 | | | 416 | |
#ifndef _LOCORE

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */

/*
 * Send an inter-processor interrupt to each other CPU (excludes curcpu())
 */
void cpu_broadcast_ipi(int);

/*
 * Send an inter-processor interrupt to CPUs in cpuset (excludes curcpu())
 */
void cpu_multicast_ipi(__cpuset_t, int);

/*
 * Send an inter-processor interrupt to another CPU.
 */
int cpu_send_ipi(struct cpu_info *, int);

/*
 * cpu_intr(ppl, pc, status); (most state needed by clockframe)
 */
void cpu_intr(int, vaddr_t, uint32_t);

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
 vaddr_t pc; /* program counter at time of interrupt */
 uint32_t sr; /* status register at time of interrupt */
 bool intr; /* true if we interrupted an interrupt */
};
452 | | | 453 | |
453 | /* | | 454 | /* |
454 | * A port must provde CLKF_USERMODE() for use in machine-independent code. | | 455 | * A port must provde CLKF_USERMODE() for use in machine-independent code. |
455 | * These differ on r4000 and r3000 systems; provide them in the | | 456 | * These differ on r4000 and r3000 systems; provide them in the |
456 | * port-dependent file that includes this one, using the macros below. | | 457 | * port-dependent file that includes this one, using the macros below. |
457 | */ | | 458 | */ |
458 | | | 459 | |
459 | /* mips1 versions */ | | 460 | /* mips1 versions */ |
460 | #define MIPS1_CLKF_USERMODE(framep) ((framep)->sr & MIPS_SR_KU_PREV) | | 461 | #define MIPS1_CLKF_USERMODE(framep) ((framep)->sr & MIPS_SR_KU_PREV) |
461 | | | 462 | |
462 | /* mips3 versions */ | | 463 | /* mips3 versions */ |
463 | #define MIPS3_CLKF_USERMODE(framep) ((framep)->sr & MIPS_SR_KSU_USER) | | 464 | #define MIPS3_CLKF_USERMODE(framep) ((framep)->sr & MIPS_SR_KSU_USER) |
464 | | | 465 | |
465 | #define CLKF_PC(framep) ((framep)->pc) | | 466 | #define CLKF_PC(framep) ((framep)->pc) |
466 | #define CLKF_INTR(framep) ((framep)->intr) | | 467 | #define CLKF_INTR(framep) ((framep)->intr) |
467 | | | 468 | |
468 | #if defined(MIPS3_PLUS) && !defined(MIPS1) /* XXX bogus! */ | | 469 | #if defined(MIPS3_PLUS) && !defined(MIPS1) /* XXX bogus! */ |
469 | #define CLKF_USERMODE(framep) MIPS3_CLKF_USERMODE(framep) | | 470 | #define CLKF_USERMODE(framep) MIPS3_CLKF_USERMODE(framep) |
470 | #endif | | 471 | #endif |
471 | | | 472 | |
472 | #if !defined(MIPS3_PLUS) && defined(MIPS1) /* XXX bogus! */ | | 473 | #if !defined(MIPS3_PLUS) && defined(MIPS1) /* XXX bogus! */ |
473 | #define CLKF_USERMODE(framep) MIPS1_CLKF_USERMODE(framep) | | 474 | #define CLKF_USERMODE(framep) MIPS1_CLKF_USERMODE(framep) |
474 | #endif | | 475 | #endif |
475 | | | 476 | |
476 | #if defined(MIPS3_PLUS) && defined(MIPS1) /* XXX bogus! */ | | 477 | #if defined(MIPS3_PLUS) && defined(MIPS1) /* XXX bogus! */ |
477 | #define CLKF_USERMODE(framep) \ | | 478 | #define CLKF_USERMODE(framep) \ |
478 | ((CPUISMIPS3) ? MIPS3_CLKF_USERMODE(framep): MIPS1_CLKF_USERMODE(framep)) | | 479 | ((CPUISMIPS3) ? MIPS3_CLKF_USERMODE(framep): MIPS1_CLKF_USERMODE(framep)) |
479 | #endif | | 480 | #endif |
480 | | | 481 | |
481 | /* | | 482 | /* |
482 | * Misc prototypes and variable declarations. | | 483 | * Misc prototypes and variable declarations. |
483 | */ | | 484 | */ |
484 | #define LWP_PC(l) cpu_lwp_pc(l) | | 485 | #define LWP_PC(l) cpu_lwp_pc(l) |
485 | | | 486 | |
486 | struct proc; | | 487 | struct proc; |
487 | struct lwp; | | 488 | struct lwp; |
488 | struct pcb; | | 489 | struct pcb; |
489 | struct reg; | | 490 | struct reg; |
490 | | | 491 | |
491 | /* | | 492 | /* |
492 | * Preempt the current process if in interrupt from user mode, | | 493 | * Preempt the current process if in interrupt from user mode, |
493 | * or after the current trap/syscall if in system mode. | | 494 | * or after the current trap/syscall if in system mode. |
494 | */ | | 495 | */ |
495 | void cpu_need_resched(struct cpu_info *, int); | | 496 | void cpu_need_resched(struct cpu_info *, int); |
496 | /* | | 497 | /* |
497 | * Notify the current lwp (l) that it has a signal pending, | | 498 | * Notify the current lwp (l) that it has a signal pending, |
498 | * process as soon as possible. | | 499 | * process as soon as possible. |
499 | */ | | 500 | */ |
500 | void cpu_signotify(struct lwp *); | | 501 | void cpu_signotify(struct lwp *); |
501 | | | 502 | |
502 | /* | | 503 | /* |
503 | * Give a profiling tick to the current process when the user profiling | | 504 | * Give a profiling tick to the current process when the user profiling |
504 | * buffer pages are invalid. On the MIPS, request an ast to send us | | 505 | * buffer pages are invalid. On the MIPS, request an ast to send us |
505 | * through trap, marking the proc as needing a profiling tick. | | 506 | * through trap, marking the proc as needing a profiling tick. |
506 | */ | | 507 | */ |
507 | void cpu_need_proftick(struct lwp *); | | 508 | void cpu_need_proftick(struct lwp *); |
508 | void cpu_set_curpri(int); | | 509 | void cpu_set_curpri(int); |
509 | | | 510 | |
510 | extern int mips_poolpage_vmfreelist; /* freelist to allocate poolpages */ | | 511 | extern int mips_poolpage_vmfreelist; /* freelist to allocate poolpages */ |
511 | | | 512 | |
512 | struct cpu_info * | | 513 | struct cpu_info * |
513 | cpu_info_alloc(struct pmap_tlb_info *, cpuid_t, cpuid_t, cpuid_t, | | 514 | cpu_info_alloc(struct pmap_tlb_info *, cpuid_t, cpuid_t, cpuid_t, |
514 | cpuid_t); | | 515 | cpuid_t); |
515 | void cpu_attach_common(device_t, struct cpu_info *); | | 516 | void cpu_attach_common(device_t, struct cpu_info *); |
516 | void cpu_startup_common(void); | | 517 | void cpu_startup_common(void); |
517 | #ifdef _LP64 | | 518 | #ifdef _LP64 |
518 | void cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t); | | 519 | void cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t); |
519 | #endif | | 520 | #endif |
520 | | | 521 | |
521 | #ifdef MULTIPROCESSOR | | 522 | #ifdef MULTIPROCESSOR |
522 | void cpu_hatch(struct cpu_info *ci); | | 523 | void cpu_hatch(struct cpu_info *ci); |
523 | void cpu_trampoline(void); | | 524 | void cpu_trampoline(void); |
524 | void cpu_boot_secondary_processors(void); | | 525 | void cpu_boot_secondary_processors(void); |
525 | void cpu_halt(void); | | 526 | void cpu_halt(void); |
526 | void cpu_halt_others(void); | | 527 | void cpu_halt_others(void); |
527 | void cpu_pause(struct reg *); | | 528 | void cpu_pause(struct reg *); |
528 | void cpu_pause_others(void); | | 529 | void cpu_pause_others(void); |
529 | void cpu_resume(int); | | 530 | void cpu_resume(int); |
530 | void cpu_resume_others(void); | | 531 | void cpu_resume_others(void); |
531 | int cpu_is_paused(int); | | 532 | int cpu_is_paused(int); |
532 | void cpu_debug_dump(void); | | 533 | void cpu_debug_dump(void); |
533 | | | 534 | |
534 | extern volatile __cpuset_t cpus_running; | | 535 | extern volatile __cpuset_t cpus_running; |
535 | extern volatile __cpuset_t cpus_hatched; | | 536 | extern volatile __cpuset_t cpus_hatched; |
536 | extern volatile __cpuset_t cpus_paused; | | 537 | extern volatile __cpuset_t cpus_paused; |
537 | extern volatile __cpuset_t cpus_resumed; | | 538 | extern volatile __cpuset_t cpus_resumed; |
538 | extern volatile __cpuset_t cpus_halted; | | 539 | extern volatile __cpuset_t cpus_halted; |
539 | #endif | | 540 | #endif |
540 | | | 541 | |
/* copy.S */
/*
 * Fault-tolerant fetch/store primitives implemented in assembly.
 * The ufetch_* family reads a value of the named type from the given
 * address; the ustore_* family writes one and returns a status int.
 * The _intrsafe variants are presumably usable from interrupt context
 * — name-based inference; confirm against copy.S.
 */
int32_t kfetch_32(volatile uint32_t *, uint32_t);
int8_t ufetch_int8(void *);
int16_t ufetch_int16(void *);
int32_t ufetch_int32(void *);
uint8_t ufetch_uint8(void *);
uint16_t ufetch_uint16(void *);
uint32_t ufetch_uint32(void *);
int8_t ufetch_int8_intrsafe(void *);
int16_t ufetch_int16_intrsafe(void *);
int32_t ufetch_int32_intrsafe(void *);
uint8_t ufetch_uint8_intrsafe(void *);
uint16_t ufetch_uint16_intrsafe(void *);
uint32_t ufetch_uint32_intrsafe(void *);
#ifdef _LP64
int64_t ufetch_int64(void *);
uint64_t ufetch_uint64(void *);
int64_t ufetch_int64_intrsafe(void *);
uint64_t ufetch_uint64_intrsafe(void *);
#endif
char ufetch_char(void *);
short ufetch_short(void *);
int ufetch_int(void *);
long ufetch_long(void *);
char ufetch_char_intrsafe(void *);
short ufetch_short_intrsafe(void *);
int ufetch_int_intrsafe(void *);
long ufetch_long_intrsafe(void *);

u_char ufetch_uchar(void *);
u_short ufetch_ushort(void *);
u_int ufetch_uint(void *);
u_long ufetch_ulong(void *);
u_char ufetch_uchar_intrsafe(void *);
u_short ufetch_ushort_intrsafe(void *);
u_int ufetch_uint_intrsafe(void *);
u_long ufetch_ulong_intrsafe(void *);
void *ufetch_ptr(void *);

int ustore_int8(void *, int8_t);
int ustore_int16(void *, int16_t);
int ustore_int32(void *, int32_t);
int ustore_uint8(void *, uint8_t);
int ustore_uint16(void *, uint16_t);
int ustore_uint32(void *, uint32_t);
int ustore_int8_intrsafe(void *, int8_t);
int ustore_int16_intrsafe(void *, int16_t);
int ustore_int32_intrsafe(void *, int32_t);
int ustore_uint8_intrsafe(void *, uint8_t);
int ustore_uint16_intrsafe(void *, uint16_t);
int ustore_uint32_intrsafe(void *, uint32_t);
#ifdef _LP64
int ustore_int64(void *, int64_t);
int ustore_uint64(void *, uint64_t);
int ustore_int64_intrsafe(void *, int64_t);
int ustore_uint64_intrsafe(void *, uint64_t);
#endif
int ustore_char(void *, char);
int ustore_char_intrsafe(void *, char);
int ustore_short(void *, short);
int ustore_short_intrsafe(void *, short);
int ustore_int(void *, int);
int ustore_int_intrsafe(void *, int);
int ustore_long(void *, long);
int ustore_long_intrsafe(void *, long);
int ustore_uchar(void *, u_char);
int ustore_uchar_intrsafe(void *, u_char);
int ustore_ushort(void *, u_short);
int ustore_ushort_intrsafe(void *, u_short);
int ustore_uint(void *, u_int);
int ustore_uint_intrsafe(void *, u_int);
int ustore_ulong(void *, u_long);
int ustore_ulong_intrsafe(void *, u_long);
int ustore_ptr(void *, void *);
int ustore_ptr_intrsafe(void *, void *);

/* 32-bit store followed by an instruction-stream sync (see copy.S). */
int ustore_uint32_isync(void *, uint32_t);
618 | | | 619 | |
/* trap.c */
void netintr(void);
int kdbpeek(vaddr_t);

/* mips_fpu.c */
/* Lazy FPU context management for the current lwp. */
void fpu_init(void);
void fpu_discard(void);
void fpu_load(void);
void fpu_save(void);
bool fpu_used_p(void);

/* mips_machdep.c */
void dumpsys(void);
int savectx(struct pcb *);
void cpu_identify(device_t);

/* locore*.S */
/* Probe an address for bus errors; badaddr64 takes a 64-bit address. */
int badaddr(void *, size_t);
int badaddr64(uint64_t, size_t);

/* vm_machdep.c */
void * cpu_uarea_alloc(bool);
bool cpu_uarea_free(void *);
void cpu_proc_fork(struct proc *, struct proc *);
vaddr_t cpu_lwp_pc(struct lwp *);
int ioaccess(vaddr_t, paddr_t, vsize_t);
int iounaccess(vaddr_t, vsize_t);

#endif /* ! _LOCORE */
#endif /* _KERNEL */
#endif /* _CPU_H_ */